Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes. See the raw diff for the full changeset.
- .github/workflows/update_space.yml +28 -0
- .gitignore +48 -0
- LICENSE +21 -0
- README.md +81 -8
- example_for_mac.py +28 -0
- example_tts.py +19 -0
- example_vc.py +6 -0
- gradio_tts_app.py +84 -0
- gradio_vc_app.py +27 -0
- pyproject.toml +35 -0
- src/chatterbox/__init__.py +2 -0
- src/chatterbox/models/s3gen/__init__.py +2 -0
- src/chatterbox/models/s3gen/const.py +1 -0
- src/chatterbox/models/s3gen/decoder.py +317 -0
- src/chatterbox/models/s3gen/f0_predictor.py +55 -0
- src/chatterbox/models/s3gen/flow.py +242 -0
- src/chatterbox/models/s3gen/flow_matching.py +228 -0
- src/chatterbox/models/s3gen/hifigan.py +474 -0
- src/chatterbox/models/s3gen/matcha/decoder.py +443 -0
- src/chatterbox/models/s3gen/matcha/flow_matching.py +129 -0
- src/chatterbox/models/s3gen/matcha/text_encoder.py +413 -0
- src/chatterbox/models/s3gen/matcha/transformer.py +316 -0
- src/chatterbox/models/s3gen/s3gen.py +305 -0
- src/chatterbox/models/s3gen/transformer/__init__.py +0 -0
- src/chatterbox/models/s3gen/transformer/activation.py +84 -0
- src/chatterbox/models/s3gen/transformer/attention.py +330 -0
- src/chatterbox/models/s3gen/transformer/convolution.py +145 -0
- src/chatterbox/models/s3gen/transformer/embedding.py +294 -0
- src/chatterbox/models/s3gen/transformer/encoder_layer.py +236 -0
- src/chatterbox/models/s3gen/transformer/positionwise_feed_forward.py +115 -0
- src/chatterbox/models/s3gen/transformer/subsampling.py +383 -0
- src/chatterbox/models/s3gen/transformer/upsample_encoder.py +318 -0
- src/chatterbox/models/s3gen/utils/class_utils.py +71 -0
- src/chatterbox/models/s3gen/utils/mask.py +193 -0
- src/chatterbox/models/s3gen/utils/mel.py +81 -0
- src/chatterbox/models/s3gen/xvector.py +428 -0
- src/chatterbox/models/s3tokenizer/__init__.py +30 -0
- src/chatterbox/models/s3tokenizer/s3tokenizer.py +168 -0
- src/chatterbox/models/t3/__init__.py +1 -0
- src/chatterbox/models/t3/inference/alignment_stream_analyzer.py +154 -0
- src/chatterbox/models/t3/inference/t3_hf_backend.py +116 -0
- src/chatterbox/models/t3/llama_configs.py +37 -0
- src/chatterbox/models/t3/modules/cond_enc.py +97 -0
- src/chatterbox/models/t3/modules/learned_pos_emb.py +32 -0
- src/chatterbox/models/t3/modules/perceiver.py +212 -0
- src/chatterbox/models/t3/modules/t3_config.py +27 -0
- src/chatterbox/models/t3/t3.py +381 -0
- src/chatterbox/models/tokenizers/__init__.py +1 -0
- src/chatterbox/models/tokenizers/tokenizer.py +50 -0
- src/chatterbox/models/voice_encoder/__init__.py +1 -0
.github/workflows/update_space.yml
ADDED
@@ -0,0 +1,28 @@
name: Run Python script

on:
  push:
    branches:
      - main

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v2

      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.9'

      - name: Install Gradio
        run: python -m pip install gradio

      - name: Log in to Hugging Face
        run: python -c 'import huggingface_hub; huggingface_hub.login(token="${{ secrets.hf_token }}")'

      - name: Deploy to Spaces
        run: gradio deploy
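The login step above calls `huggingface_hub.login` with the `hf_token` repository secret. To sanity-check a token locally before trusting the workflow, a sketch along these lines should work (reading the token from an `HF_TOKEN` environment variable is an assumption for illustration, not part of this repo):

```python
import os
import huggingface_hub

# Same call the workflow runs, but the token comes from an environment
# variable instead of a GitHub Actions secret (hypothetical local setup).
huggingface_hub.login(token=os.environ["HF_TOKEN"])
print(huggingface_hub.whoami()["name"])  # prints the account the token belongs to
```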
.gitignore
ADDED
@@ -0,0 +1,48 @@
.vscode

# Pylance
pyrightconfig.json

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

syn_out/
checkpoints/
.gradio

# Ignore generated sample .wav files
**/*.wav
LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 Resemble AI

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
README.md
CHANGED
@@ -1,12 +1,85 @@
 ---
-title:
-
-colorFrom: gray
-colorTo: indigo
+title: chatterbox_tts
+app_file: gradio_tts_app.py
 sdk: gradio
-sdk_version:
-app_file: app.py
-pinned: false
+sdk_version: 3.41.2
 ---
 
-
+<img width="1200" alt="cb-big2" src="https://github.com/user-attachments/assets/bd8c5f03-e91d-4ee5-b680-57355da204d1" />
+
+# Chatterbox TTS
+
+[](https://resemble-ai.github.io/chatterbox_demopage/)
+[](https://huggingface.co/spaces/ResembleAI/Chatterbox)
+[](https://podonos.com/resembleai/chatterbox)
+[](https://discord.gg/XqS7RxUp)
+
+_Made with ♥️ by <a href="https://resemble.ai" target="_blank"><img width="100" alt="resemble-logo-horizontal" src="https://github.com/user-attachments/assets/35cf756b-3506-4943-9c72-c05ddfa4e525" /></a>_
+
+We're excited to introduce Chatterbox, [Resemble AI's](https://resemble.ai) first production-grade open source TTS model. Licensed under MIT, Chatterbox has been benchmarked against leading closed-source systems like ElevenLabs, and is consistently preferred in side-by-side evaluations.
+
+Whether you're working on memes, videos, games, or AI agents, Chatterbox brings your content to life. It's also the first open source TTS model to support **emotion exaggeration control**, a powerful feature that makes your voices stand out. Try it now on our [Hugging Face Gradio app](https://huggingface.co/spaces/ResembleAI/Chatterbox).
+
+If you like the model but need to scale or tune it for higher accuracy, check out our competitively priced TTS service (<a href="https://resemble.ai">link</a>). It delivers reliable performance with ultra-low latency of sub 200ms—ideal for production use in agents, applications, or interactive media.
+
+# Key Details
+- SoTA zero-shot TTS
+- 0.5B Llama backbone
+- Unique exaggeration/intensity control
+- Ultra-stable with alignment-informed inference
+- Trained on 0.5M hours of cleaned data
+- Watermarked outputs
+- Easy voice conversion script
+- [Outperforms ElevenLabs](https://podonos.com/resembleai/chatterbox)
+
+# Tips
+- **General Use (TTS and Voice Agents):**
+  - The default settings (`exaggeration=0.5`, `cfg_weight=0.5`) work well for most prompts.
+  - If the reference speaker has a fast speaking style, lowering `cfg_weight` to around `0.3` can improve pacing.
+
+- **Expressive or Dramatic Speech:**
+  - Try lower `cfg_weight` values (e.g. `~0.3`) and increase `exaggeration` to around `0.7` or higher.
+  - Higher `exaggeration` tends to speed up speech; reducing `cfg_weight` helps compensate with slower, more deliberate pacing.
+
+
+# Installation
+```
+pip install chatterbox-tts
+```
+
+
+# Usage
+```python
+import torchaudio as ta
+from chatterbox.tts import ChatterboxTTS
+
+model = ChatterboxTTS.from_pretrained(device="cuda")
+
+text = "Ezreal and Jinx teamed up with Ahri, Yasuo, and Teemo to take down the enemy's Nexus in an epic late-game pentakill."
+wav = model.generate(text)
+ta.save("test-1.wav", wav, model.sr)
+
+# If you want to synthesize with a different voice, specify the audio prompt
+AUDIO_PROMPT_PATH = "YOUR_FILE.wav"
+wav = model.generate(text, audio_prompt_path=AUDIO_PROMPT_PATH)
+ta.save("test-2.wav", wav, model.sr)
+```
+See `example_tts.py` for more examples.
+
+# Acknowledgements
+- [CosyVoice](https://github.com/FunAudioLLM/CosyVoice)
+- [Real-Time-Voice-Cloning](https://github.com/CorentinJ/Real-Time-Voice-Cloning)
+- [HiFT-GAN](https://github.com/yl4579/HiFTNet)
+- [Llama 3](https://github.com/meta-llama/llama3)
+- [S3Tokenizer](https://github.com/xingchensong/S3Tokenizer)
+
+# Built-in PerTh Watermarking for Responsible AI
+
+Every audio file generated by Chatterbox includes [Resemble AI's Perth (Perceptual Threshold) Watermarker](https://github.com/resemble-ai/perth) - imperceptible neural watermarks that survive MP3 compression, audio editing, and common manipulations while maintaining nearly 100% detection accuracy.
+
+# Official Discord
+
+👋 Join us on [Discord](https://discord.gg/XqS7RxUp) and let's build something awesome together!
+
+# Disclaimer
+Don't use this model to do bad things. Prompts are sourced from freely available data on the internet.
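Tying the README's Tips and Usage sections together, here is a minimal sketch of the expressive-speech settings it recommends (same `ChatterboxTTS` API as the Usage example; the input sentence and output filename are placeholders):

```python
import torchaudio as ta
from chatterbox.tts import ChatterboxTTS

model = ChatterboxTTS.from_pretrained(device="cuda")

# Expressive/dramatic speech per the README tips: lower cfg_weight (~0.3)
# slows the pacing, higher exaggeration (~0.7) adds intensity.
wav = model.generate(
    "You have got to be kidding me. That was incredible!",  # placeholder text
    exaggeration=0.7,
    cfg_weight=0.3,
)
ta.save("expressive.wav", wav, model.sr)
```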
example_for_mac.py
ADDED
@@ -0,0 +1,28 @@
import torch
import torchaudio as ta
from chatterbox.tts import ChatterboxTTS

# Detect device (Mac with M1/M2/M3/M4)
device = "mps" if torch.backends.mps.is_available() else "cpu"
map_location = torch.device(device)

torch_load_original = torch.load
def patched_torch_load(*args, **kwargs):
    if 'map_location' not in kwargs:
        kwargs['map_location'] = map_location
    return torch_load_original(*args, **kwargs)

torch.load = patched_torch_load

model = ChatterboxTTS.from_pretrained(device=device)
text = "Today is the day. I want to move like a titan at dawn, sweat like a god forging lightning. No more excuses. From now on, my mornings will be temples of discipline. I am going to work out like the gods… every damn day."

# If you want to synthesize with a different voice, specify the audio prompt
AUDIO_PROMPT_PATH = "YOUR_FILE.wav"
wav = model.generate(
    text,
    audio_prompt_path=AUDIO_PROMPT_PATH,
    exaggeration=2.0,
    cfg_weight=0.5
)
ta.save("test-2.wav", wav, model.sr)
example_tts.py
ADDED
@@ -0,0 +1,19 @@
import torchaudio as ta
import torch
from chatterbox.tts import ChatterboxTTS

# Automatically detect the best available device
if torch.cuda.is_available():
    device = "cuda"
elif torch.backends.mps.is_available():
    device = "mps"
else:
    device = "cpu"

print(f"Using device: {device}")

model = ChatterboxTTS.from_pretrained(device=device)

text = "Ezreal and Jinx teamed up with Ahri, Yasuo, and Teemo to take down the enemy's Nexus in an epic late-game pentakill."
wav = model.generate(text)
ta.save("test-1.wav", wav, model.sr)
example_vc.py
ADDED
@@ -0,0 +1,6 @@
import torchaudio as ta
from chatterbox.vc import ChatterboxVC

model = ChatterboxVC.from_pretrained("cuda")
wav = model.generate("tests/trimmed_8b7f38b1.wav")
ta.save("testvc.wav", wav, model.sr)
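`example_vc.py` converts the input to the model's built-in default voice. The `gradio_vc_app.py` below passes a `target_voice_path` to `generate`, so converting toward a specific reference voice should look roughly like this sketch (both `.wav` paths are placeholders):

```python
import torchaudio as ta
from chatterbox.vc import ChatterboxVC

model = ChatterboxVC.from_pretrained("cuda")
# target_voice_path is taken from the generate() call in gradio_vc_app.py;
# the input and target file names here are hypothetical.
wav = model.generate("input_speech.wav", target_voice_path="target_voice.wav")
ta.save("converted.wav", wav, model.sr)
```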
gradio_tts_app.py
ADDED
@@ -0,0 +1,84 @@
import random
import numpy as np
import torch
import gradio as gr
from chatterbox.tts import ChatterboxTTS


DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def set_seed(seed: int):
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    random.seed(seed)
    np.random.seed(seed)


def load_model():
    model = ChatterboxTTS.from_pretrained(DEVICE)
    return model


def generate(model, text, audio_prompt_path, exaggeration, temperature, seed_num, cfgw):
    if model is None:
        model = ChatterboxTTS.from_pretrained(DEVICE)

    if seed_num != 0:
        set_seed(int(seed_num))

    wav = model.generate(
        text,
        audio_prompt_path=audio_prompt_path,
        exaggeration=exaggeration,
        temperature=temperature,
        cfg_weight=cfgw,
    )
    return (model.sr, wav.squeeze(0).numpy())


with gr.Blocks() as demo:
    model_state = gr.State(None)  # Loaded once per session/user

    with gr.Row():
        with gr.Column():
            text = gr.Textbox(
                value="Now let's make my mum's favourite. So three mars bars into the pan. Then we add the tuna and just stir for a bit, just let the chocolate and fish infuse. A sprinkle of olive oil and some tomato ketchup. Now smell that. Oh boy this is going to be incredible.",
                label="Text to synthesize (max chars 300)",
                max_lines=5
            )
            ref_wav = gr.Audio(sources=["upload", "microphone"], type="filepath", label="Reference Audio File", value=None)
            exaggeration = gr.Slider(0.25, 2, step=.05, label="Exaggeration (Neutral = 0.5, extreme values can be unstable)", value=.5)
            cfg_weight = gr.Slider(0.0, 1, step=.05, label="CFG/Pace", value=0.5)

            with gr.Accordion("More options", open=False):
                seed_num = gr.Number(value=0, label="Random seed (0 for random)")
                temp = gr.Slider(0.05, 5, step=.05, label="temperature", value=.8)

            run_btn = gr.Button("Generate", variant="primary")

        with gr.Column():
            audio_output = gr.Audio(label="Output Audio")

    demo.load(fn=load_model, inputs=[], outputs=model_state)

    run_btn.click(
        fn=generate,
        inputs=[
            model_state,
            text,
            ref_wav,
            exaggeration,
            temp,
            seed_num,
            cfg_weight,
        ],
        outputs=audio_output,
    )

if __name__ == "__main__":
    demo.queue(
        max_size=50,
        default_concurrency_limit=1,
    ).launch(share=True)
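A note on the seeding logic above: `set_seed` is only called when the user enters a nonzero seed, so the default of 0 keeps generation stochastic. A minimal sketch of reusing the same pattern outside Gradio for reproducible output (the text is a placeholder, and exact bit-for-bit equality can still be broken by nondeterministic GPU kernels):

```python
import random
import numpy as np
import torch
from chatterbox.tts import ChatterboxTTS

def set_seed(seed: int):
    # Same seeding as gradio_tts_app.py, guarded for CPU-only machines.
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    random.seed(seed)
    np.random.seed(seed)

model = ChatterboxTTS.from_pretrained(device="cuda" if torch.cuda.is_available() else "cpu")

set_seed(42)
wav_a = model.generate("Reproducibility check.")
set_seed(42)
wav_b = model.generate("Reproducibility check.")
print(torch.equal(wav_a, wav_b))  # expected True, modulo nondeterministic kernels
```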
gradio_vc_app.py
ADDED
@@ -0,0 +1,27 @@
import torch
import gradio as gr
from chatterbox.vc import ChatterboxVC


DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


model = ChatterboxVC.from_pretrained(DEVICE)
def generate(audio, target_voice_path):
    wav = model.generate(
        audio, target_voice_path=target_voice_path,
    )
    return model.sr, wav.squeeze(0).numpy()


demo = gr.Interface(
    generate,
    [
        gr.Audio(sources=["upload", "microphone"], type="filepath", label="Input audio file"),
        gr.Audio(sources=["upload", "microphone"], type="filepath", label="Target voice audio file (if none, the default voice is used)", value=None),
    ],
    "audio",
)

if __name__ == "__main__":
    demo.launch()
pyproject.toml
ADDED
@@ -0,0 +1,35 @@
[project]
name = "chatterbox-tts"
version = "0.1.2"
description = "Chatterbox: Open Source TTS and Voice Conversion by Resemble AI"
readme = "README.md"
requires-python = ">=3.8"
license = {file = "LICENSE"}
authors = [
    {name = "resemble-ai", email = "[email protected]"}
]
dependencies = [
    "numpy~=1.26.0",
    "resampy==0.4.3",
    "librosa==0.11.0",
    "s3tokenizer",
    "torch==2.6.0",
    "torchaudio==2.6.0",
    "transformers==4.46.3",
    "diffusers==0.29.0",
    "resemble-perth==1.0.1",
    "omegaconf==2.3.0",
    "conformer==0.3.2",
    "safetensors==0.5.3"
]

[project.urls]
Homepage = "https://github.com/resemble-ai/chatterbox"
Repository = "https://github.com/resemble-ai/chatterbox"

[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[tool.setuptools.packages.find]
where = ["src"]
src/chatterbox/__init__.py
ADDED
@@ -0,0 +1,2 @@
from .tts import ChatterboxTTS
from .vc import ChatterboxVC
src/chatterbox/models/s3gen/__init__.py
ADDED
@@ -0,0 +1,2 @@
from .s3gen import S3Token2Wav as S3Gen
from .const import S3GEN_SR
src/chatterbox/models/s3gen/const.py
ADDED
@@ -0,0 +1 @@
S3GEN_SR = 24000
src/chatterbox/models/s3gen/decoder.py
ADDED
@@ -0,0 +1,317 @@
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import pack, rearrange, repeat

from .utils.mask import add_optional_chunk_mask
from .matcha.decoder import SinusoidalPosEmb, Block1D, ResnetBlock1D, Downsample1D, \
    TimestepEmbedding, Upsample1D
from .matcha.transformer import BasicTransformerBlock


def mask_to_bias(mask: torch.Tensor, dtype: torch.dtype) -> torch.Tensor:
    assert mask.dtype == torch.bool
    assert dtype in [torch.float32, torch.bfloat16, torch.float16]
    mask = mask.to(dtype)
    # attention mask bias
    # NOTE(Mddct): torch.finfo jit issues
    #     chunk_masks = (1.0 - chunk_masks) * torch.finfo(dtype).min
    mask = (1.0 - mask) * -1.0e+10
    return mask


class Transpose(torch.nn.Module):
    def __init__(self, dim0: int, dim1: int):
        super().__init__()
        self.dim0 = dim0
        self.dim1 = dim1

    def forward(self, x: torch.Tensor):
        x = torch.transpose(x, self.dim0, self.dim1)
        return x


class CausalBlock1D(Block1D):
    def __init__(self, dim: int, dim_out: int):
        super(CausalBlock1D, self).__init__(dim, dim_out)
        self.block = torch.nn.Sequential(
            CausalConv1d(dim, dim_out, 3),
            Transpose(1, 2),
            nn.LayerNorm(dim_out),
            Transpose(1, 2),
            nn.Mish(),
        )

    def forward(self, x: torch.Tensor, mask: torch.Tensor):
        output = self.block(x * mask)
        return output * mask


class CausalResnetBlock1D(ResnetBlock1D):
    def __init__(self, dim: int, dim_out: int, time_emb_dim: int, groups: int = 8):
        super(CausalResnetBlock1D, self).__init__(dim, dim_out, time_emb_dim, groups)
        self.block1 = CausalBlock1D(dim, dim_out)
        self.block2 = CausalBlock1D(dim_out, dim_out)


class CausalConv1d(torch.nn.Conv1d):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        dilation: int = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros',
        device=None,
        dtype=None
    ) -> None:
        super(CausalConv1d, self).__init__(in_channels, out_channels,
                                           kernel_size, stride,
                                           padding=0, dilation=dilation,
                                           groups=groups, bias=bias,
                                           padding_mode=padding_mode,
                                           device=device, dtype=dtype)
        assert stride == 1
        self.causal_padding = (kernel_size - 1, 0)

    def forward(self, x: torch.Tensor):
        x = F.pad(x, self.causal_padding)
        x = super(CausalConv1d, self).forward(x)
        return x


class ConditionalDecoder(nn.Module):
    def __init__(
        self,
        in_channels=320,
        out_channels=80,
        causal=True,
        channels=[256],
        dropout=0.0,
        attention_head_dim=64,
        n_blocks=4,
        num_mid_blocks=12,
        num_heads=8,
        act_fn="gelu",
    ):
        """
        This decoder requires an input with the same shape as the target. So, if your text content
        is shorter or longer than the outputs, please re-sample it before feeding it to the decoder.
        """
        super().__init__()
        channels = tuple(channels)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.causal = causal
        self.time_embeddings = SinusoidalPosEmb(in_channels)
        time_embed_dim = channels[0] * 4
        self.time_mlp = TimestepEmbedding(
            in_channels=in_channels,
            time_embed_dim=time_embed_dim,
            act_fn="silu",
        )
        self.down_blocks = nn.ModuleList([])
        self.mid_blocks = nn.ModuleList([])
        self.up_blocks = nn.ModuleList([])

        # NOTE jrm: `static_chunk_size` is missing?
        self.static_chunk_size = 0

        output_channel = in_channels
        for i in range(len(channels)):  # pylint: disable=consider-using-enumerate
            input_channel = output_channel
            output_channel = channels[i]
            is_last = i == len(channels) - 1
            resnet = CausalResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim) if self.causal else \
                ResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim)
            transformer_blocks = nn.ModuleList(
                [
                    BasicTransformerBlock(
                        dim=output_channel,
                        num_attention_heads=num_heads,
                        attention_head_dim=attention_head_dim,
                        dropout=dropout,
                        activation_fn=act_fn,
                    )
                    for _ in range(n_blocks)
                ]
            )
            downsample = (
                Downsample1D(output_channel) if not is_last else
                CausalConv1d(output_channel, output_channel, 3) if self.causal else nn.Conv1d(output_channel, output_channel, 3, padding=1)
            )
            self.down_blocks.append(nn.ModuleList([resnet, transformer_blocks, downsample]))

        for _ in range(num_mid_blocks):
            input_channel = channels[-1]
            out_channels = channels[-1]
            resnet = CausalResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim) if self.causal else \
                ResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim)

            transformer_blocks = nn.ModuleList(
                [
                    BasicTransformerBlock(
                        dim=output_channel,
                        num_attention_heads=num_heads,
                        attention_head_dim=attention_head_dim,
                        dropout=dropout,
                        activation_fn=act_fn,
                    )
                    for _ in range(n_blocks)
                ]
            )

            self.mid_blocks.append(nn.ModuleList([resnet, transformer_blocks]))

        channels = channels[::-1] + (channels[0],)
        for i in range(len(channels) - 1):
            input_channel = channels[i] * 2
            output_channel = channels[i + 1]
            is_last = i == len(channels) - 2
            resnet = CausalResnetBlock1D(
                dim=input_channel,
                dim_out=output_channel,
                time_emb_dim=time_embed_dim,
            ) if self.causal else ResnetBlock1D(
                dim=input_channel,
                dim_out=output_channel,
                time_emb_dim=time_embed_dim,
            )
            transformer_blocks = nn.ModuleList(
                [
                    BasicTransformerBlock(
                        dim=output_channel,
                        num_attention_heads=num_heads,
                        attention_head_dim=attention_head_dim,
                        dropout=dropout,
                        activation_fn=act_fn,
                    )
                    for _ in range(n_blocks)
                ]
            )
            upsample = (
                Upsample1D(output_channel, use_conv_transpose=True)
                if not is_last
                else CausalConv1d(output_channel, output_channel, 3) if self.causal else nn.Conv1d(output_channel, output_channel, 3, padding=1)
            )
            self.up_blocks.append(nn.ModuleList([resnet, transformer_blocks, upsample]))
        self.final_block = CausalBlock1D(channels[-1], channels[-1]) if self.causal else Block1D(channels[-1], channels[-1])
        self.final_proj = nn.Conv1d(channels[-1], self.out_channels, 1)
        self.initialize_weights()

    def initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, nonlinearity="relu")
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.GroupNorm):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, nonlinearity="relu")
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def forward(self, x, mask, mu, t, spks=None, cond=None):
        """Forward pass of the UNet1DConditional model.

        Args:
            x (torch.Tensor): shape (batch_size, in_channels, time)
            mask (_type_): shape (batch_size, 1, time)
            t (_type_): shape (batch_size)
            spks (_type_, optional): shape: (batch_size, condition_channels). Defaults to None.
            cond (_type_, optional): placeholder for future use. Defaults to None.

        Raises:
            ValueError: _description_
            ValueError: _description_

        Returns:
            _type_: _description_
        """

        t = self.time_embeddings(t).to(t.dtype)
        t = self.time_mlp(t)

        x = pack([x, mu], "b * t")[0]

        if spks is not None:
            spks = repeat(spks, "b c -> b c t", t=x.shape[-1])
            x = pack([x, spks], "b * t")[0]
        if cond is not None:
            x = pack([x, cond], "b * t")[0]

        hiddens = []
        masks = [mask]
        for resnet, transformer_blocks, downsample in self.down_blocks:
            mask_down = masks[-1]
            x = resnet(x, mask_down, t)
            x = rearrange(x, "b c t -> b t c").contiguous()
            # attn_mask = torch.matmul(mask_down.transpose(1, 2).contiguous(), mask_down)
            attn_mask = add_optional_chunk_mask(x, mask_down.bool(), False, False, 0, self.static_chunk_size, -1)
            attn_mask = mask_to_bias(attn_mask == 1, x.dtype)
            for transformer_block in transformer_blocks:
                x = transformer_block(
                    hidden_states=x,
                    attention_mask=attn_mask,
                    timestep=t,
                )
            x = rearrange(x, "b t c -> b c t").contiguous()
            hiddens.append(x)  # Save hidden states for skip connections
            x = downsample(x * mask_down)
            masks.append(mask_down[:, :, ::2])
        masks = masks[:-1]
        mask_mid = masks[-1]

        for resnet, transformer_blocks in self.mid_blocks:
            x = resnet(x, mask_mid, t)
            x = rearrange(x, "b c t -> b t c").contiguous()
            # attn_mask = torch.matmul(mask_mid.transpose(1, 2).contiguous(), mask_mid)
            attn_mask = add_optional_chunk_mask(x, mask_mid.bool(), False, False, 0, self.static_chunk_size, -1)
            attn_mask = mask_to_bias(attn_mask == 1, x.dtype)
            for transformer_block in transformer_blocks:
                x = transformer_block(
                    hidden_states=x,
                    attention_mask=attn_mask,
                    timestep=t,
                )
            x = rearrange(x, "b t c -> b c t").contiguous()

        for resnet, transformer_blocks, upsample in self.up_blocks:
            mask_up = masks.pop()
            skip = hiddens.pop()
            x = pack([x[:, :, :skip.shape[-1]], skip], "b * t")[0]
            x = resnet(x, mask_up, t)
            x = rearrange(x, "b c t -> b t c").contiguous()
            # attn_mask = torch.matmul(mask_up.transpose(1, 2).contiguous(), mask_up)
            attn_mask = add_optional_chunk_mask(x, mask_up.bool(), False, False, 0, self.static_chunk_size, -1)
            attn_mask = mask_to_bias(attn_mask == 1, x.dtype)
            for transformer_block in transformer_blocks:
                x = transformer_block(
                    hidden_states=x,
                    attention_mask=attn_mask,
                    timestep=t,
                )
            x = rearrange(x, "b t c -> b c t").contiguous()
            x = upsample(x * mask_up)
        x = self.final_block(x, mask_up)
        output = self.final_proj(x * mask_up)
        return output * mask
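The `CausalConv1d` in decoder.py replaces symmetric padding with a left pad of `kernel_size - 1` zeros, so each output frame sees only current and past inputs. A quick standalone check of that property (plain torch, mirroring the class above):

```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
conv = torch.nn.Conv1d(1, 1, kernel_size=3, padding=0, bias=False)

x = torch.randn(1, 1, 10)
y_full = conv(F.pad(x, (2, 0)))  # left-pad by kernel_size - 1, as in CausalConv1d

# Perturb the "future": change only the last input frame.
x2 = x.clone()
x2[..., -1] += 100.0
y_pert = conv(F.pad(x2, (2, 0)))

# Every output frame except the final one is unchanged -> no lookahead.
print(torch.allclose(y_full[..., :-1], y_pert[..., :-1]))  # True
```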
src/chatterbox/models/s3gen/f0_predictor.py
ADDED
@@ -0,0 +1,55 @@
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Kai Hu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from torch.nn.utils.parametrizations import weight_norm


class ConvRNNF0Predictor(nn.Module):
    def __init__(self,
                 num_class: int = 1,
                 in_channels: int = 80,
                 cond_channels: int = 512
                 ):
        super().__init__()

        self.num_class = num_class
        self.condnet = nn.Sequential(
            weight_norm(
                nn.Conv1d(in_channels, cond_channels, kernel_size=3, padding=1)
            ),
            nn.ELU(),
            weight_norm(
                nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1)
            ),
            nn.ELU(),
            weight_norm(
                nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1)
            ),
            nn.ELU(),
            weight_norm(
                nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1)
            ),
            nn.ELU(),
            weight_norm(
                nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1)
            ),
            nn.ELU(),
        )
        self.classifier = nn.Linear(in_features=cond_channels, out_features=self.num_class)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.condnet(x)
        x = x.transpose(1, 2)
        return torch.abs(self.classifier(x).squeeze(-1))
src/chatterbox/models/s3gen/flow.py
ADDED
@@ -0,0 +1,242 @@
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
from typing import Dict, Optional
import torch
import torch.nn as nn
from torch.nn import functional as F
from omegaconf import DictConfig
from .utils.mask import make_pad_mask


class MaskedDiffWithXvec(torch.nn.Module):
    def __init__(self,
                 input_size: int = 512,
                 output_size: int = 80,
                 spk_embed_dim: int = 192,
                 output_type: str = "mel",
                 vocab_size: int = 4096,
                 input_frame_rate: int = 50,
                 only_mask_loss: bool = True,
                 encoder: torch.nn.Module = None,
                 length_regulator: torch.nn.Module = None,
                 decoder: torch.nn.Module = None,
                 decoder_conf: Dict = {'in_channels': 240, 'out_channel': 80, 'spk_emb_dim': 80, 'n_spks': 1,
                                       'cfm_params': DictConfig({'sigma_min': 1e-06, 'solver': 'euler', 't_scheduler': 'cosine',
                                                                 'training_cfg_rate': 0.2, 'inference_cfg_rate': 0.7, 'reg_loss_type': 'l1'}),
                                       'decoder_params': {'channels': [256, 256], 'dropout': 0.0, 'attention_head_dim': 64,
                                                          'n_blocks': 4, 'num_mid_blocks': 12, 'num_heads': 8, 'act_fn': 'gelu'}},
                 mel_feat_conf: Dict = {'n_fft': 1024, 'num_mels': 80, 'sampling_rate': 22050,
                                        'hop_size': 256, 'win_size': 1024, 'fmin': 0, 'fmax': 8000}):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.decoder_conf = decoder_conf
        self.mel_feat_conf = mel_feat_conf
        self.vocab_size = vocab_size
        self.output_type = output_type
        self.input_frame_rate = input_frame_rate
        logging.info(f"input frame rate={self.input_frame_rate}")
        self.input_embedding = nn.Embedding(vocab_size, input_size)
        self.spk_embed_affine_layer = torch.nn.Linear(spk_embed_dim, output_size)
        self.encoder = encoder
        self.encoder_proj = torch.nn.Linear(self.encoder.output_size(), output_size)
        self.decoder = decoder
        self.length_regulator = length_regulator
        self.only_mask_loss = only_mask_loss

    def forward(
            self,
            batch: dict,
            device: torch.device,
    ) -> Dict[str, Optional[torch.Tensor]]:
        token = batch['speech_token'].to(device)
        token_len = batch['speech_token_len'].to(device)
        feat = batch['speech_feat'].to(device)
        feat_len = batch['speech_feat_len'].to(device)
        embedding = batch['embedding'].to(device)

        # xvec projection
        embedding = F.normalize(embedding, dim=1)
        embedding = self.spk_embed_affine_layer(embedding)

        # concat text and prompt_text
        mask = (~make_pad_mask(token_len)).float().unsqueeze(-1).to(device)
        token = self.input_embedding(torch.clamp(token, min=0)) * mask

        # text encode
        h, h_lengths = self.encoder(token, token_len)
        h = self.encoder_proj(h)
        h, h_lengths = self.length_regulator(h, feat_len)

        # get conditions
        conds = torch.zeros(feat.shape, device=token.device)
        for i, j in enumerate(feat_len):
            if random.random() < 0.5:
                continue
            index = random.randint(0, int(0.3 * j))
            conds[i, :index] = feat[i, :index]
        conds = conds.transpose(1, 2)

        mask = (~make_pad_mask(feat_len)).to(h)
        feat = F.interpolate(feat.unsqueeze(dim=1), size=h.shape[1:], mode="nearest").squeeze(dim=1)
        loss, _ = self.decoder.compute_loss(
            feat.transpose(1, 2).contiguous(),
            mask.unsqueeze(1),
            h.transpose(1, 2).contiguous(),
            embedding,
            cond=conds
        )
        return {'loss': loss}

    @torch.inference_mode()
    def inference(self,
                  token,
                  token_len,
                  prompt_token,
                  prompt_token_len,
                  prompt_feat,
                  prompt_feat_len,
                  embedding,
                  flow_cache):
        if self.fp16 is True:
            prompt_feat = prompt_feat.half()
            embedding = embedding.half()

        assert token.shape[0] == 1
        # xvec projection
        embedding = F.normalize(embedding, dim=1)
        embedding = self.spk_embed_affine_layer(embedding)

        # concat text and prompt_text
        token_len1, token_len2 = prompt_token.shape[1], token.shape[1]
        token, token_len = torch.concat([prompt_token, token], dim=1), prompt_token_len + token_len
        mask = (~make_pad_mask(token_len)).unsqueeze(-1).to(embedding)
        token = self.input_embedding(torch.clamp(token, min=0)) * mask

        # text encode
        h, h_lengths = self.encoder(token, token_len)
        h = self.encoder_proj(h)
        mel_len1, mel_len2 = prompt_feat.shape[1], int(token_len2 / self.input_frame_rate * 22050 / 256)
        h, h_lengths = self.length_regulator.inference(h[:, :token_len1], h[:, token_len1:], mel_len1, mel_len2, self.input_frame_rate)

        # get conditions
        conds = torch.zeros([1, mel_len1 + mel_len2, self.output_size], device=token.device).to(h.dtype)
        conds[:, :mel_len1] = prompt_feat
        conds = conds.transpose(1, 2)

        mask = (~make_pad_mask(torch.tensor([mel_len1 + mel_len2]))).to(h)
        feat, flow_cache = self.decoder(
            mu=h.transpose(1, 2).contiguous(),
            mask=mask.unsqueeze(1),
            spks=embedding,
            cond=conds,
            n_timesteps=10,
            prompt_len=mel_len1,
            flow_cache=flow_cache
        )
        feat = feat[:, :, mel_len1:]
        assert feat.shape[2] == mel_len2
        return feat.float(), flow_cache


class CausalMaskedDiffWithXvec(torch.nn.Module):
    def __init__(self,
                 input_size: int = 512,
                 output_size: int = 80,
                 spk_embed_dim: int = 192,
                 output_type: str = "mel",
                 vocab_size: int = 6561,
                 input_frame_rate: int = 25,
                 only_mask_loss: bool = True,
                 token_mel_ratio: int = 2,
                 pre_lookahead_len: int = 3,
                 encoder: torch.nn.Module = None,
                 decoder: torch.nn.Module = None,
                 decoder_conf: Dict = {'in_channels': 240, 'out_channel': 80, 'spk_emb_dim': 80, 'n_spks': 1,
                                       'cfm_params': DictConfig({'sigma_min': 1e-06, 'solver': 'euler', 't_scheduler': 'cosine',
                                                                 'training_cfg_rate': 0.2, 'inference_cfg_rate': 0.7, 'reg_loss_type': 'l1'}),
                                       'decoder_params': {'channels': [256, 256], 'dropout': 0.0, 'attention_head_dim': 64,
                                                          'n_blocks': 4, 'num_mid_blocks': 12, 'num_heads': 8, 'act_fn': 'gelu'}},
                 mel_feat_conf: Dict = {'n_fft': 1024, 'num_mels': 80, 'sampling_rate': 22050,
                                        'hop_size': 256, 'win_size': 1024, 'fmin': 0, 'fmax': 8000}):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.decoder_conf = decoder_conf
        self.mel_feat_conf = mel_feat_conf
        self.vocab_size = vocab_size
        self.output_type = output_type
        self.input_frame_rate = input_frame_rate
        logging.info(f"input frame rate={self.input_frame_rate}")
        self.input_embedding = nn.Embedding(vocab_size, input_size)
        self.spk_embed_affine_layer = torch.nn.Linear(spk_embed_dim, output_size)
        self.encoder = encoder
        self.encoder_proj = torch.nn.Linear(self.encoder.output_size(), output_size)
        self.decoder = decoder
        self.only_mask_loss = only_mask_loss
        self.token_mel_ratio = token_mel_ratio
        self.pre_lookahead_len = pre_lookahead_len

        # FIXME: this was missing - just putting it in as false
        self.fp16 = False

    @torch.inference_mode()
    def inference(self,
                  token,
                  token_len,
                  prompt_token,
                  prompt_token_len,
                  prompt_feat,
                  prompt_feat_len,
                  embedding,
                  finalize):
        if self.fp16 is True:
            prompt_feat = prompt_feat.half()
            embedding = embedding.half()

        assert token.shape[0] == 1
        # xvec projection
        embedding = F.normalize(embedding, dim=1)
        embedding = self.spk_embed_affine_layer(embedding)

        # concat text and prompt_text
        token, token_len = torch.concat([prompt_token, token], dim=1), prompt_token_len + token_len
        mask = (~make_pad_mask(token_len)).unsqueeze(-1).to(embedding)
        token = self.input_embedding(torch.clamp(token, min=0)) * mask

        # text encode
        h, h_lengths = self.encoder(token, token_len)
        if finalize is False:
            h = h[:, :-self.pre_lookahead_len * self.token_mel_ratio]
        mel_len1, mel_len2 = prompt_feat.shape[1], h.shape[1] - prompt_feat.shape[1]
        h = self.encoder_proj(h)

        # get conditions
        conds = torch.zeros([1, mel_len1 + mel_len2, self.output_size], device=token.device).to(h.dtype)
        conds[:, :mel_len1] = prompt_feat
        conds = conds.transpose(1, 2)

        mask = (~make_pad_mask(torch.tensor([mel_len1 + mel_len2]))).to(h)
        feat, _ = self.decoder(
            mu=h.transpose(1, 2).contiguous(),
            mask=mask.unsqueeze(1),
            spks=embedding,
            cond=conds,
            n_timesteps=10
        )
        feat = feat[:, :, mel_len1:]
        assert feat.shape[2] == mel_len2
        return feat.float(), None  # NOTE jrm: why are they returning None here?
src/chatterbox/models/s3gen/flow_matching.py
ADDED
@@ -0,0 +1,228 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
import threading
|
15 |
+
import torch
|
16 |
+
import torch.nn.functional as F
|
17 |
+
from .matcha.flow_matching import BASECFM
|
18 |
+
from omegaconf import OmegaConf
|
19 |
+
|
20 |
+
|
21 |
+
CFM_PARAMS = OmegaConf.create({
|
22 |
+
"sigma_min": 1e-06,
|
23 |
+
"solver": "euler",
|
24 |
+
"t_scheduler": "cosine",
|
25 |
+
"training_cfg_rate": 0.2,
|
26 |
+
"inference_cfg_rate": 0.7,
|
27 |
+
"reg_loss_type": "l1"
|
28 |
+
})
|
29 |
+
|
30 |
+
|
31 |
+
class ConditionalCFM(BASECFM):
|
32 |
+
def __init__(self, in_channels, cfm_params, n_spks=1, spk_emb_dim=64, estimator: torch.nn.Module = None):
|
33 |
+
super().__init__(
|
34 |
+
n_feats=in_channels,
|
35 |
+
cfm_params=cfm_params,
|
36 |
+
n_spks=n_spks,
|
37 |
+
spk_emb_dim=spk_emb_dim,
|
38 |
+
)
|
39 |
+
self.t_scheduler = cfm_params.t_scheduler
|
40 |
+
self.training_cfg_rate = cfm_params.training_cfg_rate
|
41 |
+
self.inference_cfg_rate = cfm_params.inference_cfg_rate
|
42 |
+
in_channels = in_channels + (spk_emb_dim if n_spks > 0 else 0)
|
43 |
+
# Just change the architecture of the estimator here
|
44 |
+
self.estimator = estimator
|
45 |
+
self.lock = threading.Lock()
|
46 |
+
|
47 |
+
@torch.inference_mode()
|
48 |
+
def forward(self, mu, mask, n_timesteps, temperature=1.0, spks=None, cond=None, prompt_len=0, flow_cache=torch.zeros(1, 80, 0, 2)):
|
49 |
+
"""Forward diffusion
|
50 |
+
|
51 |
+
Args:
|
52 |
+
mu (torch.Tensor): output of encoder
|
53 |
+
shape: (batch_size, n_feats, mel_timesteps)
|
54 |
+
mask (torch.Tensor): output_mask
|
55 |
+
shape: (batch_size, 1, mel_timesteps)
|
56 |
+
n_timesteps (int): number of diffusion steps
|
57 |
+
temperature (float, optional): temperature for scaling noise. Defaults to 1.0.
|
58 |
+
spks (torch.Tensor, optional): speaker ids. Defaults to None.
|
59 |
+
shape: (batch_size, spk_emb_dim)
|
60 |
+
cond: Not used but kept for future purposes
|
61 |
+
|
62 |
+
Returns:
|
63 |
+
sample: generated mel-spectrogram
|
64 |
+
shape: (batch_size, n_feats, mel_timesteps)
|
65 |
+
"""
|
66 |
+
|
67 |
+
z = torch.randn_like(mu).to(mu.device).to(mu.dtype) * temperature
|
68 |
+
cache_size = flow_cache.shape[2]
|
69 |
+
# fix prompt and overlap part mu and z
|
70 |
+
if cache_size != 0:
|
71 |
+
z[:, :, :cache_size] = flow_cache[:, :, :, 0]
|
72 |
+
mu[:, :, :cache_size] = flow_cache[:, :, :, 1]
|
73 |
+
z_cache = torch.concat([z[:, :, :prompt_len], z[:, :, -34:]], dim=2)
|
74 |
+
mu_cache = torch.concat([mu[:, :, :prompt_len], mu[:, :, -34:]], dim=2)
|
75 |
+
flow_cache = torch.stack([z_cache, mu_cache], dim=-1)
|
76 |
+
|
77 |
+
t_span = torch.linspace(0, 1, n_timesteps + 1, device=mu.device, dtype=mu.dtype)
|
78 |
+
if self.t_scheduler == 'cosine':
|
79 |
+
t_span = 1 - torch.cos(t_span * 0.5 * torch.pi)
|
80 |
+
return self.solve_euler(z, t_span=t_span, mu=mu, mask=mask, spks=spks, cond=cond), flow_cache
|
81 |
+
|
82 |
+
def solve_euler(self, x, t_span, mu, mask, spks, cond):
|
83 |
+
"""
|
84 |
+
Fixed euler solver for ODEs.
|
85 |
+
Args:
|
86 |
+
x (torch.Tensor): random noise
|
87 |
+
t_span (torch.Tensor): n_timesteps interpolated
|
88 |
+
shape: (n_timesteps + 1,)
|
89 |
+
mu (torch.Tensor): output of encoder
|
90 |
+
shape: (batch_size, n_feats, mel_timesteps)
|
91 |
+
mask (torch.Tensor): output_mask
|
92 |
+
shape: (batch_size, 1, mel_timesteps)
|
93 |
+
spks (torch.Tensor, optional): speaker ids. Defaults to None.
|
94 |
+
shape: (batch_size, spk_emb_dim)
|
95 |
+
cond: Not used but kept for future purposes
|
96 |
+
"""
|
97 |
+
t, _, dt = t_span[0], t_span[-1], t_span[1] - t_span[0]
|
98 |
+
t = t.unsqueeze(dim=0)
|
99 |
+
|
100 |
+
# I am storing this because I can later plot it by putting a debugger here and saving it to a file
|
101 |
+
# Or in future might add like a return_all_steps flag
|
102 |
+
sol = []
|
103 |
+
|
104 |
+
# Do not use concat, it may cause memory format changed and trt infer with wrong results!
|
105 |
+
x_in = torch.zeros([2, 80, x.size(2)], device=x.device, dtype=x.dtype)
|
106 |
+
mask_in = torch.zeros([2, 1, x.size(2)], device=x.device, dtype=x.dtype)
|
        mu_in = torch.zeros([2, 80, x.size(2)], device=x.device, dtype=x.dtype)
        t_in = torch.zeros([2], device=x.device, dtype=x.dtype)
        spks_in = torch.zeros([2, 80], device=x.device, dtype=x.dtype)
        cond_in = torch.zeros([2, 80, x.size(2)], device=x.device, dtype=x.dtype)
        for step in range(1, len(t_span)):
            # Classifier-Free Guidance inference introduced in VoiceBox
            x_in[:] = x
            mask_in[:] = mask
            mu_in[0] = mu
            t_in[:] = t.unsqueeze(0)
            spks_in[0] = spks
            cond_in[0] = cond
            dphi_dt = self.forward_estimator(
                x_in, mask_in,
                mu_in, t_in,
                spks_in,
                cond_in
            )
            dphi_dt, cfg_dphi_dt = torch.split(dphi_dt, [x.size(0), x.size(0)], dim=0)
            dphi_dt = ((1.0 + self.inference_cfg_rate) * dphi_dt - self.inference_cfg_rate * cfg_dphi_dt)
            x = x + dt * dphi_dt
            t = t + dt
            sol.append(x)
            if step < len(t_span) - 1:
                dt = t_span[step + 1] - t

        return sol[-1].float()

    def forward_estimator(self, x, mask, mu, t, spks, cond):
        if isinstance(self.estimator, torch.nn.Module):
            return self.estimator.forward(x, mask, mu, t, spks, cond)
        else:
            with self.lock:
                self.estimator.set_input_shape('x', (2, 80, x.size(2)))
                self.estimator.set_input_shape('mask', (2, 1, x.size(2)))
                self.estimator.set_input_shape('mu', (2, 80, x.size(2)))
                self.estimator.set_input_shape('t', (2,))
                self.estimator.set_input_shape('spks', (2, 80))
                self.estimator.set_input_shape('cond', (2, 80, x.size(2)))
                # run trt engine
                self.estimator.execute_v2([x.contiguous().data_ptr(),
                                           mask.contiguous().data_ptr(),
                                           mu.contiguous().data_ptr(),
                                           t.contiguous().data_ptr(),
                                           spks.contiguous().data_ptr(),
                                           cond.contiguous().data_ptr(),
                                           x.data_ptr()])
            return x

    def compute_loss(self, x1, mask, mu, spks=None, cond=None):
        """Computes diffusion loss

        Args:
            x1 (torch.Tensor): Target
                shape: (batch_size, n_feats, mel_timesteps)
            mask (torch.Tensor): target mask
                shape: (batch_size, 1, mel_timesteps)
            mu (torch.Tensor): output of encoder
                shape: (batch_size, n_feats, mel_timesteps)
            spks (torch.Tensor, optional): speaker embedding. Defaults to None.
                shape: (batch_size, spk_emb_dim)

        Returns:
            loss: conditional flow matching loss
            y: conditional flow
                shape: (batch_size, n_feats, mel_timesteps)
        """
        b, _, t = mu.shape

        # random timestep
        t = torch.rand([b, 1, 1], device=mu.device, dtype=mu.dtype)
        if self.t_scheduler == 'cosine':
            t = 1 - torch.cos(t * 0.5 * torch.pi)
        # sample noise p(x_0)
        z = torch.randn_like(x1)

        y = (1 - (1 - self.sigma_min) * t) * z + t * x1
        u = x1 - (1 - self.sigma_min) * z

        # during training, we randomly drop condition to trade off mode coverage and sample fidelity
        if self.training_cfg_rate > 0:
            cfg_mask = torch.rand(b, device=x1.device) > self.training_cfg_rate
            mu = mu * cfg_mask.view(-1, 1, 1)
            spks = spks * cfg_mask.view(-1, 1)
            cond = cond * cfg_mask.view(-1, 1, 1)

        pred = self.estimator(y, mask, mu, t.squeeze(), spks, cond)
        loss = F.mse_loss(pred * mask, u * mask, reduction="sum") / (torch.sum(mask) * u.shape[1])
        return loss, y


class CausalConditionalCFM(ConditionalCFM):
    def __init__(self, in_channels=240, cfm_params=CFM_PARAMS, n_spks=1, spk_emb_dim=80, estimator=None):
        super().__init__(in_channels, cfm_params, n_spks, spk_emb_dim, estimator)
        self.rand_noise = torch.randn([1, 80, 50 * 300])

    @torch.inference_mode()
    def forward(self, mu, mask, n_timesteps, temperature=1.0, spks=None, cond=None):
        """Forward diffusion

        Args:
            mu (torch.Tensor): output of encoder
                shape: (batch_size, n_feats, mel_timesteps)
            mask (torch.Tensor): output_mask
                shape: (batch_size, 1, mel_timesteps)
            n_timesteps (int): number of diffusion steps
            temperature (float, optional): temperature for scaling noise. Defaults to 1.0.
            spks (torch.Tensor, optional): speaker ids. Defaults to None.
                shape: (batch_size, spk_emb_dim)
            cond: Not used but kept for future purposes

        Returns:
            sample: generated mel-spectrogram
                shape: (batch_size, n_feats, mel_timesteps)
        """

        z = self.rand_noise[:, :, :mu.size(2)].to(mu.device).to(mu.dtype) * temperature
        # fix prompt and overlap part mu and z
        t_span = torch.linspace(0, 1, n_timesteps + 1, device=mu.device, dtype=mu.dtype)
        if self.t_scheduler == 'cosine':
            t_span = 1 - torch.cos(t_span * 0.5 * torch.pi)
        return self.solve_euler(z, t_span=t_span, mu=mu, mask=mask, spks=spks, cond=cond), None
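
The batch-of-two buffers above pack the conditional pass into row 0 and an all-zeros unconditional pass into row 1, so a single estimator call produces both branches needed for classifier-free guidance. A minimal sketch of just the combine step, with a hypothetical guidance rate (toy tensors, not model outputs):

import torch

def cfg_combine(cond_out: torch.Tensor, uncond_out: torch.Tensor, rate: float) -> torch.Tensor:
    # Same rule as the solver loop: extrapolate away from the unconditional estimate.
    return (1.0 + rate) * cond_out - rate * uncond_out

cond = torch.randn(1, 80, 10)
uncond = torch.zeros(1, 80, 10)
assert torch.allclose(cfg_combine(cond, uncond, 0.0), cond)  # rate 0 -> pure conditional
guided = cfg_combine(cond, uncond, 0.7)                      # hypothetical rate
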
src/chatterbox/models/s3gen/hifigan.py
ADDED
@@ -0,0 +1,474 @@
# jrm: adapted from CosyVoice/cosyvoice/hifigan/generator.py
# most modules should be reusable, but I found their SineGen changed a bit.

# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Kai Hu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""HIFI-GAN"""

from typing import Dict, Optional, List
import numpy as np
from scipy.signal import get_window
import torch
import torch.nn.functional as F
from torch.nn import Conv1d
from torch.nn import ConvTranspose1d
from torch.nn.utils import remove_weight_norm
from torch.nn.utils.parametrizations import weight_norm
from torch.distributions.uniform import Uniform
from torch import nn, sin, pow
from torch.nn import Parameter

class Snake(nn.Module):
    '''
    Implementation of a sine-based periodic activation function
    Shape:
        - Input: (B, C, T)
        - Output: (B, C, T), same shape as the input
    Parameters:
        - alpha - trainable parameter
    References:
        - This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
        https://arxiv.org/abs/2006.08195
    Examples:
        >>> a1 = snake(256)
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
        '''
        Initialization.
        INPUT:
            - in_features: shape of the input
            - alpha: trainable parameter
            alpha is initialized to 1 by default, higher values = higher-frequency.
            alpha will be trained along with the rest of your model.
        '''
        super(Snake, self).__init__()
        self.in_features = in_features

        # initialize alpha
        self.alpha_logscale = alpha_logscale
        if self.alpha_logscale:  # log scale alphas initialized to zeros
            self.alpha = Parameter(torch.zeros(in_features) * alpha)
        else:  # linear scale alphas initialized to ones
            self.alpha = Parameter(torch.ones(in_features) * alpha)

        self.alpha.requires_grad = alpha_trainable

        self.no_div_by_zero = 0.000000001

    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        Snake := x + 1/a * sin^2 (xa)
        '''
        alpha = self.alpha.unsqueeze(0).unsqueeze(-1)  # line up with x to [B, C, T]
        if self.alpha_logscale:
            alpha = torch.exp(alpha)
        x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2)

        return x

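# Quick numerical check of the Snake nonlinearity above (illustrative only):
# for alpha = 1 the activation is x + sin^2(x). The helper recomputes the
# formula in plain torch; it should match the module up to the tiny
# no_div_by_zero epsilon.
def _snake_reference(x: torch.Tensor, alpha: float = 1.0) -> torch.Tensor:
    return x + (1.0 / alpha) * torch.sin(x * alpha) ** 2

# Usage (hypothetical shapes):
#   act = Snake(in_features=4)
#   x = torch.randn(2, 4, 8)
#   assert torch.allclose(act(x), _snake_reference(x), atol=1e-6)
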
def get_padding(kernel_size, dilation=1):
    return int((kernel_size * dilation - dilation) / 2)

def init_weights(m, mean=0.0, std=0.01):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        m.weight.data.normal_(mean, std)


"""hifigan based generator implementation.

This code is modified from https://github.com/jik876/hifi-gan,
https://github.com/kan-bayashi/ParallelWaveGAN and
https://github.com/NVIDIA/BigVGAN

"""

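# "Same" padding sketch for the helper above: a dilated conv spans
# d * (k - 1) + 1 samples, so padding (k * d - d) / 2 preserves length at
# stride 1. Illustrative check with a kernel-3, dilation-5 conv:
_conv = Conv1d(1, 1, 3, 1, dilation=5, padding=get_padding(3, 5))
assert _conv(torch.zeros(1, 1, 100)).shape[-1] == 100
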
class ResBlock(torch.nn.Module):
    """Residual block module in HiFiGAN/BigVGAN."""
    def __init__(
        self,
        channels: int = 512,
        kernel_size: int = 3,
        dilations: List[int] = [1, 3, 5],
    ):
        super(ResBlock, self).__init__()
        self.convs1 = nn.ModuleList()
        self.convs2 = nn.ModuleList()

        for dilation in dilations:
            self.convs1.append(
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation,
                        padding=get_padding(kernel_size, dilation)
                    )
                )
            )
            self.convs2.append(
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=1,
                        padding=get_padding(kernel_size, 1)
                    )
                )
            )
        self.convs1.apply(init_weights)
        self.convs2.apply(init_weights)
        self.activations1 = nn.ModuleList([
            Snake(channels, alpha_logscale=False)
            for _ in range(len(self.convs1))
        ])
        self.activations2 = nn.ModuleList([
            Snake(channels, alpha_logscale=False)
            for _ in range(len(self.convs2))
        ])

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        for idx in range(len(self.convs1)):
            xt = self.activations1[idx](x)
            xt = self.convs1[idx](xt)
            xt = self.activations2[idx](xt)
            xt = self.convs2[idx](xt)
            x = xt + x
        return x

    def remove_weight_norm(self):
        for idx in range(len(self.convs1)):
            remove_weight_norm(self.convs1[idx])
            remove_weight_norm(self.convs2[idx])

class SineGen(torch.nn.Module):
    """ Definition of sine generator
    SineGen(samp_rate, harmonic_num = 0,
            sine_amp = 0.1, noise_std = 0.003,
            voiced_threshold = 0,
            flag_for_pulse=False)
    samp_rate: sampling rate in Hz
    harmonic_num: number of harmonic overtones (default 0)
    sine_amp: amplitude of sine-waveform (default 0.1)
    noise_std: std of Gaussian noise (default 0.003)
    voiced_threshold: F0 threshold for U/V classification (default 0)
    flag_for_pulse: this SineGen is used inside PulseGen (default False)
    Note: when flag_for_pulse is True, the first time step of a voiced
        segment is always sin(np.pi) or cos(0)
    """

    def __init__(self, samp_rate, harmonic_num=0,
                 sine_amp=0.1, noise_std=0.003,
                 voiced_threshold=0):
        super(SineGen, self).__init__()
        self.sine_amp = sine_amp
        self.noise_std = noise_std
        self.harmonic_num = harmonic_num
        self.sampling_rate = samp_rate
        self.voiced_threshold = voiced_threshold

    def _f02uv(self, f0):
        # generate uv signal
        uv = (f0 > self.voiced_threshold).type(torch.float32)
        return uv

    @torch.no_grad()
    def forward(self, f0):
        """
        :param f0: [B, 1, sample_len], Hz
        :return: [B, 1, sample_len]
        """

        F_mat = torch.zeros((f0.size(0), self.harmonic_num + 1, f0.size(-1))).to(f0.device)
        for i in range(self.harmonic_num + 1):
            F_mat[:, i: i + 1, :] = f0 * (i + 1) / self.sampling_rate

        theta_mat = 2 * np.pi * (torch.cumsum(F_mat, dim=-1) % 1)
        u_dist = Uniform(low=-np.pi, high=np.pi)
        phase_vec = u_dist.sample(sample_shape=(f0.size(0), self.harmonic_num + 1, 1)).to(F_mat.device)
        phase_vec[:, 0, :] = 0

        # generate sine waveforms
        sine_waves = self.sine_amp * torch.sin(theta_mat + phase_vec)

        # generate uv signal
        uv = self._f02uv(f0)

        # noise: for unvoiced regions the noise std should be similar to sine_amp,
        # i.e. std = self.sine_amp/3 -> max value ~ self.sine_amp;
        # for voiced regions it is self.noise_std
        noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
        noise = noise_amp * torch.randn_like(sine_waves)

        # first: set the unvoiced part to 0 by uv
        # then: additive noise
        sine_waves = sine_waves * uv + noise
        return sine_waves, uv, noise

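# Sketch of the cumulative-phase recipe above: for a constant f0 the
# normalized per-sample increment is f0 / sample_rate, and cumsum(...) % 1
# wraps the running phase into [0, 1) before scaling by 2*pi. With a
# hypothetical 220 Hz tone at 22050 Hz this yields a clean sinusoid for the
# fundamental (harmonic index 0):
_f0 = torch.full((1, 1, 22050), 220.0)
_phase = 2 * np.pi * (torch.cumsum(_f0 / 22050.0, dim=-1) % 1)
_sine = 0.1 * torch.sin(_phase)  # amplitude matches the sine_amp default
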
class SourceModuleHnNSF(torch.nn.Module):
    """ SourceModule for hn-nsf
    SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
                 add_noise_std=0.003, voiced_threshod=0)
    sampling_rate: sampling_rate in Hz
    harmonic_num: number of harmonics above F0 (default: 0)
    sine_amp: amplitude of sine source signal (default: 0.1)
    add_noise_std: std of additive Gaussian noise (default: 0.003)
        note that amplitude of noise in unvoiced is decided
        by sine_amp
    voiced_threshold: threshold to set U/V given F0 (default: 0)
    Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
    F0_sampled (batchsize, length, 1)
    Sine_source (batchsize, length, 1)
    noise_source (batchsize, length 1)
    uv (batchsize, length, 1)
    """

    def __init__(self, sampling_rate, upsample_scale, harmonic_num=0, sine_amp=0.1,
                 add_noise_std=0.003, voiced_threshod=0):
        super(SourceModuleHnNSF, self).__init__()

        self.sine_amp = sine_amp
        self.noise_std = add_noise_std

        # to produce sine waveforms
        self.l_sin_gen = SineGen(sampling_rate, harmonic_num,
                                 sine_amp, add_noise_std, voiced_threshod)

        # to merge source harmonics into a single excitation
        self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
        self.l_tanh = torch.nn.Tanh()

    def forward(self, x):
        """
        Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
        F0_sampled (batchsize, length, 1)
        Sine_source (batchsize, length, 1)
        noise_source (batchsize, length 1)
        """
        # source for harmonic branch
        with torch.no_grad():
            sine_wavs, uv, _ = self.l_sin_gen(x.transpose(1, 2))
            sine_wavs = sine_wavs.transpose(1, 2)
            uv = uv.transpose(1, 2)
        sine_merge = self.l_tanh(self.l_linear(sine_wavs))

        # source for noise branch, in the same shape as uv
        noise = torch.randn_like(uv) * self.sine_amp / 3
        return sine_merge, noise, uv

class HiFTGenerator(nn.Module):
    """
    HiFTNet Generator: Neural Source Filter + ISTFTNet
    https://arxiv.org/abs/2309.09493
    """
    def __init__(
        self,
        in_channels: int = 80,
        base_channels: int = 512,
        nb_harmonics: int = 8,
        sampling_rate: int = 22050,
        nsf_alpha: float = 0.1,
        nsf_sigma: float = 0.003,
        nsf_voiced_threshold: float = 10,
        upsample_rates: List[int] = [8, 8],
        upsample_kernel_sizes: List[int] = [16, 16],
        istft_params: Dict[str, int] = {"n_fft": 16, "hop_len": 4},
        resblock_kernel_sizes: List[int] = [3, 7, 11],
        resblock_dilation_sizes: List[List[int]] = [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        source_resblock_kernel_sizes: List[int] = [7, 11],
        source_resblock_dilation_sizes: List[List[int]] = [[1, 3, 5], [1, 3, 5]],
        lrelu_slope: float = 0.1,
        audio_limit: float = 0.99,
        f0_predictor: torch.nn.Module = None,
    ):
        super(HiFTGenerator, self).__init__()

        self.out_channels = 1
        self.nb_harmonics = nb_harmonics
        self.sampling_rate = sampling_rate
        self.istft_params = istft_params
        self.lrelu_slope = lrelu_slope
        self.audio_limit = audio_limit

        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.m_source = SourceModuleHnNSF(
            sampling_rate=sampling_rate,
            upsample_scale=np.prod(upsample_rates) * istft_params["hop_len"],
            harmonic_num=nb_harmonics,
            sine_amp=nsf_alpha,
            add_noise_std=nsf_sigma,
            voiced_threshod=nsf_voiced_threshold)
        self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates) * istft_params["hop_len"])

        self.conv_pre = weight_norm(
            Conv1d(in_channels, base_channels, 7, 1, padding=3)
        )

        # Up
        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        base_channels // (2**i),
                        base_channels // (2**(i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )

        # Down
        self.source_downs = nn.ModuleList()
        self.source_resblocks = nn.ModuleList()
        downsample_rates = [1] + upsample_rates[::-1][:-1]
        downsample_cum_rates = np.cumprod(downsample_rates)
        for i, (u, k, d) in enumerate(zip(downsample_cum_rates[::-1], source_resblock_kernel_sizes, source_resblock_dilation_sizes)):
            if u == 1:
                self.source_downs.append(
                    Conv1d(istft_params["n_fft"] + 2, base_channels // (2 ** (i + 1)), 1, 1)
                )
            else:
                self.source_downs.append(
                    Conv1d(istft_params["n_fft"] + 2, base_channels // (2 ** (i + 1)), u * 2, u, padding=(u // 2))
                )

            self.source_resblocks.append(
                ResBlock(base_channels // (2 ** (i + 1)), k, d)
            )

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = base_channels // (2**(i + 1))
            for _, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
                self.resblocks.append(ResBlock(ch, k, d))

        self.conv_post = weight_norm(Conv1d(ch, istft_params["n_fft"] + 2, 7, 1, padding=3))
        self.ups.apply(init_weights)
        self.conv_post.apply(init_weights)
        self.reflection_pad = nn.ReflectionPad1d((1, 0))
        self.stft_window = torch.from_numpy(get_window("hann", istft_params["n_fft"], fftbins=True).astype(np.float32))
        self.f0_predictor = f0_predictor

    def remove_weight_norm(self):
        print('Removing weight norm...')
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()
        remove_weight_norm(self.conv_pre)
        remove_weight_norm(self.conv_post)
        self.m_source.remove_weight_norm()
        for l in self.source_downs:
            remove_weight_norm(l)
        for l in self.source_resblocks:
            l.remove_weight_norm()

    def _stft(self, x):
        spec = torch.stft(
            x,
            self.istft_params["n_fft"], self.istft_params["hop_len"], self.istft_params["n_fft"], window=self.stft_window.to(x.device),
            return_complex=True)
        spec = torch.view_as_real(spec)  # [B, F, TT, 2]
        return spec[..., 0], spec[..., 1]

    def _istft(self, magnitude, phase):
        magnitude = torch.clip(magnitude, max=1e2)
        real = magnitude * torch.cos(phase)
        img = magnitude * torch.sin(phase)
        inverse_transform = torch.istft(torch.complex(real, img), self.istft_params["n_fft"], self.istft_params["hop_len"],
                                        self.istft_params["n_fft"], window=self.stft_window.to(magnitude.device))
        return inverse_transform

    def decode(self, x: torch.Tensor, s: torch.Tensor = torch.zeros(1, 1, 0)) -> torch.Tensor:
        s_stft_real, s_stft_imag = self._stft(s.squeeze(1))
        s_stft = torch.cat([s_stft_real, s_stft_imag], dim=1)

        x = self.conv_pre(x)
        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, self.lrelu_slope)
            x = self.ups[i](x)

            if i == self.num_upsamples - 1:
                x = self.reflection_pad(x)

            # fusion
            si = self.source_downs[i](s_stft)
            si = self.source_resblocks[i](si)
            x = x + si

            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels

        x = F.leaky_relu(x)
        x = self.conv_post(x)
        magnitude = torch.exp(x[:, :self.istft_params["n_fft"] // 2 + 1, :])
        phase = torch.sin(x[:, self.istft_params["n_fft"] // 2 + 1:, :])  # actually, sin is redundant

        x = self._istft(magnitude, phase)
        x = torch.clamp(x, -self.audio_limit, self.audio_limit)
        return x

    def forward(
        self,
        batch: dict,
        device: torch.device,
    ) -> Dict[str, Optional[torch.Tensor]]:
        speech_feat = batch['speech_feat'].transpose(1, 2).to(device)
        # mel->f0
        f0 = self.f0_predictor(speech_feat)
        # f0->source
        s = self.f0_upsamp(f0[:, None]).transpose(1, 2)  # bs,n,t
        s, _, _ = self.m_source(s)
        s = s.transpose(1, 2)
        # mel+source->speech
        generated_speech = self.decode(x=speech_feat, s=s)
        return generated_speech, f0

    @torch.inference_mode()
    def inference(self, speech_feat: torch.Tensor, cache_source: torch.Tensor = torch.zeros(1, 1, 0)) -> torch.Tensor:
        # mel->f0
        f0 = self.f0_predictor(speech_feat)
        # f0->source
        s = self.f0_upsamp(f0[:, None]).transpose(1, 2)  # bs,n,t
        s, _, _ = self.m_source(s)
        s = s.transpose(1, 2)
        # use cache_source to avoid glitch
        if cache_source.shape[2] != 0:
            s[:, :, :cache_source.shape[2]] = cache_source
        generated_speech = self.decode(x=speech_feat, s=s)
        return generated_speech, s
src/chatterbox/models/s3gen/matcha/decoder.py
ADDED
@@ -0,0 +1,443 @@
import math
from typing import Optional

import torch
import torch.nn as nn
import torch.nn.functional as F
from conformer import ConformerBlock
from diffusers.models.activations import get_activation
from einops import pack, rearrange, repeat

from .transformer import BasicTransformerBlock

+
class SinusoidalPosEmb(torch.nn.Module):
|
15 |
+
def __init__(self, dim):
|
16 |
+
super().__init__()
|
17 |
+
self.dim = dim
|
18 |
+
assert self.dim % 2 == 0, "SinusoidalPosEmb requires dim to be even"
|
19 |
+
|
20 |
+
def forward(self, x, scale=1000):
|
21 |
+
if x.ndim < 1:
|
22 |
+
x = x.unsqueeze(0)
|
23 |
+
device = x.device
|
24 |
+
half_dim = self.dim // 2
|
25 |
+
emb = math.log(10000) / (half_dim - 1)
|
26 |
+
emb = torch.exp(torch.arange(half_dim, device=device).float() * -emb)
|
27 |
+
emb = scale * x.unsqueeze(1) * emb.unsqueeze(0)
|
28 |
+
emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
|
29 |
+
return emb
|
30 |
+
|
31 |
+
|
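# Shape sketch for the embedding above (illustrative): a batch of scalar
# timesteps (B,) maps to (B, dim), sines in the first dim/2 columns and
# cosines in the rest, over geometrically spaced frequencies.
_pos_emb = SinusoidalPosEmb(64)
_t = torch.rand(8)  # e.g. flow-matching timesteps in [0, 1)
assert _pos_emb(_t).shape == (8, 64)
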
class Block1D(torch.nn.Module):
    def __init__(self, dim, dim_out, groups=8):
        super().__init__()
        self.block = torch.nn.Sequential(
            torch.nn.Conv1d(dim, dim_out, 3, padding=1),
            torch.nn.GroupNorm(groups, dim_out),
            nn.Mish(),
        )

    def forward(self, x, mask):
        output = self.block(x * mask)
        return output * mask


class ResnetBlock1D(torch.nn.Module):
    def __init__(self, dim, dim_out, time_emb_dim, groups=8):
        super().__init__()
        self.mlp = torch.nn.Sequential(nn.Mish(), torch.nn.Linear(time_emb_dim, dim_out))

        self.block1 = Block1D(dim, dim_out, groups=groups)
        self.block2 = Block1D(dim_out, dim_out, groups=groups)

        self.res_conv = torch.nn.Conv1d(dim, dim_out, 1)

    def forward(self, x, mask, time_emb):
        h = self.block1(x, mask)
        h += self.mlp(time_emb).unsqueeze(-1)
        h = self.block2(h, mask)
        output = h + self.res_conv(x * mask)
        return output


class Downsample1D(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.conv = torch.nn.Conv1d(dim, dim, 3, 2, 1)

    def forward(self, x):
        return self.conv(x)


class TimestepEmbedding(nn.Module):
    def __init__(
        self,
        in_channels: int,
        time_embed_dim: int,
        act_fn: str = "silu",
        out_dim: int = None,
        post_act_fn: Optional[str] = None,
        cond_proj_dim=None,
    ):
        super().__init__()

        self.linear_1 = nn.Linear(in_channels, time_embed_dim)

        if cond_proj_dim is not None:
            self.cond_proj = nn.Linear(cond_proj_dim, in_channels, bias=False)
        else:
            self.cond_proj = None

        self.act = get_activation(act_fn)

        if out_dim is not None:
            time_embed_dim_out = out_dim
        else:
            time_embed_dim_out = time_embed_dim
        self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim_out)

        if post_act_fn is None:
            self.post_act = None
        else:
            self.post_act = get_activation(post_act_fn)

    def forward(self, sample, condition=None):
        if condition is not None:
            sample = sample + self.cond_proj(condition)
        sample = self.linear_1(sample)

        if self.act is not None:
            sample = self.act(sample)

        sample = self.linear_2(sample)

        if self.post_act is not None:
            sample = self.post_act(sample)
        return sample


class Upsample1D(nn.Module):
    """A 1D upsampling layer with an optional convolution.

    Parameters:
        channels (`int`):
            number of channels in the inputs and outputs.
        use_conv (`bool`, default `False`):
            option to use a convolution.
        use_conv_transpose (`bool`, default `False`):
            option to use a convolution transpose.
        out_channels (`int`, optional):
            number of output channels. Defaults to `channels`.
    """

    def __init__(self, channels, use_conv=False, use_conv_transpose=True, out_channels=None, name="conv"):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_conv_transpose = use_conv_transpose
        self.name = name

        self.conv = None
        if use_conv_transpose:
            self.conv = nn.ConvTranspose1d(channels, self.out_channels, 4, 2, 1)
        elif use_conv:
            self.conv = nn.Conv1d(self.channels, self.out_channels, 3, padding=1)

    def forward(self, inputs):
        assert inputs.shape[1] == self.channels
        if self.use_conv_transpose:
            return self.conv(inputs)

        outputs = F.interpolate(inputs, scale_factor=2.0, mode="nearest")

        if self.use_conv:
            outputs = self.conv(outputs)

        return outputs


class ConformerWrapper(ConformerBlock):
    def __init__(  # pylint: disable=useless-super-delegation
        self,
        *,
        dim,
        dim_head=64,
        heads=8,
        ff_mult=4,
        conv_expansion_factor=2,
        conv_kernel_size=31,
        attn_dropout=0,
        ff_dropout=0,
        conv_dropout=0,
        conv_causal=False,
    ):
        super().__init__(
            dim=dim,
            dim_head=dim_head,
            heads=heads,
            ff_mult=ff_mult,
            conv_expansion_factor=conv_expansion_factor,
            conv_kernel_size=conv_kernel_size,
            attn_dropout=attn_dropout,
            ff_dropout=ff_dropout,
            conv_dropout=conv_dropout,
            conv_causal=conv_causal,
        )

    def forward(
        self,
        hidden_states,
        attention_mask,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        timestep=None,
    ):
        return super().forward(x=hidden_states, mask=attention_mask.bool())

class Decoder(nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        channels=(256, 256),
        dropout=0.05,
        attention_head_dim=64,
        n_blocks=1,
        num_mid_blocks=2,
        num_heads=4,
        act_fn="snake",
        down_block_type="transformer",
        mid_block_type="transformer",
        up_block_type="transformer",
    ):
        super().__init__()
        channels = tuple(channels)
        self.in_channels = in_channels
        self.out_channels = out_channels

        self.time_embeddings = SinusoidalPosEmb(in_channels)
        time_embed_dim = channels[0] * 4
        self.time_mlp = TimestepEmbedding(
            in_channels=in_channels,
            time_embed_dim=time_embed_dim,
            act_fn="silu",
        )

        self.down_blocks = nn.ModuleList([])
        self.mid_blocks = nn.ModuleList([])
        self.up_blocks = nn.ModuleList([])

        output_channel = in_channels
        for i in range(len(channels)):  # pylint: disable=consider-using-enumerate
            input_channel = output_channel
            output_channel = channels[i]
            is_last = i == len(channels) - 1
            resnet = ResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim)
            transformer_blocks = nn.ModuleList(
                [
                    self.get_block(
                        down_block_type,
                        output_channel,
                        attention_head_dim,
                        num_heads,
                        dropout,
                        act_fn,
                    )
                    for _ in range(n_blocks)
                ]
            )
            downsample = (
                Downsample1D(output_channel) if not is_last else nn.Conv1d(output_channel, output_channel, 3, padding=1)
            )

            self.down_blocks.append(nn.ModuleList([resnet, transformer_blocks, downsample]))

        for i in range(num_mid_blocks):
            input_channel = channels[-1]
            out_channels = channels[-1]

            resnet = ResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim)

            transformer_blocks = nn.ModuleList(
                [
                    self.get_block(
                        mid_block_type,
                        output_channel,
                        attention_head_dim,
                        num_heads,
                        dropout,
                        act_fn,
                    )
                    for _ in range(n_blocks)
                ]
            )

            self.mid_blocks.append(nn.ModuleList([resnet, transformer_blocks]))

        channels = channels[::-1] + (channels[0],)
        for i in range(len(channels) - 1):
            input_channel = channels[i]
            output_channel = channels[i + 1]
            is_last = i == len(channels) - 2

            resnet = ResnetBlock1D(
                dim=2 * input_channel,
                dim_out=output_channel,
                time_emb_dim=time_embed_dim,
            )
            transformer_blocks = nn.ModuleList(
                [
                    self.get_block(
                        up_block_type,
                        output_channel,
                        attention_head_dim,
                        num_heads,
                        dropout,
                        act_fn,
                    )
                    for _ in range(n_blocks)
                ]
            )
            upsample = (
                Upsample1D(output_channel, use_conv_transpose=True)
                if not is_last
                else nn.Conv1d(output_channel, output_channel, 3, padding=1)
            )

            self.up_blocks.append(nn.ModuleList([resnet, transformer_blocks, upsample]))

        self.final_block = Block1D(channels[-1], channels[-1])
        self.final_proj = nn.Conv1d(channels[-1], self.out_channels, 1)

        self.initialize_weights()
        # nn.init.normal_(self.final_proj.weight)

    @staticmethod
    def get_block(block_type, dim, attention_head_dim, num_heads, dropout, act_fn):
        if block_type == "conformer":
            block = ConformerWrapper(
                dim=dim,
                dim_head=attention_head_dim,
                heads=num_heads,
                ff_mult=1,
                conv_expansion_factor=2,
                ff_dropout=dropout,
                attn_dropout=dropout,
                conv_dropout=dropout,
                conv_kernel_size=31,
            )
        elif block_type == "transformer":
            block = BasicTransformerBlock(
                dim=dim,
                num_attention_heads=num_heads,
                attention_head_dim=attention_head_dim,
                dropout=dropout,
                activation_fn=act_fn,
            )
        else:
            raise ValueError(f"Unknown block type {block_type}")

        return block

    def initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, nonlinearity="relu")

                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

            elif isinstance(m, nn.GroupNorm):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

            elif isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, nonlinearity="relu")

                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def forward(self, x, mask, mu, t, spks=None, cond=None):
        """Forward pass of the UNet1DConditional model.

        Args:
            x (torch.Tensor): shape (batch_size, in_channels, time)
            mask (torch.Tensor): shape (batch_size, 1, time)
            t (torch.Tensor): shape (batch_size)
            spks (torch.Tensor, optional): shape: (batch_size, condition_channels). Defaults to None.
            cond (torch.Tensor, optional): placeholder for future use. Defaults to None.

        Raises:
            ValueError: if an unknown block type is requested in `get_block`.

        Returns:
            torch.Tensor: shape (batch_size, out_channels, time)
        """

        t = self.time_embeddings(t)
        t = self.time_mlp(t)

        x = pack([x, mu], "b * t")[0]

        if spks is not None:
            spks = repeat(spks, "b c -> b c t", t=x.shape[-1])
            x = pack([x, spks], "b * t")[0]

        hiddens = []
        masks = [mask]
        for resnet, transformer_blocks, downsample in self.down_blocks:
            mask_down = masks[-1]
            x = resnet(x, mask_down, t)
            x = rearrange(x, "b c t -> b t c")
            mask_down = rearrange(mask_down, "b 1 t -> b t")
            for transformer_block in transformer_blocks:
                x = transformer_block(
                    hidden_states=x,
                    attention_mask=mask_down,
                    timestep=t,
                )
            x = rearrange(x, "b t c -> b c t")
            mask_down = rearrange(mask_down, "b t -> b 1 t")
            hiddens.append(x)  # Save hidden states for skip connections
            x = downsample(x * mask_down)
            masks.append(mask_down[:, :, ::2])

        masks = masks[:-1]
        mask_mid = masks[-1]

        for resnet, transformer_blocks in self.mid_blocks:
            x = resnet(x, mask_mid, t)
            x = rearrange(x, "b c t -> b t c")
            mask_mid = rearrange(mask_mid, "b 1 t -> b t")
            for transformer_block in transformer_blocks:
                x = transformer_block(
                    hidden_states=x,
                    attention_mask=mask_mid,
                    timestep=t,
                )
            x = rearrange(x, "b t c -> b c t")
            mask_mid = rearrange(mask_mid, "b t -> b 1 t")

        for resnet, transformer_blocks, upsample in self.up_blocks:
            mask_up = masks.pop()
            x = resnet(pack([x, hiddens.pop()], "b * t")[0], mask_up, t)
            x = rearrange(x, "b c t -> b t c")
            mask_up = rearrange(mask_up, "b 1 t -> b t")
            for transformer_block in transformer_blocks:
                x = transformer_block(
                    hidden_states=x,
                    attention_mask=mask_up,
                    timestep=t,
                )
            x = rearrange(x, "b t c -> b c t")
            mask_up = rearrange(mask_up, "b t -> b 1 t")
            x = upsample(x * mask_up)

        x = self.final_block(x, mask_up)
        output = self.final_proj(x * mask_up)

        return output * mask
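
One subtlety in Decoder.forward above: masks.append(mask_down[:, :, ::2]) keeps the binary mask aligned with Downsample1D, whose kernel-3, stride-2, padding-1 conv maps a length-T sequence to length ceil(T / 2), exactly the length of every-other-element slicing. A standalone shape check (sizes are hypothetical):

import torch

T = 101
mask = torch.ones(2, 1, T)
down = torch.nn.Conv1d(4, 4, 3, 2, 1)  # same shape rule as Downsample1D
x = torch.randn(2, 4, T)
assert down(x).shape[-1] == mask[:, :, ::2].shape[-1] == (T + 1) // 2
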
src/chatterbox/models/s3gen/matcha/flow_matching.py
ADDED
@@ -0,0 +1,129 @@
from abc import ABC

import torch
import torch.nn.functional as F

from .decoder import Decoder


class BASECFM(torch.nn.Module, ABC):
    def __init__(
        self,
        n_feats,
        cfm_params,
        n_spks=1,
        spk_emb_dim=128,
    ):
        super().__init__()
        self.n_feats = n_feats
        self.n_spks = n_spks
        self.spk_emb_dim = spk_emb_dim
        self.solver = cfm_params.solver
        if hasattr(cfm_params, "sigma_min"):
            self.sigma_min = cfm_params.sigma_min
        else:
            self.sigma_min = 1e-4

        self.estimator = None

    @torch.inference_mode()
    def forward(self, mu, mask, n_timesteps, temperature=1.0, spks=None, cond=None):
        """Forward diffusion

        Args:
            mu (torch.Tensor): output of encoder
                shape: (batch_size, n_feats, mel_timesteps)
            mask (torch.Tensor): output_mask
                shape: (batch_size, 1, mel_timesteps)
            n_timesteps (int): number of diffusion steps
            temperature (float, optional): temperature for scaling noise. Defaults to 1.0.
            spks (torch.Tensor, optional): speaker ids. Defaults to None.
                shape: (batch_size, spk_emb_dim)
            cond: Not used but kept for future purposes

        Returns:
            sample: generated mel-spectrogram
                shape: (batch_size, n_feats, mel_timesteps)
        """
        z = torch.randn_like(mu) * temperature
        t_span = torch.linspace(0, 1, n_timesteps + 1, device=mu.device)
        return self.solve_euler(z, t_span=t_span, mu=mu, mask=mask, spks=spks, cond=cond)

    def solve_euler(self, x, t_span, mu, mask, spks, cond):
        """
        Fixed-step Euler solver for ODEs.
        Args:
            x (torch.Tensor): random noise
            t_span (torch.Tensor): n_timesteps interpolated
                shape: (n_timesteps + 1,)
            mu (torch.Tensor): output of encoder
                shape: (batch_size, n_feats, mel_timesteps)
            mask (torch.Tensor): output_mask
                shape: (batch_size, 1, mel_timesteps)
            spks (torch.Tensor, optional): speaker ids. Defaults to None.
                shape: (batch_size, spk_emb_dim)
            cond: Not used but kept for future purposes
        """
        t, _, dt = t_span[0], t_span[-1], t_span[1] - t_span[0]

        # I am storing this because I can later plot it by putting a debugger here and saving it to a file
        # Or in future might add a return_all_steps flag
        sol = []

        for step in range(1, len(t_span)):
            dphi_dt = self.estimator(x, mask, mu, t, spks, cond)

            x = x + dt * dphi_dt
            t = t + dt
            sol.append(x)
            if step < len(t_span) - 1:
                dt = t_span[step + 1] - t

        return sol[-1]

    def compute_loss(self, x1, mask, mu, spks=None, cond=None):
        """Computes diffusion loss

        Args:
            x1 (torch.Tensor): Target
                shape: (batch_size, n_feats, mel_timesteps)
            mask (torch.Tensor): target mask
                shape: (batch_size, 1, mel_timesteps)
            mu (torch.Tensor): output of encoder
                shape: (batch_size, n_feats, mel_timesteps)
            spks (torch.Tensor, optional): speaker embedding. Defaults to None.
                shape: (batch_size, spk_emb_dim)

        Returns:
            loss: conditional flow matching loss
            y: conditional flow
                shape: (batch_size, n_feats, mel_timesteps)
        """
        b, _, t = mu.shape

        # random timestep
        t = torch.rand([b, 1, 1], device=mu.device, dtype=mu.dtype)
        # sample noise p(x_0)
        z = torch.randn_like(x1)

        y = (1 - (1 - self.sigma_min) * t) * z + t * x1
        u = x1 - (1 - self.sigma_min) * z

        loss = F.mse_loss(self.estimator(y, mask, mu, t.squeeze(), spks), u, reduction="sum") / (
            torch.sum(mask) * u.shape[1]
        )
        return loss, y


class CFM(BASECFM):
    def __init__(self, in_channels, out_channel, cfm_params, decoder_params, n_spks=1, spk_emb_dim=64):
        super().__init__(
            n_feats=in_channels,
            cfm_params=cfm_params,
            n_spks=n_spks,
            spk_emb_dim=spk_emb_dim,
        )

        in_channels = in_channels + (spk_emb_dim if n_spks > 1 else 0)
        # Just change the architecture of the estimator here
        self.estimator = Decoder(in_channels=in_channels, out_channels=out_channel, **decoder_params)
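
The fixed-step Euler loop above is the entire inference-time ODE solver: one estimator call per step, then x moves by dt * dphi_dt. A self-contained sketch of the same stepping pattern on a toy vector field dx/dt = -x, whose exact solution is x0 * exp(-t); the step count here is arbitrary:

import math
import torch

def euler_solve(x, t_span, field):
    # Same stepping pattern as BASECFM.solve_euler, minus the model-specific inputs.
    t, dt = t_span[0], t_span[1] - t_span[0]
    for step in range(1, len(t_span)):
        x = x + dt * field(x, t)
        t = t + dt
        if step < len(t_span) - 1:
            dt = t_span[step + 1] - t
    return x

x0 = torch.ones(4)
out = euler_solve(x0, torch.linspace(0, 1, 1001), lambda x, t: -x)
assert torch.allclose(out, x0 * math.exp(-1.0), atol=1e-3)
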
src/chatterbox/models/s3gen/matcha/text_encoder.py
ADDED
@@ -0,0 +1,413 @@
""" from https://github.com/jaywalnut310/glow-tts """

import math

import torch
import torch.nn as nn
from einops import rearrange


def sequence_mask(length, max_length=None):
    if max_length is None:
        max_length = length.max()
    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)

+
class LayerNorm(nn.Module):
|
19 |
+
def __init__(self, channels, eps=1e-4):
|
20 |
+
super().__init__()
|
21 |
+
self.channels = channels
|
22 |
+
self.eps = eps
|
23 |
+
|
24 |
+
self.gamma = torch.nn.Parameter(torch.ones(channels))
|
25 |
+
self.beta = torch.nn.Parameter(torch.zeros(channels))
|
26 |
+
|
27 |
+
def forward(self, x):
|
28 |
+
n_dims = len(x.shape)
|
29 |
+
mean = torch.mean(x, 1, keepdim=True)
|
30 |
+
variance = torch.mean((x - mean) ** 2, 1, keepdim=True)
|
31 |
+
|
32 |
+
x = (x - mean) * torch.rsqrt(variance + self.eps)
|
33 |
+
|
34 |
+
shape = [1, -1] + [1] * (n_dims - 2)
|
35 |
+
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
|
36 |
+
return x
|
37 |
+
|
38 |
+
|
39 |
+
class ConvReluNorm(nn.Module):
|
40 |
+
def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
|
41 |
+
super().__init__()
|
42 |
+
self.in_channels = in_channels
|
43 |
+
self.hidden_channels = hidden_channels
|
44 |
+
self.out_channels = out_channels
|
45 |
+
self.kernel_size = kernel_size
|
46 |
+
self.n_layers = n_layers
|
47 |
+
self.p_dropout = p_dropout
|
48 |
+
|
49 |
+
self.conv_layers = torch.nn.ModuleList()
|
50 |
+
self.norm_layers = torch.nn.ModuleList()
|
51 |
+
self.conv_layers.append(torch.nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
|
52 |
+
self.norm_layers.append(LayerNorm(hidden_channels))
|
53 |
+
self.relu_drop = torch.nn.Sequential(torch.nn.ReLU(), torch.nn.Dropout(p_dropout))
|
54 |
+
for _ in range(n_layers - 1):
|
55 |
+
self.conv_layers.append(
|
56 |
+
torch.nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2)
|
57 |
+
)
|
58 |
+
self.norm_layers.append(LayerNorm(hidden_channels))
|
59 |
+
self.proj = torch.nn.Conv1d(hidden_channels, out_channels, 1)
|
60 |
+
self.proj.weight.data.zero_()
|
61 |
+
self.proj.bias.data.zero_()
|
62 |
+
|
63 |
+
def forward(self, x, x_mask):
|
64 |
+
x_org = x
|
65 |
+
for i in range(self.n_layers):
|
66 |
+
x = self.conv_layers[i](x * x_mask)
|
67 |
+
x = self.norm_layers[i](x)
|
68 |
+
x = self.relu_drop(x)
|
69 |
+
x = x_org + self.proj(x)
|
70 |
+
return x * x_mask
|
71 |
+
|
72 |
+
|
73 |
+
class DurationPredictor(nn.Module):
|
74 |
+
def __init__(self, in_channels, filter_channels, kernel_size, p_dropout):
|
75 |
+
super().__init__()
|
76 |
+
self.in_channels = in_channels
|
77 |
+
self.filter_channels = filter_channels
|
78 |
+
self.p_dropout = p_dropout
|
79 |
+
|
80 |
+
self.drop = torch.nn.Dropout(p_dropout)
|
81 |
+
self.conv_1 = torch.nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
|
82 |
+
self.norm_1 = LayerNorm(filter_channels)
|
83 |
+
self.conv_2 = torch.nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
|
84 |
+
self.norm_2 = LayerNorm(filter_channels)
|
85 |
+
self.proj = torch.nn.Conv1d(filter_channels, 1, 1)
|
86 |
+
|
87 |
+
def forward(self, x, x_mask):
|
88 |
+
x = self.conv_1(x * x_mask)
|
89 |
+
x = torch.relu(x)
|
90 |
+
x = self.norm_1(x)
|
91 |
+
x = self.drop(x)
|
92 |
+
x = self.conv_2(x * x_mask)
|
93 |
+
x = torch.relu(x)
|
94 |
+
x = self.norm_2(x)
|
95 |
+
x = self.drop(x)
|
96 |
+
x = self.proj(x * x_mask)
|
97 |
+
return x * x_mask
|
98 |
+
|
99 |
+
|
class RotaryPositionalEmbeddings(nn.Module):
    """
    ## RoPE module

    Rotary encoding transforms pairs of features by rotating in the 2D plane.
    That is, it organizes the $d$ features as $\frac{d}{2}$ pairs.
    Each pair can be considered a coordinate in a 2D plane, and the encoding will rotate it
    by an angle depending on the position of the token.
    """

    def __init__(self, d: int, base: int = 10_000):
        r"""
        * `d` is the number of features $d$
        * `base` is the constant used for calculating $\Theta$
        """
        super().__init__()

        self.base = base
        self.d = int(d)
        self.cos_cached = None
        self.sin_cached = None

    def _build_cache(self, x: torch.Tensor):
        r"""
        Cache $\cos$ and $\sin$ values
        """
        # Return if cache is already built
        if self.cos_cached is not None and x.shape[0] <= self.cos_cached.shape[0]:
            return

        # Get sequence length
        seq_len = x.shape[0]

        # $\Theta = {\theta_i = 10000^{-\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$
        theta = 1.0 / (self.base ** (torch.arange(0, self.d, 2).float() / self.d)).to(x.device)

        # Create position indexes `[0, 1, ..., seq_len - 1]`
        seq_idx = torch.arange(seq_len, device=x.device).float().to(x.device)

        # Calculate the product of position index and $\theta_i$
        idx_theta = torch.einsum("n,d->nd", seq_idx, theta)

        # Concatenate so that for row $m$ we have
        # $[m \theta_0, m \theta_1, ..., m \theta_{\frac{d}{2}}, m \theta_0, m \theta_1, ..., m \theta_{\frac{d}{2}}]$
        idx_theta2 = torch.cat([idx_theta, idx_theta], dim=1)

        # Cache them
        self.cos_cached = idx_theta2.cos()[:, None, None, :]
        self.sin_cached = idx_theta2.sin()[:, None, None, :]

    def _neg_half(self, x: torch.Tensor):
        # $\frac{d}{2}$
        d_2 = self.d // 2

        # Calculate $[-x^{(\frac{d}{2} + 1)}, -x^{(\frac{d}{2} + 2)}, ..., -x^{(d)}, x^{(1)}, x^{(2)}, ..., x^{(\frac{d}{2})}]$
        return torch.cat([-x[:, :, :, d_2:], x[:, :, :, :d_2]], dim=-1)

    def forward(self, x: torch.Tensor):
        """
        * `x` is the Tensor at the head of a key or a query with shape `[seq_len, batch_size, n_heads, d]`
        """
        # Cache $\cos$ and $\sin$ values
        x = rearrange(x, "b h t d -> t b h d")

        self._build_cache(x)

        # Split the features, we can choose to apply rotary embeddings only to a partial set of features.
        x_rope, x_pass = x[..., : self.d], x[..., self.d :]

        # Calculate
        # $[-x^{(\frac{d}{2} + 1)}, -x^{(\frac{d}{2} + 2)}, ..., -x^{(d)}, x^{(1)}, x^{(2)}, ..., x^{(\frac{d}{2})}]$
        neg_half_x = self._neg_half(x_rope)

        x_rope = (x_rope * self.cos_cached[: x.shape[0]]) + (neg_half_x * self.sin_cached[: x.shape[0]])

        return rearrange(torch.cat((x_rope, x_pass), dim=-1), "t b h d -> b h t d")

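# Quick property check for the RoPE module above (illustrative): the rotation
# angle at position 0 is zero, so the first time step passes through
# unchanged, and the overall shape is preserved for the attention below.
_rope = RotaryPositionalEmbeddings(d=8)
_q = torch.randn(2, 4, 16, 8)  # [batch, heads, seq, d]
_out = _rope(_q)
assert _out.shape == _q.shape
assert torch.allclose(_out[:, :, 0], _q[:, :, 0], atol=1e-6)
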
class MultiHeadAttention(nn.Module):
    def __init__(
        self,
        channels,
        out_channels,
        n_heads,
        heads_share=True,
        p_dropout=0.0,
        proximal_bias=False,
        proximal_init=False,
    ):
        super().__init__()
        assert channels % n_heads == 0

        self.channels = channels
        self.out_channels = out_channels
        self.n_heads = n_heads
        self.heads_share = heads_share
        self.proximal_bias = proximal_bias
        self.p_dropout = p_dropout
        self.attn = None

        self.k_channels = channels // n_heads
        self.conv_q = torch.nn.Conv1d(channels, channels, 1)
        self.conv_k = torch.nn.Conv1d(channels, channels, 1)
        self.conv_v = torch.nn.Conv1d(channels, channels, 1)

        # from https://nn.labml.ai/transformers/rope/index.html
        self.query_rotary_pe = RotaryPositionalEmbeddings(self.k_channels * 0.5)
        self.key_rotary_pe = RotaryPositionalEmbeddings(self.k_channels * 0.5)

        self.conv_o = torch.nn.Conv1d(channels, out_channels, 1)
        self.drop = torch.nn.Dropout(p_dropout)

        torch.nn.init.xavier_uniform_(self.conv_q.weight)
        torch.nn.init.xavier_uniform_(self.conv_k.weight)
        if proximal_init:
            self.conv_k.weight.data.copy_(self.conv_q.weight.data)
            self.conv_k.bias.data.copy_(self.conv_q.bias.data)
        torch.nn.init.xavier_uniform_(self.conv_v.weight)

    def forward(self, x, c, attn_mask=None):
        q = self.conv_q(x)
        k = self.conv_k(c)
        v = self.conv_v(c)

        x, self.attn = self.attention(q, k, v, mask=attn_mask)

        x = self.conv_o(x)
        return x

    def attention(self, query, key, value, mask=None):
        b, d, t_s, t_t = (*key.size(), query.size(2))
        query = rearrange(query, "b (h c) t-> b h t c", h=self.n_heads)
        key = rearrange(key, "b (h c) t-> b h t c", h=self.n_heads)
        value = rearrange(value, "b (h c) t-> b h t c", h=self.n_heads)

        query = self.query_rotary_pe(query)
        key = self.key_rotary_pe(key)

        scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.k_channels)

        if self.proximal_bias:
            assert t_s == t_t, "Proximal bias is only available for self-attention."
            scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e4)
        p_attn = torch.nn.functional.softmax(scores, dim=-1)
        p_attn = self.drop(p_attn)
        output = torch.matmul(p_attn, value)
        output = output.transpose(2, 3).contiguous().view(b, d, t_t)
        return output, p_attn

    @staticmethod
    def _attention_bias_proximal(length):
        r = torch.arange(length, dtype=torch.float32)
        diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
        return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)


class FFN(nn.Module):
    def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0.0):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout

        self.conv_1 = torch.nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
        self.conv_2 = torch.nn.Conv1d(filter_channels, out_channels, kernel_size, padding=kernel_size // 2)
        self.drop = torch.nn.Dropout(p_dropout)

    def forward(self, x, x_mask):
        x = self.conv_1(x * x_mask)
        x = torch.relu(x)
        x = self.drop(x)
        x = self.conv_2(x * x_mask)
        return x * x_mask


class Encoder(nn.Module):
    def __init__(
        self,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size=1,
        p_dropout=0.0,
        **kwargs,
    ):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout

        self.drop = torch.nn.Dropout(p_dropout)
        self.attn_layers = torch.nn.ModuleList()
        self.norm_layers_1 = torch.nn.ModuleList()
        self.ffn_layers = torch.nn.ModuleList()
        self.norm_layers_2 = torch.nn.ModuleList()
        for _ in range(self.n_layers):
            self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
            self.norm_layers_1.append(LayerNorm(hidden_channels))
            self.ffn_layers.append(
                FFN(
                    hidden_channels,
                    hidden_channels,
                    filter_channels,
                    kernel_size,
                    p_dropout=p_dropout,
                )
            )
            self.norm_layers_2.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask):
        attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        for i in range(self.n_layers):
            x = x * x_mask
            y = self.attn_layers[i](x, x, attn_mask)
            y = self.drop(y)
            x = self.norm_layers_1[i](x + y)
            y = self.ffn_layers[i](x, x_mask)
            y = self.drop(y)
            x = self.norm_layers_2[i](x + y)
        x = x * x_mask
        return x


class TextEncoder(nn.Module):
    def __init__(
        self,
        encoder_type,
        encoder_params,
        duration_predictor_params,
        n_vocab,
        n_spks=1,
        spk_emb_dim=128,
    ):
        super().__init__()
        self.encoder_type = encoder_type
        self.n_vocab = n_vocab
        self.n_feats = encoder_params.n_feats
        self.n_channels = encoder_params.n_channels
        self.spk_emb_dim = spk_emb_dim
        self.n_spks = n_spks

        self.emb = torch.nn.Embedding(n_vocab, self.n_channels)
        torch.nn.init.normal_(self.emb.weight, 0.0, self.n_channels**-0.5)

        if encoder_params.prenet:
            self.prenet = ConvReluNorm(
                self.n_channels,
                self.n_channels,
                self.n_channels,
                kernel_size=5,
                n_layers=3,
                p_dropout=0.5,
            )
        else:
            self.prenet = lambda x, x_mask: x

        self.encoder = Encoder(
            encoder_params.n_channels + (spk_emb_dim if n_spks > 1 else 0),
            encoder_params.filter_channels,
            encoder_params.n_heads,
            encoder_params.n_layers,
            encoder_params.kernel_size,
            encoder_params.p_dropout,
        )

        self.proj_m = torch.nn.Conv1d(self.n_channels + (spk_emb_dim if n_spks > 1 else 0), self.n_feats, 1)
        self.proj_w = DurationPredictor(
            self.n_channels + (spk_emb_dim if n_spks > 1 else 0),
            duration_predictor_params.filter_channels_dp,
|
377 |
+
duration_predictor_params.kernel_size,
|
378 |
+
duration_predictor_params.p_dropout,
|
379 |
+
)
|
380 |
+
|
381 |
+
def forward(self, x, x_lengths, spks=None):
|
382 |
+
"""Run forward pass to the transformer based encoder and duration predictor
|
383 |
+
|
384 |
+
Args:
|
385 |
+
x (torch.Tensor): text input
|
386 |
+
shape: (batch_size, max_text_length)
|
387 |
+
x_lengths (torch.Tensor): text input lengths
|
388 |
+
shape: (batch_size,)
|
389 |
+
spks (torch.Tensor, optional): speaker ids. Defaults to None.
|
390 |
+
shape: (batch_size,)
|
391 |
+
|
392 |
+
Returns:
|
393 |
+
mu (torch.Tensor): average output of the encoder
|
394 |
+
shape: (batch_size, n_feats, max_text_length)
|
395 |
+
logw (torch.Tensor): log duration predicted by the duration predictor
|
396 |
+
shape: (batch_size, 1, max_text_length)
|
397 |
+
x_mask (torch.Tensor): mask for the text input
|
398 |
+
shape: (batch_size, 1, max_text_length)
|
399 |
+
"""
|
400 |
+
x = self.emb(x) * math.sqrt(self.n_channels)
|
401 |
+
x = torch.transpose(x, 1, -1)
|
402 |
+
x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
|
403 |
+
|
404 |
+
x = self.prenet(x, x_mask)
|
405 |
+
if self.n_spks > 1:
|
406 |
+
x = torch.cat([x, spks.unsqueeze(-1).repeat(1, 1, x.shape[-1])], dim=1)
|
407 |
+
x = self.encoder(x, x_mask)
|
408 |
+
mu = self.proj_m(x) * x_mask
|
409 |
+
|
410 |
+
x_dp = torch.detach(x)
|
411 |
+
logw = self.proj_w(x_dp, x_mask)
|
412 |
+
|
413 |
+
return mu, logw, x_mask
|
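A minimal usage sketch for the rotary-attention Encoder above (not part of the diff). It assumes the classes defined in this file (Encoder, MultiHeadAttention, FFN, LayerNorm, RotaryPositionalEmbeddings) are in scope, and builds the padding mask by hand, mirroring what TextEncoder does with sequence_mask:

import torch

# Encoder and its dependencies come from the file above.
enc = Encoder(hidden_channels=192, filter_channels=768, n_heads=2,
              n_layers=6, kernel_size=3, p_dropout=0.1).eval()
x = torch.randn(2, 192, 50)                    # (batch, channels, time)
lengths = torch.tensor([50, 30])               # valid frames per batch item
x_mask = (torch.arange(50)[None, :] < lengths[:, None]).unsqueeze(1).to(x.dtype)
y = enc(x, x_mask)                             # (2, 192, 50); padded frames are zeroed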
src/chatterbox/models/s3gen/matcha/transformer.py
ADDED
@@ -0,0 +1,316 @@
from typing import Any, Dict, Optional

import torch
import torch.nn as nn
from diffusers.models.attention import (
    GEGLU,
    GELU,
    AdaLayerNorm,
    AdaLayerNormZero,
    ApproximateGELU,
)
from diffusers.models.attention_processor import Attention
from diffusers.models.lora import LoRACompatibleLinear
from diffusers.utils.torch_utils import maybe_allow_in_graph


class SnakeBeta(nn.Module):
    """
    A modified Snake function which uses separate parameters for the magnitude of the periodic components
    Shape:
        - Input: (B, C, T)
        - Output: (B, C, T), same shape as the input
    Parameters:
        - alpha - trainable parameter that controls frequency
        - beta - trainable parameter that controls magnitude
    References:
        - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
        https://arxiv.org/abs/2006.08195
    Examples:
        >>> a1 = SnakeBeta(256, 256)
        >>> x = torch.randn(256)
        >>> x = a1(x)
    """

    def __init__(self, in_features, out_features, alpha=1.0, alpha_trainable=True, alpha_logscale=True):
        """
        Initialization.
        INPUT:
            - in_features: shape of the input
            - alpha - trainable parameter that controls frequency
            - beta - trainable parameter that controls magnitude
            alpha is initialized to 1 by default, higher values = higher-frequency.
            beta is initialized to 1 by default, higher values = higher-magnitude.
            alpha will be trained along with the rest of your model.
        """
        super().__init__()
        self.in_features = out_features if isinstance(out_features, list) else [out_features]
        self.proj = LoRACompatibleLinear(in_features, out_features)

        # initialize alpha
        self.alpha_logscale = alpha_logscale
        if self.alpha_logscale:  # log scale alphas initialized to zeros
            self.alpha = nn.Parameter(torch.zeros(self.in_features) * alpha)
            self.beta = nn.Parameter(torch.zeros(self.in_features) * alpha)
        else:  # linear scale alphas initialized to ones
            self.alpha = nn.Parameter(torch.ones(self.in_features) * alpha)
            self.beta = nn.Parameter(torch.ones(self.in_features) * alpha)

        self.alpha.requires_grad = alpha_trainable
        self.beta.requires_grad = alpha_trainable

        self.no_div_by_zero = 0.000000001

    def forward(self, x):
        """
        Forward pass of the function.
        Applies the function to the input elementwise.
        SnakeBeta := x + 1/b * sin^2(x * a)
        """
        x = self.proj(x)
        if self.alpha_logscale:
            alpha = torch.exp(self.alpha)
            beta = torch.exp(self.beta)
        else:
            alpha = self.alpha
            beta = self.beta

        x = x + (1.0 / (beta + self.no_div_by_zero)) * torch.pow(torch.sin(x * alpha), 2)

        return x


class FeedForward(nn.Module):
    r"""
    A feed-forward layer.

    Parameters:
        dim (`int`): The number of channels in the input.
        dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
        mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        final_dropout (`bool`, *optional*, defaults to False): Apply a final dropout.
    """

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        elif activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)
        elif activation_fn == "snakebeta":
            act_fn = SnakeBeta(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(LoRACompatibleLinear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states


@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    r"""
    A basic Transformer block.

    Parameters:
        dim (`int`): The number of channels in the input and output.
        num_attention_heads (`int`): The number of heads to use for multi-head attention.
        attention_head_dim (`int`): The number of channels in each head.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
        only_cross_attention (`bool`, *optional*):
            Whether to use only cross-attention layers. In this case two cross attention layers are used.
        double_self_attention (`bool`, *optional*):
            Whether to use two self-attention layers. In this case no cross attention layers are used.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        num_embeds_ada_norm (:obj:`int`, *optional*):
            The number of diffusion steps used during training. See `Transformer2DModel`.
        attention_bias (:obj:`bool`, *optional*, defaults to `False`):
            Configure if the attentions should contain a bias parameter.
    """

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
                # scale_qk=False, # uncomment this to not to use flash attention
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunked feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}

        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=encoder_attention_mask if self.only_cross_attention else attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )

            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states
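A minimal self-attention-only sketch for BasicTransformerBlock above (not part of the diff). It assumes a diffusers version that still exposes AdaLayerNorm/GEGLU under diffusers.models.attention, as the imports above require, and uses the "snakebeta" option so the feed-forward runs through the SnakeBeta projection defined above:

import torch

block = BasicTransformerBlock(
    dim=256,
    num_attention_heads=4,
    attention_head_dim=64,
    dropout=0.1,
    activation_fn="snakebeta",   # FeedForward routes through SnakeBeta(dim, inner_dim)
).eval()
hidden = torch.randn(2, 100, 256)   # (batch, tokens, dim)
out = block(hidden)                 # no masks, no cross-attention: pure self-attention
assert out.shape == hidden.shape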
src/chatterbox/models/s3gen/s3gen.py
ADDED
@@ -0,0 +1,305 @@
# Modified from CosyVoice https://github.com/FunAudioLLM/CosyVoice
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

import numpy as np
import torch
import torchaudio as ta
from functools import lru_cache
from typing import Optional
from omegaconf import DictConfig

from ..s3tokenizer import S3_SR, SPEECH_VOCAB_SIZE, S3Tokenizer
from .const import S3GEN_SR
from .flow import CausalMaskedDiffWithXvec
from .xvector import CAMPPlus
from .utils.mel import mel_spectrogram
from .f0_predictor import ConvRNNF0Predictor
from .hifigan import HiFTGenerator
from .transformer.upsample_encoder import UpsampleConformerEncoder
from .flow_matching import CausalConditionalCFM
from .decoder import ConditionalDecoder


def drop_invalid_tokens(x):
    assert len(x.shape) <= 2 and x.shape[0] == 1, "only batch size of one allowed for now"
    return x[x < SPEECH_VOCAB_SIZE]


# TODO: global resampler cache
@lru_cache(100)
def get_resampler(src_sr, dst_sr, device):
    return ta.transforms.Resample(src_sr, dst_sr).to(device)


class S3Token2Mel(torch.nn.Module):
    """
    CosyVoice2's CFM decoder maps S3 speech tokens to mel-spectrograms.

    TODO: make these modules configurable?
    """
    def __init__(self):
        super().__init__()
        self.tokenizer = S3Tokenizer("speech_tokenizer_v2_25hz")
        self.mel_extractor = mel_spectrogram  # TODO: make it a torch module?
        self.speaker_encoder = CAMPPlus()  # use default args

        encoder = UpsampleConformerEncoder(
            output_size=512,
            attention_heads=8,
            linear_units=2048,
            num_blocks=6,
            dropout_rate=0.1,
            positional_dropout_rate=0.1,
            attention_dropout_rate=0.1,
            normalize_before=True,
            input_layer='linear',
            pos_enc_layer_type='rel_pos_espnet',
            selfattention_layer_type='rel_selfattn',
            input_size=512,
            use_cnn_module=False,
            macaron_style=False,
        )

        estimator = ConditionalDecoder(
            in_channels=320,
            out_channels=80,
            causal=True,
            channels=[256],
            dropout=0.0,
            attention_head_dim=64,
            n_blocks=4,
            num_mid_blocks=12,
            num_heads=8,
            act_fn='gelu',
        )
        cfm_params = DictConfig({
            "sigma_min": 1e-06,
            "solver": 'euler',
            "t_scheduler": 'cosine',
            "training_cfg_rate": 0.2,
            "inference_cfg_rate": 0.7,
            "reg_loss_type": 'l1',
        })
        decoder = CausalConditionalCFM(
            spk_emb_dim=80,
            cfm_params=cfm_params,
            estimator=estimator,
        )

        self.flow = CausalMaskedDiffWithXvec(
            encoder=encoder,
            decoder=decoder
        )

        self.resamplers = {}

    @property
    def device(self):
        params = self.tokenizer.parameters()
        return next(params).device

    def embed_ref(
        self,
        ref_wav: torch.Tensor,
        ref_sr: int,
        device="auto",
        ref_fade_out=True,
    ):
        device = self.device if device == "auto" else device
        if isinstance(ref_wav, np.ndarray):
            ref_wav = torch.from_numpy(ref_wav).float()

        if ref_wav.device != device:
            ref_wav = ref_wav.to(device)

        if len(ref_wav.shape) == 1:
            ref_wav = ref_wav.unsqueeze(0)  # (B, L)

        if ref_wav.size(1) > 10 * ref_sr:
            print("WARNING: cosydec received ref longer than 10s")

        ref_wav_24 = ref_wav
        if ref_sr != S3GEN_SR:
            ref_wav_24 = get_resampler(ref_sr, S3GEN_SR, device)(ref_wav)

        ref_mels_24 = self.mel_extractor(ref_wav_24).transpose(1, 2).to(device)
        ref_mels_24_len = None

        # Resample to 16 kHz
        ref_wav_16 = get_resampler(ref_sr, S3_SR, device)(ref_wav).to(device)

        # Speaker embedding
        ref_x_vector = self.speaker_encoder.inference(ref_wav_16)

        # Tokenize the 16 kHz reference
        ref_speech_tokens, ref_speech_token_lens = self.tokenizer(ref_wav_16)

        # Make sure mel_len = 2 * stoken_len (happens when the input is not padded to a multiple of 40ms)
        if ref_mels_24.shape[1] != 2 * ref_speech_tokens.shape[1]:
            logging.warning(
                "Reference mel length is not equal to 2 * reference token length.\n"
            )
            ref_speech_tokens = ref_speech_tokens[:, :ref_mels_24.shape[1] // 2]
            ref_speech_token_lens[0] = ref_speech_tokens.shape[1]

        return dict(
            prompt_token=ref_speech_tokens.to(device),
            prompt_token_len=ref_speech_token_lens,
            prompt_feat=ref_mels_24,
            prompt_feat_len=ref_mels_24_len,
            embedding=ref_x_vector,
        )

    def forward(
        self,
        speech_tokens: torch.LongTensor,
        # locally-computed ref embedding (mutex with ref_dict)
        ref_wav: Optional[torch.Tensor],
        ref_sr: Optional[int],
        # pre-computed ref embedding (prod API)
        ref_dict: Optional[dict] = None,
        finalize: bool = False,
    ):
        """
        Generate mel-spectrograms from S3 speech tokens and a reference waveform, from which the
        speaker timbre is inferred. (The S3Token2Wav subclass vocodes these mels to waveforms.)

        NOTE:
        - The speaker encoder accepts 16 kHz waveform.
        - S3TokenizerV2 accepts 16 kHz waveform.
        - The mel-spectrogram for the reference assumes 24 kHz input signal.
        - This function is designed for batch_size=1 only.

        Args
        ----
        - `speech_tokens`: S3 speech tokens [B=1, T]
        - `ref_wav`: reference waveform (`torch.Tensor` with shape=[B=1, T])
        - `ref_sr`: reference sample rate
        - `finalize`: whether streaming is finished or not. Note that if False, the last 3 tokens will be ignored.
        """
        assert (ref_wav is None) ^ (ref_dict is None), f"Must provide exactly one of ref_wav or ref_dict (got {ref_wav} and {ref_dict})"

        if ref_dict is None:
            ref_dict = self.embed_ref(ref_wav, ref_sr)
        else:
            # type/device casting (all values will be numpy if it's from a prod API call)
            for rk in list(ref_dict):
                if isinstance(ref_dict[rk], np.ndarray):
                    ref_dict[rk] = torch.from_numpy(ref_dict[rk])
                if torch.is_tensor(ref_dict[rk]):
                    ref_dict[rk] = ref_dict[rk].to(self.device)

        if len(speech_tokens.shape) == 1:
            speech_tokens = speech_tokens.unsqueeze(0)

        # assert speech_tokens.shape[0] == 1, "only batch size of one allowed for now"
        speech_token_lens = torch.LongTensor([speech_tokens.size(1)]).to(self.device)

        output_mels, _ = self.flow.inference(
            token=speech_tokens,
            token_len=speech_token_lens,
            finalize=finalize,
            **ref_dict,
        )
        return output_mels


class S3Token2Wav(S3Token2Mel):
    """
    The decoder of CosyVoice2 is a concatenation of a token-to-mel (CFM) module and a
    mel-to-waveform (HiFiGAN) module.

    TODO: make these modules configurable?
    """

    def __init__(self):
        super().__init__()

        f0_predictor = ConvRNNF0Predictor()
        self.mel2wav = HiFTGenerator(
            sampling_rate=S3GEN_SR,
            upsample_rates=[8, 5, 3],
            upsample_kernel_sizes=[16, 11, 7],
            source_resblock_kernel_sizes=[7, 7, 11],
            source_resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
            f0_predictor=f0_predictor,
        )

        # silence out a few ms and fade audio in to reduce artifacts
        n_trim = S3GEN_SR // 50  # 20ms = half of a frame
        trim_fade = torch.zeros(2 * n_trim)
        trim_fade[n_trim:] = (torch.cos(torch.linspace(torch.pi, 0, n_trim)) + 1) / 2
        self.register_buffer("trim_fade", trim_fade, persistent=False)  # (buffers get automatic device casting)

    def forward(
        self,
        speech_tokens,
        # locally-computed ref embedding (mutex with ref_dict)
        ref_wav: Optional[torch.Tensor],
        ref_sr: Optional[int],
        # pre-computed ref embedding (prod API)
        ref_dict: Optional[dict] = None,
        finalize: bool = False
    ):
        output_mels = super().forward(speech_tokens, ref_wav=ref_wav, ref_sr=ref_sr, ref_dict=ref_dict, finalize=finalize)

        # TODO jrm: ignoring the speed control (mel interpolation) and the HiFTGAN caching mechanisms for now.
        hift_cache_source = torch.zeros(1, 1, 0).to(self.device)

        output_wavs, *_ = self.mel2wav.inference(speech_feat=output_mels, cache_source=hift_cache_source)

        if not self.training:
            # NOTE: ad-hoc method to reduce "spillover" from the reference clip.
            output_wavs[:, :len(self.trim_fade)] *= self.trim_fade

        return output_wavs

    @torch.inference_mode()
    def flow_inference(
        self,
        speech_tokens,
        # locally-computed ref embedding (mutex with ref_dict)
        ref_wav: Optional[torch.Tensor] = None,
        ref_sr: Optional[int] = None,
        # pre-computed ref embedding (prod API)
        ref_dict: Optional[dict] = None,
        finalize: bool = False,
    ):
        return super().forward(speech_tokens, ref_wav=ref_wav, ref_sr=ref_sr, ref_dict=ref_dict, finalize=finalize)

    @torch.inference_mode()
    def hift_inference(self, speech_feat, cache_source: torch.Tensor = None):
        if cache_source is None:
            cache_source = torch.zeros(1, 1, 0).to(self.device)
        return self.mel2wav.inference(speech_feat=speech_feat, cache_source=cache_source)

    @torch.inference_mode()
    def inference(
        self,
        speech_tokens,
        # locally-computed ref embedding (mutex with ref_dict)
        ref_wav: Optional[torch.Tensor] = None,
        ref_sr: Optional[int] = None,
        # pre-computed ref embedding (prod API)
        ref_dict: Optional[dict] = None,
        cache_source: torch.Tensor = None,  # NOTE: this arg is for streaming, it can probably be removed here
        finalize: bool = True,
    ):
        output_mels = self.flow_inference(speech_tokens, ref_wav=ref_wav, ref_sr=ref_sr, ref_dict=ref_dict, finalize=finalize)
        output_wavs, output_sources = self.hift_inference(output_mels, cache_source)

        # NOTE: ad-hoc method to reduce "spillover" from the reference clip.
        output_wavs[:, :len(self.trim_fade)] *= self.trim_fade

        return output_wavs, output_sources
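A hypothetical end-to-end inference sketch for S3Token2Wav above (not part of the diff). The checkpoint path and reference file below are placeholders, the random tokens are stand-ins for real S3 tokenizer output, and pretrained weights must be loaded for meaningful audio:

import torch
import torchaudio as ta

model = S3Token2Wav().eval()
# model.load_state_dict(torch.load("s3gen.pt"))        # hypothetical checkpoint path

ref_wav, ref_sr = ta.load("reference.wav")             # mono reference, shape (1, T); placeholder file
tokens = torch.randint(0, SPEECH_VOCAB_SIZE, (1, 50))  # stand-in S3 speech tokens

with torch.inference_mode():
    wav, _ = model.inference(tokens, ref_wav=ref_wav, ref_sr=ref_sr)
# wav: (1, T_out) at S3GEN_SR (24 kHz), with the trim_fade ramp applied to the first ~40 ms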
src/chatterbox/models/s3gen/transformer/__init__.py
ADDED
File without changes
src/chatterbox/models/s3gen/transformer/activation.py
ADDED
@@ -0,0 +1,84 @@
# Copyright (c) 2020 Johns Hopkins University (Shinji Watanabe)
#               2020 Northwestern Polytechnical University (Pengcheng Guo)
#               2020 Mobvoi Inc (Binbin Zhang)
#               2024 Alibaba Inc (Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Swish() activation function for Conformer."""

import torch
from torch import nn, sin, pow
from torch.nn import Parameter


class Swish(torch.nn.Module):
    """Construct a Swish object."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return Swish activation function."""
        return x * torch.sigmoid(x)


# Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license.
# LICENSE is in incl_licenses directory.
class Snake(nn.Module):
    '''
    Implementation of a sine-based periodic activation function
    Shape:
        - Input: (B, C, T)
        - Output: (B, C, T), same shape as the input
    Parameters:
        - alpha - trainable parameter
    References:
        - This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
        https://arxiv.org/abs/2006.08195
    Examples:
        >>> a1 = Snake(256)
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
        '''
        Initialization.
        INPUT:
            - in_features: shape of the input
            - alpha: trainable parameter
            alpha is initialized to 1 by default, higher values = higher-frequency.
            alpha will be trained along with the rest of your model.
        '''
        super(Snake, self).__init__()
        self.in_features = in_features

        # initialize alpha
        self.alpha_logscale = alpha_logscale
        if self.alpha_logscale:  # log scale alphas initialized to zeros
            self.alpha = Parameter(torch.zeros(in_features) * alpha)
        else:  # linear scale alphas initialized to ones
            self.alpha = Parameter(torch.ones(in_features) * alpha)

        self.alpha.requires_grad = alpha_trainable

        self.no_div_by_zero = 0.000000001

    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        Snake := x + 1/a * sin^2(x * a)
        '''
        alpha = self.alpha.unsqueeze(0).unsqueeze(-1)  # line up with x to [B, C, T]
        if self.alpha_logscale:
            alpha = torch.exp(alpha)
        x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2)

        return x
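A quick sanity check of the Snake activation above (not part of the diff): with the default linear-scale alpha of 1, the forward pass reduces elementwise to x + sin^2(x), up to the 1e-9 division stabilizer:

import torch

snake = Snake(in_features=8)        # linear scale: alpha initialized to ones
x = torch.randn(2, 8, 16)           # (B, C, T)
y = snake(x)
expected = x + torch.sin(x) ** 2 / (1.0 + 1e-9)
assert torch.allclose(y, expected, atol=1e-6)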
src/chatterbox/models/s3gen/transformer/attention.py
ADDED
@@ -0,0 +1,330 @@
# Copyright (c) 2019 Shigeki Karita
#               2020 Mobvoi Inc (Binbin Zhang)
#               2022 Xingchen Song ([email protected])
#               2024 Alibaba Inc (Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-Head Attention layer definition."""

import math
from typing import Tuple

import torch
from torch import nn


class MultiHeadedAttention(nn.Module):
    """Multi-Head Attention layer.

    Args:
        n_head (int): The number of heads.
        n_feat (int): The number of features.
        dropout_rate (float): Dropout rate.

    """

    def __init__(self,
                 n_head: int,
                 n_feat: int,
                 dropout_rate: float,
                 key_bias: bool = True):
        """Construct a MultiHeadedAttention object."""
        super().__init__()
        assert n_feat % n_head == 0
        # We assume d_v always equals d_k
        self.d_k = n_feat // n_head
        self.h = n_head
        self.linear_q = nn.Linear(n_feat, n_feat)
        self.linear_k = nn.Linear(n_feat, n_feat, bias=key_bias)
        self.linear_v = nn.Linear(n_feat, n_feat)
        self.linear_out = nn.Linear(n_feat, n_feat)
        self.dropout = nn.Dropout(p=dropout_rate)

    def forward_qkv(
        self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Transform query, key and value.

        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size).
            value (torch.Tensor): Value tensor (#batch, time2, size).

        Returns:
            torch.Tensor: Transformed query tensor, size
                (#batch, n_head, time1, d_k).
            torch.Tensor: Transformed key tensor, size
                (#batch, n_head, time2, d_k).
            torch.Tensor: Transformed value tensor, size
                (#batch, n_head, time2, d_k).

        """
        n_batch = query.size(0)
        q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
        k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
        v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
        q = q.transpose(1, 2)  # (batch, head, time1, d_k)
        k = k.transpose(1, 2)  # (batch, head, time2, d_k)
        v = v.transpose(1, 2)  # (batch, head, time2, d_k)

        return q, k, v

    def forward_attention(
        self,
        value: torch.Tensor,
        scores: torch.Tensor,
        mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool)
    ) -> torch.Tensor:
        """Compute attention context vector.

        Args:
            value (torch.Tensor): Transformed value, size
                (#batch, n_head, time2, d_k).
            scores (torch.Tensor): Attention score, size
                (#batch, n_head, time1, time2).
            mask (torch.Tensor): Mask, size (#batch, 1, time2) or
                (#batch, time1, time2), (0, 0, 0) means fake mask.

        Returns:
            torch.Tensor: Transformed value (#batch, time1, d_model)
                weighted by the attention score (#batch, time1, time2).

        """
        n_batch = value.size(0)
        # NOTE(xcsong): When will `if mask.size(2) > 0` be True?
        #   1. onnx(16/4) [WHY? Because we feed real cache & real mask for the
        #      1st chunk to ease the onnx export.]
        #   2. pytorch training
        if mask.size(2) > 0:  # time2 > 0
            mask = mask.unsqueeze(1).eq(0)  # (batch, 1, *, time2)
            # For last chunk, time2 might be larger than scores.size(-1)
            mask = mask[:, :, :, :scores.size(-1)]  # (batch, 1, *, time2)
            scores = scores.masked_fill(mask, -float('inf'))
            attn = torch.softmax(scores, dim=-1).masked_fill(
                mask, 0.0)  # (batch, head, time1, time2)
        # NOTE(xcsong): When will `if mask.size(2) > 0` be False?
        #   1. onnx(16/-1, -1/-1, 16/0)
        #   2. jit (16/-1, -1/-1, 16/0, 16/4)
        else:
            attn = torch.softmax(scores, dim=-1)  # (batch, head, time1, time2)

        p_attn = self.dropout(attn)
        x = torch.matmul(p_attn, value)  # (batch, head, time1, d_k)
        x = (x.transpose(1, 2).contiguous().view(n_batch, -1,
                                                 self.h * self.d_k)
             )  # (batch, time1, d_model)

        return self.linear_out(x)  # (batch, time1, d_model)

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
        pos_emb: torch.Tensor = torch.empty(0),
        cache: torch.Tensor = torch.zeros((0, 0, 0, 0))
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute scaled dot product attention.

        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size).
            value (torch.Tensor): Value tensor (#batch, time2, size).
            mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
                (#batch, time1, time2).
                1. When applying cross attention between decoder and encoder,
                   the batch padding mask for input is in (#batch, 1, T) shape.
                2. When applying self attention of encoder,
                   the mask is in (#batch, T, T) shape.
                3. When applying self attention of decoder,
                   the mask is in (#batch, L, L) shape.
                4. If the different position in decoder see different block
                   of the encoder, such as Mocha, the passed in mask could be
                   in (#batch, L, T) shape. But there is no such case in current
                   CosyVoice.
            cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2),
                where `cache_t == chunk_size * num_decoding_left_chunks`
                and `head * d_k == size`


        Returns:
            torch.Tensor: Output tensor (#batch, time1, d_model).
            torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2)
                where `cache_t == chunk_size * num_decoding_left_chunks`
                and `head * d_k == size`

        """
        q, k, v = self.forward_qkv(query, key, value)

        # NOTE(xcsong):
        #   when exporting an onnx model, for the 1st chunk, we feed
        #     cache(1, head, 0, d_k * 2) (16/-1, -1/-1, 16/0 mode)
        #     or cache(1, head, real_cache_t, d_k * 2) (16/4 mode).
        #     In all modes, `if cache.size(0) > 0` will always be `True`
        #     and we will always do splitting and
        #     concatenation (this will simplify onnx export). Note that
        #     it's OK to concat & split zero-shaped tensors (see code below).
        #   when exporting a jit model, for the 1st chunk, we always feed
        #     cache(0, 0, 0, 0) since jit supports dynamic if-branch.
        # >>> a = torch.ones((1, 2, 0, 4))
        # >>> b = torch.ones((1, 2, 3, 4))
        # >>> c = torch.cat((a, b), dim=2)
        # >>> torch.equal(b, c)        # True
        # >>> d = torch.split(a, 2, dim=-1)
        # >>> torch.equal(d[0], d[1])  # True
        if cache.size(0) > 0:
            key_cache, value_cache = torch.split(cache,
                                                 cache.size(-1) // 2,
                                                 dim=-1)
            k = torch.cat([key_cache, k], dim=2)
            v = torch.cat([value_cache, v], dim=2)
        # NOTE(xcsong): We do cache slicing in encoder.forward_chunk, since it's
        #   non-trivial to calculate `next_cache_start` here.
        new_cache = torch.cat((k, v), dim=-1)

        scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
        return self.forward_attention(v, scores, mask), new_cache


class RelPositionMultiHeadedAttention(MultiHeadedAttention):
    """Multi-Head Attention layer with relative position encoding.
    Paper: https://arxiv.org/abs/1901.02860
    Args:
        n_head (int): The number of heads.
        n_feat (int): The number of features.
        dropout_rate (float): Dropout rate.
    """

    def __init__(self,
                 n_head: int,
                 n_feat: int,
                 dropout_rate: float,
                 key_bias: bool = True):
        """Construct a RelPositionMultiHeadedAttention object."""
        super().__init__(n_head, n_feat, dropout_rate, key_bias)
        # linear transformation for positional encoding
        self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
        # these two learnable biases are used in matrix c and matrix d
        # as described in https://arxiv.org/abs/1901.02860 Section 3.3
        self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
        self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
        torch.nn.init.xavier_uniform_(self.pos_bias_u)
        torch.nn.init.xavier_uniform_(self.pos_bias_v)

    def rel_shift(self, x: torch.Tensor) -> torch.Tensor:
        """Compute relative positional encoding.

        Args:
            x (torch.Tensor): Input tensor (batch, head, time1, 2*time1-1).
                time1 means the length of query vector.

        Returns:
            torch.Tensor: Output tensor.

        """
        zero_pad = torch.zeros((x.size()[0], x.size()[1], x.size()[2], 1),
                               device=x.device,
                               dtype=x.dtype)
        x_padded = torch.cat([zero_pad, x], dim=-1)

        x_padded = x_padded.view(x.size()[0],
                                 x.size()[1],
                                 x.size(3) + 1, x.size(2))
        x = x_padded[:, :, 1:].view_as(x)[
            :, :, :, : x.size(-1) // 2 + 1
        ]  # only keep the positions from 0 to time2
        return x

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
        pos_emb: torch.Tensor = torch.empty(0),
        cache: torch.Tensor = torch.zeros((0, 0, 0, 0))
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute 'Scaled Dot Product Attention' with rel. positional encoding.
        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size).
            value (torch.Tensor): Value tensor (#batch, time2, size).
            mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
                (#batch, time1, time2), (0, 0, 0) means fake mask.
            pos_emb (torch.Tensor): Positional embedding tensor
                (#batch, time2, size).
            cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2),
                where `cache_t == chunk_size * num_decoding_left_chunks`
                and `head * d_k == size`
        Returns:
            torch.Tensor: Output tensor (#batch, time1, d_model).
            torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2)
                where `cache_t == chunk_size * num_decoding_left_chunks`
                and `head * d_k == size`
        """
        q, k, v = self.forward_qkv(query, key, value)
        q = q.transpose(1, 2)  # (batch, time1, head, d_k)

        # NOTE(xcsong):
        #   when exporting an onnx model, for the 1st chunk, we feed
        #     cache(1, head, 0, d_k * 2) (16/-1, -1/-1, 16/0 mode)
        #     or cache(1, head, real_cache_t, d_k * 2) (16/4 mode).
        #     In all modes, `if cache.size(0) > 0` will always be `True`
        #     and we will always do splitting and
        #     concatenation (this will simplify onnx export). Note that
        #     it's OK to concat & split zero-shaped tensors (see code below).
        #   when exporting a jit model, for the 1st chunk, we always feed
        #     cache(0, 0, 0, 0) since jit supports dynamic if-branch.
        # >>> a = torch.ones((1, 2, 0, 4))
        # >>> b = torch.ones((1, 2, 3, 4))
        # >>> c = torch.cat((a, b), dim=2)
        # >>> torch.equal(b, c)        # True
        # >>> d = torch.split(a, 2, dim=-1)
        # >>> torch.equal(d[0], d[1])  # True
        if cache.size(0) > 0:
            key_cache, value_cache = torch.split(cache,
                                                 cache.size(-1) // 2,
                                                 dim=-1)
            k = torch.cat([key_cache, k], dim=2)
            v = torch.cat([value_cache, v], dim=2)
        # NOTE(xcsong): We do cache slicing in encoder.forward_chunk, since it's
        #   non-trivial to calculate `next_cache_start` here.
        new_cache = torch.cat((k, v), dim=-1)

        n_batch_pos = pos_emb.size(0)
        p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
        p = p.transpose(1, 2)  # (batch, head, time1, d_k)

        # (batch, head, time1, d_k)
        q_with_bias_u = (q + self.pos_bias_u.to(q.device)).transpose(1, 2)
        # (batch, head, time1, d_k)
        q_with_bias_v = (q + self.pos_bias_v.to(q.device)).transpose(1, 2)

        # compute attention score
        # first compute matrix a and matrix c
        # as described in https://arxiv.org/abs/1901.02860 Section 3.3
        # (batch, head, time1, time2)
        matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))

        # compute matrix b and matrix d
        # (batch, head, time1, time2)
        matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
        # NOTE(Xiang Lyu): Keep rel_shift since espnet rel_pos_emb is used
        if matrix_ac.shape != matrix_bd.shape:
            matrix_bd = self.rel_shift(matrix_bd)

        scores = (matrix_ac + matrix_bd) / math.sqrt(
            self.d_k)  # (batch, head, time1, time2)

        return self.forward_attention(v, scores, mask), new_cache
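A streaming sketch for the cache mechanics above (not part of the diff): the (key, value) pair from a previous chunk is concatenated in front of the new chunk's keys and values before scoring, and the updated concatenation is returned as the cache for the next call:

import torch

mha = MultiHeadedAttention(n_head=4, n_feat=256, dropout_rate=0.0).eval()
chunk1 = torch.randn(1, 16, 256)
chunk2 = torch.randn(1, 16, 256)

out1, cache = mha(chunk1, chunk1, chunk1)               # default empty cache
out2, cache = mha(chunk2, chunk2, chunk2, cache=cache)  # attends over both chunks
# cache stores k and v concatenated on the last dim: (1, head, 32, d_k * 2)
assert cache.shape == (1, 4, 32, 2 * (256 // 4))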
src/chatterbox/models/s3gen/transformer/convolution.py
ADDED
@@ -0,0 +1,145 @@
1 |
+
# Copyright (c) 2020 Mobvoi Inc. (authors: Binbin Zhang, Di Wu)
|
2 |
+
# 2024 Alibaba Inc (Xiang Lyu)
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
# Modified from ESPnet(https://github.com/espnet/espnet)
|
16 |
+
"""ConvolutionModule definition."""
|
17 |
+
|
18 |
+
from typing import Tuple
|
19 |
+
|
20 |
+
import torch
|
21 |
+
from torch import nn
|
22 |
+
|
23 |
+
|
24 |
+
class ConvolutionModule(nn.Module):
|
25 |
+
"""ConvolutionModule in Conformer model."""
|
26 |
+
|
27 |
+
def __init__(self,
|
28 |
+
channels: int,
|
29 |
+
kernel_size: int = 15,
|
30 |
+
activation: nn.Module = nn.ReLU(),
|
31 |
+
norm: str = "batch_norm",
|
32 |
+
causal: bool = False,
|
33 |
+
bias: bool = True):
|
34 |
+
"""Construct an ConvolutionModule object.
|
35 |
+
Args:
|
36 |
+
channels (int): The number of channels of conv layers.
|
37 |
+
kernel_size (int): Kernel size of conv layers.
|
38 |
+
causal (int): Whether use causal convolution or not
|
39 |
+
"""
|
40 |
+
super().__init__()
|
41 |
+
|
42 |
+
self.pointwise_conv1 = nn.Conv1d(
|
43 |
+
channels,
|
44 |
+
2 * channels,
|
45 |
+
kernel_size=1,
|
46 |
+
stride=1,
|
47 |
+
padding=0,
|
48 |
+
bias=bias,
|
49 |
+
)
|
50 |
+
# self.lorder is used to distinguish if it's a causal convolution,
|
51 |
+
# if self.lorder > 0: it's a causal convolution, the input will be
|
52 |
+
# padded with self.lorder frames on the left in forward.
|
53 |
+
# else: it's a symmetrical convolution
|
54 |
+
if causal:
|
55 |
+
padding = 0
|
56 |
+
self.lorder = kernel_size - 1
|
57 |
+
else:
|
58 |
+
# kernel_size should be an odd number for none causal convolution
|
59 |
+
assert (kernel_size - 1) % 2 == 0
|
60 |
+
padding = (kernel_size - 1) // 2
|
61 |
+
self.lorder = 0
|
62 |
+
self.depthwise_conv = nn.Conv1d(
|
63 |
+
channels,
|
64 |
+
channels,
|
65 |
+
kernel_size,
|
66 |
+
stride=1,
|
67 |
+
padding=padding,
|
68 |
+
groups=channels,
|
69 |
+
bias=bias,
|
70 |
+
)
|
71 |
+
|
72 |
+
assert norm in ['batch_norm', 'layer_norm']
|
73 |
+
if norm == "batch_norm":
|
74 |
+
self.use_layer_norm = False
|
75 |
+
self.norm = nn.BatchNorm1d(channels)
|
76 |
+
else:
|
77 |
+
self.use_layer_norm = True
|
78 |
+
self.norm = nn.LayerNorm(channels)
|
79 |
+
|
80 |
+
self.pointwise_conv2 = nn.Conv1d(
|
81 |
+
channels,
|
82 |
+
channels,
|
83 |
+
kernel_size=1,
|
84 |
+
stride=1,
|
85 |
+
padding=0,
|
86 |
+
bias=bias,
|
87 |
+
)
|
88 |
+
self.activation = activation
|
89 |
+
|
90 |
+
def forward(
|
91 |
+
self,
|
92 |
+
x: torch.Tensor,
|
93 |
+
mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
|
94 |
+
cache: torch.Tensor = torch.zeros((0, 0, 0)),
|
95 |
+
) -> Tuple[torch.Tensor, torch.Tensor]:
|
96 |
+
"""Compute convolution module.
|
97 |
+
Args:
|
98 |
+
x (torch.Tensor): Input tensor (#batch, time, channels).
|
99 |
+
mask_pad (torch.Tensor): used for batch padding (#batch, 1, time),
|
100 |
+
(0, 0, 0) means fake mask.
|
101 |
+
cache (torch.Tensor): left context cache, it is only
|
102 |
+
used in causal convolution (#batch, channels, cache_t),
|
103 |
+
(0, 0, 0) meas fake cache.
|
104 |
+
Returns:
|
105 |
+
torch.Tensor: Output tensor (#batch, time, channels).
|
106 |
+
"""
|
107 |
+
# exchange the temporal dimension and the feature dimension
|
108 |
+
x = x.transpose(1, 2) # (#batch, channels, time)
|
109 |
+
|
110 |
+
# mask batch padding
|
111 |
+
if mask_pad.size(2) > 0: # time > 0
|
112 |
+
x.masked_fill_(~mask_pad, 0.0)
|
113 |
+
|
114 |
+
if self.lorder > 0:
|
115 |
+
if cache.size(2) == 0: # cache_t == 0
|
116 |
+
x = nn.functional.pad(x, (self.lorder, 0), 'constant', 0.0)
|
117 |
+
else:
|
118 |
+
assert cache.size(0) == x.size(0) # equal batch
|
119 |
+
assert cache.size(1) == x.size(1) # equal channel
|
120 |
+
x = torch.cat((cache, x), dim=2)
|
121 |
+
assert (x.size(2) > self.lorder)
|
122 |
+
new_cache = x[:, :, -self.lorder:]
|
123 |
+
else:
|
124 |
+
# It's better we just return None if no cache is required,
|
125 |
+
# However, for JIT export, here we just fake one tensor instead of
|
126 |
+
# None.
|
127 |
+
new_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)
|
128 |
+
|
129 |
+
# GLU mechanism
|
130 |
+
x = self.pointwise_conv1(x) # (batch, 2*channel, dim)
|
131 |
+
x = nn.functional.glu(x, dim=1) # (batch, channel, dim)
|
132 |
+
|
133 |
+
# 1D Depthwise Conv
|
134 |
+
x = self.depthwise_conv(x)
|
135 |
+
if self.use_layer_norm:
|
136 |
+
x = x.transpose(1, 2)
|
137 |
+
x = self.activation(self.norm(x))
|
138 |
+
if self.use_layer_norm:
|
139 |
+
x = x.transpose(1, 2)
|
140 |
+
x = self.pointwise_conv2(x)
|
141 |
+
# mask batch padding
|
142 |
+
if mask_pad.size(2) > 0: # time > 0
|
143 |
+
x.masked_fill_(~mask_pad, 0.0)
|
144 |
+
|
145 |
+
return x.transpose(1, 2), new_cache
|
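The causal branch above is what enables streaming: with causal=True the module pads lorder = kernel_size - 1 frames on the left and returns those frames as new_cache, so chunked inference reproduces the full-utterance output. A minimal sketch of that contract (not part of this diff; the chunk sizes are illustrative, and the import path assumes the package is installed from this repo's src/ layout):

import torch
from chatterbox.models.s3gen.transformer.convolution import ConvolutionModule

conv = ConvolutionModule(channels=8, kernel_size=5, causal=True).eval()
x = torch.randn(2, 32, 8)  # (batch, time, channels)

with torch.no_grad():
    full, _ = conv(x)  # one pass over the whole utterance

    # Same input in two chunks, threading the left-context cache through.
    cache = torch.zeros(0, 0, 0)               # "fake" empty cache on the first call
    y1, cache = conv(x[:, :16], cache=cache)   # cache is now (2, 8, 4)
    y2, _ = conv(x[:, 16:], cache=cache)

# BatchNorm runs in eval mode, so the two paths should agree.
print(torch.allclose(full, torch.cat([y1, y2], dim=1), atol=1e-6))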
src/chatterbox/models/s3gen/transformer/embedding.py
ADDED
@@ -0,0 +1,294 @@
+# Copyright (c) 2020 Mobvoi Inc. (authors: Binbin Zhang, Di Wu)
+#               2024 Alibaba Inc (Xiang Lyu)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# Modified from ESPnet(https://github.com/espnet/espnet)
+"""Positional Encoding Module."""
+
+import math
+from typing import Tuple, Union
+
+import torch
+import torch.nn.functional as F
+import numpy as np
+
+
+class PositionalEncoding(torch.nn.Module):
+    """Positional encoding.
+
+    :param int d_model: embedding dim
+    :param float dropout_rate: dropout rate
+    :param int max_len: maximum input length
+
+    PE(pos, 2i)   = sin(pos/(10000^(2i/dmodel)))
+    PE(pos, 2i+1) = cos(pos/(10000^(2i/dmodel)))
+    """
+
+    def __init__(self,
+                 d_model: int,
+                 dropout_rate: float,
+                 max_len: int = 5000,
+                 reverse: bool = False):
+        """Construct a PositionalEncoding object."""
+        super().__init__()
+        self.d_model = d_model
+        self.xscale = math.sqrt(self.d_model)
+        self.dropout = torch.nn.Dropout(p=dropout_rate)
+        self.max_len = max_len
+
+        self.pe = torch.zeros(self.max_len, self.d_model)
+        position = torch.arange(0, self.max_len,
+                                dtype=torch.float32).unsqueeze(1)
+        div_term = torch.exp(
+            torch.arange(0, self.d_model, 2, dtype=torch.float32) *
+            -(math.log(10000.0) / self.d_model))
+        self.pe[:, 0::2] = torch.sin(position * div_term)
+        self.pe[:, 1::2] = torch.cos(position * div_term)
+        self.pe = self.pe.unsqueeze(0)
+
+    def forward(self,
+                x: torch.Tensor,
+                offset: Union[int, torch.Tensor] = 0) \
+            -> Tuple[torch.Tensor, torch.Tensor]:
+        """Add positional encoding.
+
+        Args:
+            x (torch.Tensor): Input. Its shape is (batch, time, ...)
+            offset (int, torch.tensor): position offset
+
+        Returns:
+            torch.Tensor: Encoded tensor. Its shape is (batch, time, ...)
+            torch.Tensor: for compatibility to RelPositionalEncoding
+        """
+
+        self.pe = self.pe.to(x.device)
+        pos_emb = self.position_encoding(offset, x.size(1), False)
+        x = x * self.xscale + pos_emb
+        return self.dropout(x), self.dropout(pos_emb)
+
+    def position_encoding(self,
+                          offset: Union[int, torch.Tensor],
+                          size: int,
+                          apply_dropout: bool = True) -> torch.Tensor:
+        """ For getting encoding in a streaming fashion
+
+        Attention!!!!!
+        we apply dropout only once at the whole utterance level in a
+        non-streaming way, but will call this function several times with
+        increasing input size in a streaming scenario, so the dropout will
+        be applied several times.
+
+        Args:
+            offset (int or torch.tensor): start offset
+            size (int): required size of position encoding
+
+        Returns:
+            torch.Tensor: Corresponding encoding
+        """
+        # How to subscript a Union type:
+        #   https://github.com/pytorch/pytorch/issues/69434
+        if isinstance(offset, int):
+            assert offset + size <= self.max_len
+            pos_emb = self.pe[:, offset:offset + size]
+        elif isinstance(offset, torch.Tensor) and offset.dim() == 0:  # scalar
+            assert offset + size <= self.max_len
+            pos_emb = self.pe[:, offset:offset + size]
+        else:  # for batched streaming decoding on GPU
+            assert torch.max(offset) + size <= self.max_len
+            index = offset.unsqueeze(1) + \
+                torch.arange(0, size).to(offset.device)  # B X T
+            flag = index > 0
+            # remove negative offset
+            index = index * flag
+            pos_emb = F.embedding(index, self.pe[0])  # B X T X d_model
+
+        if apply_dropout:
+            pos_emb = self.dropout(pos_emb)
+        return pos_emb
+
+
+class RelPositionalEncoding(PositionalEncoding):
+    """Relative positional encoding module.
+    See : Appendix B in https://arxiv.org/abs/1901.02860
+    Args:
+        d_model (int): Embedding dimension.
+        dropout_rate (float): Dropout rate.
+        max_len (int): Maximum input length.
+    """
+
+    def __init__(self, d_model: int, dropout_rate: float, max_len: int = 5000):
+        """Initialize class."""
+        super().__init__(d_model, dropout_rate, max_len, reverse=True)
+
+    def forward(self,
+                x: torch.Tensor,
+                offset: Union[int, torch.Tensor] = 0) \
+            -> Tuple[torch.Tensor, torch.Tensor]:
+        """Compute positional encoding.
+        Args:
+            x (torch.Tensor): Input tensor (batch, time, `*`).
+        Returns:
+            torch.Tensor: Encoded tensor (batch, time, `*`).
+            torch.Tensor: Positional embedding tensor (1, time, `*`).
+        """
+        self.pe = self.pe.to(x.device)
+        x = x * self.xscale
+        pos_emb = self.position_encoding(offset, x.size(1), False)
+        return self.dropout(x), self.dropout(pos_emb)
+
+
+class WhisperPositionalEncoding(PositionalEncoding):
+    """ Sinusoids position encoding used in openai-whisper.encoder
+    """
+
+    def __init__(self, d_model: int, dropout_rate: float, max_len: int = 1500):
+        super().__init__(d_model, dropout_rate, max_len)
+        self.xscale = 1.0
+        log_timescale_increment = np.log(10000) / (d_model // 2 - 1)
+        inv_timescales = torch.exp(-log_timescale_increment *
+                                   torch.arange(d_model // 2))
+        scaled_time = torch.arange(max_len)[:, np.newaxis] * \
+            inv_timescales[np.newaxis, :]
+        pe = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1)
+        delattr(self, "pe")
+        self.register_buffer("pe", pe.unsqueeze(0))
+
+
+class LearnablePositionalEncoding(PositionalEncoding):
+    """ Learnable position encoding used in openai-whisper.decoder
+    """
+
+    def __init__(self, d_model: int, dropout_rate: float, max_len: int = 448):
+        super().__init__(d_model, dropout_rate, max_len)
+        # NOTE(xcsong): overwrite self.pe & self.xscale
+        self.pe = torch.nn.Parameter(torch.empty(1, max_len, d_model))
+        self.xscale = 1.0
+
+
+class NoPositionalEncoding(torch.nn.Module):
+    """ No position encoding
+    """
+
+    def __init__(self, d_model: int, dropout_rate: float):
+        super().__init__()
+        self.d_model = d_model
+        self.dropout = torch.nn.Dropout(p=dropout_rate)
+
+    def forward(self,
+                x: torch.Tensor,
+                offset: Union[int, torch.Tensor] = 0) \
+            -> Tuple[torch.Tensor, torch.Tensor]:
+        """ Just return zero vector for interface compatibility
+        """
+        pos_emb = torch.zeros(1, x.size(1), self.d_model).to(x.device)
+        return self.dropout(x), pos_emb
+
+    def position_encoding(self, offset: Union[int, torch.Tensor],
+                          size: int) -> torch.Tensor:
+        return torch.zeros(1, size, self.d_model)
+
+
+class EspnetRelPositionalEncoding(torch.nn.Module):
+    """Relative positional encoding module (new implementation).
+
+    Details can be found in https://github.com/espnet/espnet/pull/2816.
+
+    See : Appendix B in https://arxiv.org/abs/1901.02860
+
+    Args:
+        d_model (int): Embedding dimension.
+        dropout_rate (float): Dropout rate.
+        max_len (int): Maximum input length.
+
+    """
+
+    def __init__(self, d_model: int, dropout_rate: float, max_len: int = 5000):
+        """Construct a PositionalEncoding object."""
+        super(EspnetRelPositionalEncoding, self).__init__()
+        self.d_model = d_model
+        self.xscale = math.sqrt(self.d_model)
+        self.dropout = torch.nn.Dropout(p=dropout_rate)
+        self.pe = None
+        self.extend_pe(torch.tensor(0.0).expand(1, max_len))
+
+    def extend_pe(self, x: torch.Tensor):
+        """Reset the positional encodings."""
+        if self.pe is not None:
+            # self.pe contains both positive and negative parts
+            # the length of self.pe is 2 * input_len - 1
+            if self.pe.size(1) >= x.size(1) * 2 - 1:
+                if self.pe.dtype != x.dtype or self.pe.device != x.device:
+                    self.pe = self.pe.to(dtype=x.dtype, device=x.device)
+                return
+        # Suppose `i` means the position of the query vector and `j` means the
+        # position of the key vector. We use positive relative positions when
+        # keys are to the left (i>j) and negative relative positions otherwise (i<j).
+        pe_positive = torch.zeros(x.size(1), self.d_model)
+        pe_negative = torch.zeros(x.size(1), self.d_model)
+        position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
+        div_term = torch.exp(
+            torch.arange(0, self.d_model, 2, dtype=torch.float32)
+            * -(math.log(10000.0) / self.d_model)
+        )
+        pe_positive[:, 0::2] = torch.sin(position * div_term)
+        pe_positive[:, 1::2] = torch.cos(position * div_term)
+        pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)
+        pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)
+
+        # Reverse the order of positive indices and concat both positive and
+        # negative indices. This is used to support the shifting trick
+        # as in https://arxiv.org/abs/1901.02860
+        pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)
+        pe_negative = pe_negative[1:].unsqueeze(0)
+        pe = torch.cat([pe_positive, pe_negative], dim=1)
+        self.pe = pe.to(device=x.device, dtype=x.dtype)
+
+    def forward(self, x: torch.Tensor, offset: Union[int, torch.Tensor] = 0) \
+            -> Tuple[torch.Tensor, torch.Tensor]:
+        """Add positional encoding.
+
+        Args:
+            x (torch.Tensor): Input tensor (batch, time, `*`).
+
+        Returns:
+            torch.Tensor: Encoded tensor (batch, time, `*`).
+
+        """
+        self.extend_pe(x)
+        x = x * self.xscale
+        pos_emb = self.position_encoding(size=x.size(1), offset=offset)
+        return self.dropout(x), self.dropout(pos_emb)
+
+    def position_encoding(self,
+                          offset: Union[int, torch.Tensor],
+                          size: int) -> torch.Tensor:
+        """ For getting encoding in a streaming fashion
+
+        Attention!!!!!
+        we apply dropout only once at the whole utterance level in a
+        non-streaming way, but will call this function several times with
+        increasing input size in a streaming scenario, so the dropout will
+        be applied several times.
+
+        Args:
+            offset (int or torch.tensor): start offset
+            size (int): required size of position encoding
+
+        Returns:
+            torch.Tensor: Corresponding encoding
+        """
+        pos_emb = self.pe[
+            :,
+            self.pe.size(1) // 2 - size + 1: self.pe.size(1) // 2 + size,
+        ]
+        return pos_emb
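Note the shape contract of EspnetRelPositionalEncoding: it scales the input by sqrt(d_model) and returns a relative embedding spanning positions -(T-1)..(T-1), i.e. 2*T - 1 frames, which the shifting trick in relative self-attention consumes. A quick shape check (not part of this diff; the import path assumes the repo's src/ package layout):

import torch
from chatterbox.models.s3gen.transformer.embedding import EspnetRelPositionalEncoding

pos_enc = EspnetRelPositionalEncoding(d_model=512, dropout_rate=0.0)
x = torch.randn(2, 10, 512)            # (batch, time, d_model)
x_scaled, pos_emb = pos_enc(x)

print(x_scaled.shape)  # torch.Size([2, 10, 512]) -- input scaled by sqrt(d_model)
print(pos_emb.shape)   # torch.Size([1, 19, 512]) -- 2*10 - 1 relative positions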
src/chatterbox/models/s3gen/transformer/encoder_layer.py
ADDED
@@ -0,0 +1,236 @@
+# Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu)
+#               2022 Xingchen Song ([email protected])
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# Modified from ESPnet(https://github.com/espnet/espnet)
+"""Encoder self-attention layer definition."""
+
+from typing import Optional, Tuple
+
+import torch
+from torch import nn
+
+
+class TransformerEncoderLayer(nn.Module):
+    """Encoder layer module.
+
+    Args:
+        size (int): Input dimension.
+        self_attn (torch.nn.Module): Self-attention module instance.
+            `MultiHeadedAttention` or `RelPositionMultiHeadedAttention`
+            instance can be used as the argument.
+        feed_forward (torch.nn.Module): Feed-forward module instance.
+            `PositionwiseFeedForward` instance can be used as the argument.
+        dropout_rate (float): Dropout rate.
+        normalize_before (bool):
+            True: use layer_norm before each sub-block.
+            False: use layer_norm after each sub-block.
+    """
+
+    def __init__(
+        self,
+        size: int,
+        self_attn: torch.nn.Module,
+        feed_forward: torch.nn.Module,
+        dropout_rate: float,
+        normalize_before: bool = True,
+    ):
+        """Construct an EncoderLayer object."""
+        super().__init__()
+        self.self_attn = self_attn
+        self.feed_forward = feed_forward
+        self.norm1 = nn.LayerNorm(size, eps=1e-12)
+        self.norm2 = nn.LayerNorm(size, eps=1e-12)
+        self.dropout = nn.Dropout(dropout_rate)
+        self.size = size
+        self.normalize_before = normalize_before
+
+    def forward(
+        self,
+        x: torch.Tensor,
+        mask: torch.Tensor,
+        pos_emb: torch.Tensor,
+        mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
+        att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),
+        cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
+        """Compute encoded features.
+
+        Args:
+            x (torch.Tensor): (#batch, time, size)
+            mask (torch.Tensor): Mask tensor for the input (#batch, time, time),
+                (0, 0, 0) means fake mask.
+            pos_emb (torch.Tensor): just for interface compatibility
+                to ConformerEncoderLayer
+            mask_pad (torch.Tensor): not used in the transformer layer,
+                just for a unified API with the conformer.
+            att_cache (torch.Tensor): Cache tensor of the KEY & VALUE
+                (#batch=1, head, cache_t1, d_k * 2), head * d_k == size.
+            cnn_cache (torch.Tensor): Convolution cache in conformer layer
+                (#batch=1, size, cache_t2), not used here, it's for interface
+                compatibility to ConformerEncoderLayer.
+        Returns:
+            torch.Tensor: Output tensor (#batch, time, size).
+            torch.Tensor: Mask tensor (#batch, time, time).
+            torch.Tensor: att_cache tensor,
+                (#batch=1, head, cache_t1 + time, d_k * 2).
+            torch.Tensor: cnn_cache tensor (#batch=1, size, cache_t2).
+
+        """
+        residual = x
+        if self.normalize_before:
+            x = self.norm1(x)
+        x_att, new_att_cache = self.self_attn(x, x, x, mask, pos_emb=pos_emb, cache=att_cache)
+        x = residual + self.dropout(x_att)
+        if not self.normalize_before:
+            x = self.norm1(x)
+
+        residual = x
+        if self.normalize_before:
+            x = self.norm2(x)
+        x = residual + self.dropout(self.feed_forward(x))
+        if not self.normalize_before:
+            x = self.norm2(x)
+
+        fake_cnn_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)
+        return x, mask, new_att_cache, fake_cnn_cache
+
+
+class ConformerEncoderLayer(nn.Module):
+    """Encoder layer module.
+    Args:
+        size (int): Input dimension.
+        self_attn (torch.nn.Module): Self-attention module instance.
+            `MultiHeadedAttention` or `RelPositionMultiHeadedAttention`
+            instance can be used as the argument.
+        feed_forward (torch.nn.Module): Feed-forward module instance.
+            `PositionwiseFeedForward` instance can be used as the argument.
+        feed_forward_macaron (torch.nn.Module): Additional feed-forward module
+            instance.
+            `PositionwiseFeedForward` instance can be used as the argument.
+        conv_module (torch.nn.Module): Convolution module instance.
+            `ConvolutionModule` instance can be used as the argument.
+        dropout_rate (float): Dropout rate.
+        normalize_before (bool):
+            True: use layer_norm before each sub-block.
+            False: use layer_norm after each sub-block.
+    """
+
+    def __init__(
+        self,
+        size: int,
+        self_attn: torch.nn.Module,
+        feed_forward: Optional[nn.Module] = None,
+        feed_forward_macaron: Optional[nn.Module] = None,
+        conv_module: Optional[nn.Module] = None,
+        dropout_rate: float = 0.1,
+        normalize_before: bool = True,
+    ):
+        """Construct an EncoderLayer object."""
+        super().__init__()
+        self.self_attn = self_attn
+        self.feed_forward = feed_forward
+        self.feed_forward_macaron = feed_forward_macaron
+        self.conv_module = conv_module
+        self.norm_ff = nn.LayerNorm(size, eps=1e-12)  # for the FNN module
+        self.norm_mha = nn.LayerNorm(size, eps=1e-12)  # for the MHA module
+        if feed_forward_macaron is not None:
+            self.norm_ff_macaron = nn.LayerNorm(size, eps=1e-12)
+            self.ff_scale = 0.5
+        else:
+            self.ff_scale = 1.0
+        if self.conv_module is not None:
+            self.norm_conv = nn.LayerNorm(size, eps=1e-12)  # for the CNN module
+            self.norm_final = nn.LayerNorm(
+                size, eps=1e-12)  # for the final output of the block
+        self.dropout = nn.Dropout(dropout_rate)
+        self.size = size
+        self.normalize_before = normalize_before
+
+    def forward(
+        self,
+        x: torch.Tensor,
+        mask: torch.Tensor,
+        pos_emb: torch.Tensor,
+        mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
+        att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),
+        cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
+        """Compute encoded features.
+
+        Args:
+            x (torch.Tensor): (#batch, time, size)
+            mask (torch.Tensor): Mask tensor for the input (#batch, time, time),
+                (0, 0, 0) means fake mask.
+            pos_emb (torch.Tensor): positional encoding, must not be None
+                for ConformerEncoderLayer.
+            mask_pad (torch.Tensor): batch padding mask used for conv module.
+                (#batch, 1, time), (0, 0, 0) means fake mask.
+            att_cache (torch.Tensor): Cache tensor of the KEY & VALUE
+                (#batch=1, head, cache_t1, d_k * 2), head * d_k == size.
+            cnn_cache (torch.Tensor): Convolution cache in conformer layer
+                (#batch=1, size, cache_t2)
+        Returns:
+            torch.Tensor: Output tensor (#batch, time, size).
+            torch.Tensor: Mask tensor (#batch, time, time).
+            torch.Tensor: att_cache tensor,
+                (#batch=1, head, cache_t1 + time, d_k * 2).
+            torch.Tensor: cnn_cache tensor (#batch, size, cache_t2).
+        """
+
+        # whether to use macaron style
+        if self.feed_forward_macaron is not None:
+            residual = x
+            if self.normalize_before:
+                x = self.norm_ff_macaron(x)
+            x = residual + self.ff_scale * self.dropout(
+                self.feed_forward_macaron(x))
+            if not self.normalize_before:
+                x = self.norm_ff_macaron(x)
+
+        # multi-headed self-attention module
+        residual = x
+        if self.normalize_before:
+            x = self.norm_mha(x)
+        x_att, new_att_cache = self.self_attn(x, x, x, mask, pos_emb,
+                                              att_cache)
+        x = residual + self.dropout(x_att)
+        if not self.normalize_before:
+            x = self.norm_mha(x)
+
+        # convolution module
+        # Fake new cnn cache here, and then change it in conv_module
+        new_cnn_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)
+        if self.conv_module is not None:
+            residual = x
+            if self.normalize_before:
+                x = self.norm_conv(x)
+            x, new_cnn_cache = self.conv_module(x, mask_pad, cnn_cache)
+            x = residual + self.dropout(x)
+
+            if not self.normalize_before:
+                x = self.norm_conv(x)
+
+        # feed forward module
+        residual = x
+        if self.normalize_before:
+            x = self.norm_ff(x)
+
+        x = residual + self.ff_scale * self.dropout(self.feed_forward(x))
+        if not self.normalize_before:
+            x = self.norm_ff(x)
+
+        if self.conv_module is not None:
+            x = self.norm_final(x)
+
+        return x, mask, new_att_cache, new_cnn_cache
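Both layers delegate attention to an injected module with the signature (q, k, v, mask, pos_emb, cache) -> (output, new_cache); the layer itself only handles residuals, normalization, and cache plumbing. A sketch of that contract using a stand-in attention module (not part of this diff; DummySelfAttn is a hypothetical stub, not the repo's attention implementation):

import torch
from torch import nn
from chatterbox.models.s3gen.transformer.encoder_layer import TransformerEncoderLayer
from chatterbox.models.s3gen.transformer.positionwise_feed_forward import PositionwiseFeedForward


class DummySelfAttn(nn.Module):
    """Stub matching the (q, k, v, mask, pos_emb, cache) -> (out, cache) contract."""

    def __init__(self, size: int):
        super().__init__()
        self.proj = nn.Linear(size, size)

    def forward(self, q, k, v, mask, pos_emb=torch.empty(0), cache=torch.zeros(0, 0, 0, 0)):
        return self.proj(q), cache


size = 64
layer = TransformerEncoderLayer(
    size=size,
    self_attn=DummySelfAttn(size),
    feed_forward=PositionwiseFeedForward(size, 256, 0.1),
    dropout_rate=0.1,
)
x = torch.randn(2, 10, size)
mask = torch.ones(2, 10, 10, dtype=torch.bool)
out, out_mask, att_cache, cnn_cache = layer(x, mask, pos_emb=torch.empty(0))
print(out.shape)  # torch.Size([2, 10, 64])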
src/chatterbox/models/s3gen/transformer/positionwise_feed_forward.py
ADDED
@@ -0,0 +1,115 @@
+# Copyright (c) 2019 Shigeki Karita
+#               2020 Mobvoi Inc (Binbin Zhang)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Positionwise feed forward layer definition."""
+
+import torch
+
+
+class PositionwiseFeedForward(torch.nn.Module):
+    """Positionwise feed forward layer.
+
+    FeedForward is applied on each position of the sequence.
+    The output dim is the same as the input dim.
+
+    Args:
+        idim (int): Input dimension.
+        hidden_units (int): The number of hidden units.
+        dropout_rate (float): Dropout rate.
+        activation (torch.nn.Module): Activation function
+    """
+
+    def __init__(
+        self,
+        idim: int,
+        hidden_units: int,
+        dropout_rate: float,
+        activation: torch.nn.Module = torch.nn.ReLU(),
+    ):
+        """Construct a PositionwiseFeedForward object."""
+        super(PositionwiseFeedForward, self).__init__()
+        self.w_1 = torch.nn.Linear(idim, hidden_units)
+        self.activation = activation
+        self.dropout = torch.nn.Dropout(dropout_rate)
+        self.w_2 = torch.nn.Linear(hidden_units, idim)
+
+    def forward(self, xs: torch.Tensor) -> torch.Tensor:
+        """Forward function.
+
+        Args:
+            xs: input tensor (B, L, D)
+        Returns:
+            output tensor, (B, L, D)
+        """
+        return self.w_2(self.dropout(self.activation(self.w_1(xs))))
+
+
+class MoEFFNLayer(torch.nn.Module):
+    """
+    Mixture of experts with positionwise feed forward layers.
+    See also figure 1 in https://arxiv.org/pdf/2305.15663.pdf
+    The output dim is the same as the input dim.
+
+    Modified from https://github.com/Lightning-AI/lit-gpt/pull/823
+                  https://github.com/mistralai/mistral-src/blob/b46d6/moe_one_file_ref.py#L203-L219
+    Args:
+        n_expert: number of experts.
+        n_expert_per_token: The actual number of experts used for each frame
+        idim (int): Input dimension.
+        hidden_units (int): The number of hidden units.
+        dropout_rate (float): Dropout rate.
+        activation (torch.nn.Module): Activation function
+    """
+
+    def __init__(
+        self,
+        n_expert: int,
+        n_expert_per_token: int,
+        idim: int,
+        hidden_units: int,
+        dropout_rate: float,
+        activation: torch.nn.Module = torch.nn.ReLU(),
+    ):
+        super(MoEFFNLayer, self).__init__()
+        self.gate = torch.nn.Linear(idim, n_expert, bias=False)
+        self.experts = torch.nn.ModuleList(
+            PositionwiseFeedForward(idim, hidden_units, dropout_rate,
+                                    activation) for _ in range(n_expert))
+        self.n_expert_per_token = n_expert_per_token
+
+    def forward(self, xs: torch.Tensor) -> torch.Tensor:
+        """Forward function.
+        Args:
+            xs: input tensor (B, L, D)
+        Returns:
+            output tensor, (B, L, D)
+
+        """
+        B, L, D = xs.size(
+        )  # batch size, sequence length, embedding dimension (idim)
+        xs = xs.view(-1, D)  # (B*L, D)
+        router = self.gate(xs)  # (B*L, n_expert)
+        logits, indices = torch.topk(
+            router, self.n_expert_per_token
+        )  # logits: (B*L, n_expert_per_token), indices: (B*L, n_expert_per_token)
+        weights = torch.nn.functional.softmax(
+            logits, dim=1,
+            dtype=torch.float).to(dtype=xs.dtype)  # (B*L, n_expert_per_token)
+        output = torch.zeros_like(xs)  # (B*L, D)
+        for i, expert in enumerate(self.experts):
+            mask = indices == i
+            batch_idx, ith_expert = torch.where(mask)
+            output[batch_idx] += weights[batch_idx, ith_expert, None] * expert(
+                xs[batch_idx])
+        return output.view(B, L, D)
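The MoE routing above is per frame: the gate scores all experts, topk keeps n_expert_per_token of them, and the softmax renormalizes only the selected logits, following the Mistral reference code cited in the docstring. A minimal shape check (not part of this diff; the sizes are illustrative):

import torch
from chatterbox.models.s3gen.transformer.positionwise_feed_forward import MoEFFNLayer

# 4 experts, 2 active per frame; the (B, L, D) shape is preserved.
moe = MoEFFNLayer(n_expert=4, n_expert_per_token=2, idim=32,
                  hidden_units=64, dropout_rate=0.0)
xs = torch.randn(3, 5, 32)
print(moe(xs).shape)  # torch.Size([3, 5, 32])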
src/chatterbox/models/s3gen/transformer/subsampling.py
ADDED
@@ -0,0 +1,383 @@
+# Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu)
+#               2024 Alibaba Inc (Xiang Lyu)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# Modified from ESPnet(https://github.com/espnet/espnet)
+"""Subsampling layer definition."""
+
+from typing import Tuple, Union
+
+import torch
+
+
+class BaseSubsampling(torch.nn.Module):
+
+    def __init__(self):
+        super().__init__()
+        self.right_context = 0
+        self.subsampling_rate = 1
+
+    def position_encoding(self, offset: Union[int, torch.Tensor],
+                          size: int) -> torch.Tensor:
+        return self.pos_enc.position_encoding(offset, size)
+
+
+class EmbedinigNoSubsampling(BaseSubsampling):
+    """Embedding input without subsampling
+    """
+
+    def __init__(self, idim: int, odim: int, dropout_rate: float,
+                 pos_enc_class: torch.nn.Module):
+        super().__init__()
+        self.embed = torch.nn.Embedding(idim, odim)
+        self.pos_enc = pos_enc_class
+
+    def forward(
+        self,
+        x: torch.Tensor,
+        x_mask: torch.Tensor,
+        offset: Union[int, torch.Tensor] = 0
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        """Input x.
+
+        Args:
+            x (torch.Tensor): Input tensor (#batch, time, idim).
+            x_mask (torch.Tensor): Input mask (#batch, 1, time).
+
+        Returns:
+            torch.Tensor: linear input tensor (#batch, time', odim),
+                where time' = time.
+            torch.Tensor: linear input mask (#batch, 1, time'),
+                where time' = time.
+
+        """
+        x = self.embed(x)
+        x, pos_emb = self.pos_enc(x, offset)
+        return x, pos_emb, x_mask
+
+
+class LinearNoSubsampling(BaseSubsampling):
+    """Linear transform the input without subsampling
+
+    Args:
+        idim (int): Input dimension.
+        odim (int): Output dimension.
+        dropout_rate (float): Dropout rate.
+
+    """
+
+    def __init__(self, idim: int, odim: int, dropout_rate: float,
+                 pos_enc_class: torch.nn.Module):
+        """Construct a linear object."""
+        super().__init__()
+        self.out = torch.nn.Sequential(
+            torch.nn.Linear(idim, odim),
+            torch.nn.LayerNorm(odim, eps=1e-5),
+            torch.nn.Dropout(dropout_rate),
+        )
+        self.pos_enc = pos_enc_class
+        self.right_context = 0
+        self.subsampling_rate = 1
+
+    def forward(
+        self,
+        x: torch.Tensor,
+        x_mask: torch.Tensor,
+        offset: Union[int, torch.Tensor] = 0
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        """Input x.
+
+        Args:
+            x (torch.Tensor): Input tensor (#batch, time, idim).
+            x_mask (torch.Tensor): Input mask (#batch, 1, time).
+
+        Returns:
+            torch.Tensor: linear input tensor (#batch, time', odim),
+                where time' = time.
+            torch.Tensor: linear input mask (#batch, 1, time'),
+                where time' = time.
+
+        """
+        x = self.out(x)
+        x, pos_emb = self.pos_enc(x, offset)
+        return x, pos_emb, x_mask
+
+
+class Conv1dSubsampling2(BaseSubsampling):
+    """Convolutional 1D subsampling (to 1/2 length).
+    It is designed for Whisper, ref:
+    https://github.com/openai/whisper/blob/main/whisper/model.py
+
+    Args:
+        idim (int): Input dimension.
+        odim (int): Output dimension.
+        dropout_rate (float): Dropout rate.
+
+    """
+
+    def __init__(self, idim: int, odim: int, dropout_rate: float,
+                 pos_enc_class: torch.nn.Module):
+        """Construct a Conv1dSubsampling2 object."""
+        super().__init__()
+        self.conv = torch.nn.Sequential(
+            torch.nn.Conv1d(idim, odim, kernel_size=3, padding=1),
+            torch.nn.GELU(),
+            torch.nn.Conv1d(odim, odim, kernel_size=3, stride=2, padding=1),
+            torch.nn.GELU(),
+        )
+        self.pos_enc = pos_enc_class
+        # The right context for every conv layer is computed by:
+        # (kernel_size - 1) * frame_rate_of_this_layer
+        self.subsampling_rate = 2
+        # 4 = (3 - 1) * 1 + (3 - 1) * 1
+        self.right_context = 4
+
+    def forward(
+        self,
+        x: torch.Tensor,
+        x_mask: torch.Tensor,
+        offset: Union[int, torch.Tensor] = 0
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        """Subsample x.
+
+        Args:
+            x (torch.Tensor): Input tensor (#batch, time, idim).
+            x_mask (torch.Tensor): Input mask (#batch, 1, time).
+
+        Returns:
+            torch.Tensor: Subsampled tensor (#batch, time', odim),
+                where time' = time // 2.
+            torch.Tensor: Subsampled mask (#batch, 1, time'),
+                where time' = time // 2.
+            torch.Tensor: positional encoding
+
+        """
+        time = x.size(1)
+        x = x.transpose(1, 2)  # (b, f, t)
+        x = self.conv(x)
+        x = x.transpose(1, 2)  # (b, t, f)
+        x, pos_emb = self.pos_enc(x, offset)
+        return x, pos_emb, x_mask[:, :, (time + 1) % 2::2]
+
+
+class Conv2dSubsampling4(BaseSubsampling):
+    """Convolutional 2D subsampling (to 1/4 length).
+
+    Args:
+        idim (int): Input dimension.
+        odim (int): Output dimension.
+        dropout_rate (float): Dropout rate.
+
+    """
+
+    def __init__(self, idim: int, odim: int, dropout_rate: float,
+                 pos_enc_class: torch.nn.Module):
+        """Construct a Conv2dSubsampling4 object."""
+        super().__init__()
+        self.conv = torch.nn.Sequential(
+            torch.nn.Conv2d(1, odim, 3, 2),
+            torch.nn.ReLU(),
+            torch.nn.Conv2d(odim, odim, 3, 2),
+            torch.nn.ReLU(),
+        )
+        self.out = torch.nn.Sequential(
+            torch.nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim))
+        self.pos_enc = pos_enc_class
+        # The right context for every conv layer is computed by:
+        # (kernel_size - 1) * frame_rate_of_this_layer
+        self.subsampling_rate = 4
+        # 6 = (3 - 1) * 1 + (3 - 1) * 2
+        self.right_context = 6
+
+    def forward(
+        self,
+        x: torch.Tensor,
+        x_mask: torch.Tensor,
+        offset: Union[int, torch.Tensor] = 0
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        """Subsample x.
+
+        Args:
+            x (torch.Tensor): Input tensor (#batch, time, idim).
+            x_mask (torch.Tensor): Input mask (#batch, 1, time).
+
+        Returns:
+            torch.Tensor: Subsampled tensor (#batch, time', odim),
+                where time' = time // 4.
+            torch.Tensor: Subsampled mask (#batch, 1, time'),
+                where time' = time // 4.
+            torch.Tensor: positional encoding
+
+        """
+        x = x.unsqueeze(1)  # (b, c=1, t, f)
+        x = self.conv(x)
+        b, c, t, f = x.size()
+        x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))
+        x, pos_emb = self.pos_enc(x, offset)
+        return x, pos_emb, x_mask[:, :, 2::2][:, :, 2::2]
+
+
+class Conv2dSubsampling6(BaseSubsampling):
+    """Convolutional 2D subsampling (to 1/6 length).
+    Args:
+        idim (int): Input dimension.
+        odim (int): Output dimension.
+        dropout_rate (float): Dropout rate.
+        pos_enc (torch.nn.Module): Custom position encoding layer.
+    """
+
+    def __init__(self, idim: int, odim: int, dropout_rate: float,
+                 pos_enc_class: torch.nn.Module):
+        """Construct a Conv2dSubsampling6 object."""
+        super().__init__()
+        self.conv = torch.nn.Sequential(
+            torch.nn.Conv2d(1, odim, 3, 2),
+            torch.nn.ReLU(),
+            torch.nn.Conv2d(odim, odim, 5, 3),
+            torch.nn.ReLU(),
+        )
+        self.linear = torch.nn.Linear(odim * (((idim - 1) // 2 - 2) // 3),
+                                      odim)
+        self.pos_enc = pos_enc_class
+        # 10 = (3 - 1) * 1 + (5 - 1) * 2
+        self.subsampling_rate = 6
+        self.right_context = 10
+
+    def forward(
+        self,
+        x: torch.Tensor,
+        x_mask: torch.Tensor,
+        offset: Union[int, torch.Tensor] = 0
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        """Subsample x.
+        Args:
+            x (torch.Tensor): Input tensor (#batch, time, idim).
+            x_mask (torch.Tensor): Input mask (#batch, 1, time).
+
+        Returns:
+            torch.Tensor: Subsampled tensor (#batch, time', odim),
+                where time' = time // 6.
+            torch.Tensor: Subsampled mask (#batch, 1, time'),
+                where time' = time // 6.
+            torch.Tensor: positional encoding
+        """
+        x = x.unsqueeze(1)  # (b, c, t, f)
+        x = self.conv(x)
+        b, c, t, f = x.size()
+        x = self.linear(x.transpose(1, 2).contiguous().view(b, t, c * f))
+        x, pos_emb = self.pos_enc(x, offset)
+        return x, pos_emb, x_mask[:, :, 2::2][:, :, 4::3]
+
+
+class Conv2dSubsampling8(BaseSubsampling):
+    """Convolutional 2D subsampling (to 1/8 length).
+
+    Args:
+        idim (int): Input dimension.
+        odim (int): Output dimension.
+        dropout_rate (float): Dropout rate.
+
+    """
+
+    def __init__(self, idim: int, odim: int, dropout_rate: float,
+                 pos_enc_class: torch.nn.Module):
+        """Construct a Conv2dSubsampling8 object."""
+        super().__init__()
+        self.conv = torch.nn.Sequential(
+            torch.nn.Conv2d(1, odim, 3, 2),
+            torch.nn.ReLU(),
+            torch.nn.Conv2d(odim, odim, 3, 2),
+            torch.nn.ReLU(),
+            torch.nn.Conv2d(odim, odim, 3, 2),
+            torch.nn.ReLU(),
+        )
+        self.linear = torch.nn.Linear(
+            odim * ((((idim - 1) // 2 - 1) // 2 - 1) // 2), odim)
+        self.pos_enc = pos_enc_class
+        self.subsampling_rate = 8
+        # 14 = (3 - 1) * 1 + (3 - 1) * 2 + (3 - 1) * 4
+        self.right_context = 14
+
+    def forward(
+        self,
+        x: torch.Tensor,
+        x_mask: torch.Tensor,
+        offset: Union[int, torch.Tensor] = 0
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        """Subsample x.
+
+        Args:
+            x (torch.Tensor): Input tensor (#batch, time, idim).
+            x_mask (torch.Tensor): Input mask (#batch, 1, time).
+
+        Returns:
+            torch.Tensor: Subsampled tensor (#batch, time', odim),
+                where time' = time // 8.
+            torch.Tensor: Subsampled mask (#batch, 1, time'),
+                where time' = time // 8.
+            torch.Tensor: positional encoding
+        """
+        x = x.unsqueeze(1)  # (b, c, t, f)
+        x = self.conv(x)
+        b, c, t, f = x.size()
+        x = self.linear(x.transpose(1, 2).contiguous().view(b, t, c * f))
+        x, pos_emb = self.pos_enc(x, offset)
+        return x, pos_emb, x_mask[:, :, 2::2][:, :, 2::2][:, :, 2::2]
+
+
+class LegacyLinearNoSubsampling(BaseSubsampling):
+    """Linear transform the input without subsampling
+
+    Args:
+        idim (int): Input dimension.
+        odim (int): Output dimension.
+        dropout_rate (float): Dropout rate.
+
+    """
+
+    def __init__(self, idim: int, odim: int, dropout_rate: float,
+                 pos_enc_class: torch.nn.Module):
+        """Construct a linear object."""
+        super().__init__()
+        self.out = torch.nn.Sequential(
+            torch.nn.Linear(idim, odim),
+            torch.nn.LayerNorm(odim, eps=1e-5),
+            torch.nn.Dropout(dropout_rate),
+            torch.nn.ReLU(),
+        )
+        self.pos_enc = pos_enc_class
+        self.right_context = 0
+        self.subsampling_rate = 1
+
+    def forward(
+        self,
+        x: torch.Tensor,
+        x_mask: torch.Tensor,
+        offset: Union[int, torch.Tensor] = 0
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        """Input x.
+
+        Args:
+            x (torch.Tensor): Input tensor (#batch, time, idim).
+            x_mask (torch.Tensor): Input mask (#batch, 1, time).
+
+        Returns:
+            torch.Tensor: linear input tensor (#batch, time', odim),
+                where time' = time.
+            torch.Tensor: linear input mask (#batch, 1, time'),
+                where time' = time.
+
+        """
+        x = self.out(x)
+        x, pos_emb = self.pos_enc(x, offset)
+        return x, pos_emb, x_mask
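All of these classes share one interface, forward(x, x_mask, offset) -> (x, pos_emb, x_mask'), so an encoder can swap the input layer by name. A sketch of the no-subsampling path paired with the ESPnet-style relative encoding (not part of this diff; the pairing mirrors the defaults UpsampleConformerEncoder in the next file requests via input_layer="linear" and pos_enc_layer_type="rel_pos_espnet", assuming the registry in utils/class_utils.py maps those names to these classes):

import torch
from chatterbox.models.s3gen.transformer.embedding import EspnetRelPositionalEncoding
from chatterbox.models.s3gen.transformer.subsampling import LinearNoSubsampling

embed = LinearNoSubsampling(
    idim=80, odim=512, dropout_rate=0.1,
    pos_enc_class=EspnetRelPositionalEncoding(512, 0.1),
)
x = torch.randn(2, 50, 80)                       # (batch, time, idim)
x_mask = torch.ones(2, 1, 50, dtype=torch.bool)  # no padding
xs, pos_emb, mask = embed(x, x_mask)
print(xs.shape, pos_emb.shape, mask.shape)
# torch.Size([2, 50, 512]) torch.Size([1, 99, 512]) torch.Size([2, 1, 50])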
src/chatterbox/models/s3gen/transformer/upsample_encoder.py
ADDED
@@ -0,0 +1,318 @@
+# Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu)
+#               2022 Xingchen Song ([email protected])
+#               2024 Alibaba Inc (Xiang Lyu)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# Modified from ESPnet(https://github.com/espnet/espnet)
+"""Encoder definition."""
+from typing import Tuple
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from .convolution import ConvolutionModule
+from .encoder_layer import ConformerEncoderLayer
+from .positionwise_feed_forward import PositionwiseFeedForward
+from ..utils.class_utils import (
+    COSYVOICE_EMB_CLASSES,
+    COSYVOICE_SUBSAMPLE_CLASSES,
+    COSYVOICE_ATTENTION_CLASSES,
+    COSYVOICE_ACTIVATION_CLASSES,
+)
+from ..utils.mask import make_pad_mask
+from ..utils.mask import add_optional_chunk_mask
+
+
+class Upsample1D(nn.Module):
+    """A 1D upsampling layer followed by a convolution.
+
+    Parameters:
+        channels (`int`):
+            number of channels in the inputs.
+        out_channels (`int`):
+            number of output channels.
+        stride (`int`, default `2`):
+            upsampling factor applied by nearest-neighbor interpolation.
+    """
+
+    def __init__(self, channels: int, out_channels: int, stride: int = 2):
+        super().__init__()
+        self.channels = channels
+        self.out_channels = out_channels
+        self.stride = stride
+        # In this mode, first repeat-interpolate, then conv with stride=1
+        self.conv = nn.Conv1d(self.channels, self.out_channels, stride * 2 + 1, stride=1, padding=0)
+
+    def forward(self, inputs: torch.Tensor, input_lengths: torch.Tensor):
+        outputs = F.interpolate(inputs, scale_factor=float(self.stride), mode="nearest")
+        outputs = F.pad(outputs, (self.stride * 2, 0), value=0.0)
+        outputs = self.conv(outputs)
+        return outputs, input_lengths * self.stride
+
+
+class PreLookaheadLayer(nn.Module):
+    def __init__(self, channels: int, pre_lookahead_len: int = 1):
+        super().__init__()
+        self.channels = channels
+        self.pre_lookahead_len = pre_lookahead_len
+        self.conv1 = nn.Conv1d(
+            channels, channels,
+            kernel_size=pre_lookahead_len + 1,
+            stride=1, padding=0,
+        )
+        self.conv2 = nn.Conv1d(
+            channels, channels,
+            kernel_size=3, stride=1, padding=0,
+        )
+
+    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
+        """
+        inputs: (batch_size, seq_len, channels)
+        """
+        outputs = inputs.transpose(1, 2).contiguous()
+        # look ahead
+        outputs = F.pad(outputs, (0, self.pre_lookahead_len), mode='constant', value=0.0)
+        outputs = F.leaky_relu(self.conv1(outputs))
+        # outputs
+        outputs = F.pad(outputs, (2, 0), mode='constant', value=0.0)
+        outputs = self.conv2(outputs)
+        outputs = outputs.transpose(1, 2).contiguous()
+
+        # residual connection
+        outputs = outputs + inputs
+        return outputs
+
+
+class UpsampleConformerEncoder(torch.nn.Module):
+
+    def __init__(
+        self,
+        input_size: int = 512,
+        output_size: int = 512,
+        attention_heads: int = 8,
+        linear_units: int = 2048,
+        num_blocks: int = 6,
+        dropout_rate: float = 0.1,
+        positional_dropout_rate: float = 0.1,
+        attention_dropout_rate: float = 0.1,
+        input_layer: str = "linear",
+        pos_enc_layer_type: str = "rel_pos_espnet",
+        normalize_before: bool = True,
+        static_chunk_size: int = 0,
+        use_dynamic_chunk: bool = False,
+        global_cmvn: torch.nn.Module = None,
+        use_dynamic_left_chunk: bool = False,
+        positionwise_conv_kernel_size: int = 1,
+        macaron_style: bool = False,
+        selfattention_layer_type: str = "rel_selfattn",
+        activation_type: str = "swish",
+        use_cnn_module: bool = False,
+        cnn_module_kernel: int = 15,
+        causal: bool = False,
+        cnn_module_norm: str = "batch_norm",
+        key_bias: bool = True,
+        gradient_checkpointing: bool = False,
+    ):
+        """
+        Args:
+            input_size (int): input dim
+            output_size (int): dimension of attention
+            attention_heads (int): the number of heads of multi head attention
+            linear_units (int): the hidden units number of position-wise feed
+                forward
+            num_blocks (int): the number of decoder blocks
+            dropout_rate (float): dropout rate
+            attention_dropout_rate (float): dropout rate in attention
+            positional_dropout_rate (float): dropout rate after adding
+                positional encoding
+            input_layer (str): input layer type.
+                optional [linear, conv2d, conv2d6, conv2d8]
+            pos_enc_layer_type (str): Encoder positional encoding layer type.
+                optional [abs_pos, scaled_abs_pos, rel_pos, no_pos]
+            normalize_before (bool):
+                True: use layer_norm before each sub-block of a layer.
+                False: use layer_norm after each sub-block of a layer.
+            static_chunk_size (int): chunk size for static chunk training and
+                decoding
+            use_dynamic_chunk (bool): whether to use dynamic chunk size for
+                training or not. You can only use a fixed chunk (chunk_size > 0)
+                or a dynamic chunk size (use_dynamic_chunk = True)
+            global_cmvn (Optional[torch.nn.Module]): Optional GlobalCMVN module
+            use_dynamic_left_chunk (bool): whether to use dynamic left chunk in
+                dynamic chunk training
+            key_bias: whether to use bias in attention.linear_k, False for whisper models.
+            gradient_checkpointing: rerunning a forward-pass segment for each
+                checkpointed segment during backward.
+        """
+        super().__init__()
+        self._output_size = output_size
+
+        self.global_cmvn = global_cmvn
+        self.embed = COSYVOICE_SUBSAMPLE_CLASSES[input_layer](
+            input_size,
+            output_size,
+            dropout_rate,
+            COSYVOICE_EMB_CLASSES[pos_enc_layer_type](output_size,
+                                                      positional_dropout_rate),
+        )
+
+        self.normalize_before = normalize_before
+        self.after_norm = torch.nn.LayerNorm(output_size, eps=1e-5)
+        self.static_chunk_size = static_chunk_size
+        self.use_dynamic_chunk = use_dynamic_chunk
+        self.use_dynamic_left_chunk = use_dynamic_left_chunk
+        self.gradient_checkpointing = gradient_checkpointing
+        activation = COSYVOICE_ACTIVATION_CLASSES[activation_type]()
+        # self-attention module definition
+        encoder_selfattn_layer_args = (
+            attention_heads,
+            output_size,
+            attention_dropout_rate,
+            key_bias,
+        )
+        # feed-forward module definition
+        positionwise_layer_args = (
+            output_size,
+            linear_units,
+            dropout_rate,
+            activation,
+        )
+        # convolution module definition
+        convolution_layer_args = (output_size, cnn_module_kernel, activation,
+                                  cnn_module_norm, causal)
+        self.pre_lookahead_layer = PreLookaheadLayer(channels=512, pre_lookahead_len=3)
+        self.encoders = torch.nn.ModuleList([
+            ConformerEncoderLayer(
+                output_size,
+                COSYVOICE_ATTENTION_CLASSES[selfattention_layer_type](
+                    *encoder_selfattn_layer_args),
+                PositionwiseFeedForward(*positionwise_layer_args),
+                PositionwiseFeedForward(
+                    *positionwise_layer_args) if macaron_style else None,
+                ConvolutionModule(
+                    *convolution_layer_args) if use_cnn_module else None,
+                dropout_rate,
+                normalize_before,
+            ) for _ in range(num_blocks)
+        ])
+        self.up_layer = Upsample1D(channels=512, out_channels=512, stride=2)
+        self.up_embed = COSYVOICE_SUBSAMPLE_CLASSES[input_layer](
+            input_size,
+            output_size,
+            dropout_rate,
+            COSYVOICE_EMB_CLASSES[pos_enc_layer_type](output_size,
+                                                      positional_dropout_rate),
+        )
+        self.up_encoders = torch.nn.ModuleList([
+            ConformerEncoderLayer(
+                output_size,
+                COSYVOICE_ATTENTION_CLASSES[selfattention_layer_type](
+                    *encoder_selfattn_layer_args),
+                PositionwiseFeedForward(*positionwise_layer_args),
+                PositionwiseFeedForward(
+                    *positionwise_layer_args) if macaron_style else None,
+                ConvolutionModule(
+                    *convolution_layer_args) if use_cnn_module else None,
+                dropout_rate,
+                normalize_before,
+            ) for _ in range(4)
+        ])
+
+    def output_size(self) -> int:
+        return self._output_size
+
+    def forward(
+        self,
+        xs: torch.Tensor,
+        xs_lens: torch.Tensor,
+        decoding_chunk_size: int = 0,
+        num_decoding_left_chunks: int = -1,
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        """Embed positions in tensor.
+
+        Args:
+            xs: padded input tensor (B, T, D)
+            xs_lens: input length (B)
+            decoding_chunk_size: decoding chunk size for dynamic chunk
+                0: default for training, use random dynamic chunk.
+                <0: for decoding, use full chunk.
+                >0: for decoding, use fixed chunk size as set.
+            num_decoding_left_chunks: number of left chunks, this is for decoding,
+                the chunk size is decoding_chunk_size.
+                >=0: use num_decoding_left_chunks
+                <0: use all left chunks
+        Returns:
+            encoder output tensor xs, and subsampled masks
+            xs: padded output tensor (B, T' ~= T/subsample_rate, D)
+            masks: torch.Tensor batch padding mask after subsample
+                (B, 1, T' ~= T/subsample_rate)
+        NOTE(xcsong):
+            We pass the `__call__` method of the modules instead of `forward` to the
+            checkpointing API because `__call__` attaches all the hooks of the module.
+            https://discuss.pytorch.org/t/any-different-between-model-input-and-model-forward-input/3690/2
+        """
+        T = xs.size(1)
+        masks = ~make_pad_mask(xs_lens, T).unsqueeze(1)  # (B, 1, T)
+        if self.global_cmvn is not None:
+            xs = self.global_cmvn(xs)
+        xs, pos_emb, masks = self.embed(xs, masks)
+        mask_pad = masks  # (B, 1, T/subsample_rate)
+        chunk_masks = add_optional_chunk_mask(xs, masks,
+                                              self.use_dynamic_chunk,
+                                              self.use_dynamic_left_chunk,
+                                              decoding_chunk_size,
+                                              self.static_chunk_size,
+                                              num_decoding_left_chunks)
+        # lookahead + conformer encoder
+        xs = self.pre_lookahead_layer(xs)
+        xs = self.forward_layers(xs, chunk_masks, pos_emb, mask_pad)
+
+        # upsample + conformer encoder
+        xs = xs.transpose(1, 2).contiguous()
+        xs, xs_lens = self.up_layer(xs, xs_lens)
|
286 |
+
xs = xs.transpose(1, 2).contiguous()
|
287 |
+
T = xs.size(1)
|
288 |
+
masks = ~make_pad_mask(xs_lens, T).unsqueeze(1) # (B, 1, T)
|
289 |
+
xs, pos_emb, masks = self.up_embed(xs, masks)
|
290 |
+
mask_pad = masks # (B, 1, T/subsample_rate)
|
291 |
+
chunk_masks = add_optional_chunk_mask(xs, masks,
|
292 |
+
self.use_dynamic_chunk,
|
293 |
+
self.use_dynamic_left_chunk,
|
294 |
+
decoding_chunk_size,
|
295 |
+
self.static_chunk_size * self.up_layer.stride,
|
296 |
+
num_decoding_left_chunks)
|
297 |
+
xs = self.forward_up_layers(xs, chunk_masks, pos_emb, mask_pad)
|
298 |
+
|
299 |
+
if self.normalize_before:
|
300 |
+
xs = self.after_norm(xs)
|
301 |
+
# Here we assume the mask is not changed in encoder layers, so just
|
302 |
+
# return the masks before encoder layers, and the masks will be used
|
303 |
+
# for cross attention with decoder later
|
304 |
+
return xs, masks
|
305 |
+
|
306 |
+
def forward_layers(self, xs: torch.Tensor, chunk_masks: torch.Tensor,
|
307 |
+
pos_emb: torch.Tensor,
|
308 |
+
mask_pad: torch.Tensor) -> torch.Tensor:
|
309 |
+
for layer in self.encoders:
|
310 |
+
xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad)
|
311 |
+
return xs
|
312 |
+
|
313 |
+
def forward_up_layers(self, xs: torch.Tensor, chunk_masks: torch.Tensor,
|
314 |
+
pos_emb: torch.Tensor,
|
315 |
+
mask_pad: torch.Tensor) -> torch.Tensor:
|
316 |
+
for layer in self.up_encoders:
|
317 |
+
xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad)
|
318 |
+
return xs
|
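Usage sketch (illustrative, not part of the commit): the forward pass above runs a lookahead conformer stack at the input token rate, then doubles the time axis with `Upsample1D(stride=2)` before a second, shorter conformer stack. The class name `UpsampleConformerEncoder` and the no-argument constructor defaults are assumptions from context, not shown in this hunk.

import torch
from chatterbox.models.s3gen.transformer.upsample_encoder import UpsampleConformerEncoder

encoder = UpsampleConformerEncoder()  # assumed default config (512-dim features)
xs = torch.randn(2, 100, 512)         # (B, T, D) padded input features
xs_lens = torch.tensor([100, 80])     # true lengths per batch item
ys, masks = encoder(xs, xs_lens)
# up_layer has stride 2, so the time axis roughly doubles:
# ys: (2, ~200, 512), masks: (2, 1, ~200)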
src/chatterbox/models/s3gen/utils/class_utils.py
ADDED
@@ -0,0 +1,71 @@
# Copyright [2023-11-28] <[email protected], Xingchen Song>
#            2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..transformer.activation import Swish
from ..transformer.subsampling import (
    LinearNoSubsampling,
    EmbedinigNoSubsampling,
    Conv1dSubsampling2,
    Conv2dSubsampling4,
    Conv2dSubsampling6,
    Conv2dSubsampling8,
)
from ..transformer.embedding import (
    PositionalEncoding,
    RelPositionalEncoding,
    WhisperPositionalEncoding,
    LearnablePositionalEncoding,
    NoPositionalEncoding)
from ..transformer.attention import (MultiHeadedAttention,
                                     RelPositionMultiHeadedAttention)
from ..transformer.embedding import EspnetRelPositionalEncoding
from ..transformer.subsampling import LegacyLinearNoSubsampling


COSYVOICE_ACTIVATION_CLASSES = {
    "hardtanh": torch.nn.Hardtanh,
    "tanh": torch.nn.Tanh,
    "relu": torch.nn.ReLU,
    "selu": torch.nn.SELU,
    "swish": getattr(torch.nn, "SiLU", Swish),
    "gelu": torch.nn.GELU,
}

COSYVOICE_SUBSAMPLE_CLASSES = {
    "linear": LinearNoSubsampling,
    "linear_legacy": LegacyLinearNoSubsampling,
    "embed": EmbedinigNoSubsampling,
    "conv1d2": Conv1dSubsampling2,
    "conv2d": Conv2dSubsampling4,
    "conv2d6": Conv2dSubsampling6,
    "conv2d8": Conv2dSubsampling8,
    'paraformer_dummy': torch.nn.Identity
}

COSYVOICE_EMB_CLASSES = {
    "embed": PositionalEncoding,
    "abs_pos": PositionalEncoding,
    "rel_pos": RelPositionalEncoding,
    "rel_pos_espnet": EspnetRelPositionalEncoding,
    "no_pos": NoPositionalEncoding,
    "abs_pos_whisper": WhisperPositionalEncoding,
    "embed_learnable_pe": LearnablePositionalEncoding,
}

COSYVOICE_ATTENTION_CLASSES = {
    "selfattn": MultiHeadedAttention,
    "rel_selfattn": RelPositionMultiHeadedAttention,
}
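Usage sketch (illustrative, not part of the commit): these dicts are string-to-class registries; the encoder constructors above index them with config strings and instantiate the result.

from chatterbox.models.s3gen.utils.class_utils import (
    COSYVOICE_ACTIVATION_CLASSES,
    COSYVOICE_SUBSAMPLE_CLASSES,
    COSYVOICE_EMB_CLASSES,
    COSYVOICE_ATTENTION_CLASSES,
)

activation = COSYVOICE_ACTIVATION_CLASSES["swish"]()   # SiLU on modern torch, Swish fallback
subsample_cls = COSYVOICE_SUBSAMPLE_CLASSES["linear"]  # LinearNoSubsampling
embed_cls = COSYVOICE_EMB_CLASSES["rel_pos_espnet"]    # EspnetRelPositionalEncoding
attn_cls = COSYVOICE_ATTENTION_CLASSES["rel_selfattn"] # RelPositionMultiHeadedAttention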
src/chatterbox/models/s3gen/utils/mask.py
ADDED
@@ -0,0 +1,193 @@
# Copyright (c) 2019 Shigeki Karita
#               2020 Mobvoi Inc (Binbin Zhang)
#               2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

import torch

'''
def subsequent_mask(
        size: int,
        device: torch.device = torch.device("cpu"),
) -> torch.Tensor:
    """Create mask for subsequent steps (size, size).

    This mask is used only in decoder which works in an auto-regressive mode.
    This means the current step could only do attention with its left steps.

    In encoder, full attention is used when streaming is not necessary and
    the sequence is not long. In this case, no attention mask is needed.

    When streaming is needed, chunk-based attention is used in encoder. See
    subsequent_chunk_mask for the chunk-based attention mask.

    Args:
        size (int): size of mask
        device (torch.device): "cpu" or "cuda" or torch.Tensor.device

    Returns:
        torch.Tensor: mask

    Examples:
        >>> subsequent_mask(3)
        [[1, 0, 0],
         [1, 1, 0],
         [1, 1, 1]]
    """
    ret = torch.ones(size, size, device=device, dtype=torch.bool)
    return torch.tril(ret)
'''


def subsequent_chunk_mask(
        size: int,
        chunk_size: int,
        num_left_chunks: int = -1,
        device: torch.device = torch.device("cpu"),
) -> torch.Tensor:
    """Create mask for subsequent steps (size, size) with chunk size,
       this is for streaming encoder

    Args:
        size (int): size of mask
        chunk_size (int): size of chunk
        num_left_chunks (int): number of left chunks
            <0: use full chunk
            >=0: use num_left_chunks
        device (torch.device): "cpu" or "cuda" or torch.Tensor.device

    Returns:
        torch.Tensor: mask

    Examples:
        >>> subsequent_chunk_mask(4, 2)
        [[1, 1, 0, 0],
         [1, 1, 0, 0],
         [1, 1, 1, 1],
         [1, 1, 1, 1]]
    """
    # NOTE this modified implementation meets onnx export requirements, but it doesn't support num_left_chunks
    # actually this is not needed after we have inference cache implemented, will remove it later
    pos_idx = torch.arange(size, device=device)
    block_value = (torch.div(pos_idx, chunk_size, rounding_mode='trunc') + 1) * chunk_size
    ret = pos_idx.unsqueeze(0) < block_value.unsqueeze(1)
    return ret


def add_optional_chunk_mask(xs: torch.Tensor,
                            masks: torch.Tensor,
                            use_dynamic_chunk: bool,
                            use_dynamic_left_chunk: bool,
                            decoding_chunk_size: int,
                            static_chunk_size: int,
                            num_decoding_left_chunks: int,
                            enable_full_context: bool = True):
    """ Apply optional mask for encoder.

    Args:
        xs (torch.Tensor): padded input, (B, L, D), L for max length
        masks (torch.Tensor): mask for xs, (B, 1, L)
        use_dynamic_chunk (bool): whether to use dynamic chunk or not
        use_dynamic_left_chunk (bool): whether to use dynamic left chunk for
            training.
        decoding_chunk_size (int): decoding chunk size for dynamic chunk, it's
            0: default for training, use random dynamic chunk.
            <0: for decoding, use full chunk.
            >0: for decoding, use fixed chunk size as set.
        static_chunk_size (int): chunk size for static chunk training/decoding
            if it's greater than 0; if use_dynamic_chunk is true,
            this parameter will be ignored
        num_decoding_left_chunks: number of left chunks, this is for decoding,
            the chunk size is decoding_chunk_size.
            >=0: use num_decoding_left_chunks
            <0: use all left chunks
        enable_full_context (bool):
            True: chunk size is either [1, 25] or full context(max_len)
            False: chunk size ~ U[1, 25]

    Returns:
        torch.Tensor: chunk mask of the input xs.
    """
    # Whether to use chunk mask or not
    if use_dynamic_chunk:
        max_len = xs.size(1)
        if decoding_chunk_size < 0:
            chunk_size = max_len
            num_left_chunks = -1
        elif decoding_chunk_size > 0:
            chunk_size = decoding_chunk_size
            num_left_chunks = num_decoding_left_chunks
        else:
            # chunk size is either [1, 25] or full context(max_len).
            # Since we use 4 times subsampling and allow up to 1s(100 frames)
            # delay, the maximum frame is 100 / 4 = 25.
            chunk_size = torch.randint(1, max_len, (1, )).item()
            num_left_chunks = -1
            if chunk_size > max_len // 2 and enable_full_context:
                chunk_size = max_len
            else:
                chunk_size = chunk_size % 25 + 1
                if use_dynamic_left_chunk:
                    max_left_chunks = (max_len - 1) // chunk_size
                    num_left_chunks = torch.randint(0, max_left_chunks,
                                                    (1, )).item()
        chunk_masks = subsequent_chunk_mask(xs.size(1), chunk_size,
                                            num_left_chunks,
                                            xs.device)  # (L, L)
        chunk_masks = chunk_masks.unsqueeze(0)  # (1, L, L)
        chunk_masks = masks & chunk_masks  # (B, L, L)
    elif static_chunk_size > 0:
        num_left_chunks = num_decoding_left_chunks
        chunk_masks = subsequent_chunk_mask(xs.size(1), static_chunk_size,
                                            num_left_chunks,
                                            xs.device)  # (L, L)
        chunk_masks = chunk_masks.unsqueeze(0)  # (1, L, L)
        chunk_masks = masks & chunk_masks  # (B, L, L)
    else:
        chunk_masks = masks
    assert chunk_masks.dtype == torch.bool
    if (chunk_masks.sum(dim=-1) == 0).sum().item() != 0:
        logging.warning('get chunk_masks all false at some timestep, force set to true, make sure they are masked in future computation!')
        chunk_masks[chunk_masks.sum(dim=-1) == 0] = True
    return chunk_masks


def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
    """Make mask tensor containing indices of padded part.

    See description of make_non_pad_mask.

    Args:
        lengths (torch.Tensor): Batch of lengths (B,).
    Returns:
        torch.Tensor: Mask tensor containing indices of padded part.

    Examples:
        >>> lengths = [5, 3, 2]
        >>> make_pad_mask(lengths)
        masks = [[0, 0, 0, 0, 0],
                 [0, 0, 0, 1, 1],
                 [0, 0, 1, 1, 1]]
    """
    batch_size = lengths.size(0)
    max_len = max_len if max_len > 0 else lengths.max().item()
    seq_range = torch.arange(0,
                             max_len,
                             dtype=torch.int64,
                             device=lengths.device)
    seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
    seq_length_expand = lengths.unsqueeze(-1)
    mask = seq_range_expand >= seq_length_expand
    return mask
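Usage sketch (illustrative, not part of the commit): combining the two helpers the way the encoders above do, with a static chunk size.

import torch
from chatterbox.models.s3gen.utils.mask import make_pad_mask, add_optional_chunk_mask

lengths = torch.tensor([5, 3, 2])
pad_mask = make_pad_mask(lengths)        # (3, 5) bool, True on padded positions
valid = ~pad_mask.unsqueeze(1)           # (3, 1, 5), True on valid positions

xs = torch.randn(3, 5, 8)                # (B, L, D)
chunk_masks = add_optional_chunk_mask(
    xs, valid,
    use_dynamic_chunk=False, use_dynamic_left_chunk=False,
    decoding_chunk_size=0, static_chunk_size=2,
    num_decoding_left_chunks=-1)         # (3, 5, 5) bool: padding AND chunk visibility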
src/chatterbox/models/s3gen/utils/mel.py
ADDED
@@ -0,0 +1,81 @@
"""mel-spectrogram extraction in Matcha-TTS"""
from librosa.filters import mel as librosa_mel_fn
import torch
import numpy as np


# NOTE: they declared these global vars
mel_basis = {}
hann_window = {}


def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
    return torch.log(torch.clamp(x, min=clip_val) * C)


def spectral_normalize_torch(magnitudes):
    output = dynamic_range_compression_torch(magnitudes)
    return output

"""
feat_extractor: !name:matcha.utils.audio.mel_spectrogram
    n_fft: 1920
    num_mels: 80
    sampling_rate: 24000
    hop_size: 480
    win_size: 1920
    fmin: 0
    fmax: 8000
    center: False
"""

def mel_spectrogram(y, n_fft=1920, num_mels=80, sampling_rate=24000, hop_size=480, win_size=1920,
                    fmin=0, fmax=8000, center=False):
    """Copied from https://github.com/shivammehta25/Matcha-TTS/blob/main/matcha/utils/audio.py
    Set default values according to Cosyvoice's config.
    """

    if isinstance(y, np.ndarray):
        y = torch.tensor(y).float()

    if len(y.shape) == 1:
        y = y[None, ]

    if torch.min(y) < -1.0:
        print("min value is ", torch.min(y))
    if torch.max(y) > 1.0:
        print("max value is ", torch.max(y))

    global mel_basis, hann_window  # pylint: disable=global-statement,global-variable-not-assigned
    if f"{str(fmax)}_{str(y.device)}" not in mel_basis:
        mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
        mel_basis[str(fmax) + "_" + str(y.device)] = torch.from_numpy(mel).float().to(y.device)
        hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)

    y = torch.nn.functional.pad(
        y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode="reflect"
    )
    y = y.squeeze(1)

    spec = torch.view_as_real(
        torch.stft(
            y,
            n_fft,
            hop_length=hop_size,
            win_length=win_size,
            window=hann_window[str(y.device)],
            center=center,
            pad_mode="reflect",
            normalized=False,
            onesided=True,
            return_complex=True,
        )
    )

    spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9))

    spec = torch.matmul(mel_basis[str(fmax) + "_" + str(y.device)], spec)
    spec = spectral_normalize_torch(spec)

    return spec
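Usage sketch (illustrative, not part of the commit): with the CosyVoice defaults (24 kHz, hop 480, center=False), one second of audio yields 50 mel frames.

import torch
from chatterbox.models.s3gen.utils.mel import mel_spectrogram

wav = torch.randn(24000).clamp(-1, 1)  # 1 s of fake 24 kHz audio in [-1, 1]
mel = mel_spectrogram(wav)             # defaults: n_fft=1920, hop=480, 80 mels
# (n_fft - hop)/2 reflect padding on each side, then hop 480 => 50 frames/sec:
# mel.shape == (1, 80, 50)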
src/chatterbox/models/s3gen/xvector.py
ADDED
@@ -0,0 +1,428 @@
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)
# Modified from 3D-Speaker (https://github.com/alibaba-damo-academy/3D-Speaker)


from collections import OrderedDict
import torch
import torch.nn.functional as F
import torch.utils.checkpoint as cp
import torchaudio.compliance.kaldi as Kaldi


def pad_list(xs, pad_value):
    """Perform padding for the list of tensors.

    Args:
        xs (List): List of Tensors [(T_1, `*`), (T_2, `*`), ..., (T_B, `*`)].
        pad_value (float): Value for padding.

    Returns:
        Tensor: Padded tensor (B, Tmax, `*`).

    Examples:
        >>> x = [torch.ones(4), torch.ones(2), torch.ones(1)]
        >>> x
        [tensor([1., 1., 1., 1.]), tensor([1., 1.]), tensor([1.])]
        >>> pad_list(x, 0)
        tensor([[1., 1., 1., 1.],
                [1., 1., 0., 0.],
                [1., 0., 0., 0.]])

    """
    n_batch = len(xs)
    max_len = max(x.size(0) for x in xs)
    pad = xs[0].new(n_batch, max_len, *xs[0].size()[1:]).fill_(pad_value)

    for i in range(n_batch):
        pad[i, : xs[i].size(0)] = xs[i]

    return pad


def extract_feature(audio):
    features = []
    feature_times = []
    feature_lengths = []
    for au in audio:
        feature = Kaldi.fbank(au.unsqueeze(0), num_mel_bins=80)
        feature = feature - feature.mean(dim=0, keepdim=True)
        features.append(feature)
        feature_times.append(au.shape[0])
        feature_lengths.append(feature.shape[0])
    # padding for batch inference
    features_padded = pad_list(features, pad_value=0)
    # features = torch.cat(features)
    return features_padded, feature_lengths, feature_times


class BasicResBlock(torch.nn.Module):
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicResBlock, self).__init__()
        self.conv1 = torch.nn.Conv2d(
            in_planes, planes, kernel_size=3, stride=(stride, 1), padding=1, bias=False
        )
        self.bn1 = torch.nn.BatchNorm2d(planes)
        self.conv2 = torch.nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = torch.nn.BatchNorm2d(planes)

        self.shortcut = torch.nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = torch.nn.Sequential(
                torch.nn.Conv2d(
                    in_planes,
                    self.expansion * planes,
                    kernel_size=1,
                    stride=(stride, 1),
                    bias=False,
                ),
                torch.nn.BatchNorm2d(self.expansion * planes),
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out


class FCM(torch.nn.Module):
    def __init__(self, block=BasicResBlock, num_blocks=[2, 2], m_channels=32, feat_dim=80):
        super(FCM, self).__init__()
        self.in_planes = m_channels
        self.conv1 = torch.nn.Conv2d(1, m_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = torch.nn.BatchNorm2d(m_channels)

        self.layer1 = self._make_layer(block, m_channels, num_blocks[0], stride=2)
        self.layer2 = self._make_layer(block, m_channels, num_blocks[0], stride=2)

        self.conv2 = torch.nn.Conv2d(
            m_channels, m_channels, kernel_size=3, stride=(2, 1), padding=1, bias=False
        )
        self.bn2 = torch.nn.BatchNorm2d(m_channels)
        self.out_channels = m_channels * (feat_dim // 8)

    def _make_layer(self, block, planes, num_blocks, stride):
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return torch.nn.Sequential(*layers)

    def forward(self, x):
        x = x.unsqueeze(1)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = F.relu(self.bn2(self.conv2(out)))

        shape = out.shape
        out = out.reshape(shape[0], shape[1] * shape[2], shape[3])
        return out


def get_nonlinear(config_str, channels):
    nonlinear = torch.nn.Sequential()
    for name in config_str.split("-"):
        if name == "relu":
            nonlinear.add_module("relu", torch.nn.ReLU(inplace=True))
        elif name == "prelu":
            nonlinear.add_module("prelu", torch.nn.PReLU(channels))
        elif name == "batchnorm":
            nonlinear.add_module("batchnorm", torch.nn.BatchNorm1d(channels))
        elif name == "batchnorm_":
            nonlinear.add_module("batchnorm", torch.nn.BatchNorm1d(channels, affine=False))
        else:
            raise ValueError("Unexpected module ({}).".format(name))
    return nonlinear


def statistics_pooling(x, dim=-1, keepdim=False, unbiased=True, eps=1e-2):
    mean = x.mean(dim=dim)
    std = x.std(dim=dim, unbiased=unbiased)
    stats = torch.cat([mean, std], dim=-1)
    if keepdim:
        stats = stats.unsqueeze(dim=dim)
    return stats


class StatsPool(torch.nn.Module):
    def forward(self, x):
        return statistics_pooling(x)


class TDNNLayer(torch.nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        bias=False,
        config_str="batchnorm-relu",
    ):
        super(TDNNLayer, self).__init__()
        if padding < 0:
            assert (
                kernel_size % 2 == 1
            ), "Expect equal paddings, but got even kernel size ({})".format(kernel_size)
            padding = (kernel_size - 1) // 2 * dilation
        self.linear = torch.nn.Conv1d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias,
        )
        self.nonlinear = get_nonlinear(config_str, out_channels)

    def forward(self, x):
        x = self.linear(x)
        x = self.nonlinear(x)
        return x


class CAMLayer(torch.nn.Module):
    def __init__(
        self, bn_channels, out_channels, kernel_size, stride, padding, dilation, bias, reduction=2
    ):
        super(CAMLayer, self).__init__()
        self.linear_local = torch.nn.Conv1d(
            bn_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias,
        )
        self.linear1 = torch.nn.Conv1d(bn_channels, bn_channels // reduction, 1)
        self.relu = torch.nn.ReLU(inplace=True)
        self.linear2 = torch.nn.Conv1d(bn_channels // reduction, out_channels, 1)
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        y = self.linear_local(x)
        context = x.mean(-1, keepdim=True) + self.seg_pooling(x)
        context = self.relu(self.linear1(context))
        m = self.sigmoid(self.linear2(context))
        return y * m

    def seg_pooling(self, x, seg_len=100, stype="avg"):
        if stype == "avg":
            seg = F.avg_pool1d(x, kernel_size=seg_len, stride=seg_len, ceil_mode=True)
        elif stype == "max":
            seg = F.max_pool1d(x, kernel_size=seg_len, stride=seg_len, ceil_mode=True)
        else:
            raise ValueError("Wrong segment pooling type.")
        shape = seg.shape
        seg = seg.unsqueeze(-1).expand(*shape, seg_len).reshape(*shape[:-1], -1)
        seg = seg[..., : x.shape[-1]]
        return seg


class CAMDenseTDNNLayer(torch.nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        bn_channels,
        kernel_size,
        stride=1,
        dilation=1,
        bias=False,
        config_str="batchnorm-relu",
        memory_efficient=False,
    ):
        super(CAMDenseTDNNLayer, self).__init__()
        assert kernel_size % 2 == 1, "Expect equal paddings, but got even kernel size ({})".format(
            kernel_size
        )
        padding = (kernel_size - 1) // 2 * dilation
        self.memory_efficient = memory_efficient
        self.nonlinear1 = get_nonlinear(config_str, in_channels)
        self.linear1 = torch.nn.Conv1d(in_channels, bn_channels, 1, bias=False)
        self.nonlinear2 = get_nonlinear(config_str, bn_channels)
        self.cam_layer = CAMLayer(
            bn_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias,
        )

    def bn_function(self, x):
        return self.linear1(self.nonlinear1(x))

    def forward(self, x):
        if self.training and self.memory_efficient:
            x = cp.checkpoint(self.bn_function, x)
        else:
            x = self.bn_function(x)
        x = self.cam_layer(self.nonlinear2(x))
        return x


class CAMDenseTDNNBlock(torch.nn.ModuleList):
    def __init__(
        self,
        num_layers,
        in_channels,
        out_channels,
        bn_channels,
        kernel_size,
        stride=1,
        dilation=1,
        bias=False,
        config_str="batchnorm-relu",
        memory_efficient=False,
    ):
        super(CAMDenseTDNNBlock, self).__init__()
        for i in range(num_layers):
            layer = CAMDenseTDNNLayer(
                in_channels=in_channels + i * out_channels,
                out_channels=out_channels,
                bn_channels=bn_channels,
                kernel_size=kernel_size,
                stride=stride,
                dilation=dilation,
                bias=bias,
                config_str=config_str,
                memory_efficient=memory_efficient,
            )
            self.add_module("tdnnd%d" % (i + 1), layer)

    def forward(self, x):
        for layer in self:
            x = torch.cat([x, layer(x)], dim=1)
        return x


class TransitLayer(torch.nn.Module):
    def __init__(self, in_channels, out_channels, bias=True, config_str="batchnorm-relu"):
        super(TransitLayer, self).__init__()
        self.nonlinear = get_nonlinear(config_str, in_channels)
        self.linear = torch.nn.Conv1d(in_channels, out_channels, 1, bias=bias)

    def forward(self, x):
        x = self.nonlinear(x)
        x = self.linear(x)
        return x


class DenseLayer(torch.nn.Module):
    def __init__(self, in_channels, out_channels, bias=False, config_str="batchnorm-relu"):
        super(DenseLayer, self).__init__()
        self.linear = torch.nn.Conv1d(in_channels, out_channels, 1, bias=bias)
        self.nonlinear = get_nonlinear(config_str, out_channels)

    def forward(self, x):
        if len(x.shape) == 2:
            x = self.linear(x.unsqueeze(dim=-1)).squeeze(dim=-1)
        else:
            x = self.linear(x)
        x = self.nonlinear(x)
        return x


# @tables.register("model_classes", "CAMPPlus")
class CAMPPlus(torch.nn.Module):
    def __init__(
        self,
        feat_dim=80,
        embedding_size=192,
        growth_rate=32,
        bn_size=4,
        init_channels=128,
        config_str="batchnorm-relu",
        memory_efficient=True,
        output_level="segment",
        **kwargs,
    ):
        super().__init__()

        self.head = FCM(feat_dim=feat_dim)
        channels = self.head.out_channels
        self.output_level = output_level

        self.xvector = torch.nn.Sequential(
            OrderedDict(
                [
                    (
                        "tdnn",
                        TDNNLayer(
                            channels,
                            init_channels,
                            5,
                            stride=2,
                            dilation=1,
                            padding=-1,
                            config_str=config_str,
                        ),
                    ),
                ]
            )
        )
        channels = init_channels
        for i, (num_layers, kernel_size, dilation) in enumerate(
            zip((12, 24, 16), (3, 3, 3), (1, 2, 2))
        ):
            block = CAMDenseTDNNBlock(
                num_layers=num_layers,
                in_channels=channels,
                out_channels=growth_rate,
                bn_channels=bn_size * growth_rate,
                kernel_size=kernel_size,
                dilation=dilation,
                config_str=config_str,
                memory_efficient=memory_efficient,
            )
            self.xvector.add_module("block%d" % (i + 1), block)
            channels = channels + num_layers * growth_rate
            self.xvector.add_module(
                "transit%d" % (i + 1),
                TransitLayer(channels, channels // 2, bias=False, config_str=config_str),
            )
            channels //= 2

        self.xvector.add_module("out_nonlinear", get_nonlinear(config_str, channels))

        if self.output_level == "segment":
            self.xvector.add_module("stats", StatsPool())
            self.xvector.add_module(
                "dense", DenseLayer(channels * 2, embedding_size, config_str="batchnorm_")
            )
        else:
            assert (
                self.output_level == "frame"
            ), "`output_level` should be set to 'segment' or 'frame'. "

        for m in self.modules():
            if isinstance(m, (torch.nn.Conv1d, torch.nn.Linear)):
                torch.nn.init.kaiming_normal_(m.weight.data)
                if m.bias is not None:
                    torch.nn.init.zeros_(m.bias)

    def forward(self, x):
        x = x.permute(0, 2, 1)  # (B,T,F) => (B,F,T)
        x = self.head(x)
        x = self.xvector(x)
        if self.output_level == "frame":
            x = x.transpose(1, 2)
        return x

    def inference(self, audio_list):
        speech, speech_lengths, speech_times = extract_feature(audio_list)
        results = self.forward(speech.to(torch.float32))
        return results
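Usage sketch (illustrative, not part of the commit): extracting a speaker embedding from raw 16 kHz audio. The model here is randomly initialized; real use loads pretrained weights.

import torch
from chatterbox.models.s3gen.xvector import CAMPPlus

model = CAMPPlus(feat_dim=80, embedding_size=192).eval()
wav = torch.randn(16000)            # 1 s of fake 16 kHz audio
with torch.no_grad():
    emb = model.inference([wav])    # fbank -> CAM++ -> stats pooling
# emb.shape == (1, 192): one segment-level embedding per input waveform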
src/chatterbox/models/s3tokenizer/__init__.py
ADDED
@@ -0,0 +1,30 @@
from .s3tokenizer import (
    S3_SR,
    S3_HOP,
    S3_TOKEN_HOP,
    S3_TOKEN_RATE,
    SPEECH_VOCAB_SIZE,
    S3Tokenizer,
)


SOS = SPEECH_VOCAB_SIZE
EOS = SPEECH_VOCAB_SIZE + 1


def drop_invalid_tokens(x):
    """Drop SoS and EoS"""
    assert len(x.shape) == 1 or (len(x.shape) == 2 and x.shape[0] == 1), "only batch size of one allowed for now"
    if SOS in x:
        s = (x == SOS).nonzero(as_tuple=True)[0].squeeze(0) + 1
    else:
        s = 0

    if EOS in x:
        e = (x == EOS).nonzero(as_tuple=True)[0].squeeze(0)
    else:
        e = None

    x = x[s: e]
    return x
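Usage sketch (illustrative, not part of the commit): trimming a generated token stream at the SOS/EOS sentinels defined above (SOS == 6561, EOS == 6562 given SPEECH_VOCAB_SIZE == 6561).

import torch
from chatterbox.models.s3tokenizer import SOS, EOS, drop_invalid_tokens

seq = torch.tensor([SOS, 10, 20, 30, EOS, 0, 0])
clean = drop_invalid_tokens(seq)  # keeps only the span between the sentinels
# clean == tensor([10, 20, 30])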
src/chatterbox/models/s3tokenizer/s3tokenizer.py
ADDED
@@ -0,0 +1,168 @@
from typing import List, Tuple

import numpy as np
import librosa
import torch
import torch.nn.functional as F
from s3tokenizer.utils import padding
from s3tokenizer.model_v2 import (
    S3TokenizerV2,
    ModelConfig,
)


# Sampling rate of the inputs to S3TokenizerV2
S3_SR = 16_000
S3_HOP = 160  # 100 frames/sec
S3_TOKEN_HOP = 640  # 25 tokens/sec
S3_TOKEN_RATE = 25
SPEECH_VOCAB_SIZE = 6561


class S3Tokenizer(S3TokenizerV2):
    """
    s3tokenizer.S3TokenizerV2 with the following changes:
    - a more integrated `forward`
    - compute `log_mel_spectrogram` using `_mel_filters` and `window` in `register_buffers`
    """

    ignore_state_dict_missing = ("_mel_filters", "window")

    def __init__(
        self,
        name: str = "speech_tokenizer_v2_25hz",
        config: ModelConfig = ModelConfig()
    ):
        super().__init__(name)

        self.n_fft = 400
        _mel_filters = librosa.filters.mel(
            sr=S3_SR,
            n_fft=self.n_fft,
            n_mels=config.n_mels
        )
        self.register_buffer(
            "_mel_filters",
            torch.FloatTensor(_mel_filters),
        )

        self.register_buffer(
            "window",
            torch.hann_window(self.n_fft),
        )

    def pad(self, wavs, sr) -> List[torch.Tensor]:
        """
        Given a list of wavs with the same `sample_rate`, pad them so that the length is a multiple of 40ms (S3 runs at 25 tokens/sec).
        """
        processed_wavs = []
        for wav in wavs:
            if isinstance(wav, np.ndarray):
                wav = torch.from_numpy(wav)
            if wav.dim() == 1:
                wav = wav.unsqueeze(0)

            n_tokens = (wav.shape[1] / sr) * S3_TOKEN_RATE
            n_tokens = np.ceil(n_tokens)
            intended_wav_len = n_tokens * (sr / S3_TOKEN_RATE)
            intended_wav_len = int(intended_wav_len)
            wav = torch.nn.functional.pad(
                wav,
                (0, intended_wav_len - wav.shape[-1]),
                mode="constant",
                value=0
            )
            processed_wavs.append(wav)
        return processed_wavs

    def _prepare_audio(self, wavs):
        """Prepare a list of audios for s3tokenizer processing."""
        processed_wavs = []
        for wav in wavs:
            if isinstance(wav, np.ndarray):
                wav = torch.from_numpy(wav)
            if wav.dim() == 1:
                wav = wav.unsqueeze(0)

            processed_wavs.append(wav)
        return processed_wavs

    @torch.no_grad()
    def forward(
        self,
        wavs: torch.Tensor,
        accelerator: 'Accelerator' = None,
        max_len: int = None,
    ) -> Tuple[torch.Tensor, torch.LongTensor]:
        """
        NOTE: mel-spec has a hop size of 160 points (100 frame/sec).
        FIXME: this class inherits `nn.Module` but doesn't accept `torch.Tensor` and handles a list of wavs one by one, which is unexpected.

        Args
        ----
        - `wavs`: 16 kHz speech audio
        - `max_len` max length to truncate the output sequence to (25 token/sec).
        NOTE: please pad the waveform if longer sequence is needed.
        """
        processed_wavs = self._prepare_audio(wavs)
        mels, mel_lens = [], []
        for wav in processed_wavs:
            wav = wav.to(self.device)
            mel = self.log_mel_spectrogram(wav)  # [B=1, F, T]
            if max_len is not None:
                mel = mel[..., :max_len * 4]  # num_mel_frames = 4 * num_tokens
            mels.append(mel.squeeze(0))

        mels, mel_lens = padding(mels)
        if accelerator is None:
            tokenizer = self
        else:
            tokenizer = accelerator.unwrap_model(self)

        speech_tokens, speech_token_lens = tokenizer.quantize(mels, mel_lens.to(self.device))
        return (
            speech_tokens.long().detach(),
            speech_token_lens.long().detach(),
        )

    def log_mel_spectrogram(
        self,
        audio: torch.Tensor,
        padding: int = 0,
    ):
        """
        Compute the log-Mel spectrogram of the given audio.

        Parameters
        ----------
        audio: torch.Tensor, shape = (*)
            The path to audio or either a NumPy array or Tensor containing the
            audio waveform in 16 kHz

        padding: int
            Number of zero samples to pad to the right

        Returns
        -------
        torch.Tensor, shape = (128, n_frames)
            A Tensor that contains the Mel spectrogram
        """
        if not torch.is_tensor(audio):
            audio = torch.from_numpy(audio)

        audio = audio.to(self.device)
        if padding > 0:
            audio = F.pad(audio, (0, padding))
        stft = torch.stft(
            audio, self.n_fft, S3_HOP,
            window=self.window.to(self.device),
            return_complex=True
        )
        magnitudes = stft[..., :-1].abs()**2

        mel_spec = self._mel_filters.to(self.device) @ magnitudes

        log_spec = torch.clamp(mel_spec, min=1e-10).log10()
        log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
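Usage sketch (illustrative, not part of the commit): the intended call pattern for the tokenizer above, assuming the upstream `s3tokenizer` package's `S3TokenizerV2` constructor and `quantize` behave as this file relies on. Weights here are whatever the parent class initializes; the full pipeline loads the released checkpoint.

import torch
from chatterbox.models.s3tokenizer import S3_SR, S3Tokenizer

tokenizer = S3Tokenizer()                  # "speech_tokenizer_v2_25hz"
wav_16k = torch.randn(1, S3_SR)            # 1 s of fake 16 kHz audio
wavs = tokenizer.pad([wav_16k], sr=S3_SR)  # pad to a multiple of 40 ms
tokens, token_lens = tokenizer(wavs)       # ~25 speech tokens per second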
src/chatterbox/models/t3/__init__.py
ADDED
@@ -0,0 +1 @@
from .t3 import T3
src/chatterbox/models/t3/inference/alignment_stream_analyzer.py
ADDED
@@ -0,0 +1,154 @@
# Copyright (c) 2025 Resemble AI
# Author: John Meade, Jeremy Hsu
# MIT License
import logging
import torch
from dataclasses import dataclass
from types import MethodType


logger = logging.getLogger(__name__)


@dataclass
class AlignmentAnalysisResult:
    # was this frame detected as being part of a noisy beginning chunk with potential hallucinations?
    false_start: bool
    # was this frame detected as being part of a long tail with potential hallucinations?
    long_tail: bool
    # was this frame detected as repeating existing text content?
    repetition: bool
    # was the alignment position of this frame too far from the previous frame?
    discontinuity: bool
    # has inference reached the end of the text tokens? eg, this remains false if inference stops early
    complete: bool
    # approximate position in the text token sequence. Can be used for generating online timestamps.
    position: int


class AlignmentStreamAnalyzer:
    def __init__(self, tfmr, queue, text_tokens_slice, alignment_layer_idx=9, eos_idx=0):
        """
        Some transformer TTS models implicitly solve text-speech alignment in one or more of their self-attention
        activation maps. This module exploits this to perform online integrity checks while streaming.
        A hook is injected into the specified attention layer, and heuristics are used to determine alignment
        position, repetition, etc.

        NOTE: currently requires no queues.
        """
        # self.queue = queue
        self.text_tokens_slice = (i, j) = text_tokens_slice
        self.eos_idx = eos_idx
        self.alignment = torch.zeros(0, j - i)
        # self.alignment_bin = torch.zeros(0, j - i)
        self.curr_frame_pos = 0
        self.text_position = 0

        self.started = False
        self.started_at = None

        self.complete = False
        self.completed_at = None

        # Using `output_attentions=True` is incompatible with optimized attention kernels, so
        # using it for all layers slows things down too much. We can apply it to just one layer
        # by intercepting the kwargs and adding a forward hook (credit: jrm)
        self.last_aligned_attn = None
        self._add_attention_spy(tfmr, alignment_layer_idx)

    def _add_attention_spy(self, tfmr, alignment_layer_idx):
        """
        Adds a forward hook to a specific attention layer to collect outputs.
        Using `output_attentions=True` is incompatible with optimized attention kernels, so
        using it for all layers slows things down too much.
        (credit: jrm)
        """

        def attention_forward_hook(module, input, output):
            """
            See `LlamaAttention.forward`; the output is a 3-tuple: `attn_output, attn_weights, past_key_value`.
            NOTE:
            - When `output_attentions=True`, `LlamaSdpaAttention.forward` calls `LlamaAttention.forward`.
            - `attn_output` has shape [B, H, T0, T0] for the 0th entry, and [B, H, 1, T0+i] for the rest i-th.
            """
            step_attention = output[1].cpu()  # (B, 16, N, N)
            self.last_aligned_attn = step_attention[0].mean(0)  # (N, N)

        target_layer = tfmr.layers[alignment_layer_idx].self_attn
        hook_handle = target_layer.register_forward_hook(attention_forward_hook)

        # Backup original forward
        original_forward = target_layer.forward

        def patched_forward(self, *args, **kwargs):
            kwargs['output_attentions'] = True
            return original_forward(*args, **kwargs)

        # TODO: how to unpatch it?
        target_layer.forward = MethodType(patched_forward, target_layer)

    def step(self, logits):
        """
        Emits an AlignmentAnalysisResult into the output queue, and potentially modifies the logits to force an EOS.
        """
        # extract approximate alignment matrix chunk (1 frame at a time after the first chunk)
        aligned_attn = self.last_aligned_attn  # (N, N)
        i, j = self.text_tokens_slice
        if self.curr_frame_pos == 0:
            # first chunk has conditioning info, text tokens, and BOS token
            A_chunk = aligned_attn[j:, i:j].clone().cpu()  # (T, S)
        else:
            # subsequent chunks have 1 frame due to KV-caching
            A_chunk = aligned_attn[:, i:j].clone().cpu()  # (1, S)

        # TODO: monotonic masking; could have issue b/c spaces are often skipped.
        A_chunk[:, self.curr_frame_pos + 1:] = 0

        self.alignment = torch.cat((self.alignment, A_chunk), dim=0)

        A = self.alignment
        T, S = A.shape

        # update position
        cur_text_posn = A_chunk[-1].argmax()
        discontinuity = not (-4 < cur_text_posn - self.text_position < 7)  # NOTE: very lenient!
        if not discontinuity:
            self.text_position = cur_text_posn

        # Hallucinations at the start of speech show up as activations at the bottom of the attention maps!
        # To mitigate this, we just wait until there are no activations far off-diagonal in the last 2 tokens,
        # and there are some strong activations in the first few tokens.
        false_start = (not self.started) and (A[-2:, -2:].max() > 0.1 or A[:, :4].max() < 0.5)
        self.started = not false_start
        if self.started and self.started_at is None:
            self.started_at = T

        # Is generation likely complete?
        self.complete = self.complete or self.text_position >= S - 3
        if self.complete and self.completed_at is None:
            self.completed_at = T

        # NOTE: EOS rarely assigned activations, and second-last token is often punctuation, so use last 3 tokens.
        # NOTE: due to the false-start behaviour, we need to make sure we skip activations for the first few tokens.
        last_text_token_duration = A[15:, -3:].sum()

        # Activations for the final token that last too long are likely hallucinations.
        long_tail = self.complete and (A[self.completed_at:, -3:].sum(dim=0).max() >= 10)  # 400ms

        # If there are activations in previous tokens after generation has completed, assume this is a repetition error.
        repetition = self.complete and (A[self.completed_at:, :-5].max(dim=1).values.sum() > 5)

        # If a bad ending is detected, force emit EOS by modifying logits
        # NOTE: this means logits may be inconsistent with latents!
        if long_tail or repetition:
            logger.warning(f"forcing EOS token, {long_tail=}, {repetition=}")
            # (±2**15 is safe for all dtypes >= 16bit)
            logits = -(2**15) * torch.ones_like(logits)
            logits[..., self.eos_idx] = 2**15

        # Suppress EoS to prevent early termination
        if cur_text_posn < S - 3:  # FIXME: arbitrary
            logits[..., self.eos_idx] = -2**15

        self.curr_frame_pos += 1
        return logits
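Usage sketch (illustrative, not part of the commit): the logit-override trick `step()` uses to force or suppress EOS, isolated on a dummy tensor. The vocab size here is arbitrary.

import torch

eos_idx = 0
logits = torch.randn(1, 4096)             # fake next-token logits
# Force EOS: crush every logit, then raise the EOS logit.
# ±2**15 stays finite in fp16/bf16, so this is safe for dtypes >= 16 bit.
forced = -(2**15) * torch.ones_like(logits)
forced[..., eos_idx] = 2**15
assert forced.argmax(dim=-1).item() == eos_idx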
src/chatterbox/models/t3/inference/t3_hf_backend.py
ADDED
@@ -0,0 +1,116 @@
1 |
+
from typing import Optional
|
2 |
+
|
3 |
+
import torch
|
4 |
+
from torch import nn as nn
|
5 |
+
from transformers import LlamaConfig, LlamaModel, LlamaPreTrainedModel, GenerationMixin
|
6 |
+
from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions
|
7 |
+
|
8 |
+
|
9 |
+
class T3HuggingfaceBackend(LlamaPreTrainedModel, GenerationMixin):
|
10 |
+
"""
|
11 |
+
Override some HuggingFace interface methods so we can use the standard `generate` method with our
|
12 |
+
custom embedding / logit layers.
|
13 |
+
|
14 |
+
NOTE: need to extend "*PreTrainedModel" to avoid re-initializing weights!
|
15 |
+
"""
|
16 |
+
|
17 |
+
    def __init__(
        self,
        config: LlamaConfig,
        llama: LlamaModel,
        *,
        speech_enc,
        speech_head,
        latents_queue=None,
        logits_queue=None,
        alignment_stream_analyzer: 'AlignmentStreamAnalyzer'=None,
    ):
        super().__init__(config)
        self.model = llama
        self.speech_enc = speech_enc
        self.speech_head = speech_head
        self._added_cond = False
        self.alignment_stream_analyzer = alignment_stream_analyzer

    @torch.inference_mode()
    def prepare_inputs_for_generation(
        self, input_ids: torch.Tensor, decoder_cond: torch.Tensor, use_cache: bool, past_key_values=None,
        # This argument was introduced in some recent version of transformers (>=4.29.1)
        cache_position=None
    ):
        """
        This is a method used by huggingface's generate() method.
        Overridden here to apply our custom speech token embedding layer.

        :param input_ids: (B, S) int64 tensors of input tokens.
        :param decoder_cond: (B, T, C) float32 tensor of conditioning (prefixed to <input_embeds>)
        """

        # Make use of the kv cache: only the last input ID is new; trim away all the ones before it
        if not use_cache:
            past_key_values = None
        if past_key_values is not None:
            input_ids = input_ids[:, -1:]

        # custom speech token embedding layer
        inputs_embeds = self.speech_enc(input_ids)

        # prefix decoder conditioning if applicable
        if not self._added_cond:
            assert past_key_values is not None  # should be first step
            if decoder_cond.size(0) != inputs_embeds.size(0):
                decoder_cond = decoder_cond.expand(inputs_embeds.size(0), -1, -1)
            inputs_embeds = torch.cat([decoder_cond, inputs_embeds], dim=1)
            self._added_cond = True

        return {
            "inputs_embeds": inputs_embeds,
            "past_key_values": past_key_values,
            "use_cache": use_cache,
        }

    @torch.inference_mode()
    def forward(
        self,
        inputs_embeds: torch.Tensor,
        past_key_values: Optional[torch.Tensor]=None,
        use_cache=True,
        output_attentions=False,
        output_hidden_states=True,
        return_dict=True,
    ):
        """
        This is a method used by huggingface's generate() method.
        Overridden here to apply our custom layer norm and speech logit projection layers.

        :param inputs_embeds: (B, S, C) float32 tensor of conditioning inputs. If past key values are given,
            S should be 1.
        """
        is_large_input = inputs_embeds.size(1) != 1
        has_cache = past_key_values is not None and len(past_key_values) > 0
        assert not (is_large_input and has_cache)
        assert return_dict
        assert output_hidden_states

        tfmr_out = self.model(
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
        )
        hidden_states = tfmr_out.hidden_states[-1]  # (B, seq, dim)

        logits = self.speech_head(hidden_states)
        # assert inputs_embeds.size(0) == 1  # (disabled for CFG)

        # NOTE: hallucination handler may modify logits to force emit an EOS token
        # logits = self.alignment_stream_analyzer.step(logits)

        return CausalLMOutputWithCrossAttentions(
            logits=logits,
            past_key_values=tfmr_out.past_key_values,
            hidden_states=tfmr_out.hidden_states,
            attentions=tfmr_out.attentions,
        )
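
Usage sketch (illustrative, not part of the commit): how generate()-style code drives this backend. The toy dimensions below are hypothetical, chosen only so the snippet runs quickly; t3.py later in this diff constructs the backend the same way around the real 520M Llama. Note that HF's generate() passes an initially empty cache object on the first step, which is why prepare_inputs_for_generation can assert that past_key_values is not None before the conditioning has been prefixed.

import torch
from torch import nn
from transformers import DynamicCache, LlamaConfig, LlamaModel

# Hypothetical toy sizes, for illustration only.
cfg = LlamaConfig(vocab_size=8, hidden_size=32, intermediate_size=64,
                  num_hidden_layers=2, num_attention_heads=4)
backend = T3HuggingfaceBackend(
    config=cfg,
    llama=LlamaModel(cfg),
    speech_enc=nn.Embedding(10, 32),  # stand-in speech token embedding
    speech_head=nn.Linear(32, 10),    # stand-in logit projection
)

decoder_cond = torch.randn(1, 4, 32)             # (B, T, C) conditioning prefix
input_ids = torch.zeros(1, 1, dtype=torch.long)  # first speech token
kwargs = backend.prepare_inputs_for_generation(
    input_ids, decoder_cond, use_cache=True, past_key_values=DynamicCache())
out = backend(**kwargs)
print(out.logits.shape)  # (1, 5, 10): 4 conditioning positions + 1 token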
src/chatterbox/models/t3/llama_configs.py
ADDED
@@ -0,0 +1,37 @@
LLAMA_520M_CONFIG_DICT = dict(
    # Arbitrary small number that won't cause problems when loading.
    # These params are unused due to custom input layers.
    vocab_size=8,
    # default params needed for loading most pretrained 1B weights
    max_position_embeddings=131072,
    hidden_size=1024,
    intermediate_size=4096,
    num_hidden_layers=30,
    num_attention_heads=16,
    attn_implementation="sdpa",
    head_dim=64,
    tie_word_embeddings=False,
    hidden_act="silu",
    attention_bias=False,
    attention_dropout=0.0,
    initializer_range=0.02,
    mlp_bias=False,
    model_type="llama",
    num_key_value_heads=16,
    pretraining_tp=1,
    rms_norm_eps=1e-05,
    rope_scaling=dict(
        factor=8.0,
        high_freq_factor=4.0,
        low_freq_factor=1.0,
        original_max_position_embeddings=8192,
        rope_type="llama3"
    ),
    rope_theta=500000.0,
    torch_dtype="bfloat16",
    use_cache=True,
)

LLAMA_CONFIGS = {
    "Llama_520M": LLAMA_520M_CONFIG_DICT,
}
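
A quick sketch of how this dict is consumed (t3.py below uses the same pattern): it is unpacked straight into transformers.LlamaConfig.

from transformers import LlamaConfig

cfg = LlamaConfig(**LLAMA_520M_CONFIG_DICT)
print(cfg.hidden_size)  # 1024, surfaced to the rest of the code as T3Config.n_channels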
src/chatterbox/models/t3/modules/cond_enc.py
ADDED
@@ -0,0 +1,97 @@
from dataclasses import dataclass
from typing import Optional

import torch
from torch import nn, Tensor

from .perceiver import Perceiver
from .t3_config import T3Config


@dataclass
class T3Cond:
    """
    Dataclass container for most / all conditioning info.
    TODO: serialization methods aren't used, keeping them around for convenience
    """

    speaker_emb: Tensor
    clap_emb: Optional[Tensor] = None
    cond_prompt_speech_tokens: Optional[Tensor] = None
    cond_prompt_speech_emb: Optional[Tensor] = None
    emotion_adv: Optional[Tensor] = 0.5

    def to(self, *, device=None, dtype=None):
        "Cast to a device and dtype. Dtype casting is ignored for long/int tensors."
        for k, v in self.__dict__.items():
            if torch.is_tensor(v):
                is_fp = type(v.view(-1)[0].item()) is not int
                setattr(self, k, v.to(device=device, dtype=dtype if is_fp else None))
        return self

    def save(self, fpath):
        torch.save(self.__dict__, fpath)

    @staticmethod
    def load(fpath, map_location="cpu"):
        kwargs = torch.load(fpath, map_location=map_location, weights_only=True)
        return T3Cond(**kwargs)


class T3CondEnc(nn.Module):
    """
    Handle all non-text conditioning, like speaker embeddings / prompts, CLAP, emotion, etc.
    """

    def __init__(self, hp: T3Config):
        super().__init__()
        self.hp = hp
        if hp.encoder_type == "voice_encoder":
            self.spkr_enc = nn.Linear(hp.speaker_embed_size, hp.n_channels)
        else:
            raise NotImplementedError(str(hp.encoder_type))

        # emotion adv
        self.emotion_adv_fc = None
        if hp.emotion_adv:
            self.emotion_adv_fc = nn.Linear(1, hp.n_channels, bias=False)

        # perceiver resampler
        self.perceiver = None
        if hp.use_perceiver_resampler:
            self.perceiver = Perceiver()

    def forward(self, cond: T3Cond):
        # Validate
        assert (cond.cond_prompt_speech_tokens is None) == (cond.cond_prompt_speech_emb is None), \
            "no embeddings for cond_prompt_speech_tokens"

        # Speaker embedding projection
        cond_spkr = self.spkr_enc(cond.speaker_emb.view(-1, self.hp.speaker_embed_size))[:, None]  # (B, 1, dim)
        empty = torch.zeros_like(cond_spkr[:, :0])  # (B, 0, dim)

        # TODO CLAP
        assert cond.clap_emb is None, "clap_embed not implemented"
        cond_clap = empty  # (B, 0, dim)

        # Cond prompt
        cond_prompt_speech_emb = cond.cond_prompt_speech_emb
        if cond_prompt_speech_emb is None:
            cond_prompt_speech_emb = empty  # (B, 0, dim)
        elif self.hp.use_perceiver_resampler:
            cond_prompt_speech_emb = self.perceiver(cond_prompt_speech_emb)

        # Emotion Adv: must provide a value if this model uses emotion conditioning
        cond_emotion_adv = empty  # (B, 0, dim)
        if self.hp.emotion_adv:
            assert cond.emotion_adv is not None
            cond_emotion_adv = self.emotion_adv_fc(cond.emotion_adv.view(-1, 1, 1))

        # Concat and return
        cond_embeds = torch.cat((
            cond_spkr,
            cond_clap,
            cond_prompt_speech_emb,
            cond_emotion_adv,
        ), dim=1)
        return cond_embeds
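
A minimal sketch of the conditioning path, assuming the package is importable as chatterbox (per the src/ layout in this diff). With no CLAP embedding and no speech prompt, the output is just one speaker token plus one emotion token:

import torch
from chatterbox.models.t3.modules.cond_enc import T3Cond, T3CondEnc
from chatterbox.models.t3.modules.t3_config import T3Config

hp = T3Config()  # defaults: voice_encoder, perceiver resampler, emotion_adv
enc = T3CondEnc(hp)

cond = T3Cond(
    speaker_emb=torch.randn(2, hp.speaker_embed_size),  # (B, 256)
    emotion_adv=torch.full((2,), 0.5),
)
print(enc(cond).shape)  # torch.Size([2, 2, 1024]): speaker token + emotion token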
src/chatterbox/models/t3/modules/learned_pos_emb.py
ADDED
@@ -0,0 +1,32 @@
from typing import Union

import torch
from torch import nn, Tensor


class LearnedPositionEmbeddings(nn.Module):
    def __init__(self, seq_len, model_dim, init=.02):
        super().__init__()
        self.emb = nn.Embedding(seq_len, model_dim)
        # Initializing this way is standard for GPT-2
        self.emb.weight.data.normal_(mean=0.0, std=init)

    def forward(self, x):
        """
        Returns positional embeddings for index 0 up to the length of x
        """
        sl = x.shape[1]
        return self.emb(torch.arange(0, sl, device=x.device))

    def get_fixed_embedding(self, idx: 'Union[int, Tensor]'):
        """
        Args:
            idx: scalar int or an integer tensor of shape (T,) or (B, T)
        Returns:
            positional embeddings for given indices, shape (B, T, dim), ie (1, 1, dim) for int input
        """
        device = self.emb.weight.device
        idx = idx.to(device) if torch.is_tensor(idx) else torch.tensor(idx, device=device)
        idx = torch.atleast_2d(idx)
        assert idx.ndim == 2
        return self.emb(idx)  # (B, T, dim)
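
A small sketch of the two call paths: forward embeds positions for a whole sequence at once, while get_fixed_embedding fetches the embedding for a single decoding step when generating with a kv-cache:

import torch
from chatterbox.models.t3.modules.learned_pos_emb import LearnedPositionEmbeddings

pos = LearnedPositionEmbeddings(seq_len=100, model_dim=16)

tokens = torch.zeros(2, 7, dtype=torch.long)
print(pos(tokens).shape)                 # torch.Size([7, 16]); broadcasts over the batch
print(pos.get_fixed_embedding(3).shape)  # torch.Size([1, 1, 16]); one decode step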
src/chatterbox/models/t3/modules/perceiver.py
ADDED
@@ -0,0 +1,212 @@
# Copyright (c) 2025 Resemble AI
# Author: Manmay Nakhashi
# MIT License
import math

import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange


class RelativePositionBias(nn.Module):
    def __init__(self, scale, causal=False, num_buckets=32, max_distance=128, heads=8):
        super().__init__()
        self.scale = scale
        self.causal = causal
        self.num_buckets = num_buckets
        self.max_distance = max_distance
        self.relative_attention_bias = nn.Embedding(num_buckets, heads)

    @staticmethod
    def _relative_position_bucket(relative_position, causal=True, num_buckets=32, max_distance=128):
        ret = 0
        n = -relative_position
        if not causal:
            num_buckets //= 2
            ret += (n < 0).long() * num_buckets
            n = torch.abs(n)
        else:
            n = torch.max(n, torch.zeros_like(n))

        max_exact = num_buckets // 2
        is_small = n < max_exact

        val_if_large = max_exact + (
            torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
        ).long()
        val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))

        ret += torch.where(is_small, n, val_if_large)
        return ret

    def forward(self, qk_dots):
        i, j, device = *qk_dots.shape[-2:], qk_dots.device
        q_pos = torch.arange(i, dtype=torch.long, device=device)
        k_pos = torch.arange(j, dtype=torch.long, device=device)
        rel_pos = k_pos[None, :] - q_pos[:, None]
        rp_bucket = self._relative_position_bucket(rel_pos, causal=self.causal, num_buckets=self.num_buckets,
                                                   max_distance=self.max_distance)
        values = self.relative_attention_bias(rp_bucket)
        bias = rearrange(values, 'i j h -> () h i j')
        return qk_dots + (bias * self.scale)


class AttentionQKV(nn.Module):
    def __init__(self, n_heads, head_dim, dropout_rate=0.1, scale=None, flash=False):
        super().__init__()
        self.n_heads = n_heads
        self.head_dim = head_dim
        self.scale = scale if scale is not None else head_dim ** -0.5
        self.flash = flash
        self.dropout_rate = dropout_rate
        self.dropout = nn.Dropout(dropout_rate)
        self.flash_config = self.setup_flash_config() if flash else None

    def setup_flash_config(self):
        # Setup flash attention configuration
        flash_config = {
            'enable_flash': True,
            'enable_math': True,
            'enable_mem_efficient': True
        }
        return flash_config

    def forward(self, q, k, v, mask=None):
        q, k, v = [self.split_heads(tensor) for tensor in [q, k, v]]
        if self.flash:
            out = self.flash_attention(q, k, v, mask=mask)
        else:
            out = self.scaled_dot_product_attention(q, k, v, mask=mask)

        return self.combine_heads(out)

    def scaled_dot_product_attention(self, q, k, v, mask=None):
        sim = torch.einsum("bhlt,bhls->bhts", q, k) * self.scale
        if mask is not None:
            sim = sim.masked_fill(mask == 0, float('-inf'))
        attn = torch.softmax(sim, dim=-1)
        attn = self.dropout(attn)
        return torch.einsum("bhts,bhls->bhlt", attn, v)

    def flash_attention(self, q, k, v, mask=None):
        config = self.flash_config if self.flash_config else {}
        with torch.backends.cuda.sdp_kernel(**config):
            out = F.scaled_dot_product_attention(
                q, k, v,
                attn_mask=mask,
                dropout_p=self.dropout_rate if self.training else 0.
            )
        return out

    def split_heads(self, x):
        bs, length, _ = x.shape
        x = x.view(bs, length, self.n_heads, self.head_dim)
        return x.permute(0, 2, 1, 3)

    def combine_heads(self, x):
        bs, _, length, _ = x.shape
        x = x.permute(0, 2, 1, 3).contiguous()
        return x.view(bs, length, -1)


class AttentionBlock2(nn.Module):
    """
    An attention block that allows spatial positions to attend to each other,
    using AttentionQKV and separate linear transformations for Q, K, and V.
    """

    def __init__(
        self,
        channels,
        num_heads=1,
        num_head_channels=-1,
        relative_pos_embeddings=False,
        flash_attention=True,
        dropout_rate=0.2,
        scale=None
    ):
        super().__init__()
        self.channels = channels

        if num_head_channels == -1:
            self.num_heads = num_heads
        else:
            assert (
                channels % num_head_channels == 0
            ), f"channels {channels} is not divisible by num_head_channels {num_head_channels}"
            self.num_heads = channels // num_head_channels

        self.norm = nn.LayerNorm(channels)

        # Separate linear layers for Q, K, and V
        self.to_q = nn.Linear(channels, channels)
        self.to_k = nn.Linear(channels, channels)
        self.to_v = nn.Linear(channels, channels)

        self.attention = AttentionQKV(self.num_heads, channels // self.num_heads, dropout_rate=dropout_rate, flash=flash_attention, scale=scale)

        self.proj_out = nn.Linear(channels, channels)

        if relative_pos_embeddings:
            self.relative_pos_embeddings = RelativePositionBias(scale=(channels // self.num_heads) ** .5, causal=False, heads=num_heads, num_buckets=32, max_distance=64)
        else:
            self.relative_pos_embeddings = None

    def forward(self, x1, x2, mask=None):
        b1, c1, *spatial1 = x1.shape
        b2, c2, *spatial2 = x2.shape

        x1_norm = self.norm(x1)
        x2_norm = self.norm(x2)

        q = self.to_q(x1_norm)
        k = self.to_k(x2_norm)
        v = self.to_v(x2_norm)

        h = self.attention(q, k, v, mask=mask)
        h = self.proj_out(h)

        return (x1 + h).reshape(b1, c1, *spatial1)


class Perceiver(nn.Module):
    """Inspired by https://arxiv.org/abs/2103.03206"""
    def __init__(self, pre_attention_query_token=32, pre_attention_query_size=1024, embedding_dim=1024, num_attn_heads=4):
        """
        Initialize the perceiver module.

        :param pre_attention_query_token: Number of query tokens for pre-attention
        :param pre_attention_query_size: Size of each query token
        :param embedding_dim: Dimension of the embedding space
        :param num_attn_heads: Number of attention heads
        """
        super().__init__()

        # Initialize the pre-attention query parameter
        self.pre_attention_query = torch.nn.Parameter(
            torch.empty(1, pre_attention_query_token, pre_attention_query_size)
        )

        # Calculate the variance for uniform initialization
        query_variance = math.sqrt(3.0) * math.sqrt(2.0 / (pre_attention_query_token + pre_attention_query_token))

        # Initialize the pre-attention query with uniform distribution
        self.pre_attention_query.data.uniform_(-query_variance, query_variance)

        # Initialize the attention block
        self.attn = AttentionBlock2(embedding_dim, num_attn_heads)

    def forward(self, h):
        """
        Forward pass of the perceiver module.
        :param h: Input tensor
        :return: Output after applying attention mechanisms
        """
        # Expand the pre-attention query to match the batch size of the input
        query_ = self.pre_attention_query.expand(h.shape[0], -1, -1)
        # Apply the first attention mechanism (cross-attention)
        pre_att = self.attn(query_, h)
        # Apply the second attention mechanism (self-attention)
        attn = self.attn(pre_att, pre_att)
        return attn
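
A usage sketch (illustrative): the resampler maps a variable-length speech-prompt embedding onto a fixed 32-token summary, which is what T3CondEnc above relies on to keep the conditioning prefix a constant length:

import torch
from chatterbox.models.t3.modules.perceiver import Perceiver

resampler = Perceiver()                 # defaults: 32 query tokens, dim 1024

prompt_emb = torch.randn(2, 150, 1024)  # e.g. a 150-token conditioning prompt
print(resampler(prompt_emb).shape)      # torch.Size([2, 32, 1024])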
src/chatterbox/models/t3/modules/t3_config.py
ADDED
@@ -0,0 +1,27 @@
from ..llama_configs import LLAMA_CONFIGS


class T3Config:
    start_text_token = 255
    stop_text_token = 0
    text_tokens_dict_size = 704
    max_text_tokens = 2048

    start_speech_token = 6561
    stop_speech_token = 6562
    speech_tokens_dict_size = 8194
    max_speech_tokens = 4096

    llama_config_name = "Llama_520M"
    input_pos_emb = "learned"
    speech_cond_prompt_len = 150

    # For T3CondEnc
    encoder_type = "voice_encoder"
    speaker_embed_size = 256
    use_perceiver_resampler = True
    emotion_adv = True

    @property
    def n_channels(self):
        return LLAMA_CONFIGS[self.llama_config_name]["hidden_size"]
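
n_channels is the one derived value here: it reads the backbone width out of LLAMA_CONFIGS, so swapping llama_config_name resizes every projection built from this config. For instance:

hp = T3Config()
print(hp.n_channels)                                # 1024, from LLAMA_CONFIGS["Llama_520M"]
print(hp.start_speech_token, hp.stop_speech_token)  # 6561 6562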
src/chatterbox/models/t3/t3.py
ADDED
@@ -0,0 +1,381 @@
# Copyright (c) 2025 Resemble AI
# MIT License
import logging
from typing import Union, Optional, List

from tqdm import tqdm
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from transformers import LlamaModel, LlamaConfig
from transformers.generation.logits_process import TopPLogitsWarper, RepetitionPenaltyLogitsProcessor

from .modules.learned_pos_emb import LearnedPositionEmbeddings

from .modules.cond_enc import T3CondEnc, T3Cond
from .modules.t3_config import T3Config
from .llama_configs import LLAMA_CONFIGS
from .inference.t3_hf_backend import T3HuggingfaceBackend
from .inference.alignment_stream_analyzer import AlignmentStreamAnalyzer


logger = logging.getLogger(__name__)


class AttrDict(dict):
    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        self.__dict__ = self


def _ensure_BOT_EOT(text_tokens: Tensor, hp):
    B = text_tokens.size(0)
    assert (text_tokens == hp.start_text_token).int().sum() >= B, "missing start_text_token"
    assert (text_tokens == hp.stop_text_token).int().sum() >= B, "missing stop_text_token"


class T3(nn.Module):
    """
    Token-To-Token (T3) TTS model using huggingface transformer models as backbones,
    * tokenization, including start / stop tokens are always added externally to this class
    * conditioning data like CLAP, emotion, etc are all in a separate file for more modularity
    * careful! this class assumes relative positional encoding -- with absolute PE, we would at
        least want to reset the position to 0 when speech tokens begin, and optionally use a
        different PE embedding space for speech.
    """

    def __init__(self, hp=T3Config()):
        super().__init__()
        self.hp = hp
        self.cfg = LlamaConfig(**LLAMA_CONFIGS[hp.llama_config_name])
        self.tfmr = LlamaModel(self.cfg)
        self.dim = self.cfg.hidden_size
        self.deepspeed_patch_applied = False

        # conditioning / embedding
        self.cond_enc = T3CondEnc(hp)
        self.text_emb = nn.Embedding(hp.text_tokens_dict_size, self.dim)
        self.speech_emb = nn.Embedding(hp.speech_tokens_dict_size, self.dim)

        # custom position embedding
        if hp.input_pos_emb == "learned":
            max_text_seq_len = hp.max_text_tokens + 2
            self.text_pos_emb = LearnedPositionEmbeddings(max_text_seq_len, self.dim)

            max_mel_seq_len = hp.max_speech_tokens + 2 + 2
            self.speech_pos_emb = LearnedPositionEmbeddings(max_mel_seq_len, self.dim)

        # logit projection
        self.text_head = nn.Linear(self.cfg.hidden_size, hp.text_tokens_dict_size, bias=False)
        self.speech_head = nn.Linear(self.cfg.hidden_size, hp.speech_tokens_dict_size, bias=False)
        self.compiled = False

    @property
    def device(self):
        return self.speech_head.weight.device

    def prepare_conditioning(self, t3_cond: T3Cond):
        """
        Token cond data needs to be embedded, so that needs to be here instead of in `T3CondEnc`.
        """
        if t3_cond.cond_prompt_speech_tokens is not None and t3_cond.cond_prompt_speech_emb is None:
            t3_cond.cond_prompt_speech_emb = self.speech_emb(t3_cond.cond_prompt_speech_tokens) + \
                self.speech_pos_emb(t3_cond.cond_prompt_speech_tokens)
        return self.cond_enc(t3_cond)  # (B, len_cond, dim)

    def prepare_input_embeds(
        self,
        *,
        t3_cond: T3Cond,
        text_tokens: torch.LongTensor,
        speech_tokens: torch.LongTensor,
        cfg_weight: float = 0.0,
    ):
        # prepare input embeddings (skip backbone transformer embeddings)
        cond_emb = self.prepare_conditioning(t3_cond)  # (B, len_cond, dim)
        text_emb = self.text_emb(text_tokens)  # (B, len_text, dim)
        if cfg_weight > 0.0:
            text_emb[1].zero_()  # CFG uncond

        speech_emb = self.speech_emb(speech_tokens)  # (B, len_speech, dim)
        if self.hp.input_pos_emb == "learned":
            text_emb = text_emb + self.text_pos_emb(text_tokens)
            speech_emb = speech_emb + self.speech_pos_emb(speech_tokens)
        len_cond = cond_emb.size(1)

        if cond_emb.size(0) != text_emb.size(0):
            cond_emb = cond_emb.expand(text_emb.size(0), -1, -1)

        # concat
        embeds = torch.stack([
            torch.cat((ce, te, se))
            for ce, te, se in zip(cond_emb, text_emb, speech_emb)
        ])  # (B, length, dim)
        return embeds, len_cond

    def forward(
        self,
        *,
        t3_cond: T3Cond,
        text_tokens: torch.LongTensor,
        text_token_lens: torch.LongTensor,
        speech_tokens: torch.LongTensor,
        speech_token_lens: torch.LongTensor,
        training=False,
    ):
        _ensure_BOT_EOT(text_tokens, self.hp)

        # prepare custom input embeds
        embeds, len_cond = self.prepare_input_embeds(
            t3_cond=t3_cond,
            text_tokens=text_tokens,
            speech_tokens=speech_tokens,
        )

        # backbone transformer forward
        tfmr_out = self.tfmr.forward(
            input_ids=None,
            # position_ids=position_ids,  # TODO? ROPE should be fine?
            inputs_embeds=embeds,
            output_hidden_states=True,
            return_dict=True,
            use_cache=(not training),
        )
        hidden_states = tfmr_out.hidden_states[-1]  # final tfmr layer output, (B, seq, dim)

        # post-processing: splice out text and speech parts of hidden states
        len_text = text_tokens.size(1)
        len_speech = speech_tokens.size(1)
        B, _, dim = hidden_states.shape
        device, dtype = hidden_states.device, hidden_states.dtype
        text_latents = torch.zeros(B, len_text, dim, dtype=dtype, device=device)
        speech_latents = torch.zeros(B, len_speech, dim, dtype=dtype, device=device)
        ttl, stl = text_token_lens, speech_token_lens
        for i in range(B):
            text_end = len_cond + ttl[i].item()
            speech_start = len_cond + text_tokens.size(1)
            speech_end = speech_start + stl[i].item()
            text_latents[i, :ttl[i]] = hidden_states[i, len_cond:text_end]
            speech_latents[i, :stl[i]] = hidden_states[i, speech_start:speech_end]

        # logit projection
        text_logits = self.text_head(text_latents)
        speech_logits = self.speech_head(speech_latents)

        return AttrDict(
            text_logits=text_logits,
            text_latents=text_latents,
            speech_logits=speech_logits,
            speech_latents=speech_latents,
            hidden_states=hidden_states,
        )

    def loss(
        self,
        *,
        t3_cond: T3Cond,
        text_tokens: torch.LongTensor,
        text_token_lens: torch.LongTensor,
        speech_tokens: torch.LongTensor,
        speech_token_lens: torch.LongTensor,
    ):
        "training method"
        len_text = text_tokens.size(1)
        len_speech = speech_tokens.size(1)
        assert len_text == text_token_lens.max()
        assert len_speech == speech_token_lens.max()

        out = self.forward(
            t3_cond=t3_cond,
            text_tokens=text_tokens,
            text_token_lens=text_token_lens,
            speech_tokens=speech_tokens,
            speech_token_lens=speech_token_lens,
            training=True,
        )  # (B, seq, vocab_size)

        # Calc CCE losses
        IGNORE_ID = -100
        device = out.text_logits.device
        mask_text = torch.arange(len_text, device=device)[None] >= text_token_lens[:, None]  # (B, len_text)
        mask_speech = torch.arange(len_speech, device=device)[None] >= speech_token_lens[:, None]  # (B, len_speech)
        masked_text = text_tokens.masked_fill(mask_text, IGNORE_ID)
        masked_speech = speech_tokens.masked_fill(mask_speech, IGNORE_ID)
        loss_text = F.cross_entropy(out.text_logits, masked_text, ignore_index=IGNORE_ID)
        loss_speech = F.cross_entropy(out.speech_logits, masked_speech, ignore_index=IGNORE_ID)

        return loss_text, loss_speech

    @torch.inference_mode()
    def inference(
        self,
        *,
        t3_cond: T3Cond,
        text_tokens: Tensor,
        initial_speech_tokens: Optional[Tensor]=None,

        # misc conditioning
        prepend_prompt_speech_tokens: Optional[Tensor]=None,

        # HF generate args
        num_return_sequences=1,
        max_new_tokens=None,
        stop_on_eos=True,
        do_sample=True,
        temperature=0.8,
        top_p=0.8,
        length_penalty=1.0,
        repetition_penalty=2.0,
        cfg_weight=0,
    ):
        """
        Args:
            text_tokens: a 1D (unbatched) or 2D (batched) tensor.
        """
        # Validate / sanitize inputs
        assert prepend_prompt_speech_tokens is None, "not implemented"
        _ensure_BOT_EOT(text_tokens, self.hp)
        text_tokens = torch.atleast_2d(text_tokens).to(dtype=torch.long, device=self.device)

        # Default initial speech to a single start-of-speech token
        if initial_speech_tokens is None:
            initial_speech_tokens = self.hp.start_speech_token * torch.ones_like(text_tokens[:, :1])

        # Prepare custom input embeds
        embeds, len_cond = self.prepare_input_embeds(
            t3_cond=t3_cond,
            text_tokens=text_tokens,
            speech_tokens=initial_speech_tokens,
            cfg_weight=cfg_weight,
        )

        # In order to use the standard HF generate method, we need to extend some methods to inject our custom logic
        # Note the llama-specific logic. Other tfmr types can be added later.

        self.compiled = False

        # TODO? synchronize the expensive compile function
        # with self.compile_lock:
        if not self.compiled:
            alignment_stream_analyzer = AlignmentStreamAnalyzer(
                self.tfmr,
                None,
                text_tokens_slice=(len_cond, len_cond + text_tokens.size(-1)),
                alignment_layer_idx=9,  # TODO: hparam or something?
                eos_idx=self.hp.stop_speech_token,
            )
            patched_model = T3HuggingfaceBackend(
                config=self.cfg,
                llama=self.tfmr,
                speech_enc=self.speech_emb,
                speech_head=self.speech_head,
                alignment_stream_analyzer=alignment_stream_analyzer,
            )
            self.patched_model = patched_model
            self.compiled = True

        # # Run normal generate method, which calls our custom extended methods
        # return self.patched_model.generate(
        #     inputs=initial_speech_tokens,
        #     decoder_cond=embeds,
        #     bos_token_id=self.hp.start_speech_token,
        #     eos_token_id=(self.hp.stop_speech_token if stop_on_eos else -1),
        #     pad_token_id=self.hp.stop_speech_token,
        #     max_new_tokens=max_new_tokens or self.hp.max_speech_tokens,
        #     num_return_sequences=num_return_sequences,
        #     temperature=temperature,
        #     top_p=top_p,
        #     length_penalty=length_penalty,
        #     repetition_penalty=repetition_penalty,
        #     do_sample=do_sample,
        #     # cache_implementation=None if not self.compiled else "static",
        # )

        device = embeds.device

        bos_token = torch.tensor([[self.hp.start_speech_token]], dtype=torch.long, device=device)
        bos_embed = self.speech_emb(bos_token)  # shape: (B, 1, embed_dim)
        bos_embed = bos_embed + self.speech_pos_emb.get_fixed_embedding(0)

        # batch_size=2 for CFG
        bos_embed = torch.cat([bos_embed, bos_embed])

        # Combine condition and BOS token for the initial input if cfg_weight > 0
        if cfg_weight > 0:
            inputs_embeds = torch.cat([embeds, bos_embed], dim=1)
        else:
            inputs_embeds = embeds

        # Track generated token ids; start with the BOS token.
        generated_ids = bos_token.clone()
        predicted = []  # To store the predicted tokens

        # Instantiate the logits processors.
        top_p_warper = TopPLogitsWarper(top_p=top_p)
        repetition_penalty_processor = RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty)

        # ---- Initial Forward Pass (no kv_cache yet) ----
        output = self.patched_model(
            inputs_embeds=inputs_embeds,
            past_key_values=None,
            use_cache=True,
            output_attentions=True,
            output_hidden_states=True,
            return_dict=True,
        )
        # Initialize kv_cache with the full context.
        past = output.past_key_values

        # ---- Generation Loop using kv_cache ----
        for i in tqdm(range(max_new_tokens), desc="Sampling", dynamic_ncols=True):
            logits = output.logits[:, -1, :]

            # CFG
            if cfg_weight > 0.0:
                logits_cond = logits[0:1]
                logits_uncond = logits[1:2]
                logits = logits_cond + cfg_weight * (logits_cond - logits_uncond)

            logits = logits.squeeze(1)

            # Apply temperature scaling.
            if temperature != 1.0:
                logits = logits / temperature

            # Apply repetition penalty and top-p filtering.
            logits = repetition_penalty_processor(generated_ids, logits)
            logits = top_p_warper(None, logits)

            # Convert logits to probabilities and sample the next token.
            probs = torch.softmax(logits, dim=-1)
            next_token = torch.multinomial(probs, num_samples=1)  # shape: (B, 1)

            predicted.append(next_token)
            generated_ids = torch.cat([generated_ids, next_token], dim=1)

            # Check for EOS token.
            if next_token.view(-1) == self.hp.stop_speech_token:
                break

            # Get embedding for the new token.
            next_token_embed = self.speech_emb(next_token)
            next_token_embed = next_token_embed + self.speech_pos_emb.get_fixed_embedding(i + 1)

            # For CFG
            if cfg_weight > 0.0:
                next_token_embed = torch.cat([next_token_embed, next_token_embed])

            # Forward pass with only the new token and the cached past.
            output = self.patched_model(
                inputs_embeds=next_token_embed,
                past_key_values=past,
                output_attentions=True,
                output_hidden_states=True,
                return_dict=True,
            )
            # Update the kv_cache.
            past = output.past_key_values

        # Concatenate all predicted tokens along the sequence dimension.
        predicted_tokens = torch.cat(predicted, dim=1)  # shape: (B, num_tokens)
        return predicted_tokens
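
An end-to-end inference sketch (illustrative; a real run loads trained weights rather than a randomly initialized 520M backbone). Two details worth flagging: max_new_tokens has no default in the sampling loop, so it must be passed, and with cfg_weight > 0 the caller is expected to supply text tokens duplicated along the batch dimension, since prepare_input_embeds zeroes out row 1 as the unconditional branch.

import torch
from chatterbox.models.t3.t3 import T3
from chatterbox.models.t3.modules.cond_enc import T3Cond

t3 = T3().eval()
hp = t3.hp

text = torch.tensor([[hp.start_text_token, 25, 38, 12, hp.stop_text_token]])
text = torch.cat([text, text], dim=0)  # row 0: conditional, row 1: unconditional (CFG)

cond = T3Cond(
    speaker_emb=torch.randn(1, hp.speaker_embed_size),
    emotion_adv=torch.full((1,), 0.5),
)
speech_tokens = t3.inference(
    t3_cond=cond,
    text_tokens=text,
    max_new_tokens=16,  # required: range(None) would fail in the sampling loop
    cfg_weight=0.5,
)
print(speech_tokens.shape)  # (1, <=16) speech token ids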
src/chatterbox/models/tokenizers/__init__.py
ADDED
@@ -0,0 +1 @@
from .tokenizer import EnTokenizer
src/chatterbox/models/tokenizers/tokenizer.py
ADDED
@@ -0,0 +1,50 @@
import logging

import torch
from tokenizers import Tokenizer


# Special tokens
SOT = "[START]"
EOT = "[STOP]"
UNK = "[UNK]"
SPACE = "[SPACE]"
SPECIAL_TOKENS = [SOT, EOT, UNK, SPACE, "[PAD]", "[SEP]", "[CLS]", "[MASK]"]

logger = logging.getLogger(__name__)


class EnTokenizer:
    def __init__(self, vocab_file_path):
        self.tokenizer: Tokenizer = Tokenizer.from_file(vocab_file_path)
        self.check_vocabset_sot_eot()

    def check_vocabset_sot_eot(self):
        voc = self.tokenizer.get_vocab()
        assert SOT in voc
        assert EOT in voc

    def text_to_tokens(self, text: str):
        text_tokens = self.encode(text)
        text_tokens = torch.IntTensor(text_tokens).unsqueeze(0)
        return text_tokens

    def encode(self, txt: str, verbose=False):
        """
        clean_text > (append `lang_id`) > replace SPACE > encode text using Tokenizer
        """
        txt = txt.replace(' ', SPACE)
        code = self.tokenizer.encode(txt)
        ids = code.ids
        return ids

    def decode(self, seq):
        if isinstance(seq, torch.Tensor):
            seq = seq.cpu().numpy()

        txt: str = self.tokenizer.decode(seq, skip_special_tokens=False)
        txt = txt.replace(' ', '')
        txt = txt.replace(SPACE, ' ')
        txt = txt.replace(EOT, '')
        txt = txt.replace(UNK, '')
        return txt
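
A round-trip sketch, assuming a tokenizer.json vocab file is available locally (in released checkpoints it ships alongside the model weights; the filename here is a placeholder):

from chatterbox.models.tokenizers import EnTokenizer

tok = EnTokenizer("tokenizer.json")      # hypothetical local path
ids = tok.text_to_tokens("hello world")  # (1, T) IntTensor; spaces become [SPACE]
print(tok.decode(ids.squeeze(0)))        # "hello world"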
src/chatterbox/models/voice_encoder/__init__.py
ADDED
@@ -0,0 +1 @@
from .voice_encoder import VoiceEncoder, VoiceEncConfig