Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes.
- .gitignore +137 -0
- AUDIOBOOK_FEATURES.md +169 -0
- LICENSE +21 -0
- README.md +368 -12
- VOICE_LIBRARY_ENHANCEMENT_COMPLETE.md +99 -0
- gradio_tts_app_audiobook.py +0 -0
- gradio_tts_app_audiobook_with_batch.py +0 -0
- install-audiobook.bat +151 -0
- launch_audiobook.bat +52 -0
- pyproject.toml +38 -0
- simple_batch_demo.py +146 -0
- src/audiobook/__init__.py +8 -0
- src/audiobook/audio_processing.py +480 -0
- src/audiobook/config.py +72 -0
- src/audiobook/models.py +236 -0
- src/audiobook/processing.py +928 -0
- src/audiobook/project_management.py +656 -0
- src/audiobook/voice_management.py +332 -0
- src/chatterbox/__init__.py +2 -0
- src/chatterbox/models/s3gen/__init__.py +2 -0
- src/chatterbox/models/s3gen/const.py +1 -0
- src/chatterbox/models/s3gen/decoder.py +317 -0
- src/chatterbox/models/s3gen/f0_predictor.py +55 -0
- src/chatterbox/models/s3gen/flow.py +242 -0
- src/chatterbox/models/s3gen/flow_matching.py +228 -0
- src/chatterbox/models/s3gen/hifigan.py +474 -0
- src/chatterbox/models/s3gen/matcha/decoder.py +443 -0
- src/chatterbox/models/s3gen/matcha/flow_matching.py +129 -0
- src/chatterbox/models/s3gen/matcha/text_encoder.py +413 -0
- src/chatterbox/models/s3gen/matcha/transformer.py +316 -0
- src/chatterbox/models/s3gen/s3gen.py +305 -0
- src/chatterbox/models/s3gen/transformer/__init__.py +0 -0
- src/chatterbox/models/s3gen/transformer/activation.py +84 -0
- src/chatterbox/models/s3gen/transformer/attention.py +330 -0
- src/chatterbox/models/s3gen/transformer/convolution.py +145 -0
- src/chatterbox/models/s3gen/transformer/embedding.py +294 -0
- src/chatterbox/models/s3gen/transformer/encoder_layer.py +236 -0
- src/chatterbox/models/s3gen/transformer/positionwise_feed_forward.py +115 -0
- src/chatterbox/models/s3gen/transformer/subsampling.py +383 -0
- src/chatterbox/models/s3gen/transformer/upsample_encoder.py +318 -0
- src/chatterbox/models/s3gen/utils/class_utils.py +71 -0
- src/chatterbox/models/s3gen/utils/mask.py +193 -0
- src/chatterbox/models/s3gen/utils/mel.py +81 -0
- src/chatterbox/models/s3gen/xvector.py +428 -0
- src/chatterbox/models/s3tokenizer/__init__.py +30 -0
- src/chatterbox/models/s3tokenizer/s3tokenizer.py +168 -0
- src/chatterbox/models/t3/__init__.py +1 -0
- src/chatterbox/models/t3/inference/alignment_stream_analyzer.py +154 -0
- src/chatterbox/models/t3/inference/t3_hf_backend.py +116 -0
- src/chatterbox/models/t3/llama_configs.py +37 -0
.gitignore
ADDED
.vscode

# Pylance
pyrightconfig.json

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
src/**/__pycache__/
src/**/*.pyc

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyderworkspace

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

syn_out/
checkpoints/
.gradio

# Ignore generated sample .wav files
**/*.wav

# User data directories - keep repository clean
venv/
audiobook_projects/
speakers/
source/
audiobook_config.json

# Development and archive directories
archive/
development/

# OS generated files
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db
AUDIOBOOK_FEATURES.md
ADDED
# 🎧 Chatterbox TTS - Audiobook Edition Features

## 🚀 New Voice Management System

The Audiobook Edition adds powerful voice management capabilities perfect for creating consistent character voices across your audiobook projects.

## ✨ Key Features

### 📚 Voice Library Tab
- **Organized Voice Storage**: Keep all your character voices in one place
- **Custom Voice Profiles**: Save voice settings with names, descriptions, and reference audio
- **Easy Voice Selection**: Quick dropdown to switch between saved voices
- **Voice Testing**: Test voices before saving or using them

### 🎭 Character Voice Management
- **Voice Profiles**: Each voice includes:
  - Voice name (for file organization)
  - Display name (human-readable)
  - Description (character notes)
  - Reference audio file
  - Optimized settings (exaggeration, CFG/pace, temperature)

### 🎙️ Voice Testing & Configuration
- **Live Testing**: Test voice settings with custom text
- **Parameter Tuning**: Fine-tune exaggeration, CFG/pace, and temperature
- **Instant Feedback**: Hear changes immediately
- **Save Optimized Settings**: Store perfect settings for each character

## 🛠️ How to Use

### 1. Launch the Audiobook Edition
```bash
# Use the audiobook launcher
launch_audiobook.bat
```

### 2. Set Up Your Voice Library
1. Go to the **"📚 Voice Library"** tab
2. Set your voice library folder path (default: `voice_library`)
3. Click **"📁 Update Library Path"**

### 3. Create a Voice Profile
1. **Upload Reference Audio**: Upload 10-30 seconds of clear speech
2. **Configure Settings**:
   - **Exaggeration**: 0.3-0.7 for most voices
   - **CFG/Pace**: Lower = slower, more deliberate
   - **Temperature**: Higher = more variation
3. **Test the Voice**: Use the test text to hear how it sounds
4. **Save Profile**: Give it a name and description, then save

### 4. Use Saved Voices
1. **Select Voice**: Choose from dropdown in Voice Library
2. **Load Voice**: Click "📥 Load Voice" to load settings
3. **Generate Speech**: Switch to TTS tab and generate with loaded voice

## 📁 Voice Library Structure

```
voice_library/
├── narrator_male_deep/
│   ├── config.json          # Voice settings
│   └── reference.wav        # Reference audio
├── character_female_young/
│   ├── config.json
│   └── reference.mp3
└── villain_gravelly/
    ├── config.json
    └── reference.wav
```
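
The exact schema of `config.json` is defined by the app itself; as a rough sketch of the kind of data a profile folder holds (name, description, reference audio, and the tuning values above), it could be written and read with the standard library. The field names below are illustrative, not the app's actual keys.

```python
import json
from pathlib import Path

def save_profile_sketch(library: Path, voice_name: str, display_name: str,
                        description: str, reference_audio: str,
                        exaggeration: float = 0.5, cfg_pace: float = 0.5,
                        temperature: float = 0.8) -> Path:
    """Write a minimal voice-profile folder: config.json next to the reference audio."""
    voice_dir = library / voice_name
    voice_dir.mkdir(parents=True, exist_ok=True)
    config = {
        "voice_name": voice_name,            # folder-safe identifier
        "display_name": display_name,        # human-readable name
        "description": description,          # character notes
        "reference_audio": reference_audio,  # e.g. "reference.wav"
        "exaggeration": exaggeration,
        "cfg_pace": cfg_pace,
        "temperature": temperature,
    }
    (voice_dir / "config.json").write_text(json.dumps(config, indent=2))
    return voice_dir

def load_profile_sketch(library: Path, voice_name: str) -> dict:
    """Read a saved profile back as a plain dict."""
    return json.loads((library / voice_name / "config.json").read_text())
```

Keeping each voice in its own folder is what makes the backup and sharing tips below as simple as copying the `voice_library` directory.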

## 🎯 Audiobook Workflow

### Step 1: Character Planning
- List all characters in your audiobook
- Gather reference audio for each (record or find samples)
- Plan voice characteristics (age, personality, accent)

### Step 2: Voice Creation
- Create a voice profile for each character
- Test and refine settings for consistency
- Save with descriptive names (e.g., "Harry_confident", "Hermione_intelligent")

### Step 3: Production
- Load character voice before generating their dialogue
- Use consistent settings throughout the book
- Test voice regularly to maintain quality

### Step 4: Quality Control
- Use the same test phrase for all characters
- Ensure voices are distinguishable
- Adjust settings if characters sound too similar

## 💡 Pro Tips

### Voice Creation
- **Reference Audio**: Use clean, noise-free recordings
- **Length**: 10-30 seconds is optimal
- **Content**: Natural speech, not overly dramatic
- **Quality**: Higher quality audio = better cloning

### Settings Optimization
- **Exaggeration**:
  - 0.3-0.5: Subtle, natural voices
  - 0.5-0.7: Standard character voices
  - 0.7-1.0: Dramatic or distinctive voices
- **CFG/Pace**:
  - 0.3-0.4: Slow, deliberate (elderly, wise characters)
  - 0.5: Standard pace
  - 0.6-0.8: Faster pace (young, energetic characters)
- **Temperature**:
  - 0.5-0.8: Consistent delivery
  - 0.8-1.2: More natural variation
  - 1.2+: Creative but less predictable

### Organization
- **Naming Convention**: Use descriptive names (character_trait_type)
- **Descriptions**: Include character details and usage notes
- **Backup**: Keep your voice_library folder backed up
- **Version Control**: Save multiple versions for different emotions

## 🔧 Advanced Features

### Voice Library Management
- **Import/Export**: Copy voice_library folder between projects
- **Sharing**: Share voice profiles with other audiobook creators
- **Backup**: Regular backups of your voice library
- **Organization**: Folder structure for different projects

### Batch Processing (Future)
- Process entire chapters with character voice switching
- Automatic voice detection based on speaker tags
- Export management for audiobook production

## 🎵 Example Character Voices

### Narrator
- **Settings**: Exaggeration 0.4, CFG 0.5, Temp 0.7
- **Description**: Clear, neutral, professional tone
- **Use**: Chapter narration, scene descriptions

### Hero Character
- **Settings**: Exaggeration 0.6, CFG 0.6, Temp 0.8
- **Description**: Confident, determined, slightly higher energy
- **Use**: Main character dialogue

### Wise Mentor
- **Settings**: Exaggeration 0.3, CFG 0.3, Temp 0.6
- **Description**: Slow, deliberate, thoughtful delivery
- **Use**: Advisor character, important wisdom

### Comic Relief
- **Settings**: Exaggeration 0.8, CFG 0.7, Temp 1.0
- **Description**: Energetic, expressive, variable delivery
- **Use**: Funny sidekick, lighthearted moments

## 🛡️ Best Practices

1. **Consistency**: Always use the same voice profile for each character
2. **Testing**: Test voices regularly during production
3. **Backup**: Keep voice profiles backed up
4. **Documentation**: Maintain character voice notes
5. **Quality**: Use high-quality reference audio
6. **Organization**: Use clear naming conventions

---

**Ready to create amazing audiobooks with consistent character voices? Launch the Audiobook Edition and start building your voice library! 🎧✨**
LICENSE
ADDED
MIT License

Copyright (c) 2025 Resemble AI

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
README.md
CHANGED
# 🎧 Chatterbox Audiobook Generator

**This is a work in progress. You can consider this a pre-launch repo at the moment, but if you find bugs, please put them in the issues area. Thank you.**
**Transform your text into high-quality audiobooks with advanced TTS models, voice cloning, and professional volume normalization.**

## 🚀 Quick Start

### 1. Install Dependencies
```bash
./install-audiobook.bat
```

### 2. Launch the Application
```bash
./launch_audiobook.bat
```

### 3. CUDA Issue Fix (If Needed)
If you encounter CUDA assertion errors during generation, install the patched version:
```bash
# Activate your virtual environment first
venv\Scripts\activate.bat

# Install the CUDA-fixed version
pip install --force-reinstall --no-cache-dir "chatterbox-tts @ git+https://github.com/fakerybakery/better-chatterbox@fix-cuda-issue"
```

The web interface will open automatically in your browser at `http://localhost:7860`

---

## ✨ Features

### 📚 **Audiobook Creation**
- **Single Voice**: Generate entire audiobooks with one consistent voice
- **Multi-Voice**: Create dynamic audiobooks with multiple characters
- **Custom Voices**: Clone voices from audio samples for personalized narration
- **Professional Volume Normalization**: Ensure consistent audio levels across all voices
- **📋 Text Queuing System** ⭐ *NEW*: Upload books in chapters of any size and generate continuously
- **🔄 Chunk-Based Processing** ⭐ *NEW*: Improved reliability for longer text generations

### 🎵 **Audio Processing**
- **Smart Cleanup**: Remove unwanted silence and audio artifacts
- **Volume Normalization**: Professional-grade volume balancing for all voices
- **Real-time Audio Analysis**: Live volume level monitoring and feedback
- **Preview System**: Test settings before applying to entire projects
- **Batch Processing**: Process multiple projects efficiently
- **Quality Control**: Advanced audio optimization tools
- **🎯 Enhanced Audio Quality** ⭐ *NEW*: Improved Top-P and Min-P parameters for better voice generation

### 🎭 **Voice Management**
- **Voice Library**: Organize and manage your voice collection
- **Voice Cloning**: Create custom voices from audio samples
- **Volume Settings**: Configure target volume levels for each voice
- **Professional Presets**: Industry-standard volume levels (audiobook, podcast, broadcast)
- **Character Assignment**: Map specific voices to story characters

### 📊 **Volume Normalization System** ⭐ *NEW*
- **Professional Standards**: Audiobook (-18 dB), Podcast (-16 dB), Broadcast (-23 dB) presets
- **Consistent Character Voices**: All characters maintain the same volume level
- **Real-time Analysis**: Color-coded volume status with RMS and peak level display
- **Retroactive Normalization**: Apply volume settings to existing voice projects
- **Multi-Voice Support**: Batch normalize all voices in multi-character audiobooks
- **Soft Limiting**: Intelligent audio limiting to prevent distortion

### 📖 **Text Processing**
- **Chapter Support**: Automatic chapter detection and organization
- **Multi-Voice Parsing**: Parse character dialogue automatically
- **Text Validation**: Ensure proper formatting before generation
- **📋 Queue Management** ⭐ *NEW*: Batch process multiple text files sequentially
- **🔇 Return Pause System** ⭐ *NEW*: Automatic pause insertion based on line breaks for natural speech flow

---

## 🎭 Custom Audiobook Processing Pipeline ⭐ *NEW*

Our advanced text processing pipeline transforms your written content into natural-sounding audiobooks with intelligent pause placement and character flow management.

### 🔇 **Return Pause System**

**Automatic pause insertion based on your text formatting** - Every line break (`\n`) in your text automatically adds a 0.1-second pause to the generated audio, creating natural speech rhythms without manual intervention.

#### **How It Works**
- **Line Break Detection**: System automatically counts all line breaks in your text
- **Pause Calculation**: Each return adds exactly 0.1 seconds of silence
- **Accumulative Pauses**: Multiple consecutive line breaks create longer pauses
- **Universal Support**: Works with single-voice, multi-voice, and batch processing

#### **Example Text Formatting**
```
[Narrator] The sun was setting over the hills.

[Character1] "We need to find shelter soon."

[Character2] "I see a cave up ahead.
Let's hurry before it gets dark."


[Narrator] They rushed toward the cave, hearts pounding.
```
**Result**: Natural pauses between dialogue, emphasis pauses for dramatic effect, and smooth character transitions.
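
The pause logic itself lives in the app's processing modules; as a rough sketch of the idea only (0.1 s of silence per line break, accumulated across consecutive breaks), assuming audio handled as NumPy float arrays and an illustrative 24 kHz sample rate:

```python
import numpy as np

PAUSE_PER_RETURN = 0.1  # seconds of silence per line break

def pause_for_breaks(chunk_text: str, sample_rate: int = 24000) -> np.ndarray:
    """Build a silence buffer sized by the number of line breaks in this chunk."""
    n_breaks = chunk_text.count("\n")
    print(f"🔇 Detected {n_breaks} line breaks → {n_breaks * PAUSE_PER_RETURN:.1f}s total pause time")
    return np.zeros(int(n_breaks * PAUSE_PER_RETURN * sample_rate), dtype=np.float32)

def append_pause(speech: np.ndarray, chunk_text: str, sample_rate: int = 24000) -> np.ndarray:
    """Concatenate generated speech with the pause implied by the chunk's formatting."""
    return np.concatenate([speech, pause_for_breaks(chunk_text, sample_rate)])
```

Because the pause is just zero-valued samples appended after each chunk, consecutive blank lines accumulate naturally: three returns yield 0.3 s of silence.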

### 📝 **Text Formatting Best Practices**

#### **🎭 Multi-Voice Dialogue Structure**
```
[Character Name] Dialogue content here.

[Another Character] Response content here.
Multiple lines can be used for the same character.

[Narrator] Descriptive text and scene setting.
```
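
Splitting a tagged script into (speaker, text) pairs before routing each segment to its assigned voice can be done with a simple regex pass; the tag format below matches the examples above, but this is only a sketch, not necessarily the parser the app uses.

```python
import re

TAG = re.compile(r"^\[(?P<name>[^\]]+)\]\s*(?P<text>.*)$")

def parse_dialogue(script: str, default_speaker: str = "Narrator"):
    """Split a [Character]-tagged script into (speaker, text) segments.

    Untagged and blank lines stay with the current speaker, so the
    return-pause logic still sees the original line breaks.
    """
    segments, speaker, lines = [], default_speaker, []
    for line in script.splitlines():
        match = TAG.match(line)
        if match:  # a new [Character] tag starts a new segment
            if lines:
                segments.append((speaker, "\n".join(lines)))
            speaker, lines = match.group("name"), [match.group("text")]
        else:      # continuation line for the current speaker
            lines.append(line)
    if lines:
        segments.append((speaker, "\n".join(lines)))
    return segments
```

Run on the example above, this yields Narrator / Character1 / Character2 / Narrator segments, with Character2's two lines kept together as one segment.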

#### **🎪 Natural Flow Techniques**
- **Paragraph Breaks**: Use double line breaks for scene transitions
- **Emphasis Pauses**: Add extra returns before important revelations
- **Character Separation**: Single returns between different speakers
- **Breathing Room**: Natural pauses for complex concepts or emotional moments

#### **📖 Single Voice Formatting**
```
Chapter content flows naturally here.

New paragraphs create natural pauses.


Extended pauses can emphasize dramatic moments.

Regular text continues with normal pacing.
```

### 🔄 **Processing Pipeline Features**

#### **🧠 Intelligent Text Analysis**
- **Line Break Preservation**: Maintains your formatting intentions throughout processing
- **Character Assignment**: Automatically maps voice tags to selected voice profiles
- **Chunk Optimization**: Breaks long texts into optimal segments while preserving pause timing
- **Error Recovery**: Validates text and provides helpful formatting suggestions

#### **⚡ Real-Time Processing**
- **Live Feedback**: Console output shows exactly how many pauses are being added
- **Debug Information**: Detailed logging of pause detection and application
- **Progress Tracking**: Monitor pause processing alongside audio generation
- **Quality Assurance**: Automatic validation of pause placement

#### **🎚️ Professional Output**
- **Seamless Integration**: Pauses blend naturally with generated speech
- **Volume Consistency**: Silence segments match the audio output specifications
- **Format Compatibility**: Works with all supported audio formats and quality settings
- **Project Preservation**: Pause information saved in project metadata for regeneration

### 💡 **Pro Tips for Better Audiobooks**

#### **🎯 Dialogue Formatting**
- **Character Consistency**: Always use the same character name format `[Name]`
- **Natural Breaks**: Place returns where a human reader would naturally pause
- **Scene Transitions**: Use multiple returns (2-3) for major scene changes
- **Emotional Beats**: Add single returns before/after emotional dialogue

#### **📚 Chapter Structure**
```
Chapter 1: The Beginning

Opening paragraph with scene setting.

"Character dialogue with natural flow."

Descriptive narrative continues.


Major scene transition with extended pause.

New section begins here.
```

#### **🎪 Advanced Techniques**
- **Cliffhangers**: Use extended pauses before revealing crucial information
- **Action Sequences**: Shorter, punchy sentences with minimal pauses for intensity
- **Contemplative Moments**: Longer pauses for reflection and character development
- **Comedic Timing**: Strategic pauses before punchlines or comedic reveals

### 🔍 **Debug Output Examples**

When generating your audiobook, watch for these helpful console messages:
```
🔇 Detected 15 line breaks → 1.5s total pause time
🔇 Line breaks detected in [Character1]: +0.3s pause (from 3 returns)
🔇 Chunk 2 (Narrator): Added 0.2s pause after speech
```

This real-time feedback helps you understand exactly how your formatting translates to audio timing.

---

## 🆕 Recent Improvements

### 🎯 **Audio Quality Enhancements**
We've significantly improved audio generation quality by optimizing the underlying TTS parameters:

- **Enhanced Top-P and Min-P Settings**: Fine-tuned probability parameters for more natural speech patterns
- **Reduced Audio Artifacts**: Better handling of pronunciation and intonation
- **Improved Voice Consistency**: More stable voice characteristics across long generations
- **Better Pronunciation**: Enhanced handling of complex words and names

**📝 Note for Existing Users**:
- Older voice profiles will continue to work as before
- To take advantage of the new audio quality improvements, consider re-creating voice profiles
- Existing projects remain fully compatible

### 📋 **Text Queuing System**
Perfect for processing large books or multiple chapters:

- **Batch Upload**: Upload multiple text files of any size
- **Sequential Processing**: Automatically processes files one after another
- **Progress Tracking**: Monitor generation progress across all queued items
- **Flexible Chapter Sizes**: No restrictions on individual file length
- **Unattended Generation**: Set up large projects and let them run automatically

### 🔄 **Chunk-Based TTS System**
Enhanced the core text-to-speech engine for better reliability:

- **Background Chunking**: Automatically splits long texts into optimal chunks
- **Memory Management**: Better handling of large text inputs
- **Error Recovery**: Improved resilience during long generation sessions
- **Consistent Quality**: Maintains voice quality across chunk boundaries
- **Progress Feedback**: Real-time updates on generation progress
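
The chunking idea can be sketched as splitting on sentence boundaries and greedily packing sentences under a character budget, so each TTS call stays short and pause timing is preserved per chunk. The 400-character budget here is an illustrative assumption, not the app's actual setting:

```python
import re

def chunk_text(text: str, max_chars: int = 400):
    """Greedily pack whole sentences into chunks no longer than max_chars."""
    sentences = re.split(r"(?<=[.!?])\s+", text.strip())
    chunks, current = [], ""
    for sentence in sentences:
        if current and len(current) + len(sentence) + 1 > max_chars:
            chunks.append(current)   # close the current chunk at a sentence boundary
            current = sentence
        else:
            current = f"{current} {sentence}".strip()
    if current:
        chunks.append(current)
    return chunks
```

Splitting only at sentence boundaries is what keeps voice quality consistent across chunk joins: no word is ever cut in half, and each chunk still ends where a reader would naturally pause.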

---

## 🎚️ Volume Normalization Guide

### **Individual Voice Setup**
1. Go to **Voice Library** tab
2. Upload your voice sample and configure settings
3. Set target volume level (default: -18 dB for audiobooks)
4. Choose from professional presets or use custom levels
5. Save voice profile with volume settings

### **Multi-Voice Projects**
1. Navigate to **Multi-Voice Audiobook Creation** tab
2. Enable volume normalization for all voices
3. Set target level for consistent character voices
4. All characters will be automatically normalized during generation

### **Text Queuing Workflow** ⭐ *NEW*
1. Go to **Production Studio** tab
2. Select "Batch Processing" mode
3. Upload multiple text files (chapters, sections, etc.)
4. Choose your voice and settings
5. Start batch processing - files will generate sequentially
6. Monitor progress and download completed audiobooks

### **Professional Standards**
- **📖 Audiobook Standard**: -18 dB RMS (recommended for most audiobooks)
- **🎙️ Podcast Standard**: -16 dB RMS (for podcast-style content)
- **🔇 Quiet/Comfortable**: -20 dB RMS (for quiet listening environments)
- **🔊 Loud/Energetic**: -14 dB RMS (for dynamic, energetic content)
- **📺 Broadcast Standard**: -23 dB RMS (for broadcast television standards)
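
For reference, RMS normalization to one of these dB targets plus a simple soft limit can be sketched as below, assuming float audio in the range [-1, 1]. This is only an illustration of the math; the project's own implementation (see `src/audiobook/audio_processing.py`) may differ in detail:

```python
import numpy as np

def normalize_rms(audio: np.ndarray, target_db: float = -18.0) -> np.ndarray:
    """Scale audio so its RMS level sits at target_db (dBFS), then soft-limit peaks."""
    rms = float(np.sqrt(np.mean(np.square(audio))))
    if rms == 0.0:
        return audio                      # pure silence: nothing to normalize
    gain = 10 ** ((target_db - 20 * np.log10(rms)) / 20)
    out = audio * gain
    if np.max(np.abs(out)) > 1.0:
        out = np.tanh(out)                # gentle soft limit instead of hard clipping
    return out
```

Normalizing every character's chunks to the same RMS target is what keeps multi-voice dialogue at a consistent loudness from start to finish.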

---

## 📁 Project Structure

```
📦 Your Audiobook Projects
├── 🎤 speakers/              # Voice library and samples
├── 📚 audiobook_projects/    # Generated audiobooks
├── 🔧 src/audiobook/         # Core processing modules
└── 📄 Generated files...     # Audio chunks and final outputs
```

---

## 🎯 Workflow

1. **📝 Prepare Text**: Format your story with proper chapter breaks and strategic line breaks for natural pauses
2. **🎤 Select Voices**: Choose or clone voices for your characters
3. **🎚️ Configure Volume**: Set professional volume levels and normalization
4. **⚙️ Configure Settings**: Adjust quality, speed, and processing options
5. **🎧 Generate Audio**: Create your audiobook with advanced TTS and automatic pause insertion
6. **🧹 Clean & Optimize**: Use smart cleanup tools for perfect audio
7. **📦 Export**: Get your finished audiobook ready for distribution

### 🎭 **Enhanced Multi-Voice Workflow**
1. **📝 Format Dialogue**: Use `[Character]` tags and strategic line breaks for natural flow
2. **🔇 Add Return Pauses**: Place line breaks where you want natural speech pauses (0.1s each)
3. **🎤 Assign Voices**: Map each character to their voice profile
4. **⚡ Process with Intelligence**: Watch console output for pause detection feedback
5. **🎧 Review & Adjust**: Listen to generated audio and refine formatting if needed

### 📋 **Batch Processing Workflow** ⭐ *NEW*
1. **📚 Organize Chapters**: Split your book into individual text files
2. **📋 Queue Setup**: Upload all files to the batch processing system
3. **🎤 Voice Selection**: Choose voice and configure settings once
4. **🔄 Automated Generation**: Let the system process all files sequentially
5. **📊 Monitor Progress**: Track completion status in real-time
6. **📦 Collect Results**: Download all generated audiobook chapters
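
Conceptually, the queue is a sequential loop over the uploaded chapter files, each generated with the same voice settings; a minimal sketch is below, where `generate_audiobook` stands in for whatever single-file generation path you already use and is not a real function in this repo:

```python
from pathlib import Path

def process_queue(text_files, voice_config, generate_audiobook):
    """Run the single-file generation path over every queued chapter, in order."""
    outputs = []
    for i, path in enumerate(sorted(text_files), start=1):
        text = Path(path).read_text(encoding="utf-8")
        outputs.append(generate_audiobook(text, voice_config))  # stand-in for the app's generator
        print(f"[{i}/{len(text_files)}] finished {Path(path).name}")
    return outputs
```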

---

## 🛠️ Technical Requirements

- **Python 3.8+**
- **CUDA GPU** (recommended for faster processing)
- **8GB+ RAM** (16GB recommended for large projects)
- **Modern web browser** for the interface

### 🔧 **CUDA Support**
- CUDA compatibility issues have been resolved with updated dependencies
- GPU acceleration is now stable for extended generation sessions
- Fallback to CPU processing available if CUDA issues occur
- **If you encounter CUDA assertion errors**: Use the patched version from the installation instructions above
- The fix addresses PyTorch indexing issues that could cause crashes during audio generation

---

## ⚠️ Known Issues & Compatibility

### **Multi-Voice Generation**
- Short sentences or sections may occasionally cause issues during multi-voice generation
- This is a limitation of the underlying TTS models rather than the implementation
- **Workaround**: Use longer, more detailed sentences for better stability
- Single-voice generation is not affected by this issue

### **Voice Profile Compatibility**
- **Existing Voices**: All older voice profiles remain fully functional
- **New Features**: To benefit from improved audio quality, consider re-creating voice profiles
- **Project Compatibility**: Existing audiobook projects work without modification
- **Regeneration**: Individual chunks can be regenerated with improved quality settings

### **Batch Processing Considerations**
- Large batch jobs may take significant time depending on text length and hardware
- Monitor system resources during extended batch processing sessions
- Consider processing very large books in smaller batches for better control

---

## 📋 Supported Formats

### Input
- **Text**: `.txt`, `.md`, formatted stories and scripts
- **Audio Samples**: `.wav`, `.mp3`, `.flac` for voice cloning
- **Batch Files**: Multiple text files for queue processing

### Output
- **Audio**: High-quality `.wav` files with professional volume levels
- **Projects**: Organized folder structure with chapters
- **Exports**: Ready-to-use audiobook files
- **Batch Results**: Multiple completed audiobooks from queue processing

---

## 🆘 Support

- **Features Guide**: See `AUDIOBOOK_FEATURES.md` for detailed capabilities
- **Development Notes**: Check `development/` folder for technical details
- **Issues**: Report problems via GitHub issues

---

## 📄 License

This project is licensed under the terms specified in `LICENSE`.

---

**🎉 Ready to create amazing audiobooks with professional volume levels and enhanced audio quality? Run `./launch_audiobook.bat` and start generating!**
VOICE_LIBRARY_ENHANCEMENT_COMPLETE.md
ADDED
# ✅ Voice Library Enhancement Complete

## 🎯 **Problem Solved**
The Voice Library UI was missing **advanced TTS parameters** (Min-P, Top-P, Repetition Penalty) that were available in the backend but not exposed to users.

## 🛠️ **Changes Made**

### 1. **Enhanced Voice Profile Storage** ⚙️
- Updated `save_voice_profile()` function to accept and store:
  - **Min-P** (default: 0.05) - Minimum probability threshold
  - **Top-P** (default: 1.0) - Nucleus sampling threshold
  - **Repetition Penalty** (default: 1.2) - Token repetition control
- Incremented version to **v2.1** for backward compatibility
- Enhanced status messages to show advanced settings

### 2. **Enhanced Voice Profile Loading** 📥
- Updated `load_voice_profile()` function to return new parameters
- Added backward compatibility - old voice profiles get sensible defaults
- Enhanced status messages to show profile version

### 3. **New Voice Library UI Controls** 🎛️
Added **"Advanced Voice Parameters"** section in Voice Library tab:
```
🎛️ Advanced Voice Parameters
├── Min-P (0.01-0.5) - "Minimum probability threshold for token selection (lower = more diverse)"
├── Top-P (0.1-1.0) - "Nucleus sampling threshold (lower = more focused)"
└── Repetition Penalty (1.0-2.0) - "Penalty for repeating tokens (higher = less repetition)"
```
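
A minimal sketch of the backward-compatibility idea: when an older (pre-v2.1) profile is loaded, the three new parameters fall back to the defaults listed above. The key names here are illustrative, not the exact ones used in the saved profiles:

```python
ADVANCED_DEFAULTS = {"min_p": 0.05, "top_p": 1.0, "repetition_penalty": 1.2}

def with_advanced_defaults(profile: dict) -> dict:
    """Return a v2.1-shaped profile, filling in parameters missing from older profiles."""
    upgraded = dict(profile)
    for key, default in ADVANCED_DEFAULTS.items():
        upgraded.setdefault(key, default)   # only added when the old profile lacks the key
    upgraded.setdefault("version", "2.1")
    return upgraded
```

Filling defaults at load time, rather than rewriting old files, is what lets existing voice profiles keep working untouched.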

### 4. **Enhanced TTS Generation** 🎵
- Updated core `generate()` function to accept new parameters
- Updated `generate_with_cpu_fallback()` function for fallback mode
- Updated `generate_with_retry()` function for robust generation
- All TTS calls now use voice-specific advanced parameters

### 5. **Enhanced Voice Configuration** 📋
- Updated `get_voice_config()` function to include new parameters
- All audiobook generation now uses saved voice settings
- Backward compatibility maintained for existing voices

### 6. **UI Integration** 🔗
- **Save Button**: Now includes all 3 new parameters in voice profiles
- **Load Button**: Populates all UI sliders with saved values
- **Test Button**: Uses advanced parameters for voice testing

## 🎮 **User Experience**

### **Before** ❌
- Only basic parameters: Exaggeration, CFG/Pace, Temperature
- Advanced TTS controls were hidden and inaccessible
- All voices used default Min-P/Top-P/Rep-Penalty values

### **After** ✅
- **Full control** over TTS generation parameters
- **Professional voice tuning** with industry-standard controls
- **Per-voice customization** - each voice can have unique settings
- **Backward compatibility** - existing voices continue working
- **Enhanced voice testing** with all parameters

## 📊 **Technical Benefits**

### **Voice Quality Control** 🎭
- **Min-P**: Fine-tune creativity vs consistency
- **Top-P**: Control focus vs diversity in voice generation
- **Repetition Penalty**: Eliminate unwanted voice repetitions

### **Professional Workflow** 🎯
- Voice artists can now fine-tune voices like professional TTS systems
- Each character voice can have unique personality parameters
- Better control over audiobook consistency and quality

### **Future-Proof Architecture** 🚀
- Versioned voice profiles (v2.1) support new features
- Clean parameter passing through all generation functions
- Ready for additional TTS parameters in future updates

## 🧪 **Testing Recommendations**

1. **Create New Voice**: Test all advanced parameters
2. **Load Old Voice**: Verify backward compatibility
3. **Generate Audio**: Confirm parameters affect output quality
4. **Multi-Voice**: Test advanced parameters in character dialogue
5. **Volume + Advanced**: Test combined normalization + advanced settings

## ✨ **What Users See Now**

When saving a voice, users get confirmation like:
```
✅ Voice profile 'Deep Male Narrator' saved successfully!
📊 Audio normalized from -12.3 dB to -18.0 dB
🎛️ Advanced settings: Min-P=0.03, Top-P=0.9, Rep. Penalty=1.3
```

When loading a voice profile, version info is shown:
```
✅ Loaded voice profile: Deep Male Narrator (v2.1)
```

**The Voice Library now provides complete professional-grade TTS control!** 🎉
gradio_tts_app_audiobook.py
ADDED
The diff for this file is too large to render.
gradio_tts_app_audiobook_with_batch.py
ADDED
The diff for this file is too large to render.
install-audiobook.bat
ADDED
@echo off
echo ========================================
echo Chatterbox TTS - Installation Setup
echo ========================================
echo.
echo This will install Chatterbox TTS in a virtual environment
echo to keep it isolated from other Python projects.
echo.
echo Requirements:
echo - Python 3.10 or higher
echo - NVIDIA GPU with CUDA support (recommended)
echo - Git (if you want to pull updates)
echo.
echo Current directory: %CD%
echo.
pause

echo.
echo [1/9] Checking Python installation...
python --version
if %errorlevel% neq 0 (
    echo ERROR: Python is not installed or not in PATH
    echo Please install Python 3.10+ from https://python.org
    pause
    exit /b 1
)

echo.
echo [2/9] Checking if we're in the correct directory...
if not exist "pyproject.toml" (
    echo ERROR: pyproject.toml not found!
    echo Please make sure you're running this from the chatterbox repository root.
    echo Expected files: pyproject.toml, gradio_tts_app.py, src/chatterbox/
    pause
    exit /b 1
)

if not exist "src\chatterbox" (
    echo ERROR: src\chatterbox directory not found!
    echo Please make sure you're in the correct chatterbox repository.
    pause
    exit /b 1
)

echo Repository structure verified ✓

echo.
echo [3/9] Creating virtual environment...
if exist "venv" (
    echo Virtual environment already exists. Removing old one...
    rmdir /s /q venv
)
python -m venv venv

echo.
echo [4/9] Activating virtual environment...
call venv\Scripts\activate.bat

echo.
echo [5/9] Upgrading pip...
python -m pip install --upgrade pip

echo.
echo [6/9] Installing compatible PyTorch with CUDA support...
echo This may take a while (downloading ~2.5GB)...
echo Installing PyTorch 2.4.1 + torchvision 0.19.1 (compatible versions)...
pip install torch==2.4.1+cu121 torchvision==0.19.1+cu121 torchaudio==2.4.1+cu121 --index-url https://download.pytorch.org/whl/cu121

echo.
echo [7/9] Installing Chatterbox TTS and dependencies...
pip install -e .
pip install gradio

echo.
echo [8/9] Installing and configuring pydantic (tested version)...
echo Uninstalling any existing pydantic versions...
pip uninstall pydantic -y
echo Installing pydantic version 2.10.6 (tested and verified)...
pip install pydantic==2.10.6
echo Verifying pydantic installation...
pip show pydantic | findstr /C:"Version: 2.10.6"
if %errorlevel% neq 0 (
    echo WARNING: Pydantic 2.10.6 installation may have issues.
    echo Attempting alternative installation...
    pip install pydantic==2.10.6 --force-reinstall
)

echo Installing numpy (compatible version)...
pip install numpy==1.26.0 --force-reinstall

echo.
echo [9/9] Testing installation...
echo Testing PyTorch and CUDA...
python -c "import torch; print('PyTorch version:', torch.__version__); print('CUDA available:', torch.cuda.is_available())"

if %errorlevel% neq 0 (
    echo WARNING: PyTorch test failed. Trying to fix torchvision compatibility...
    pip uninstall torchvision -y
    pip install torchvision==0.19.1+cu121 --index-url https://download.pytorch.org/whl/cu121 --force-reinstall
    echo Retesting...
    python -c "import torch; print('PyTorch version:', torch.__version__); print('CUDA available:', torch.cuda.is_available())"
)

echo.
echo Testing Chatterbox import...
python -c "from chatterbox.tts import ChatterboxTTS; print('Chatterbox TTS imported successfully!')"

if %errorlevel% neq 0 (
    echo WARNING: Chatterbox import failed. This might be a dependency issue.
    echo The installation will continue, but you may need to troubleshoot.
    echo Common fixes:
    echo 1. Run install.bat again
    echo 2. Check NVIDIA drivers are up to date
    echo 3. Restart your computer
)

echo.
echo Testing pydantic compatibility...
python -c "import pydantic; print('Pydantic version:', pydantic.__version__)"

echo.
echo ========================================
echo Installation Complete!
echo ========================================
echo.
echo Virtual environment created at: %CD%\venv
echo.

echo Final system check...
python -c "import torch; print('GPU:', torch.cuda.get_device_name(0) if torch.cuda.is_available() else 'None')"

echo.
echo ========================================
echo Ready for Audiobooks!
echo ========================================
echo.
echo To start Chatterbox TTS:
echo 1. Run launch_audiobook.bat (recommended)
echo 2. Or manually: venv\Scripts\activate.bat then python gradio_tts_app_audiobook.py
echo.
echo Perfect for:
echo - Voice cloning for audiobook narration
echo - Multiple character voices
echo - Consistent voice quality across chapters
echo - Professional audiobook production
echo.
echo Note: If you encounter pydantic compatibility issues later,
echo you can run update.bat to specifically update pydantic.
echo.
echo Installation finished successfully!
pause
launch_audiobook.bat
ADDED
@echo off
setlocal

rem Performance and Debugging Section
rem =================================
rem Enable CUDA_LAUNCH_BLOCKING for detailed error reports, but it hurts performance.
rem set "CUDA_LAUNCH_BLOCKING=1"
rem set "TORCH_USE_CUDA_DSA=1"

echo Checking for virtual environment...
if not exist "venv\Scripts\activate.bat" (
    echo ERROR: Virtual environment not found!
    echo Please run install.bat first to set up the environment.
    echo.
    echo Make sure you're in the chatterbox repository directory.
    pause
    exit /b 1
)

echo Checking repository structure...
if not exist "gradio_tts_app_audiobook.py" (
    echo ERROR: gradio_tts_app_audiobook.py not found!
    echo Please make sure you're in the chatterbox repository root.
    pause
    exit /b 1
)

echo Activating virtual environment...
call venv\Scripts\activate.bat

echo.
echo Starting Chatterbox TTS Audiobook Edition...
echo Features: Voice Library, Character Management, Audiobook Tools
echo Audio Cleaning Available in "Clean Samples" Tab
echo This may take a moment to load the models...
echo.
echo Current directory: %CD%
echo Python environment: %VIRTUAL_ENV%
echo Voice library will be created at: %CD%\voice_library
echo.

python gradio_tts_app_audiobook.py

echo.
echo Chatterbox TTS Audiobook Edition has stopped.
echo Deactivating virtual environment...
deactivate
echo.
echo Thanks for using Chatterbox TTS Audiobook Edition! 🎧✨
echo Your voice profiles are saved in the voice_library folder.
echo Audio cleaning features are in the "Clean Samples" tab!
pause
pyproject.toml
ADDED
[project]
name = "chatterbox-tts"
version = "0.1.1"
description = "Chatterbox: Open Source TTS and Voice Conversion by Resemble AI"
readme = "README.md"
requires-python = ">=3.8"
license = {file = "LICENSE"}
authors = [
    {name = "resemble-ai", email = "[email protected]"}
]
dependencies = [
    "numpy==1.26.0",
    "resampy==0.4.3",
    "librosa==0.10.0",
    "s3tokenizer",
    "torch==2.4.1",
    "torchaudio==2.4.1",
    "transformers==4.46.3",
    "diffusers==0.29.0",
    "resemble-perth==1.0.1",
    "omegaconf==2.3.0",
    "conformer==0.3.2",
    "spacy>=3.4.0",
]

[project.optional-dependencies]
advanced-nlp = ["spacy[en_core_web_sm]>=3.4.0"]

[project.urls]
Homepage = "https://github.com/resemble-ai/chatterbox"
Repository = "https://github.com/resemble-ai/chatterbox"

[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[tool.setuptools.packages.find]
where = ["src"]
simple_batch_demo.py
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
+"""
+Simple Batch Processing UI Demo for ChatterBox Audiobook
+
+This file shows exactly what needs to be added to the main interface
+to enable the batch processing functionality.
+"""
+
+import gradio as gr
+
+# Simulated functions (these already exist in your main file)
+def load_text_files_batch(file_paths):
+    if not file_paths:
+        return [], "No files uploaded"
+    return [{"filename": f"file_{i}.txt", "content": "sample", "words": 100} for i in range(len(file_paths))], f"Loaded {len(file_paths)} files"
+
+def validate_batch_audiobook_input(file_list, voice, project_name):
+    if not file_list:
+        return gr.Button(interactive=False), "❌ No files loaded", None
+    if not voice:
+        return gr.Button(interactive=False), "❌ Select a voice", None
+    if not project_name:
+        return gr.Button(interactive=False), "❌ Enter project name", None
+    return gr.Button(interactive=True), f"✅ Ready to process {len(file_list)} files", None
+
+def create_batch_audiobook(model, file_list, voice_lib, voice, project_name, norm, level):
+    return None, f"✅ Batch processing complete! Created {len(file_list)} audiobooks with names {project_name}-1, {project_name}-2, etc."
+
+def demo_interface():
+    with gr.Blocks(title="Batch Processing Demo") as demo:
+        gr.HTML("""
+        <h1>🎵 Batch Processing Demo</h1>
+        <p>This shows the UI components that need to be added to your main interface.</p>
+        """)
+
+        with gr.Row():
+            with gr.Column():
+                # Upload Mode Selection
+                upload_mode = gr.Radio(
+                    choices=[("Single File", "single"), ("Batch Processing", "batch")],
+                    value="single",
+                    label="📋 Upload Mode",
+                    info="Switch between single file and batch processing"
+                )
+
+                # Single file upload (visible by default)
+                with gr.Group(visible=True) as single_group:
+                    single_file = gr.File(
+                        label="📄 Upload Single Text File",
+                        file_types=[".txt"],
+                        type="filepath"
+                    )
+                    single_status = gr.HTML("📄 Single file mode")
+
+                # Batch file upload (hidden by default)
+                with gr.Group(visible=False) as batch_group:
+                    batch_files = gr.File(
+                        label="📚 Upload Multiple Text Files",
+                        file_types=[".txt"],
+                        file_count="multiple",
+                        type="filepath"
+                    )
+                    load_batch_btn = gr.Button("📂 Load Batch Files")
+                    batch_status = gr.HTML("📚 Batch processing mode")
+
+                # Voice and project settings
+                voice_dropdown = gr.Dropdown(
+                    choices=["Voice 1", "Voice 2", "Voice 3"],
+                    label="Select Voice",
+                    value=None
+                )
+
+                project_name = gr.Textbox(
+                    label="Project Name",
+                    placeholder="my_audiobook"
+                )
+
+                # Batch file list state
+                batch_file_list = gr.State([])
+
+            with gr.Column():
+                # Processing buttons
+                validate_batch_btn = gr.Button("🔍 Validate Batch", variant="secondary")
+                process_batch_btn = gr.Button("🎵 Create Batch Audiobooks", variant="primary", interactive=False)
+
+                # Status and output
+                processing_status = gr.HTML("Ready for batch processing")
+                output_audio = gr.Audio(label="Preview (last created audiobook)", visible=False)
+
+        # Event handlers
+        def toggle_upload_mode(mode):
+            if mode == "single":
+                return gr.Group(visible=True), gr.Group(visible=False)
+            else:
+                return gr.Group(visible=False), gr.Group(visible=True)
+
+        upload_mode.change(
+            fn=toggle_upload_mode,
+            inputs=[upload_mode],
+            outputs=[single_group, batch_group]
+        )
+
+        load_batch_btn.click(
+            fn=load_text_files_batch,
+            inputs=[batch_files],
+            outputs=[batch_file_list, batch_status]
+        )
+
+        validate_batch_btn.click(
+            fn=validate_batch_audiobook_input,
+            inputs=[batch_file_list, voice_dropdown, project_name],
+            outputs=[process_batch_btn, processing_status, gr.State()]
+        )
+
+        process_batch_btn.click(
+            fn=create_batch_audiobook,
+            inputs=[gr.State(None), batch_file_list, gr.State(""), voice_dropdown, project_name, gr.State(True), gr.State(-18)],
+            outputs=[output_audio, processing_status]
+        )
+
+        gr.HTML("""
+        <div style="margin-top: 20px; padding: 15px; border: 1px solid #ddd; border-radius: 5px;">
+            <h3>📋 To Add This to Your Main Interface:</h3>
+            <ol>
+                <li>Replace the simple file upload section with the Upload Mode selection</li>
+                <li>Add the single and batch upload groups</li>
+                <li>Add the batch processing buttons</li>
+                <li>Wire up the event handlers</li>
+                <li>Add the batch_file_list State component</li>
+            </ol>
+            <p><strong>Key Components Needed:</strong></p>
+            <ul>
+                <li>upload_mode (Radio)</li>
+                <li>single_upload_group and batch_upload_group (Group)</li>
+                <li>batch_files (File with file_count="multiple")</li>
+                <li>load_batch_btn (Button)</li>
+                <li>validate_batch_btn and process_batch_btn (Buttons)</li>
+                <li>batch_file_list (State)</li>
+            </ul>
+        </div>
+        """)
+
+    return demo
+
+if __name__ == "__main__":
+    demo = demo_interface()
+    demo.launch()
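For orientation, a minimal sketch (not part of the uploaded files; import path and text-file names are illustrative) of driving the demo's stub functions directly, mirroring the validate-then-process flow wired up above:

# Hypothetical driver for the stubs defined in simple_batch_demo.py.
from simple_batch_demo import (
    load_text_files_batch,
    validate_batch_audiobook_input,
    create_batch_audiobook,
)

files, status = load_text_files_batch(["chapter1.txt", "chapter2.txt"])  # placeholder paths
print(status)                                    # "Loaded 2 files"
button, msg, _ = validate_batch_audiobook_input(files, "Voice 1", "my_audiobook")
print(msg)                                       # "✅ Ready to process 2 files"
audio, result = create_batch_audiobook(None, files, "", "Voice 1", "my_audiobook", True, -18)
print(result)                                    # "✅ Batch processing complete! ..."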
src/audiobook/__init__.py
ADDED
@@ -0,0 +1,8 @@
+"""
+ChatterBox Audiobook Generator
+
+A modular audiobook creation system using TTS technology.
+"""
+
+__version__ = "1.0.0"
+__author__ = "ChatterBox Team"
src/audiobook/audio_processing.py
ADDED
@@ -0,0 +1,480 @@
+"""
+Audio processing utilities for audiobook generation.
+
+Handles audio saving, combining, trimming, and file operations.
+"""
+
+import os
+import wave
+import numpy as np
+from pathlib import Path
+from typing import List, Tuple, Optional, Any
+import tempfile
+import shutil
+
+# Optional audio processing imports
+try:
+    import librosa
+    import soundfile as sf
+    AUDIO_PROCESSING_AVAILABLE = True
+except ImportError:
+    AUDIO_PROCESSING_AVAILABLE = False
+
+
+def save_audio_chunks(
+    audio_chunks: List[np.ndarray],
+    sample_rate: int,
+    project_name: str,
+    output_dir: str = "audiobook_projects"
+) -> List[str]:
+    """Save audio chunks as numbered WAV files.
+
+    Args:
+        audio_chunks: List of audio arrays
+        sample_rate: Audio sample rate
+        project_name: Name of the project
+        output_dir: Output directory for projects
+
+    Returns:
+        List of saved file paths
+    """
+    if not project_name.strip():
+        project_name = "untitled_audiobook"
+
+    # Sanitize project name
+    safe_project_name = "".join(c for c in project_name if c.isalnum() or c in (' ', '-', '_')).rstrip()
+    safe_project_name = safe_project_name.replace(' ', '_')
+
+    # Create output directory
+    project_dir = os.path.join(output_dir, safe_project_name)
+    os.makedirs(project_dir, exist_ok=True)
+
+    saved_files = []
+
+    for i, audio_chunk in enumerate(audio_chunks, 1):
+        filename = f"{safe_project_name}_{i:03d}.wav"
+        filepath = os.path.join(project_dir, filename)
+
+        # Save as WAV file
+        with wave.open(filepath, 'wb') as wav_file:
+            wav_file.setnchannels(1)  # Mono
+            wav_file.setsampwidth(2)  # 16-bit
+            wav_file.setframerate(sample_rate)
+
+            # Convert float32 to int16
+            audio_int16 = (audio_chunk * 32767).astype(np.int16)
+            wav_file.writeframes(audio_int16.tobytes())
+
+        saved_files.append(filepath)
+
+    return saved_files
+
+
+def combine_audio_files(file_paths: List[str], output_path: str, output_format: str = "wav") -> str:
+    """Combine multiple audio files into a single file.
+
+    Args:
+        file_paths: List of audio file paths to combine
+        output_path: Output file path
+        output_format: Output format (wav or mp3)
+
+    Returns:
+        Success message or error
+    """
+    if not file_paths:
+        return "❌ No audio files to combine"
+
+    try:
+        # Read all audio files and combine
+        combined_audio = []
+        sample_rate = None
+
+        for file_path in file_paths:
+            if not os.path.exists(file_path):
+                continue
+
+            with wave.open(file_path, 'rb') as wav_file:
+                if sample_rate is None:
+                    sample_rate = wav_file.getframerate()
+
+                frames = wav_file.readframes(wav_file.getnframes())
+                audio_data = np.frombuffer(frames, dtype=np.int16)
+                combined_audio.append(audio_data)
+
+        if not combined_audio:
+            return "❌ No valid audio files found"
+
+        # Concatenate all audio
+        final_audio = np.concatenate(combined_audio)
+
+        # Save combined audio
+        if output_format.lower() == "wav":
+            with wave.open(output_path, 'wb') as wav_file:
+                wav_file.setnchannels(1)
+                wav_file.setsampwidth(2)
+                wav_file.setframerate(sample_rate)
+                wav_file.writeframes(final_audio.tobytes())
+        else:
+            # For MP3, we'd need additional dependencies like pydub
+            return "❌ MP3 export not implemented yet"
+
+        return f"✅ Combined {len(file_paths)} files into {output_path}"
+
+    except Exception as e:
+        return f"❌ Error combining audio files: {str(e)}"
+
+
+def save_trimmed_audio(audio_data: Any, original_file_path: str, chunk_num: int) -> Tuple[str, str]:
+    """Save trimmed audio data to a new file.
+
+    Args:
+        audio_data: Audio data from Gradio component
+        original_file_path: Original audio file path
+        chunk_num: Chunk number
+
+    Returns:
+        tuple: (success_message, file_path)
+    """
+    if audio_data is None:
+        return "❌ No audio data provided", ""
+
+    try:
+        # Extract sample rate and audio array from gradio format
+        if isinstance(audio_data, tuple) and len(audio_data) == 2:
+            sample_rate, audio_array = audio_data
+        else:
+            return "❌ Invalid audio data format", ""
+
+        # Create temporary file path
+        base_dir = os.path.dirname(original_file_path)
+        base_name = os.path.splitext(os.path.basename(original_file_path))[0]
+        trimmed_path = os.path.join(base_dir, f"{base_name}_trimmed.wav")
+
+        # Save trimmed audio
+        with wave.open(trimmed_path, 'wb') as wav_file:
+            wav_file.setnchannels(1)
+            wav_file.setsampwidth(2)
+            wav_file.setframerate(sample_rate)
+
+            # Convert to int16 if needed
+            if audio_array.dtype != np.int16:
+                if audio_array.dtype == np.float32 or audio_array.dtype == np.float64:
+                    audio_array = (audio_array * 32767).astype(np.int16)
+                else:
+                    audio_array = audio_array.astype(np.int16)
+
+            wav_file.writeframes(audio_array.tobytes())
+
+        return f"✅ Trimmed audio saved for chunk {chunk_num}", trimmed_path
+
+    except Exception as e:
+        return f"❌ Error saving trimmed audio: {str(e)}", ""
+
+
+def extract_audio_segment(
+    audio_data: Any,
+    start_time: Optional[float] = None,
+    end_time: Optional[float] = None
+) -> Tuple[str, Any]:
+    """Extract a segment from audio data based on time stamps.
+
+    Args:
+        audio_data: Audio data tuple (sample_rate, audio_array)
+        start_time: Start time in seconds
+        end_time: End time in seconds
+
+    Returns:
+        tuple: (status_message, extracted_audio_data)
+    """
+    if audio_data is None:
+        return "❌ No audio data provided", None
+
+    try:
+        if isinstance(audio_data, tuple) and len(audio_data) == 2:
+            sample_rate, audio_array = audio_data
+        else:
+            return "❌ Invalid audio data format", None
+
+        if start_time is None and end_time is None:
+            return "❌ Please specify start time or end time", None
+
+        # Convert time to sample indices
+        start_sample = int(start_time * sample_rate) if start_time is not None else 0
+        end_sample = int(end_time * sample_rate) if end_time is not None else len(audio_array)
+
+        # Validate bounds
+        start_sample = max(0, start_sample)
+        end_sample = min(len(audio_array), end_sample)
+
+        if start_sample >= end_sample:
+            return "❌ Invalid time range", None
+
+        # Extract segment
+        extracted_audio = audio_array[start_sample:end_sample]
+
+        return "✅ Audio segment extracted", (sample_rate, extracted_audio)
+
+    except Exception as e:
+        return f"❌ Error extracting audio segment: {str(e)}", None
+
+
+def handle_audio_trimming(audio_data: Any) -> Tuple[str, Any]:
+    """Handle audio trimming from Gradio component.
+
+    Args:
+        audio_data: Audio data from Gradio component
+
+    Returns:
+        tuple: (status_message, processed_audio_data)
+    """
+    if audio_data is None:
+        return "No audio data", None
+
+    try:
+        # Process audio data from Gradio
+        if isinstance(audio_data, tuple) and len(audio_data) == 2:
+            sample_rate, audio_array = audio_data
+
+            # Validate audio array
+            if audio_array is None or len(audio_array) == 0:
+                return "Empty audio data", None
+
+            return "Audio ready for processing", audio_data
+        else:
+            return "Invalid audio format", None
+
+    except Exception as e:
+        return f"Error processing audio: {str(e)}", None
+
+
+def cleanup_temp_files(file_paths: List[str]) -> None:
+    """Clean up temporary files.
+
+    Args:
+        file_paths: List of file paths to delete
+    """
+    for file_path in file_paths:
+        try:
+            if os.path.exists(file_path):
+                os.remove(file_path)
+        except Exception as e:
+            print(f"Warning: Could not delete {file_path}: {e}")
+
+
+def analyze_audio_quality(file_path: str) -> dict:
+    """Analyze audio file quality metrics.
+
+    Args:
+        file_path: Path to audio file
+
+    Returns:
+        Dictionary with quality metrics
+    """
+    try:
+        if AUDIO_PROCESSING_AVAILABLE:
+            # Use librosa for more detailed analysis
+            y, sr = librosa.load(file_path, sr=None)
+
+            # Calculate advanced metrics
+            rms = np.sqrt(np.mean(y**2))
+            peak = np.max(np.abs(y))
+            duration = len(y) / sr
+
+            # Calculate spectral centroid (brightness)
+            spectral_centroids = librosa.feature.spectral_centroid(y=y, sr=sr)[0]
+            spectral_centroid_mean = np.mean(spectral_centroids)
+
+            # Calculate zero crossing rate (useful for speech analysis)
+            zcr = librosa.feature.zero_crossing_rate(y)[0]
+            zcr_mean = np.mean(zcr)
+
+            return {
+                'duration': duration,
+                'sample_rate': sr,
+                'rms_level': float(rms),
+                'peak_level': float(peak),
+                'dynamic_range': float(peak / (rms + 1e-6)),
+                'spectral_centroid': float(spectral_centroid_mean),
+                'zero_crossing_rate': float(zcr_mean),
+                'has_advanced_analysis': True
+            }
+        else:
+            # Fallback to basic wave analysis
+            with wave.open(file_path, 'rb') as wav_file:
+                sample_rate = wav_file.getframerate()
+                n_frames = wav_file.getnframes()
+                duration = n_frames / sample_rate
+
+                frames = wav_file.readframes(n_frames)
+                audio_data = np.frombuffer(frames, dtype=np.int16)
+
+                # Normalize to float
+                audio_data = audio_data.astype(np.float32) / 32768.0
+
+                # Calculate basic metrics
+                rms = np.sqrt(np.mean(audio_data**2))
+                peak = np.max(np.abs(audio_data))
+
+                return {
+                    'duration': duration,
+                    'sample_rate': sample_rate,
+                    'rms_level': float(rms),
+                    'peak_level': float(peak),
+                    'dynamic_range': float(peak / (rms + 1e-6)),
+                    'has_advanced_analysis': False
+                }
+
+    except Exception as e:
+        return {'error': str(e)}
+
+
+def auto_remove_silence(
+    file_path: str,
+    silence_threshold: float = -50.0,
+    min_silence_duration: float = 0.5
+) -> Tuple[str, str]:
+    """Automatically remove silence from audio file using advanced audio processing.
+
+    Args:
+        file_path: Path to audio file
+        silence_threshold: Silence threshold in dB
+        min_silence_duration: Minimum silence duration to remove in seconds
+
+    Returns:
+        tuple: (status_message, output_file_path)
+    """
+    if not AUDIO_PROCESSING_AVAILABLE:
+        # Fallback behavior - just copy the file with a warning
+        try:
+            output_path = file_path.replace('.wav', '_cleaned.wav')
+            shutil.copy2(file_path, output_path)
+            return "⚠️ Audio processing libraries not available. File copied without cleaning. Install librosa and soundfile for real audio processing.", output_path
+        except Exception as e:
+            return f"❌ Error copying file: {str(e)}", ""
+
+    try:
+        # Load audio with librosa
+        y, sr = librosa.load(file_path, sr=None)
+
+        if len(y) == 0:
+            return "❌ Audio file is empty", ""
+
+        # Convert threshold from dB to amplitude
+        # silence_threshold is in dB (e.g., -50 dB)
+        threshold_amplitude = 10 ** (silence_threshold / 20)
+
+        # Calculate frame length based on minimum silence duration
+        frame_length = int(min_silence_duration * sr)
+        hop_length = frame_length // 4  # 75% overlap
+
+        # Calculate RMS energy for each frame
+        rms = librosa.feature.rms(y=y, frame_length=frame_length, hop_length=hop_length)[0]
+
+        # Create time array for frames
+        times = librosa.frames_to_time(np.arange(len(rms)), sr=sr, hop_length=hop_length)
+
+        # Find frames above threshold (non-silent)
+        non_silent_frames = rms > threshold_amplitude
+
+        if not np.any(non_silent_frames):
+            return "❌ Entire audio file is below silence threshold", ""
+
+        # Find continuous segments of non-silent audio
+        # Add padding to avoid cutting speech too close
+        padding_frames = max(1, int(0.1 * sr / hop_length))  # 100ms padding
+
+        # Expand non-silent regions
+        expanded_mask = np.copy(non_silent_frames)
+        for i in range(len(non_silent_frames)):
+            if non_silent_frames[i]:
+                start_pad = max(0, i - padding_frames)
+                end_pad = min(len(expanded_mask), i + padding_frames + 1)
+                expanded_mask[start_pad:end_pad] = True
+
+        # Convert frame indices back to sample indices
+        non_silent_samples = np.zeros(len(y), dtype=bool)
+        for i, is_voice in enumerate(expanded_mask):
+            if is_voice:
+                start_sample = int(times[i] * sr) if i < len(times) else len(y)
+                end_sample = int(times[i + 1] * sr) if i + 1 < len(times) else len(y)
+                start_sample = max(0, min(start_sample, len(y)))
+                end_sample = max(0, min(end_sample, len(y)))
+                non_silent_samples[start_sample:end_sample] = True
+
+        # Extract non-silent audio
+        cleaned_audio = y[non_silent_samples]
+
+        if len(cleaned_audio) == 0:
+            return "❌ No audio remaining after silence removal", ""
+
+        # Save cleaned audio
+        output_path = file_path.replace('.wav', '_cleaned.wav')
+        sf.write(output_path, cleaned_audio, sr)
+
+        # Calculate statistics
+        original_duration = len(y) / sr
+        cleaned_duration = len(cleaned_audio) / sr
+        removed_duration = original_duration - cleaned_duration
+        percentage_removed = (removed_duration / original_duration) * 100
+
+        return (
+            f"✅ Silence removal completed! "
+            f"Removed {removed_duration:.2f}s ({percentage_removed:.1f}%) of silence. "
+            f"Final duration: {cleaned_duration:.2f}s",
+            output_path
+        )
+
+    except Exception as e:
+        return f"❌ Error removing silence: {str(e)}", ""
+
+
+def normalize_audio_levels(
+    file_path: str,
+    target_lufs: float = -23.0,
+    peak_limit: float = -1.0
+) -> Tuple[str, str]:
+    """Normalize audio levels to broadcast standards.
+
+    Args:
+        file_path: Path to audio file
+        target_lufs: Target loudness in LUFS (default: -23 for broadcast)
+        peak_limit: Peak limit in dB (default: -1.0)
+
+    Returns:
+        tuple: (status_message, output_file_path)
+    """
+    if not AUDIO_PROCESSING_AVAILABLE:
+        return "❌ Audio processing libraries not available. Install librosa and soundfile.", ""
+
+    try:
+        # Load audio
+        y, sr = librosa.load(file_path, sr=None)
+
+        if len(y) == 0:
+            return "❌ Audio file is empty", ""
+
+        # Simple peak normalization (more advanced LUFS would require pyloudnorm)
+        current_peak = np.max(np.abs(y))
+        target_peak = 10 ** (peak_limit / 20)  # Convert dB to linear
+
+        if current_peak > 0:
+            # Normalize to target peak
+            normalized_audio = y * (target_peak / current_peak)
+        else:
+            normalized_audio = y
+
+        # Save normalized audio
+        output_path = file_path.replace('.wav', '_normalized.wav')
+        sf.write(output_path, normalized_audio, sr)
+
+        # Calculate gain applied
+        gain_db = 20 * np.log10(target_peak / current_peak) if current_peak > 0 else 0
+
+        return (
+            f"✅ Audio normalized! Applied {gain_db:+.2f} dB gain. "
+            f"Peak level now at {peak_limit:.1f} dB.",
+            output_path
+        )
+
+    except Exception as e:
+        return f"❌ Error normalizing audio: {str(e)}", ""
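As a rough usage sketch (not part of the uploaded files; dummy audio data and the installed package import path are assumptions), the saving, combining, and analysis helpers above compose like this:

# Hypothetical end-to-end use of the audio_processing helpers.
import numpy as np
from audiobook.audio_processing import save_audio_chunks, combine_audio_files, analyze_audio_quality

chunks = [np.zeros(24000, dtype=np.float32), 0.1 * np.ones(24000, dtype=np.float32)]  # two dummy 1-second chunks
paths = save_audio_chunks(chunks, sample_rate=24000, project_name="demo book")
print(paths)  # audiobook_projects/demo_book/demo_book_001.wav, ...
print(combine_audio_files(paths, "audiobook_projects/demo_book/demo_book_full.wav"))
print(analyze_audio_quality(paths[0]))  # duration, RMS, peak, etc.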
src/audiobook/config.py
ADDED
@@ -0,0 +1,72 @@
+"""
+Configuration management for the audiobook generator.
+
+Handles loading and saving of application configuration including voice library paths.
+"""
+
+import json
+import os
+from pathlib import Path
+
+
+# Default configuration values
+DEFAULT_VOICE_LIBRARY = "voice_library"
+CONFIG_FILE = "audiobook_config.json"
+
+
+def load_config() -> str:
+    """Load configuration including voice library path.
+
+    Returns:
+        str: Path to the voice library directory
+    """
+    if os.path.exists(CONFIG_FILE):
+        try:
+            with open(CONFIG_FILE, 'r') as f:
+                config = json.load(f)
+                return config.get('voice_library_path', DEFAULT_VOICE_LIBRARY)
+        except Exception:
+            return DEFAULT_VOICE_LIBRARY
+    return DEFAULT_VOICE_LIBRARY
+
+
+def save_config(voice_library_path: str) -> str:
+    """Save configuration including voice library path.
+
+    Args:
+        voice_library_path: Path to the voice library directory
+
+    Returns:
+        str: Success or error message
+    """
+    config = {
+        'voice_library_path': voice_library_path,
+        'last_updated': str(Path().resolve())  # timestamp
+    }
+    try:
+        with open(CONFIG_FILE, 'w') as f:
+            json.dump(config, f, indent=2)
+        return f"✅ Configuration saved - Voice library path: {voice_library_path}"
+    except Exception as e:
+        return f"❌ Error saving configuration: {str(e)}"
+
+
+def update_voice_library_path(new_path: str) -> tuple[str, str]:
+    """Update the voice library path in configuration.
+
+    Args:
+        new_path: New path to the voice library
+
+    Returns:
+        tuple: (status_message, updated_path)
+    """
+    if not new_path.strip():
+        return "❌ Voice library path cannot be empty", ""
+
+    # Create directory if it doesn't exist
+    try:
+        os.makedirs(new_path, exist_ok=True)
+        save_result = save_config(new_path)
+        return save_result, new_path
+    except Exception as e:
+        return f"❌ Error updating voice library path: {str(e)}", ""
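A minimal sketch (not from the repository; the path and import path are illustrative) of how the configuration helpers above are meant to be used:

# Hypothetical use of the config helpers.
from audiobook.config import load_config, update_voice_library_path

status, new_path = update_voice_library_path("voice_library")  # creates the folder and writes audiobook_config.json
print(status)
print(load_config())  # -> "voice_library" (or the default if no config file exists)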
src/audiobook/models.py
ADDED
@@ -0,0 +1,236 @@
+"""Model management and TTS operations for the audiobook generation system."""
+
+import torch
+import random
+import numpy as np
+from chatterbox.tts import ChatterboxTTS
+from typing import Any, Tuple, Optional
+
+
+# Global device setting - will be imported from main file
+DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+MULTI_VOICE_DEVICE = "cpu"  # Force CPU for multi-voice processing
+
+
+def set_seed(seed: int) -> None:
+    """Set random seeds for reproducible generation.
+
+    Args:
+        seed: Random seed value
+    """
+    torch.manual_seed(seed)
+    torch.cuda.manual_seed(seed)
+    torch.cuda.manual_seed_all(seed)
+    random.seed(seed)
+    np.random.seed(seed)
+
+
+def load_model() -> ChatterboxTTS:
+    """Load TTS model for the default device.
+
+    Returns:
+        ChatterboxTTS: Loaded TTS model
+    """
+    model = ChatterboxTTS.from_pretrained(DEVICE)
+    return model
+
+
+def load_model_cpu() -> ChatterboxTTS:
+    """Load model specifically for CPU processing.
+
+    Returns:
+        ChatterboxTTS: CPU-loaded TTS model
+    """
+    model = ChatterboxTTS.from_pretrained("cpu")
+    return model
+
+
+def clear_gpu_memory() -> None:
+    """Clear GPU memory cache to prevent CUDA errors."""
+    if torch.cuda.is_available():
+        torch.cuda.empty_cache()
+        torch.cuda.synchronize()
+
+
+def check_gpu_memory() -> str:
+    """Check current GPU memory usage.
+
+    Returns:
+        str: GPU memory status information
+    """
+    if torch.cuda.is_available():
+        allocated = torch.cuda.memory_allocated()
+        cached = torch.cuda.memory_reserved()
+        return f"GPU Memory - Allocated: {allocated//1024//1024}MB, Cached: {cached//1024//1024}MB"
+    return "CUDA not available"
+
+
+def generate(
+    model: ChatterboxTTS,
+    text: str,
+    audio_prompt_path: str,
+    exaggeration: float,
+    temperature: float,
+    seed_num: int,
+    cfgw: float
+) -> Tuple[int, np.ndarray]:
+    """Generate audio from text using the TTS model.
+
+    Args:
+        model: TTS model instance
+        text: Text to convert to speech
+        audio_prompt_path: Path to audio prompt file
+        exaggeration: Exaggeration parameter for generation
+        temperature: Temperature for generation randomness
+        seed_num: Random seed (0 for random)
+        cfgw: CFG weight parameter
+
+    Returns:
+        tuple: (sample_rate, audio_array)
+    """
+    if model is None:
+        model = ChatterboxTTS.from_pretrained(DEVICE)
+
+    if seed_num != 0:
+        set_seed(int(seed_num))
+
+    wav = model.generate(
+        text,
+        audio_prompt_path=audio_prompt_path,
+        exaggeration=exaggeration,
+        temperature=temperature,
+        cfg_weight=cfgw,
+    )
+    return (model.sr, wav.squeeze(0).numpy())
+
+
+def generate_with_cpu_fallback(
+    model: ChatterboxTTS,
+    text: str,
+    audio_prompt_path: str,
+    exaggeration: float,
+    temperature: float,
+    cfg_weight: float
+) -> Tuple[Any, str]:
+    """Generate audio with automatic CPU fallback for problematic CUDA errors.
+
+    Args:
+        model: TTS model instance
+        text: Text to convert to speech
+        audio_prompt_path: Path to audio prompt file
+        exaggeration: Exaggeration parameter
+        temperature: Temperature parameter
+        cfg_weight: CFG weight parameter
+
+    Returns:
+        tuple: (audio_wav, device_used)
+    """
+    # First try GPU if available
+    if DEVICE == "cuda":
+        try:
+            clear_gpu_memory()
+            wav = model.generate(
+                text,
+                audio_prompt_path=audio_prompt_path,
+                exaggeration=exaggeration,
+                temperature=temperature,
+                cfg_weight=cfg_weight,
+            )
+            return wav, "GPU"
+        except RuntimeError as e:
+            if ("srcIndex < srcSelectDimSize" in str(e) or
+                "CUDA" in str(e) or
+                "out of memory" in str(e).lower()):
+
+                print(f"⚠️ CUDA error detected, falling back to CPU: {str(e)[:100]}...")
+                # Fall through to CPU mode
+            else:
+                raise e
+
+    # CPU fallback or primary CPU mode
+    try:
+        # Load CPU model if needed
+        cpu_model = ChatterboxTTS.from_pretrained("cpu")
+        wav = cpu_model.generate(
+            text,
+            audio_prompt_path=audio_prompt_path,
+            exaggeration=exaggeration,
+            temperature=temperature,
+            cfg_weight=cfg_weight,
+        )
+        return wav, "CPU"
+    except Exception as e:
+        raise RuntimeError(f"Both GPU and CPU generation failed: {str(e)}")
+
+
+def generate_with_retry(
+    model: ChatterboxTTS,
+    text: str,
+    audio_prompt_path: str,
+    exaggeration: float,
+    temperature: float,
+    cfg_weight: float,
+    max_retries: int = 3
+) -> Tuple[Any, str]:
+    """Generate audio with retry mechanism for robustness.
+
+    Args:
+        model: TTS model instance
+        text: Text to convert to speech
+        audio_prompt_path: Path to audio prompt file
+        exaggeration: Exaggeration parameter
+        temperature: Temperature parameter
+        cfg_weight: CFG weight parameter
+        max_retries: Maximum number of retry attempts
+
+    Returns:
+        tuple: (audio_wav, device_used)
+    """
+    last_error = None
+
+    for attempt in range(max_retries):
+        try:
+            return generate_with_cpu_fallback(
+                model, text, audio_prompt_path, exaggeration, temperature, cfg_weight
+            )
+        except Exception as e:
+            last_error = e
+            print(f"Attempt {attempt + 1}/{max_retries} failed: {str(e)}")
+            if attempt < max_retries - 1:
+                clear_gpu_memory()
+
+    raise RuntimeError(f"All {max_retries} attempts failed. Last error: {last_error}")
+
+
+def force_cpu_processing() -> bool:
+    """Check if we should force CPU processing for stability.
+
+    Returns:
+        bool: True if CPU processing should be forced
+    """
+    # For multi-voice, always use CPU to avoid CUDA indexing issues
+    return True
+
+
+def get_model_device_str(model_obj: Optional[ChatterboxTTS]) -> str:
+    """Get the device string for a model object.
+
+    Args:
+        model_obj: TTS model instance
+
+    Returns:
+        str: Device information string
+    """
+    if model_obj is None:
+        return "No model loaded"
+
+    try:
+        # Try to access model device info
+        if hasattr(model_obj, 'device'):
+            return f"Model device: {model_obj.device}"
+        elif hasattr(model_obj, 'model') and hasattr(model_obj.model, 'device'):
+            return f"Model device: {model_obj.model.device}"
+        else:
+            return "Device info unavailable"
+    except Exception as e:
+        return f"Error getting device info: {str(e)}"
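A minimal single-chunk generation sketch (not from the repository; the voice sample path, parameter values, and import path are illustrative) built on the helpers above:

# Hypothetical single-chunk TTS call using the models helpers.
from audiobook.models import load_model, generate

model = load_model()  # ChatterboxTTS on CUDA if available, otherwise CPU
sr, audio = generate(
    model,
    text="Hello from the audiobook pipeline.",
    audio_prompt_path="voice_library/my_voice/sample.wav",  # assumed voice sample
    exaggeration=0.5,
    temperature=0.8,
    seed_num=0,
    cfgw=0.5,
)
print(sr, audio.shape)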
src/audiobook/processing.py
ADDED
@@ -0,0 +1,928 @@
| 1 |
+
"""
|
| 2 |
+
Text processing utilities for audiobook generation.
|
| 3 |
+
|
| 4 |
+
Handles text chunking, validation, multi-voice parsing, and text cleanup.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import re
|
| 8 |
+
import os
|
| 9 |
+
import wave
|
| 10 |
+
import numpy as np
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
from typing import List, Dict, Tuple, Any
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def chunk_text_by_sentences(text: str, max_words: int = 50) -> List[str]:
|
| 16 |
+
"""Split text into chunks, breaking at sentence boundaries after reaching max_words.
|
| 17 |
+
|
| 18 |
+
Args:
|
| 19 |
+
text: Input text to chunk
|
| 20 |
+
max_words: Maximum words per chunk
|
| 21 |
+
|
| 22 |
+
Returns:
|
| 23 |
+
List of text chunks
|
| 24 |
+
"""
|
| 25 |
+
# Split text into sentences using regex to handle multiple punctuation marks
|
| 26 |
+
sentences = re.split(r'([.!?]+\s*)', text)
|
| 27 |
+
|
| 28 |
+
chunks = []
|
| 29 |
+
current_chunk = ""
|
| 30 |
+
current_word_count = 0
|
| 31 |
+
|
| 32 |
+
i = 0
|
| 33 |
+
while i < len(sentences):
|
| 34 |
+
sentence = sentences[i].strip()
|
| 35 |
+
if not sentence:
|
| 36 |
+
i += 1
|
| 37 |
+
continue
|
| 38 |
+
|
| 39 |
+
# Add punctuation if it exists
|
| 40 |
+
if i + 1 < len(sentences) and re.match(r'[.!?]+\s*', sentences[i + 1]):
|
| 41 |
+
sentence += sentences[i + 1]
|
| 42 |
+
i += 2
|
| 43 |
+
else:
|
| 44 |
+
i += 1
|
| 45 |
+
|
| 46 |
+
sentence_words = len(sentence.split())
|
| 47 |
+
|
| 48 |
+
# If adding this sentence would exceed max_words, start new chunk
|
| 49 |
+
if current_word_count > 0 and current_word_count + sentence_words > max_words:
|
| 50 |
+
if current_chunk.strip():
|
| 51 |
+
chunks.append(current_chunk.strip())
|
| 52 |
+
current_chunk = sentence
|
| 53 |
+
current_word_count = sentence_words
|
| 54 |
+
else:
|
| 55 |
+
current_chunk += " " + sentence if current_chunk else sentence
|
| 56 |
+
current_word_count += sentence_words
|
| 57 |
+
|
| 58 |
+
# Add the last chunk if it exists
|
| 59 |
+
if current_chunk.strip():
|
| 60 |
+
chunks.append(current_chunk.strip())
|
| 61 |
+
|
| 62 |
+
return chunks
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def adaptive_chunk_text(text: str, max_words: int = 50, reduce_on_error: bool = True) -> List[str]:
|
| 66 |
+
"""Adaptively chunk text with error handling.
|
| 67 |
+
|
| 68 |
+
Args:
|
| 69 |
+
text: Input text to chunk
|
| 70 |
+
max_words: Maximum words per chunk
|
| 71 |
+
reduce_on_error: Whether to reduce chunk size on errors
|
| 72 |
+
|
| 73 |
+
Returns:
|
| 74 |
+
List of text chunks
|
| 75 |
+
"""
|
| 76 |
+
return chunk_text_by_sentences(text, max_words)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def load_text_file(file_path: str) -> Tuple[str, str]:
|
| 80 |
+
"""Load text content from a file with encoding detection.
|
| 81 |
+
|
| 82 |
+
Args:
|
| 83 |
+
file_path: Path to the text file
|
| 84 |
+
|
| 85 |
+
Returns:
|
| 86 |
+
tuple: (text_content, status_message)
|
| 87 |
+
"""
|
| 88 |
+
if not file_path:
|
| 89 |
+
return "", "No file selected"
|
| 90 |
+
|
| 91 |
+
try:
|
| 92 |
+
# Try UTF-8 first
|
| 93 |
+
try:
|
| 94 |
+
with open(file_path, 'r', encoding='utf-8') as f:
|
| 95 |
+
content = f.read()
|
| 96 |
+
except UnicodeDecodeError:
|
| 97 |
+
# Fallback to latin-1 for older files
|
| 98 |
+
with open(file_path, 'r', encoding='latin-1') as f:
|
| 99 |
+
content = f.read()
|
| 100 |
+
|
| 101 |
+
if not content.strip():
|
| 102 |
+
return "", "File is empty"
|
| 103 |
+
|
| 104 |
+
return content.strip(), f"✅ Loaded {len(content.split())} words from file"
|
| 105 |
+
|
| 106 |
+
except FileNotFoundError:
|
| 107 |
+
return "", "❌ File not found"
|
| 108 |
+
except Exception as e:
|
| 109 |
+
return "", f"❌ Error reading file: {str(e)}"
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def validate_audiobook_input(text_content: str, selected_voice: str, project_name: str) -> Tuple[bool, str]:
|
| 113 |
+
"""Validate input for single-voice audiobook creation.
|
| 114 |
+
|
| 115 |
+
Args:
|
| 116 |
+
text_content: Text to validate
|
| 117 |
+
selected_voice: Selected voice name
|
| 118 |
+
project_name: Project name
|
| 119 |
+
|
| 120 |
+
Returns:
|
| 121 |
+
tuple: (is_valid, error_message)
|
| 122 |
+
"""
|
| 123 |
+
if not text_content or not text_content.strip():
|
| 124 |
+
return False, "❌ Please provide text content or upload a text file"
|
| 125 |
+
|
| 126 |
+
if not selected_voice:
|
| 127 |
+
return False, "❌ Please select a voice"
|
| 128 |
+
|
| 129 |
+
if not project_name or not project_name.strip():
|
| 130 |
+
return False, "❌ Please provide a project name"
|
| 131 |
+
|
| 132 |
+
word_count = len(text_content.split())
|
| 133 |
+
if word_count < 10:
|
| 134 |
+
return False, "❌ Text content too short (minimum 10 words)"
|
| 135 |
+
|
| 136 |
+
if word_count > 50000:
|
| 137 |
+
return False, "❌ Text content too long (maximum 50,000 words for performance)"
|
| 138 |
+
|
| 139 |
+
return True, ""
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def parse_multi_voice_text(text: str) -> List[Dict[str, str]]:
|
| 143 |
+
"""Parse text with multi-voice format markers.
|
| 144 |
+
|
| 145 |
+
Expected format:
|
| 146 |
+
[CHARACTER_NAME] dialogue text (no colon needed, tag and dialogue can be on the same line)
|
| 147 |
+
|
| 148 |
+
Args:
|
| 149 |
+
text: Input text with character markers
|
| 150 |
+
|
| 151 |
+
Returns:
|
| 152 |
+
List of segments with character and text, e.g.,
|
| 153 |
+
[{'character': 'Character1', 'text': 'Dialogue for character 1.'}, ...]
|
| 154 |
+
"""
|
| 155 |
+
segments = []
|
| 156 |
+
|
| 157 |
+
# Regex to find [CharacterName] tags
|
| 158 |
+
# It captures the character name and the text that follows until the next tag or end of string
|
| 159 |
+
# Using re.split to capture text between tags and the tags themselves
|
| 160 |
+
parts = re.split(r'(\[[^\]]+\])', text)
|
| 161 |
+
|
| 162 |
+
current_character = None
|
| 163 |
+
buffer = ""
|
| 164 |
+
|
| 165 |
+
for part in parts:
|
| 166 |
+
if not part:
|
| 167 |
+
continue # Skip empty parts that can result from re.split
|
| 168 |
+
|
| 169 |
+
part_stripped = part.strip()
|
| 170 |
+
if re.match(r'^\[[^\]]+\]$', part_stripped): # It's a character tag
|
| 171 |
+
if current_character and buffer.strip():
|
| 172 |
+
segments.append({
|
| 173 |
+
'character': current_character,
|
| 174 |
+
'text': buffer.strip()
|
| 175 |
+
})
|
| 176 |
+
current_character = part_stripped[1:-1] # Remove brackets
|
| 177 |
+
buffer = ""
|
| 178 |
+
else: # It's text content
|
| 179 |
+
if current_character is None and part_stripped: # Text before any character tag
|
| 180 |
+
# Assign to a default "Narrator" if no character tag precedes it.
|
| 181 |
+
# This can be adjusted based on desired behavior for untagged leading text.
|
| 182 |
+
segments.append({
|
| 183 |
+
'character': "Narrator", # Or None, if untagged leading text should be handled differently
|
| 184 |
+
'text': part_stripped
|
| 185 |
+
})
|
| 186 |
+
buffer = "" # Clear buffer as this part is processed
|
| 187 |
+
elif current_character:
|
| 188 |
+
buffer += part # Append to current character's text buffer
|
| 189 |
+
# If no current_character and it's not leading text, this part might be ignored
|
| 190 |
+
# or could be appended to a default narrator if strict tagging isn't enforced.
|
| 191 |
+
# Current logic: only appends if current_character is set.
|
| 192 |
+
|
| 193 |
+
# Add any remaining text in the buffer for the last character
|
| 194 |
+
if current_character and buffer.strip():
|
| 195 |
+
segments.append({
|
| 196 |
+
'character': current_character,
|
| 197 |
+
'text': buffer.strip()
|
| 198 |
+
})
|
| 199 |
+
elif not current_character and buffer.strip() and not segments: # Only if it's the *only* content
|
| 200 |
+
# If the entire text has no tags, assign it all to Narrator
|
| 201 |
+
segments.append({
|
| 202 |
+
'character': "Narrator",
|
| 203 |
+
'text': buffer.strip()
|
| 204 |
+
})
|
| 205 |
+
|
| 206 |
+
# Filter out any segments where the text is empty after stripping
|
| 207 |
+
final_segments = [seg for seg in segments if seg['text']]
|
| 208 |
+
|
| 209 |
+
# Debug: Print parsed segments by the module
|
| 210 |
+
# print("Parsed Segments by text_processing.py module:", final_segments)
|
| 211 |
+
return final_segments
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
def clean_character_name_from_text(text: str, voice_name: str) -> str:
|
| 215 |
+
"""Clean character name markers from text.
|
| 216 |
+
The new parse_multi_voice_text in this module should handle name/dialogue separation.
|
| 217 |
+
This function primarily acts as a pass-through or for minor cleanup.
|
| 218 |
+
|
| 219 |
+
Args:
|
| 220 |
+
text: Text that may contain character markers
|
| 221 |
+
voice_name: Voice name (largely ignored by this simplified version)
|
| 222 |
+
|
| 223 |
+
Returns:
|
| 224 |
+
Cleaned text
|
| 225 |
+
"""
|
| 226 |
+
# The parsing logic should have already separated the character name.
|
| 227 |
+
# This function just ensures the text is stripped.
|
| 228 |
+
return text.strip()
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
def chunk_multi_voice_segments(segments: List[Dict[str, str]], max_words: int = 50) -> List[Dict[str, str]]:
|
| 232 |
+
"""Chunk multi-voice segments while preserving character assignments.
|
| 233 |
+
|
| 234 |
+
Args:
|
| 235 |
+
segments: List of character segments
|
| 236 |
+
max_words: Maximum words per chunk
|
| 237 |
+
|
| 238 |
+
Returns:
|
| 239 |
+
List of chunked segments with character assignments
|
| 240 |
+
"""
|
| 241 |
+
chunked_segments = []
|
| 242 |
+
|
| 243 |
+
for segment in segments:
|
| 244 |
+
character = segment['character']
|
| 245 |
+
text = segment['text']
|
| 246 |
+
|
| 247 |
+
# Chunk the text for this character
|
| 248 |
+
text_chunks = chunk_text_by_sentences(text, max_words)
|
| 249 |
+
|
| 250 |
+
# Create segment for each chunk
|
| 251 |
+
for chunk in text_chunks:
|
| 252 |
+
chunked_segments.append({
|
| 253 |
+
'character': character,
|
| 254 |
+
'text': chunk
|
| 255 |
+
})
|
| 256 |
+
|
| 257 |
+
return chunked_segments
|
| 258 |
+
|
| 259 |
+
|
def validate_multi_voice_text(text_content: str, voice_library_path: str) -> Tuple[bool, str, List[str]]:
    """Validate multi-voice text format and extract characters.

    Args:
        text_content: Text to validate
        voice_library_path: Path to voice library

    Returns:
        tuple: (is_valid, error_message, character_list)
    """
    if not text_content or not text_content.strip():
        return False, "❌ Please provide text content", []

    # Parse segments to extract characters
    segments = parse_multi_voice_text(text_content)

    if not segments:
        return False, "❌ No valid character segments found. Use format: [CHARACTER_NAME]: dialogue", []

    # Extract unique characters
    characters = list(set(segment['character'] for segment in segments))

    if len(characters) < 2:
        return False, "❌ Multi-voice requires at least 2 different characters", characters

    if len(characters) > 6:
        return False, "❌ Too many characters (maximum 6 for performance)", characters

    # Check if we have enough text
    total_words = sum(len(segment['text'].split()) for segment in segments)
    if total_words < 20:
        return False, "❌ Not enough text content (minimum 20 words)", characters

    return True, "", characters

def validate_multi_audiobook_input(text_content: str, voice_library_path: str, project_name: str) -> Tuple[bool, str]:
    """Validate input for multi-voice audiobook creation.

    Args:
        text_content: Text to validate
        voice_library_path: Path to voice library
        project_name: Project name

    Returns:
        tuple: (is_valid, error_message)
    """
    if not project_name or not project_name.strip():
        return False, "❌ Please provide a project name"

    is_valid, error_msg, _ = validate_multi_voice_text(text_content, voice_library_path)
    return is_valid, error_msg

def analyze_multi_voice_text(text_content: str, voice_library_path: str) -> Tuple[bool, str, Dict[str, int]]:
    """Analyze multi-voice text and return character statistics.

    Args:
        text_content: Text to analyze
        voice_library_path: Path to voice library

    Returns:
        tuple: (is_valid, message, character_counts)
    """
    is_valid, error_msg, characters = validate_multi_voice_text(text_content, voice_library_path)

    if not is_valid:
        return False, error_msg, {}

    # Parse segments and count words per character
    segments = parse_multi_voice_text(text_content)
    character_counts = {}

    for segment in segments:
        character = segment['character']
        word_count = len(segment['text'].split())
        character_counts[character] = character_counts.get(character, 0) + word_count

    total_words = sum(character_counts.values())
    message = f"✅ Found {len(characters)} characters with {total_words} total words"

    return True, message, character_counts

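To see validation and analysis together, a hedged example with invented two-character dialogue (the exact word counts depend on how the parser treats the `[Name]:` prefix):

```python
from audiobook.text_processing import analyze_multi_voice_text  # assumes src/ on sys.path

text = (
    "[Alice]: I counted the boats in the harbor twice this morning before breakfast.\n"
    "[Bob]: And I checked the tide tables again, just to be certain about tonight."
)
ok, message, counts = analyze_multi_voice_text(text, "voice_library")
print(ok)       # True: two characters and more than 20 words in total
print(message)  # e.g. "✅ Found 2 characters with 25 total words"
print(counts)   # e.g. {'Alice': 12, 'Bob': 13}
```
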
def _filter_problematic_short_chunks(chunks: List[str], voice_assignments: Dict[str, str]) -> List[str]:
    """Filter out problematic short chunks that might cause TTS issues.

    Args:
        chunks: List of text chunks
        voice_assignments: Character to voice mappings

    Returns:
        Filtered list of chunks
    """
    filtered_chunks = []
    min_length = 10  # Minimum character length

    for chunk in chunks:
        # Skip very short chunks
        if len(chunk.strip()) < min_length:
            continue

        # Skip chunks that are just punctuation or whitespace
        if not re.search(r'[a-zA-Z]', chunk):
            continue

        filtered_chunks.append(chunk)

    return filtered_chunks


# PHASE 4 REFACTOR: Adding audio processing functions to this module
# Originally from gradio_tts_app_audiobook.py save_audio_chunks() function

def save_audio_chunks(audio_chunks: List[np.ndarray], sample_rate: int, project_name: str, output_dir: str = "audiobook_projects") -> Tuple[List[str], str]:
    """
    Save audio chunks as numbered WAV files

    Args:
        audio_chunks: List of audio numpy arrays
        sample_rate: Sample rate for audio files
        project_name: Name of the project
        output_dir: Directory to save project files

    Returns:
        tuple: (list of saved file paths, project directory path)
    """
    if not project_name.strip():
        project_name = "untitled_audiobook"

    # Sanitize project name
    safe_project_name = "".join(c for c in project_name if c.isalnum() or c in (' ', '-', '_')).rstrip()
    safe_project_name = safe_project_name.replace(' ', '_')

    # Create output directory
    project_dir = os.path.join(output_dir, safe_project_name)
    os.makedirs(project_dir, exist_ok=True)

    saved_files = []

    for i, audio_chunk in enumerate(audio_chunks, 1):
        filename = f"{safe_project_name}_{i:03d}.wav"
        filepath = os.path.join(project_dir, filename)

        # Save as WAV file
        with wave.open(filepath, 'wb') as wav_file:
            wav_file.setnchannels(1)  # Mono
            wav_file.setsampwidth(2)  # 16-bit
            wav_file.setframerate(sample_rate)

            # Convert float32 to int16; clip first so samples outside
            # [-1.0, 1.0] cannot wrap around when cast
            audio_int16 = (np.clip(audio_chunk, -1.0, 1.0) * 32767).astype(np.int16)
            wav_file.writeframes(audio_int16.tobytes())

        saved_files.append(filepath)

    return saved_files, project_dir

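A runnable sketch of the saver with a synthetic test tone (the project name and tone are illustrative; the import path assumes `src/` is on `sys.path`):

```python
import numpy as np
from audiobook.text_processing import save_audio_chunks  # assumes src/ on sys.path

sr = 24000
t = np.linspace(0, 1.0, sr, endpoint=False)
tone = (0.2 * np.sin(2 * np.pi * 440 * t)).astype(np.float32)  # 1 s test tone

files, project_dir = save_audio_chunks([tone, tone], sr, "demo project")
print(files)        # e.g. ['audiobook_projects/demo_project/demo_project_001.wav', ...]
print(project_dir)  # e.g. 'audiobook_projects/demo_project'
```
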
# PHASE 4 REFACTOR: Adding extract_audio_segment function from gradio_tts_app_audiobook.py
def extract_audio_segment(audio_data, start_time: float = None, end_time: float = None) -> tuple:
    """Extract a segment from audio data.

    Args:
        audio_data: Numpy array of audio data
        start_time: Start time in seconds (None for beginning)
        end_time: End time in seconds (None for end)

    Returns:
        tuple: (status_message, extracted_audio_data)
    """
    try:
        sample_rate = 24000  # Default sample rate

        if audio_data is None or len(audio_data) == 0:
            return "❌ No audio data to extract from", None

        total_duration = len(audio_data) / sample_rate

        start_sample = int(start_time * sample_rate) if start_time else 0
        end_sample = int(end_time * sample_rate) if end_time else len(audio_data)

        # Validate bounds
        start_sample = max(0, min(start_sample, len(audio_data)))
        end_sample = max(start_sample, min(end_sample, len(audio_data)))

        extracted_audio = audio_data[start_sample:end_sample]

        if len(extracted_audio) == 0:
            return "❌ Invalid time range - no audio extracted", None

        extracted_duration = len(extracted_audio) / sample_rate
        return f"✅ Extracted {extracted_duration:.2f}s of audio", extracted_audio

    except Exception as e:
        return f"❌ Error extracting audio segment: {str(e)}", None

def process_text_for_pauses(text: str, pause_duration: float = 0.1) -> tuple:
    """Process text to count returns and calculate total pause time.

    Args:
        text: Input text to process
        pause_duration: Duration in seconds per line break (default 0.1)

    Returns:
        tuple: (processed_text, return_count, total_pause_duration)
    """
    # Normalize line endings first so a Windows '\r\n' counts as one break,
    # not two, then count the line breaks
    normalized_text = text.replace('\r\n', '\n').replace('\r', '\n')
    return_count = normalized_text.count('\n')
    total_pause_duration = return_count * pause_duration

    # Clean up text for TTS: replace runs of newlines with a single space
    # to avoid empty chunks
    processed_text = re.sub(r'\n+', ' ', normalized_text).strip()

    print(f"🔇 Detected {return_count} line breaks → {total_pause_duration:.1f}s total pause time")

    return processed_text, return_count, total_pause_duration

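A small worked example of the pause accounting above (sample text invented; import path assumes `src/` on `sys.path`):

```python
from audiobook.text_processing import process_text_for_pauses  # assumes src/ on sys.path

text = "Line one.\nLine two.\n\nLine three."
processed, returns, pause = process_text_for_pauses(text, pause_duration=0.1)
print(returns)    # 3  (three newline characters)
print(pause)      # 0.30000000000000004 (3 x 0.1 s)
print(processed)  # "Line one. Line two. Line three."
```
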
def create_silence_audio(duration: float, sample_rate: int = 24000) -> np.ndarray:
    """Create silence audio of specified duration.

    Args:
        duration: Duration in seconds
        sample_rate: Sample rate for the audio

    Returns:
        numpy array of silence audio
    """
    num_samples = int(duration * sample_rate)
    return np.zeros(num_samples, dtype=np.float32)

def insert_pauses_between_chunks(audio_chunks: List[np.ndarray],
                                 return_count: int,
                                 sample_rate: int = 24000,
                                 pause_duration: float = 0.1) -> np.ndarray:
    """Insert pauses between audio chunks based on return count.

    Args:
        audio_chunks: List of audio chunk arrays
        return_count: Number of returns detected in original text
        sample_rate: Sample rate for audio
        pause_duration: Duration per return in seconds

    Returns:
        Combined audio with pauses inserted
    """
    if not audio_chunks:
        return np.array([], dtype=np.float32)

    if return_count == 0:
        # No pauses needed, just concatenate
        return np.concatenate(audio_chunks)

    # Calculate how to distribute pauses. For simplicity, all pause time is
    # added at the end; a more sophisticated approach could distribute the
    # pauses throughout.
    total_pause_time = return_count * pause_duration
    pause_audio = create_silence_audio(total_pause_time, sample_rate)

    print(f"🔇 Adding {total_pause_time:.1f}s pause ({return_count} returns × {pause_duration}s each)")

    # Concatenate audio chunks with the pause at the end
    combined_audio = np.concatenate(audio_chunks)
    final_audio = np.concatenate([combined_audio, pause_audio])

    return final_audio

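The comment above flags that distributing the pause time would be more natural than appending it. A possible variant, shown purely as an illustration (this function is not part of the upload):

```python
import numpy as np
from typing import List

def insert_pauses_distributed(audio_chunks: List[np.ndarray],
                              return_count: int,
                              sample_rate: int = 24000,
                              pause_duration: float = 0.1) -> np.ndarray:
    """Spread the total pause time evenly into the gaps between chunks."""
    if not audio_chunks:
        return np.array([], dtype=np.float32)
    gaps = len(audio_chunks) - 1
    if gaps == 0 or return_count == 0:
        return np.concatenate(audio_chunks)
    per_gap = (return_count * pause_duration) / gaps
    silence = np.zeros(int(per_gap * sample_rate), dtype=np.float32)
    pieces = []
    for i, chunk in enumerate(audio_chunks):
        pieces.append(chunk)
        if i < gaps:           # no silence after the final chunk
            pieces.append(silence)
    return np.concatenate(pieces)
```
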
def process_text_with_distributed_pauses(text: str, max_words: int = 50,
                                         pause_duration: float = 0.1) -> tuple:
    """Process text and distribute pauses throughout chunks based on line breaks.

    Args:
        text: Input text to process
        max_words: Maximum words per chunk
        pause_duration: Duration per line break in seconds

    Returns:
        tuple: (chunks_with_pauses, total_return_count, total_pause_duration)
    """
    # First, process text to understand pause requirements
    processed_text, return_count, total_pause_duration = process_text_for_pauses(text, pause_duration)

    # Split into lines to track where pauses should be
    lines = text.split('\n')
    chunks_with_pauses = []

    current_chunk = ""
    current_word_count = 0
    pauses_for_chunk = 0

    for i, line in enumerate(lines):
        line = line.strip()
        if not line:
            pauses_for_chunk += 1  # Empty line counts as a pause
            continue

        line_words = len(line.split())

        # If adding this line would exceed max_words, finalize the current chunk
        if current_word_count > 0 and current_word_count + line_words > max_words:
            if current_chunk.strip():
                chunks_with_pauses.append({
                    'text': current_chunk.strip(),
                    'pauses': pauses_for_chunk
                })
            current_chunk = line
            current_word_count = line_words
            pauses_for_chunk = 0
        else:
            current_chunk += " " + line if current_chunk else line
            current_word_count += line_words

        # Add a pause if this is not the last line
        if i < len(lines) - 1:
            pauses_for_chunk += 1

    # Add the last chunk if it exists
    if current_chunk.strip():
        chunks_with_pauses.append({
            'text': current_chunk.strip(),
            'pauses': pauses_for_chunk
        })

    return chunks_with_pauses, return_count, total_pause_duration

def map_line_breaks_to_chunks(original_text: str, chunks: List[str], pause_duration: float = 0.1) -> tuple:
    """Map line breaks from original text to corresponding chunks.

    Args:
        original_text: Original text with line breaks
        chunks: List of text chunks created by sentence chunking
        pause_duration: Duration per line break in seconds

    Returns:
        tuple: (chunk_pause_map, total_pause_duration)
            chunk_pause_map: Dict mapping chunk index to pause duration
            total_pause_duration: Total pause time across all chunks
    """
    import re

    chunk_pause_map = {}
    total_pause_duration = 0.0

    # Create a version of original text for matching (remove extra whitespace but keep structure)
    normalized_original = re.sub(r'\s+', ' ', original_text.replace('\n', ' ')).strip()

    # Track position in original text
    original_position = 0

    for chunk_idx, chunk in enumerate(chunks):
        chunk_normalized = chunk.strip()
        if not chunk_normalized:
            continue

        # Find this chunk in the original text
        chunk_start = normalized_original.find(chunk_normalized, original_position)
        if chunk_start == -1:
            # Fallback: try to find it without the position constraint
            chunk_start = normalized_original.find(chunk_normalized)

        if chunk_start == -1:
            # Can't find the chunk, so no pauses for this one
            continue

        chunk_end = chunk_start + len(chunk_normalized)

        # Count line breaks in the corresponding section of original text
        # by mapping back to the original text position
        orig_text_section_start = 0
        orig_text_section_end = len(original_text)

        # Find the section in original text that corresponds to this chunk
        words_before = len(normalized_original[:chunk_start].split())
        words_in_chunk = len(chunk_normalized.split())

        original_words = original_text.split()
        if words_before < len(original_words):
            # Find the start position in the original text
            words_section = ' '.join(original_words[words_before:words_before + words_in_chunk])
            section_start = original_text.find(words_section)
            if section_start != -1:
                section_end = section_start + len(words_section)
                # Count line breaks in this section and the gap after it (until the next chunk)
                next_chunk_start = section_end
                if chunk_idx < len(chunks) - 1:
                    next_chunk_text = chunks[chunk_idx + 1].strip()
                    next_chunk_pos = original_text.find(next_chunk_text, section_end)
                    if next_chunk_pos != -1:
                        next_chunk_start = next_chunk_pos

                # Count line breaks from the end of the current chunk to the start of the next
                gap_text = original_text[section_end:next_chunk_start]
                line_breaks = gap_text.count('\n')

                if line_breaks > 0:
                    pause_time = line_breaks * pause_duration
                    chunk_pause_map[chunk_idx] = pause_time
                    total_pause_duration += pause_time

        original_position = chunk_end

    return chunk_pause_map, total_pause_duration

def chunk_text_by_sentences_local(text, max_words=50):
    """Local copy of sentence chunking to avoid circular imports."""
    import re

    # Split into sentences using common sentence endings
    sentences = re.split(r'(?<=[.!?])\s+', text.strip())

    chunks = []
    current_chunk = ""
    current_word_count = 0

    for sentence in sentences:
        if not sentence.strip():
            continue

        sentence_words = len(sentence.split())

        # If adding this sentence would exceed max_words and we have content, start a new chunk
        if current_word_count > 0 and current_word_count + sentence_words > max_words:
            if current_chunk.strip():
                chunks.append(current_chunk.strip())
            current_chunk = sentence
            current_word_count = sentence_words
        else:
            current_chunk += " " + sentence if current_chunk else sentence
            current_word_count += sentence_words

    # Add the last chunk if it exists
    if current_chunk.strip():
        chunks.append(current_chunk.strip())

    return chunks

def chunk_text_with_line_break_priority(text: str, max_words: int = 50, pause_duration: float = 0.1) -> tuple:
    """Chunk text with line breaks taking priority over sentence breaks.

    This function first splits on line breaks, then applies sentence chunking
    within each line break segment if needed.

    Args:
        text: Input text with line breaks
        max_words: Maximum words per chunk
        pause_duration: Duration per line break in seconds

    Returns:
        tuple: (chunks_with_pauses, total_pause_duration)
            chunks_with_pauses: List of dicts with 'text' and 'pause_duration' keys
            total_pause_duration: Total pause time across all chunks
    """
    import re

    chunks_with_pauses = []
    total_pause_duration = 0.0

    # Split text by line breaks, keeping track of consecutive breaks
    line_segments = re.split(r'(\n+)', text)

    for i, segment in enumerate(line_segments):
        if not segment:
            continue

        # Check if this segment is line breaks
        if re.match(r'\n+', segment):
            # Count the number of line breaks for pause calculation
            line_break_count = segment.count('\n')
            pause_time = line_break_count * pause_duration

            # Add the pause to the previous chunk if it exists
            if chunks_with_pauses:
                chunks_with_pauses[-1]['pause_duration'] += pause_time
                total_pause_duration += pause_time
                print(f"🔇 Line breaks detected: +{pause_time:.1f}s pause (from {line_break_count} returns)")
            continue

        # This is actual text content - chunk it by sentences if needed
        text_content = segment.strip()
        if not text_content:
            continue

        # Apply sentence chunking to this segment
        text_chunks = chunk_text_by_sentences_local(text_content, max_words)

        # Add these chunks with an initial pause duration of 0
        for chunk in text_chunks:
            if chunk.strip():
                chunks_with_pauses.append({
                    'text': chunk.strip(),
                    'pause_duration': 0.0
                })

    return chunks_with_pauses, total_pause_duration

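A quick trace of the line-break-priority chunker (sample text invented; import path assumes `src/` on `sys.path`):

```python
from audiobook.text_processing import chunk_text_with_line_break_priority  # assumes src/ on sys.path

text = "First sentence here.\n\nSecond paragraph sentence."
chunks, pause_total = chunk_text_with_line_break_priority(text, max_words=50, pause_duration=0.1)
# chunks     -> [{'text': 'First sentence here.', 'pause_duration': 0.2},
#                {'text': 'Second paragraph sentence.', 'pause_duration': 0.0}]
# pause_total -> 0.2  (two newlines, attached to the chunk that precedes them)
```
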
def parse_multi_voice_text_local(text):
    """Local copy of multi-voice text parsing to avoid circular imports."""
    import re

    # Pattern to match [CharacterName] at the beginning of lines
    pattern = r'^\[([^\]]+)\]\s*(.*?)(?=^\[|\Z)'
    matches = re.findall(pattern, text, re.MULTILINE | re.DOTALL)

    if not matches:
        # If no voice tags are found, treat everything as a single narrator
        return [("Narrator", text.strip())]

    segments = []
    for character_name, content in matches:
        # Don't strip the content fully: newlines must be preserved for pause
        # processing, so only remove leading/trailing spaces and tabs
        content = content.rstrip(' \t').lstrip(' \t')
        if content:
            segments.append((character_name.strip(), content))

    return segments

def chunk_multi_voice_text_with_line_break_priority(text: str, max_words: int = 30, pause_duration: float = 0.1) -> tuple:
    """Chunk multi-voice text with line breaks taking priority over sentence breaks.

    Args:
        text: Input text with voice tags and line breaks
        max_words: Maximum words per chunk
        pause_duration: Duration per line break in seconds

    Returns:
        tuple: (segments_with_pauses, total_pause_duration)
            segments_with_pauses: List of dicts with 'voice', 'text', and 'pause_duration' keys
            total_pause_duration: Total pause time across all segments
    """
    import re

    # Debugging output for the input text
    print(f"🔍 DEBUG: chunk_multi_voice_text_with_line_break_priority input:")
    print(f"🔍 DEBUG: Input text length: {len(text)} characters")
    print(f"🔍 DEBUG: Line breaks in input: {text.count(chr(10))} \\n chars, {text.count(chr(13))} \\r chars")
    print(f"🔍 DEBUG: First 200 chars: {repr(text[:200])}")

    # NEW APPROACH: Process line breaks in the full text before voice parsing,
    # splitting the entire text by voice segments while preserving line breaks.
    segments_with_pauses = []
    total_pause_duration = 0.0

    # Find all voice segments with their positions, preserving everything in between
    voice_pattern = r'(\[([^\]]+)\]\s*)'
    split_parts = re.split(voice_pattern, text)

    print(f"🔍 DEBUG: Split text into {len(split_parts)} parts")
    for i, part in enumerate(split_parts):
        print(f"🔍 DEBUG: Part {i}: {repr(part[:50])}")

    current_voice = None

    i = 0
    while i < len(split_parts):
        part = split_parts[i]

        # Check if this part is a voice tag match
        if i + 2 < len(split_parts) and re.match(r'\[([^\]]+)\]\s*', part):
            # This is a voice tag; extract the voice name
            current_voice = split_parts[i + 1]  # The captured voice name
            print(f"🔍 DEBUG: Found voice tag: '{current_voice}'")

            # The content is in the next part after the voice tag and whitespace
            content_part = split_parts[i + 2] if i + 2 < len(split_parts) else ""

            # Process the content with line break awareness
            if content_part:
                processed_segments = process_voice_content_with_line_breaks(
                    current_voice, content_part, max_words, pause_duration
                )

                for segment in processed_segments:
                    segments_with_pauses.append(segment)
                    total_pause_duration += segment['pause_duration']

            i += 3  # Skip voice tag, voice name, and content
        else:
            # This is content between voice tags or before the first voice tag
            if current_voice and part.strip():
                # Content continuation for the current voice
                processed_segments = process_voice_content_with_line_breaks(
                    current_voice, part, max_words, pause_duration
                )

                for segment in processed_segments:
                    segments_with_pauses.append(segment)
                    total_pause_duration += segment['pause_duration']
            elif not current_voice and part.strip():
                # Content before any voice tag - treat as narrator
                processed_segments = process_voice_content_with_line_breaks(
                    "Narrator", part, max_words, pause_duration
                )

                for segment in processed_segments:
                    segments_with_pauses.append(segment)
                    total_pause_duration += segment['pause_duration']

            i += 1

    print(f"🔍 DEBUG: Final result: {len(segments_with_pauses)} segments, {total_pause_duration:.1f}s total pause time")

    return segments_with_pauses, total_pause_duration

def process_voice_content_with_line_breaks(voice_name: str, content: str, max_words: int, pause_duration: float) -> list:
    """Process voice content while preserving line breaks for pauses."""
    import re

    segments = []

    # Split content by line breaks, keeping the line breaks
    line_segments = re.split(r'(\n+)', content)

    print(f"🔍 DEBUG: Processing voice '{voice_name}' content split into {len(line_segments)} line segments")

    for i, line_segment in enumerate(line_segments):
        if not line_segment:
            continue

        # Check if this segment is line breaks
        if re.match(r'\n+', line_segment):
            # Count the number of line breaks for pause calculation
            line_break_count = line_segment.count('\n')
            pause_time = line_break_count * pause_duration

            print(f"🔍 DEBUG: Found {line_break_count} line breaks, calculating {pause_time:.1f}s pause")

            # Add the pause to the previous segment if it exists and has the same voice
            if segments and segments[-1]['voice'] == voice_name:
                segments[-1]['pause_duration'] += pause_time
                print(f"🔇 Line breaks detected in [{voice_name}]: +{pause_time:.1f}s pause (from {line_break_count} returns)")
            else:
                print(f"🔍 DEBUG: No previous segment to add pause to, or voice mismatch")
            continue

        # This is actual text content - chunk it by sentences if needed
        text_content = line_segment.strip()
        if not text_content:
            continue

        print(f"🔍 DEBUG: Processing text content: '{text_content[:50]}...'")

        # Apply sentence chunking to this segment
        text_chunks = chunk_text_by_sentences_local(text_content, max_words)

        print(f"🔍 DEBUG: chunk_text_by_sentences_local produced {len(text_chunks)} chunks")

        # Add these chunks with the voice assignment and an initial pause duration of 0
        for chunk in text_chunks:
            if chunk.strip():
                segments.append({
                    'voice': voice_name,
                    'text': chunk.strip(),
                    'pause_duration': 0.0
                })
                print(f"🔍 DEBUG: Added segment: voice='{voice_name}', text='{chunk.strip()[:30]}...', pause=0.0")

    return segments
src/audiobook/project_management.py ADDED
@@ -0,0 +1,656 @@
"""
Project management utilities for audiobook generation.

Handles project creation, loading, metadata, file organization, and project lifecycle.
"""

import os
import json
import shutil
import time
import numpy as np  # needed for the dB conversions in analyze_project_audio_quality
from pathlib import Path
from typing import List, Dict, Tuple, Optional, Any
from datetime import datetime

from .text_processing import chunk_text_by_sentences, parse_multi_voice_text, chunk_multi_voice_segments
from .audio_processing import save_audio_chunks, auto_remove_silence, normalize_audio_levels, analyze_audio_quality
from .voice_management import load_voice_for_tts, get_voice_config
from .models import generate_with_retry, load_model_cpu


# Constants
MAX_CHUNKS_FOR_INTERFACE = 100
MAX_CHUNKS_FOR_AUTO_SAVE = 100

+
|
| 26 |
+
def save_project_metadata(
|
| 27 |
+
project_dir: str,
|
| 28 |
+
project_name: str,
|
| 29 |
+
text_content: str,
|
| 30 |
+
voice_info: dict,
|
| 31 |
+
chunks: list,
|
| 32 |
+
project_type: str = "single_voice"
|
| 33 |
+
) -> None:
|
| 34 |
+
"""Save project metadata to JSON file.
|
| 35 |
+
|
| 36 |
+
Args:
|
| 37 |
+
project_dir: Project directory path
|
| 38 |
+
project_name: Name of the project
|
| 39 |
+
text_content: Original text content
|
| 40 |
+
voice_info: Voice configuration information
|
| 41 |
+
chunks: List of text chunks
|
| 42 |
+
project_type: Type of project (single_voice or multi_voice)
|
| 43 |
+
"""
|
| 44 |
+
metadata = {
|
| 45 |
+
'project_name': project_name,
|
| 46 |
+
'project_type': project_type,
|
| 47 |
+
'created_at': datetime.now().isoformat(),
|
| 48 |
+
'text_content': text_content,
|
| 49 |
+
'voice_info': voice_info,
|
| 50 |
+
'chunks': chunks,
|
| 51 |
+
'total_chunks': len(chunks),
|
| 52 |
+
'status': 'in_progress'
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
metadata_path = os.path.join(project_dir, 'metadata.json')
|
| 56 |
+
with open(metadata_path, 'w', encoding='utf-8') as f:
|
| 57 |
+
json.dump(metadata, f, indent=2)
|
| 58 |
+
|
| 59 |
+
|
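A round-trip sketch of the metadata helpers; the project name and voice info are hypothetical, and the import path assumes `src/` is on `sys.path`:

```python
import json
import tempfile
from audiobook.project_management import save_project_metadata, load_project_metadata

with tempfile.TemporaryDirectory() as d:
    save_project_metadata(d, "demo_book", "Hello world.",
                          {"voice_name": "narrator_1"}, ["Hello world."])
    print(json.dumps(load_project_metadata(d), indent=2))
# Prints the stored fields: project_name, project_type, created_at,
# text_content, voice_info, chunks, total_chunks, and status ("in_progress").
```
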
def load_project_metadata(project_dir: str) -> dict:
    """Load project metadata from JSON file.

    Args:
        project_dir: Project directory path

    Returns:
        Project metadata dictionary
    """
    metadata_path = os.path.join(project_dir, 'metadata.json')
    if os.path.exists(metadata_path):
        try:
            with open(metadata_path, 'r', encoding='utf-8') as f:
                return json.load(f)
        except Exception as e:
            print(f"Warning: Could not load metadata for {project_dir}: {e}")
    return {}

def get_existing_projects(output_dir: str = "audiobook_projects") -> List[Dict[str, Any]]:
    """Get list of existing audiobook projects.

    Args:
        output_dir: Directory containing projects

    Returns:
        List of project information dictionaries
    """
    projects = []

    if not os.path.exists(output_dir):
        return projects

    try:
        for item in os.listdir(output_dir):
            project_dir = os.path.join(output_dir, item)
            if os.path.isdir(project_dir):
                metadata = load_project_metadata(project_dir)

                if metadata:
                    # Use metadata information
                    project_info = {
                        'name': metadata.get('project_name', item),
                        'path': project_dir,
                        'type': metadata.get('project_type', 'unknown'),
                        'created_at': metadata.get('created_at', ''),
                        'total_chunks': metadata.get('total_chunks', 0),
                        'status': metadata.get('status', 'unknown')
                    }
                else:
                    # Fallback to directory scanning
                    audio_files = [f for f in os.listdir(project_dir) if f.endswith('.wav')]
                    project_info = {
                        'name': item,
                        'path': project_dir,
                        'type': 'legacy',
                        'created_at': '',
                        'total_chunks': len(audio_files),
                        'status': 'completed' if audio_files else 'empty'
                    }

                projects.append(project_info)

    except Exception as e:
        print(f"Warning: Error scanning projects directory: {e}")

    # Sort by creation date (newest first)
    def get_sort_key(project):
        created_at = project.get('created_at', '')
        if created_at:
            try:
                return datetime.fromisoformat(created_at)
            except ValueError:
                pass
        return datetime.min

    projects.sort(key=get_sort_key, reverse=True)
    return projects

def get_project_choices() -> List[str]:
    """Get project names for UI dropdowns.

    Returns:
        List of project names
    """
    projects = get_existing_projects()
    if not projects:
        return ["No projects found"]

    # Format: "project_name (type - chunks)"
    choices = []
    for project in projects:
        name = project['name']
        project_type = project['type']
        chunk_count = project['total_chunks']
        formatted = f"{name} ({project_type} - {chunk_count} chunks)"
        choices.append(formatted)

    return choices

def load_project_for_regeneration(project_name: str) -> Tuple[str, str, str, str]:
    """Load project data for the regeneration interface.

    Args:
        project_name: Name of the project to load

    Returns:
        tuple: (text_content, voice_name, project_type, status_message)
    """
    if not project_name or project_name == "No projects found":
        return "", "", "", "No project selected"

    # Extract the actual project name from the formatted string
    actual_name = project_name.split(' (')[0] if ' (' in project_name else project_name

    projects = get_existing_projects()
    project_info = None

    for project in projects:
        if project['name'] == actual_name:
            project_info = project
            break

    if not project_info:
        return "", "", "", f"❌ Project '{actual_name}' not found"

    # Load project metadata
    metadata = load_project_metadata(project_info['path'])

    if not metadata:
        return "", "", "", f"❌ Could not load project metadata for '{actual_name}'"

    text_content = metadata.get('text_content', '')
    voice_info = metadata.get('voice_info', {})
    project_type = metadata.get('project_type', 'single_voice')

    # Extract the voice name based on the project type
    if project_type == 'single_voice':
        voice_name = voice_info.get('voice_name', '')
    else:
        voice_name = 'Multi-voice project'

    return text_content, voice_name, project_type, f"✅ Loaded project '{actual_name}'"

def create_audiobook(
    model: Any,
    text_content: str,
    voice_library_path: str,
    selected_voice: str,
    project_name: str,
    resume: bool = False,
    autosave_interval: int = 10
) -> Tuple[str, List[str], str]:
    """Create a single-voice audiobook project.

    Args:
        model: TTS model instance
        text_content: Text to convert to audio
        voice_library_path: Path to voice library
        selected_voice: Name of selected voice
        project_name: Name for the project
        resume: Whether to resume existing project
        autosave_interval: Chunks between auto-saves

    Returns:
        tuple: (status_message, audio_file_paths, project_path)
    """
    if not model:
        model = load_model_cpu()

    # Load voice configuration
    audio_prompt_path, voice_config = load_voice_for_tts(voice_library_path, selected_voice)

    if not audio_prompt_path:
        return f"❌ Could not load voice '{selected_voice}'", [], ""

    # Get voice parameters
    exaggeration = voice_config.get('exaggeration', 1.0)
    temperature = voice_config.get('temperature', 0.7)
    cfg_weight = voice_config.get('cfg_weight', 1.0)

    # Chunk the text
    chunks = chunk_text_by_sentences(text_content, max_words=50)

    # Create project directory
    safe_project_name = "".join(c for c in project_name if c.isalnum() or c in (' ', '-', '_')).strip()
    safe_project_name = safe_project_name.replace(' ', '_')
    project_dir = os.path.join("audiobook_projects", safe_project_name)
    os.makedirs(project_dir, exist_ok=True)

    # Save project metadata
    voice_info = {
        'voice_name': selected_voice,
        'audio_prompt_path': audio_prompt_path,
        'exaggeration': exaggeration,
        'temperature': temperature,
        'cfg_weight': cfg_weight
    }

    save_project_metadata(project_dir, project_name, text_content, voice_info, chunks, "single_voice")

    # Generate audio for chunks
    audio_chunks = []
    generated_files = []

    try:
        for i, chunk in enumerate(chunks):
            print(f"Generating chunk {i+1}/{len(chunks)}")

            # Generate audio
            wav, device_used = generate_with_retry(
                model, chunk, audio_prompt_path, exaggeration, temperature, cfg_weight
            )

            # Convert to numpy array if needed
            if hasattr(wav, 'squeeze'):
                audio_array = wav.squeeze(0).numpy()
            else:
                audio_array = wav

            audio_chunks.append(audio_array)

            # Auto-save periodically. save_audio_chunks returns a
            # (file_paths, project_dir) tuple per the version defined in this
            # upload, so unpack it; note that file numbering restarts at 001
            # for each saved batch.
            if (i + 1) % autosave_interval == 0 or i == len(chunks) - 1:
                batch_files, _ = save_audio_chunks(
                    audio_chunks, model.sr, safe_project_name, "audiobook_projects"
                )
                generated_files.extend(batch_files)
                audio_chunks = []  # Reset for the next batch

        # Update metadata to completed
        metadata = load_project_metadata(project_dir)
        metadata['status'] = 'completed'
        metadata['completed_at'] = datetime.now().isoformat()

        metadata_path = os.path.join(project_dir, 'metadata.json')
        with open(metadata_path, 'w', encoding='utf-8') as f:
            json.dump(metadata, f, indent=2)

        return f"✅ Audiobook '{project_name}' created successfully! Generated {len(chunks)} audio chunks.", generated_files, project_dir

    except Exception as e:
        return f"❌ Error creating audiobook: {str(e)}", generated_files, project_dir

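A hedged usage sketch of `create_audiobook`; the voice name, text, and the availability of a loadable CPU model are all assumptions about the local setup:

```python
from audiobook.project_management import create_audiobook  # assumes src/ on sys.path

status, files, project_dir = create_audiobook(
    model=None,                        # None -> the function loads a CPU model itself
    text_content="Some narration text. " * 20,
    voice_library_path="voice_library",
    selected_voice="narrator_1",       # hypothetical voice profile name
    project_name="demo_book",
)
print(status)       # success or error message
print(project_dir)  # e.g. "audiobook_projects/demo_book"
```
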
def create_multi_voice_audiobook_with_assignments(
    model: Any,
    text_content: str,
    voice_library_path: str,
    project_name: str,
    voice_assignments: Dict[str, str],
    resume: bool = False,
    autosave_interval: int = 10
) -> Tuple[str, List[str], str]:
    """Create a multi-voice audiobook project with character voice assignments.

    Args:
        model: TTS model instance
        text_content: Text with character markers
        voice_library_path: Path to voice library
        project_name: Name for the project
        voice_assignments: Character to voice mappings
        resume: Whether to resume existing project
        autosave_interval: Chunks between auto-saves

    Returns:
        tuple: (status_message, audio_file_paths, project_path)
    """
    if not model:
        model = load_model_cpu()

    # Parse multi-voice text
    segments = parse_multi_voice_text(text_content)
    chunked_segments = chunk_multi_voice_segments(segments, max_words=50)

    # Create project directory
    safe_project_name = "".join(c for c in project_name if c.isalnum() or c in (' ', '-', '_')).strip()
    safe_project_name = safe_project_name.replace(' ', '_')
    project_dir = os.path.join("audiobook_projects", safe_project_name)
    os.makedirs(project_dir, exist_ok=True)

    # Save project metadata
    voice_info = {
        'voice_assignments': voice_assignments,
        'characters': list(voice_assignments.keys())
    }

    save_project_metadata(project_dir, project_name, text_content, voice_info, chunked_segments, "multi_voice")

    # Generate audio for segments
    audio_chunks = []
    generated_files = []

    try:
        for i, segment in enumerate(chunked_segments):
            character = segment['character']
            text = segment['text']

            # Get the assigned voice for this character
            assigned_voice = voice_assignments.get(character)
            if not assigned_voice:
                print(f"Warning: No voice assigned for character '{character}', skipping segment")
                continue

            # Load voice configuration
            audio_prompt_path, voice_config = load_voice_for_tts(voice_library_path, assigned_voice)

            if not audio_prompt_path:
                print(f"Warning: Could not load voice '{assigned_voice}' for character '{character}'")
                continue

            print(f"Generating segment {i+1}/{len(chunked_segments)} - {character}: {text[:50]}...")

            # Generate audio
            wav, device_used = generate_with_retry(
                model, text, audio_prompt_path,
                voice_config.get('exaggeration', 1.0),
                voice_config.get('temperature', 0.7),
                voice_config.get('cfg_weight', 1.0)
            )

            # Convert to numpy array if needed
            if hasattr(wav, 'squeeze'):
                audio_array = wav.squeeze(0).numpy()
            else:
                audio_array = wav

            audio_chunks.append(audio_array)

            # Auto-save periodically (save_audio_chunks returns a
            # (file_paths, project_dir) tuple, so unpack it)
            if (i + 1) % autosave_interval == 0 or i == len(chunked_segments) - 1:
                batch_files, _ = save_audio_chunks(
                    audio_chunks, model.sr, safe_project_name, "audiobook_projects"
                )
                generated_files.extend(batch_files)
                audio_chunks = []  # Reset for the next batch

        # Update metadata to completed
        metadata = load_project_metadata(project_dir)
        metadata['status'] = 'completed'
        metadata['completed_at'] = datetime.now().isoformat()

        metadata_path = os.path.join(project_dir, 'metadata.json')
        with open(metadata_path, 'w', encoding='utf-8') as f:
            json.dump(metadata, f, indent=2)

        return f"✅ Multi-voice audiobook '{project_name}' created successfully! Generated {len(chunked_segments)} audio segments.", generated_files, project_dir

    except Exception as e:
        return f"❌ Error creating multi-voice audiobook: {str(e)}", generated_files, project_dir

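The `voice_assignments` mapping keys character names (as they appear in `[Name]:` tags) to voice profile names. An illustrative call, with made-up profile names:

```python
from audiobook.project_management import create_multi_voice_audiobook_with_assignments

voice_assignments = {
    "Alice": "female_warm",   # hypothetical voice profile names
    "Bob": "male_deep",
    "Narrator": "narrator_1",
}
status, files, project_dir = create_multi_voice_audiobook_with_assignments(
    model=None,
    text_content="[Alice]: Hello.\n[Bob]: Hi, Alice.",
    voice_library_path="voice_library",
    project_name="two_voice_demo",
    voice_assignments=voice_assignments,
)
print(status)
```
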
def cleanup_project_temp_files(project_name: str) -> str:
    """Clean up temporary files for a project.

    Args:
        project_name: Name of the project

    Returns:
        Status message
    """
    if not project_name:
        return "❌ No project specified"

    # Extract the actual project name
    actual_name = project_name.split(' (')[0] if ' (' in project_name else project_name
    safe_name = "".join(c for c in actual_name if c.isalnum() or c in (' ', '-', '_')).strip()
    safe_name = safe_name.replace(' ', '_')

    project_dir = os.path.join("audiobook_projects", safe_name)

    if not os.path.exists(project_dir):
        return f"❌ Project directory not found: {safe_name}"

    try:
        temp_files = []
        for file in os.listdir(project_dir):
            if 'temp' in file.lower() or 'trimmed' in file.lower():
                temp_files.append(os.path.join(project_dir, file))

        for temp_file in temp_files:
            if os.path.exists(temp_file):
                os.remove(temp_file)

        return f"✅ Cleaned up {len(temp_files)} temporary files for project '{actual_name}'"

    except Exception as e:
        return f"❌ Error cleaning up project files: {str(e)}"

def auto_clean_project_audio(
    project_name: str,
    silence_threshold: float = -50.0,
    min_silence_duration: float = 0.5
) -> str:
    """Automatically clean audio for all chunks in a project.

    Args:
        project_name: Name of the project
        silence_threshold: Silence threshold in dB
        min_silence_duration: Minimum silence duration to remove

    Returns:
        Status message
    """
    import re

    if not project_name:
        return "❌ No project specified"

    # Extract the actual project name
    actual_name = project_name.split(' (')[0] if ' (' in project_name else project_name
    safe_name = "".join(c for c in actual_name if c.isalnum() or c in (' ', '-', '_')).strip()
    safe_name = safe_name.replace(' ', '_')

    project_dir = os.path.join("audiobook_projects", safe_name)

    if not os.path.exists(project_dir):
        return f"❌ Project directory not found: {safe_name}"

    try:
        # Get all WAV files in the project, skipping already-cleaned and temp files
        audio_files = [f for f in os.listdir(project_dir)
                       if f.endswith('.wav') and 'cleaned' not in f.lower() and 'temp' not in f.lower()]

        if not audio_files:
            return f"❌ No audio files found in project '{actual_name}'"

        cleaned_count = 0
        failed_count = 0
        total_time_saved = 0.0

        for audio_file in audio_files:
            file_path = os.path.join(project_dir, audio_file)

            # Clean the audio
            status_msg, cleaned_path = auto_remove_silence(
                file_path, silence_threshold, min_silence_duration
            )

            if "✅" in status_msg:
                cleaned_count += 1
                # Extract the time saved by parsing "Removed X.XXs" from the status message
                if "Removed" in status_msg:
                    match = re.search(r'Removed (\d+\.?\d*)s', status_msg)
                    if match:
                        total_time_saved += float(match.group(1))
            else:
                failed_count += 1
                print(f"Failed to clean {audio_file}: {status_msg}")

        if cleaned_count > 0:
            return (
                f"✅ Auto-cleaned {cleaned_count}/{len(audio_files)} audio files for project '{actual_name}'. "
                f"Total silence removed: {total_time_saved:.2f}s. "
                f"Failed: {failed_count}"
            )
        else:
            return f"❌ Failed to clean any audio files for project '{actual_name}'"

    except Exception as e:
        return f"❌ Error auto-cleaning project audio: {str(e)}"

def analyze_project_audio_quality(project_name: str) -> str:
    """Analyze audio quality for all chunks in a project.

    Args:
        project_name: Name of the project

    Returns:
        Analysis results
    """
    if not project_name:
        return "❌ No project specified"

    # Extract the actual project name
    actual_name = project_name.split(' (')[0] if ' (' in project_name else project_name
    safe_name = "".join(c for c in actual_name if c.isalnum() or c in (' ', '-', '_')).strip()
    safe_name = safe_name.replace(' ', '_')

    project_dir = os.path.join("audiobook_projects", safe_name)

    if not os.path.exists(project_dir):
        return f"❌ Project directory not found: {safe_name}"

    try:
        # Get all WAV files in the project
        audio_files = [f for f in os.listdir(project_dir)
                       if f.endswith('.wav') and 'temp' not in f.lower()]

        if not audio_files:
            return f"❌ No audio files found in project '{actual_name}'"

        total_duration = 0.0
        total_rms = 0.0
        peak_levels = []
        analyzed_count = 0

        for audio_file in audio_files:
            file_path = os.path.join(project_dir, audio_file)

            # Analyze the audio
            metrics = analyze_audio_quality(file_path)

            if 'error' not in metrics:
                total_duration += metrics.get('duration', 0)
                total_rms += metrics.get('rms_level', 0)
                peak_levels.append(metrics.get('peak_level', 0))
                analyzed_count += 1

        if analyzed_count > 0:
            avg_rms = total_rms / analyzed_count
            max_peak = max(peak_levels) if peak_levels else 0
            avg_peak = sum(peak_levels) / len(peak_levels) if peak_levels else 0

            # Convert to dB
            avg_rms_db = 20 * np.log10(avg_rms) if avg_rms > 0 else -np.inf
            max_peak_db = 20 * np.log10(max_peak) if max_peak > 0 else -np.inf
            avg_peak_db = 20 * np.log10(avg_peak) if avg_peak > 0 else -np.inf

            return (
                f"📊 Audio Quality Analysis for '{actual_name}':\n"
                f"• Files analyzed: {analyzed_count}/{len(audio_files)}\n"
                f"• Total duration: {total_duration:.2f} seconds\n"
                f"• Average RMS level: {avg_rms_db:.1f} dB\n"
                f"• Average peak level: {avg_peak_db:.1f} dB\n"
                f"• Maximum peak level: {max_peak_db:.1f} dB\n"
                f"• Recommended: Keep peaks below -3 dB for headroom"
            )
        else:
            return f"❌ Failed to analyze any audio files for project '{actual_name}'"

    except Exception as e:
        return f"❌ Error analyzing project audio quality: {str(e)}"

| 605 |
+
def get_project_chunks(project_name: str) -> List[Dict[str, Any]]:
|
| 606 |
+
"""Get list of audio chunks for a project.
|
| 607 |
+
|
| 608 |
+
Args:
|
| 609 |
+
project_name: Name of the project
|
| 610 |
+
|
| 611 |
+
Returns:
|
| 612 |
+
List of chunk information dictionaries
|
| 613 |
+
"""
|
| 614 |
+
if not project_name or project_name == "No projects found":
|
| 615 |
+
return []
|
| 616 |
+
|
| 617 |
+
# Extract actual project name
|
| 618 |
+
actual_name = project_name.split(' (')[0] if ' (' in project_name else project_name
|
| 619 |
+
safe_name = "".join(c for c in actual_name if c.isalnum() or c in (' ', '-', '_')).strip()
|
| 620 |
+
safe_name = safe_name.replace(' ', '_')
|
| 621 |
+
|
| 622 |
+
project_dir = os.path.join("audiobook_projects", safe_name)
|
| 623 |
+
|
| 624 |
+
if not os.path.exists(project_dir):
|
| 625 |
+
return []
|
| 626 |
+
|
| 627 |
+
try:
|
| 628 |
+
chunks = []
|
| 629 |
+
audio_files = [f for f in os.listdir(project_dir) if f.endswith('.wav') and not 'temp' in f.lower()]
|
| 630 |
+
|
| 631 |
+
# Sort files by chunk number
|
| 632 |
+
def extract_chunk_num_from_filename(filename: str) -> int:
|
| 633 |
+
# Extract number from filename like "project_001.wav"
|
| 634 |
+
parts = filename.replace('.wav', '').split('_')
|
| 635 |
+
for part in reversed(parts):
|
| 636 |
+
if part.isdigit():
|
| 637 |
+
return int(part)
|
| 638 |
+
return 0
|
| 639 |
+
|
| 640 |
+
audio_files.sort(key=extract_chunk_num_from_filename)
|
| 641 |
+
|
| 642 |
+
for i, filename in enumerate(audio_files):
|
| 643 |
+
file_path = os.path.join(project_dir, filename)
|
| 644 |
+
chunk_info = {
|
| 645 |
+
'chunk_num': i + 1,
|
| 646 |
+
'filename': filename,
|
| 647 |
+
'file_path': file_path,
|
| 648 |
+
'size': os.path.getsize(file_path) if os.path.exists(file_path) else 0
|
| 649 |
+
}
|
| 650 |
+
chunks.append(chunk_info)
|
| 651 |
+
|
| 652 |
+
return chunks
|
| 653 |
+
|
| 654 |
+
except Exception as e:
|
| 655 |
+
print(f"Error getting project chunks: {e}")
|
| 656 |
+
return []
|
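For orientation, a minimal usage sketch of the chunk listing above. The import path mirrors the file location in this upload and the project name "My Book" is a made-up example, not a shipped default.

# Hypothetical usage sketch (assumed import path and project name).
from src.audiobook.project_management import get_project_chunks

chunks = get_project_chunks("My Book")
for chunk in chunks:
    print(f"#{chunk['chunk_num']:03d}  {chunk['filename']}  {chunk['size'] / 1024:.1f} KiB")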
src/audiobook/voice_management.py
ADDED
@@ -0,0 +1,332 @@
"""
Voice management utilities for audiobook generation.

Handles voice profile CRUD operations, voice library management, and voice selection.
"""

import os
import json
import shutil
from pathlib import Path
from typing import List, Dict, Tuple, Optional, Any


def ensure_voice_library_exists(voice_library_path: str) -> None:
    """Ensure the voice library directory exists.

    Args:
        voice_library_path: Path to voice library directory
    """
    os.makedirs(voice_library_path, exist_ok=True)


def get_voice_profiles(voice_library_path: str) -> List[Dict[str, Any]]:
    """Get all voice profiles from the voice library.

    Args:
        voice_library_path: Path to voice library directory

    Returns:
        List of voice profile dictionaries
    """
    ensure_voice_library_exists(voice_library_path)
    profiles = []

    try:
        for item in os.listdir(voice_library_path):
            profile_dir = os.path.join(voice_library_path, item)
            if os.path.isdir(profile_dir):
                config_file = os.path.join(profile_dir, "config.json")
                if os.path.exists(config_file):
                    try:
                        with open(config_file, 'r', encoding='utf-8') as f:
                            profile = json.load(f)
                            profile['voice_name'] = item
                            profiles.append(profile)
                    except Exception as e:
                        print(f"Warning: Could not load profile {item}: {e}")
    except Exception as e:
        print(f"Warning: Could not read voice library: {e}")

    return profiles


def get_voice_choices(voice_library_path: str) -> List[str]:
    """Get list of available voice names for UI dropdowns.

    Args:
        voice_library_path: Path to voice library directory

    Returns:
        List of voice names
    """
    profiles = get_voice_profiles(voice_library_path)
    return [profile['voice_name'] for profile in profiles]


def get_audiobook_voice_choices(voice_library_path: str) -> List[str]:
    """Get voice choices formatted for audiobook interface.

    Args:
        voice_library_path: Path to voice library directory

    Returns:
        List of voice names with display formatting
    """
    choices = get_voice_choices(voice_library_path)
    if not choices:
        return ["No voices available - Please add voices first"]
    return choices


def get_voice_config(voice_library_path: str, voice_name: str) -> Dict[str, Any]:
    """Get configuration for a specific voice.

    Args:
        voice_library_path: Path to voice library directory
        voice_name: Name of the voice

    Returns:
        Voice configuration dictionary
    """
    profile_dir = os.path.join(voice_library_path, voice_name)
    config_file = os.path.join(profile_dir, "config.json")

    default_config = {
        'voice_name': voice_name,
        'display_name': voice_name,
        'description': '',
        'exaggeration': 1.0,
        'cfg_weight': 1.0,
        'temperature': 0.7
    }

    if os.path.exists(config_file):
        try:
            with open(config_file, 'r', encoding='utf-8') as f:
                config = json.load(f)
                return {**default_config, **config}
        except Exception as e:
            print(f"Warning: Could not load config for {voice_name}: {e}")

    return default_config


def load_voice_for_tts(voice_library_path: str, voice_name: str) -> Tuple[Optional[str], Dict[str, Any]]:
    """Load voice audio file and configuration for TTS generation.

    Args:
        voice_library_path: Path to voice library directory
        voice_name: Name of the voice to load

    Returns:
        tuple: (audio_file_path, voice_config)
    """
    if not voice_name:
        return None, {}

    profile_dir = os.path.join(voice_library_path, voice_name)
    if not os.path.exists(profile_dir):
        return None, {}

    # Look for audio file
    audio_file = None
    for ext in ['.wav', '.mp3', '.flac']:
        potential_file = os.path.join(profile_dir, f"voice{ext}")
        if os.path.exists(potential_file):
            audio_file = potential_file
            break

    # Get voice configuration
    config = get_voice_config(voice_library_path, voice_name)

    return audio_file, config


def save_voice_profile(
    voice_library_path: str,
    voice_name: str,
    display_name: str,
    description: str,
    audio_file: Any,
    exaggeration: float,
    cfg_weight: float,
    temperature: float
) -> str:
    """Save a new voice profile to the library.

    Args:
        voice_library_path: Path to voice library directory
        voice_name: Internal voice name (used for directory)
        display_name: Display name for UI
        description: Voice description
        audio_file: Audio file data from Gradio
        exaggeration: Exaggeration parameter
        cfg_weight: CFG weight parameter
        temperature: Temperature parameter

    Returns:
        Status message
    """
    if not voice_name.strip():
        return "❌ Voice name cannot be empty"

    # Sanitize voice name for directory
    safe_voice_name = "".join(c for c in voice_name if c.isalnum() or c in (' ', '-', '_')).strip()
    safe_voice_name = safe_voice_name.replace(' ', '_')

    if not safe_voice_name:
        return "❌ Voice name contains only invalid characters"

    ensure_voice_library_exists(voice_library_path)

    profile_dir = os.path.join(voice_library_path, safe_voice_name)
    os.makedirs(profile_dir, exist_ok=True)

    try:
        # Save audio file
        if audio_file is not None:
            audio_path = os.path.join(profile_dir, "voice.wav")
            if isinstance(audio_file, str):
                # File path provided
                shutil.copy2(audio_file, audio_path)
            elif hasattr(audio_file, 'name'):
                # Gradio file object
                shutil.copy2(audio_file.name, audio_path)
            else:
                return "❌ Invalid audio file format"

        # Save configuration
        config = {
            'voice_name': safe_voice_name,
            'display_name': display_name or safe_voice_name,
            'description': description or '',
            'exaggeration': float(exaggeration),
            'cfg_weight': float(cfg_weight),
            'temperature': float(temperature)
        }

        config_path = os.path.join(profile_dir, "config.json")
        with open(config_path, 'w', encoding='utf-8') as f:
            json.dump(config, f, indent=2)

        return f"✅ Voice profile '{display_name}' saved successfully"

    except Exception as e:
        return f"❌ Error saving voice profile: {str(e)}"


def load_voice_profile(voice_library_path: str, voice_name: str) -> Tuple[str, str, str, float, float, float]:
    """Load voice profile data for editing.

    Args:
        voice_library_path: Path to voice library directory
        voice_name: Name of voice to load

    Returns:
        tuple: (display_name, description, audio_path, exaggeration, cfg_weight, temperature)
    """
    if not voice_name:
        return "", "", "", 1.0, 1.0, 0.7

    config = get_voice_config(voice_library_path, voice_name)
    audio_file, _ = load_voice_for_tts(voice_library_path, voice_name)

    return (
        config.get('display_name', voice_name),
        config.get('description', ''),
        audio_file or "",
        config.get('exaggeration', 1.0),
        config.get('cfg_weight', 1.0),
        config.get('temperature', 0.7)
    )


def delete_voice_profile(voice_library_path: str, voice_name: str) -> str:
    """Delete a voice profile from the library.

    Args:
        voice_library_path: Path to voice library directory
        voice_name: Name of voice to delete

    Returns:
        Status message
    """
    if not voice_name:
        return "❌ No voice selected for deletion"

    profile_dir = os.path.join(voice_library_path, voice_name)

    if not os.path.exists(profile_dir):
        return f"❌ Voice profile '{voice_name}' not found"

    try:
        shutil.rmtree(profile_dir)
        return f"✅ Voice profile '{voice_name}' deleted successfully"
    except Exception as e:
        return f"❌ Error deleting voice profile: {str(e)}"


def refresh_voice_list(voice_library_path: str) -> List[str]:
    """Refresh and return the current voice list.

    Args:
        voice_library_path: Path to voice library directory

    Returns:
        Updated list of voice names
    """
    return get_voice_choices(voice_library_path)


def refresh_voice_choices(voice_library_path: str) -> List[str]:
    """Refresh voice choices for regular dropdowns.

    Args:
        voice_library_path: Path to voice library directory

    Returns:
        Updated list of voice choices
    """
    return get_voice_choices(voice_library_path)


def refresh_audiobook_voice_choices(voice_library_path: str) -> List[str]:
    """Refresh voice choices for audiobook interface.

    Args:
        voice_library_path: Path to voice library directory

    Returns:
        Updated list of audiobook voice choices
    """
    return get_audiobook_voice_choices(voice_library_path)


def create_assignment_interface_with_dropdowns(
    voice_counts: Dict[str, int],
    voice_library_path: str
) -> List[Any]:
    """Create voice assignment interface components.

    Args:
        voice_counts: Dictionary mapping character names to word counts
        voice_library_path: Path to voice library directory

    Returns:
        List of interface components
    """
    # This would typically return Gradio components
    # For now, return character names and available voices
    characters = list(voice_counts.keys())
    available_voices = get_voice_choices(voice_library_path)

    # Return data that can be used to create dropdowns
    return [
        {
            'character': char,
            'word_count': voice_counts[char],
            'available_voices': available_voices
        }
        for char in characters[:6]  # Limit to 6 characters
    ]
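A short sketch of a round trip through the helpers above. The library path "voice_library" and the voice name "narrator" are placeholder values for illustration; the import path mirrors the file location in this upload.

# Hypothetical round trip through the voice library helpers (placeholder paths/names).
from src.audiobook.voice_management import (
    save_voice_profile, load_voice_for_tts, get_voice_choices,
)

status = save_voice_profile(
    voice_library_path="voice_library",
    voice_name="narrator",
    display_name="Narrator",
    description="Calm narrator voice",
    audio_file="samples/narrator.wav",   # a plain file path is accepted alongside Gradio file objects
    exaggeration=0.5,
    cfg_weight=0.5,
    temperature=0.8,
)
print(status)
print(get_voice_choices("voice_library"))            # -> ['narrator']
audio_path, config = load_voice_for_tts("voice_library", "narrator")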
src/chatterbox/__init__.py
ADDED
@@ -0,0 +1,2 @@
from .tts import ChatterboxTTS
from .vc import ChatterboxVC
src/chatterbox/models/s3gen/__init__.py
ADDED
@@ -0,0 +1,2 @@
from .s3gen import S3Token2Wav as S3Gen
from .const import S3GEN_SR
src/chatterbox/models/s3gen/const.py
ADDED
@@ -0,0 +1 @@
S3GEN_SR = 24000
src/chatterbox/models/s3gen/decoder.py
ADDED
@@ -0,0 +1,317 @@
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import pack, rearrange, repeat

from .utils.mask import add_optional_chunk_mask
from .matcha.decoder import SinusoidalPosEmb, Block1D, ResnetBlock1D, Downsample1D, \
    TimestepEmbedding, Upsample1D
from .matcha.transformer import BasicTransformerBlock


def mask_to_bias(mask: torch.Tensor, dtype: torch.dtype) -> torch.Tensor:
    assert mask.dtype == torch.bool
    assert dtype in [torch.float32, torch.bfloat16, torch.float16]
    mask = mask.to(dtype)
    # attention mask bias
    # NOTE(Mddct): torch.finfo jit issues
    #     chunk_masks = (1.0 - chunk_masks) * torch.finfo(dtype).min
    mask = (1.0 - mask) * -1.0e+10
    return mask


class Transpose(torch.nn.Module):
    def __init__(self, dim0: int, dim1: int):
        super().__init__()
        self.dim0 = dim0
        self.dim1 = dim1

    def forward(self, x: torch.Tensor):
        x = torch.transpose(x, self.dim0, self.dim1)
        return x


class CausalBlock1D(Block1D):
    def __init__(self, dim: int, dim_out: int):
        super(CausalBlock1D, self).__init__(dim, dim_out)
        self.block = torch.nn.Sequential(
            CausalConv1d(dim, dim_out, 3),
            Transpose(1, 2),
            nn.LayerNorm(dim_out),
            Transpose(1, 2),
            nn.Mish(),
        )

    def forward(self, x: torch.Tensor, mask: torch.Tensor):
        output = self.block(x * mask)
        return output * mask


class CausalResnetBlock1D(ResnetBlock1D):
    def __init__(self, dim: int, dim_out: int, time_emb_dim: int, groups: int = 8):
        super(CausalResnetBlock1D, self).__init__(dim, dim_out, time_emb_dim, groups)
        self.block1 = CausalBlock1D(dim, dim_out)
        self.block2 = CausalBlock1D(dim_out, dim_out)


class CausalConv1d(torch.nn.Conv1d):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        dilation: int = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros',
        device=None,
        dtype=None
    ) -> None:
        super(CausalConv1d, self).__init__(in_channels, out_channels,
                                           kernel_size, stride,
                                           padding=0, dilation=dilation,
                                           groups=groups, bias=bias,
                                           padding_mode=padding_mode,
                                           device=device, dtype=dtype)
        assert stride == 1
        self.causal_padding = (kernel_size - 1, 0)

    def forward(self, x: torch.Tensor):
        x = F.pad(x, self.causal_padding)
        x = super(CausalConv1d, self).forward(x)
        return x


class ConditionalDecoder(nn.Module):
    def __init__(
        self,
        in_channels=320,
        out_channels=80,
        causal=True,
        channels=[256],
        dropout=0.0,
        attention_head_dim=64,
        n_blocks=4,
        num_mid_blocks=12,
        num_heads=8,
        act_fn="gelu",
    ):
        """
        This decoder requires an input with the same shape as the target. So, if your text content
        is shorter or longer than the outputs, please resample it before feeding it to the decoder.
        """
        super().__init__()
        channels = tuple(channels)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.causal = causal
        self.time_embeddings = SinusoidalPosEmb(in_channels)
        time_embed_dim = channels[0] * 4
        self.time_mlp = TimestepEmbedding(
            in_channels=in_channels,
            time_embed_dim=time_embed_dim,
            act_fn="silu",
        )
        self.down_blocks = nn.ModuleList([])
        self.mid_blocks = nn.ModuleList([])
        self.up_blocks = nn.ModuleList([])

        # NOTE jrm: `static_chunk_size` is missing?
        self.static_chunk_size = 0

        output_channel = in_channels
        for i in range(len(channels)):  # pylint: disable=consider-using-enumerate
            input_channel = output_channel
            output_channel = channels[i]
            is_last = i == len(channels) - 1
            resnet = CausalResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim) if self.causal else \
                ResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim)
            transformer_blocks = nn.ModuleList(
                [
                    BasicTransformerBlock(
                        dim=output_channel,
                        num_attention_heads=num_heads,
                        attention_head_dim=attention_head_dim,
                        dropout=dropout,
                        activation_fn=act_fn,
                    )
                    for _ in range(n_blocks)
                ]
            )
            downsample = (
                Downsample1D(output_channel) if not is_last else
                CausalConv1d(output_channel, output_channel, 3) if self.causal else nn.Conv1d(output_channel, output_channel, 3, padding=1)
            )
            self.down_blocks.append(nn.ModuleList([resnet, transformer_blocks, downsample]))

        for _ in range(num_mid_blocks):
            input_channel = channels[-1]
            out_channels = channels[-1]
            resnet = CausalResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim) if self.causal else \
                ResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim)

            transformer_blocks = nn.ModuleList(
                [
                    BasicTransformerBlock(
                        dim=output_channel,
                        num_attention_heads=num_heads,
                        attention_head_dim=attention_head_dim,
                        dropout=dropout,
                        activation_fn=act_fn,
                    )
                    for _ in range(n_blocks)
                ]
            )

            self.mid_blocks.append(nn.ModuleList([resnet, transformer_blocks]))

        channels = channels[::-1] + (channels[0],)
        for i in range(len(channels) - 1):
            input_channel = channels[i] * 2
            output_channel = channels[i + 1]
            is_last = i == len(channels) - 2
            resnet = CausalResnetBlock1D(
                dim=input_channel,
                dim_out=output_channel,
                time_emb_dim=time_embed_dim,
            ) if self.causal else ResnetBlock1D(
                dim=input_channel,
                dim_out=output_channel,
                time_emb_dim=time_embed_dim,
            )
            transformer_blocks = nn.ModuleList(
                [
                    BasicTransformerBlock(
                        dim=output_channel,
                        num_attention_heads=num_heads,
                        attention_head_dim=attention_head_dim,
                        dropout=dropout,
                        activation_fn=act_fn,
                    )
                    for _ in range(n_blocks)
                ]
            )
            upsample = (
                Upsample1D(output_channel, use_conv_transpose=True)
                if not is_last
                else CausalConv1d(output_channel, output_channel, 3) if self.causal else nn.Conv1d(output_channel, output_channel, 3, padding=1)
            )
            self.up_blocks.append(nn.ModuleList([resnet, transformer_blocks, upsample]))
        self.final_block = CausalBlock1D(channels[-1], channels[-1]) if self.causal else Block1D(channels[-1], channels[-1])
        self.final_proj = nn.Conv1d(channels[-1], self.out_channels, 1)
        self.initialize_weights()

    def initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, nonlinearity="relu")
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.GroupNorm):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, nonlinearity="relu")
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def forward(self, x, mask, mu, t, spks=None, cond=None):
        """Forward pass of the UNet1DConditional model.

        Args:
            x (torch.Tensor): shape (batch_size, in_channels, time)
            mask (_type_): shape (batch_size, 1, time)
            t (_type_): shape (batch_size)
            spks (_type_, optional): shape: (batch_size, condition_channels). Defaults to None.
            cond (_type_, optional): placeholder for future use. Defaults to None.

        Raises:
            ValueError: _description_
            ValueError: _description_

        Returns:
            _type_: _description_
        """

        t = self.time_embeddings(t).to(t.dtype)
        t = self.time_mlp(t)

        x = pack([x, mu], "b * t")[0]

        if spks is not None:
            spks = repeat(spks, "b c -> b c t", t=x.shape[-1])
            x = pack([x, spks], "b * t")[0]
        if cond is not None:
            x = pack([x, cond], "b * t")[0]

        hiddens = []
        masks = [mask]
        for resnet, transformer_blocks, downsample in self.down_blocks:
            mask_down = masks[-1]
            x = resnet(x, mask_down, t)
            x = rearrange(x, "b c t -> b t c").contiguous()
            # attn_mask = torch.matmul(mask_down.transpose(1, 2).contiguous(), mask_down)
            attn_mask = add_optional_chunk_mask(x, mask_down.bool(), False, False, 0, self.static_chunk_size, -1)
            attn_mask = mask_to_bias(attn_mask == 1, x.dtype)
            for transformer_block in transformer_blocks:
                x = transformer_block(
                    hidden_states=x,
                    attention_mask=attn_mask,
                    timestep=t,
                )
            x = rearrange(x, "b t c -> b c t").contiguous()
            hiddens.append(x)  # Save hidden states for skip connections
            x = downsample(x * mask_down)
            masks.append(mask_down[:, :, ::2])
        masks = masks[:-1]
        mask_mid = masks[-1]

        for resnet, transformer_blocks in self.mid_blocks:
            x = resnet(x, mask_mid, t)
            x = rearrange(x, "b c t -> b t c").contiguous()
            # attn_mask = torch.matmul(mask_mid.transpose(1, 2).contiguous(), mask_mid)
            attn_mask = add_optional_chunk_mask(x, mask_mid.bool(), False, False, 0, self.static_chunk_size, -1)
            attn_mask = mask_to_bias(attn_mask == 1, x.dtype)
            for transformer_block in transformer_blocks:
                x = transformer_block(
                    hidden_states=x,
                    attention_mask=attn_mask,
                    timestep=t,
                )
            x = rearrange(x, "b t c -> b c t").contiguous()

        for resnet, transformer_blocks, upsample in self.up_blocks:
            mask_up = masks.pop()
            skip = hiddens.pop()
            x = pack([x[:, :, :skip.shape[-1]], skip], "b * t")[0]
            x = resnet(x, mask_up, t)
            x = rearrange(x, "b c t -> b t c").contiguous()
            # attn_mask = torch.matmul(mask_up.transpose(1, 2).contiguous(), mask_up)
            attn_mask = add_optional_chunk_mask(x, mask_up.bool(), False, False, 0, self.static_chunk_size, -1)
            attn_mask = mask_to_bias(attn_mask == 1, x.dtype)
            for transformer_block in transformer_blocks:
                x = transformer_block(
                    hidden_states=x,
                    attention_mask=attn_mask,
                    timestep=t,
                )
            x = rearrange(x, "b t c -> b c t").contiguous()
            x = upsample(x * mask_up)
        x = self.final_block(x, mask_up)
        output = self.final_proj(x * mask_up)
        return output * mask
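A minimal sketch of the causal convolution defined above: left-padding by kernel_size - 1 keeps the output length equal to the input length while each output frame only sees current and past input frames. The import path mirrors the file location in this upload.

import torch
from src.chatterbox.models.s3gen.decoder import CausalConv1d

conv = CausalConv1d(in_channels=80, out_channels=80, kernel_size=3)
x = torch.randn(1, 80, 100)   # (batch, channels, time)
y = conv(x)
print(y.shape)                # torch.Size([1, 80, 100]) - time length preserved
# Frame t of y depends only on frames t-2..t of x, so the layer is streamable.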
src/chatterbox/models/s3gen/f0_predictor.py
ADDED
@@ -0,0 +1,55 @@
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Kai Hu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from torch.nn.utils.parametrizations import weight_norm


class ConvRNNF0Predictor(nn.Module):
    def __init__(self,
                 num_class: int = 1,
                 in_channels: int = 80,
                 cond_channels: int = 512
                 ):
        super().__init__()

        self.num_class = num_class
        self.condnet = nn.Sequential(
            weight_norm(
                nn.Conv1d(in_channels, cond_channels, kernel_size=3, padding=1)
            ),
            nn.ELU(),
            weight_norm(
                nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1)
            ),
            nn.ELU(),
            weight_norm(
                nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1)
            ),
            nn.ELU(),
            weight_norm(
                nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1)
            ),
            nn.ELU(),
            weight_norm(
                nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1)
            ),
            nn.ELU(),
        )
        self.classifier = nn.Linear(in_features=cond_channels, out_features=self.num_class)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.condnet(x)
        x = x.transpose(1, 2)
        return torch.abs(self.classifier(x).squeeze(-1))
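A quick shape check for the F0 predictor above, assuming the module path used in this upload: it maps a mel spectrogram (batch, 80, frames) to one non-negative F0 value per frame.

import torch
from src.chatterbox.models.s3gen.f0_predictor import ConvRNNF0Predictor

predictor = ConvRNNF0Predictor()   # defaults: 80 mel bins in, 512 hidden channels
mel = torch.randn(2, 80, 120)      # (batch, n_mels, frames)
f0 = predictor(mel)
print(f0.shape)                    # torch.Size([2, 120])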
src/chatterbox/models/s3gen/flow.py
ADDED
@@ -0,0 +1,242 @@
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
from typing import Dict, Optional
import torch
import torch.nn as nn
from torch.nn import functional as F
from omegaconf import DictConfig
from .utils.mask import make_pad_mask


class MaskedDiffWithXvec(torch.nn.Module):
    def __init__(self,
                 input_size: int = 512,
                 output_size: int = 80,
                 spk_embed_dim: int = 192,
                 output_type: str = "mel",
                 vocab_size: int = 4096,
                 input_frame_rate: int = 50,
                 only_mask_loss: bool = True,
                 encoder: torch.nn.Module = None,
                 length_regulator: torch.nn.Module = None,
                 decoder: torch.nn.Module = None,
                 decoder_conf: Dict = {'in_channels': 240, 'out_channel': 80, 'spk_emb_dim': 80, 'n_spks': 1,
                                       'cfm_params': DictConfig({'sigma_min': 1e-06, 'solver': 'euler', 't_scheduler': 'cosine',
                                                                 'training_cfg_rate': 0.2, 'inference_cfg_rate': 0.7, 'reg_loss_type': 'l1'}),
                                       'decoder_params': {'channels': [256, 256], 'dropout': 0.0, 'attention_head_dim': 64,
                                                          'n_blocks': 4, 'num_mid_blocks': 12, 'num_heads': 8, 'act_fn': 'gelu'}},
                 mel_feat_conf: Dict = {'n_fft': 1024, 'num_mels': 80, 'sampling_rate': 22050,
                                        'hop_size': 256, 'win_size': 1024, 'fmin': 0, 'fmax': 8000}):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.decoder_conf = decoder_conf
        self.mel_feat_conf = mel_feat_conf
        self.vocab_size = vocab_size
        self.output_type = output_type
        self.input_frame_rate = input_frame_rate
        logging.info(f"input frame rate={self.input_frame_rate}")
        self.input_embedding = nn.Embedding(vocab_size, input_size)
        self.spk_embed_affine_layer = torch.nn.Linear(spk_embed_dim, output_size)
        self.encoder = encoder
        self.encoder_proj = torch.nn.Linear(self.encoder.output_size(), output_size)
        self.decoder = decoder
        self.length_regulator = length_regulator
        self.only_mask_loss = only_mask_loss

    def forward(
            self,
            batch: dict,
            device: torch.device,
    ) -> Dict[str, Optional[torch.Tensor]]:
        token = batch['speech_token'].to(device)
        token_len = batch['speech_token_len'].to(device)
        feat = batch['speech_feat'].to(device)
        feat_len = batch['speech_feat_len'].to(device)
        embedding = batch['embedding'].to(device)

        # xvec projection
        embedding = F.normalize(embedding, dim=1)
        embedding = self.spk_embed_affine_layer(embedding)

        # concat text and prompt_text
        mask = (~make_pad_mask(token_len)).float().unsqueeze(-1).to(device)
        token = self.input_embedding(torch.clamp(token, min=0)) * mask

        # text encode
        h, h_lengths = self.encoder(token, token_len)
        h = self.encoder_proj(h)
        h, h_lengths = self.length_regulator(h, feat_len)

        # get conditions
        conds = torch.zeros(feat.shape, device=token.device)
        for i, j in enumerate(feat_len):
            if random.random() < 0.5:
                continue
            index = random.randint(0, int(0.3 * j))
            conds[i, :index] = feat[i, :index]
        conds = conds.transpose(1, 2)

        mask = (~make_pad_mask(feat_len)).to(h)
        feat = F.interpolate(feat.unsqueeze(dim=1), size=h.shape[1:], mode="nearest").squeeze(dim=1)
        loss, _ = self.decoder.compute_loss(
            feat.transpose(1, 2).contiguous(),
            mask.unsqueeze(1),
            h.transpose(1, 2).contiguous(),
            embedding,
            cond=conds
        )
        return {'loss': loss}

    @torch.inference_mode()
    def inference(self,
                  token,
                  token_len,
                  prompt_token,
                  prompt_token_len,
                  prompt_feat,
                  prompt_feat_len,
                  embedding,
                  flow_cache):
        if self.fp16 is True:
            prompt_feat = prompt_feat.half()
            embedding = embedding.half()

        assert token.shape[0] == 1
        # xvec projection
        embedding = F.normalize(embedding, dim=1)
        embedding = self.spk_embed_affine_layer(embedding)

        # concat text and prompt_text
        token_len1, token_len2 = prompt_token.shape[1], token.shape[1]
        token, token_len = torch.concat([prompt_token, token], dim=1), prompt_token_len + token_len
        mask = (~make_pad_mask(token_len)).unsqueeze(-1).to(embedding)
        token = self.input_embedding(torch.clamp(token, min=0)) * mask

        # text encode
        h, h_lengths = self.encoder(token, token_len)
        h = self.encoder_proj(h)
        mel_len1, mel_len2 = prompt_feat.shape[1], int(token_len2 / self.input_frame_rate * 22050 / 256)
        h, h_lengths = self.length_regulator.inference(h[:, :token_len1], h[:, token_len1:], mel_len1, mel_len2, self.input_frame_rate)

        # get conditions
        conds = torch.zeros([1, mel_len1 + mel_len2, self.output_size], device=token.device).to(h.dtype)
        conds[:, :mel_len1] = prompt_feat
        conds = conds.transpose(1, 2)

        mask = (~make_pad_mask(torch.tensor([mel_len1 + mel_len2]))).to(h)
        feat, flow_cache = self.decoder(
            mu=h.transpose(1, 2).contiguous(),
            mask=mask.unsqueeze(1),
            spks=embedding,
            cond=conds,
            n_timesteps=10,
            prompt_len=mel_len1,
            flow_cache=flow_cache
        )
        feat = feat[:, :, mel_len1:]
        assert feat.shape[2] == mel_len2
        return feat.float(), flow_cache


class CausalMaskedDiffWithXvec(torch.nn.Module):
    def __init__(self,
                 input_size: int = 512,
                 output_size: int = 80,
                 spk_embed_dim: int = 192,
                 output_type: str = "mel",
                 vocab_size: int = 6561,
                 input_frame_rate: int = 25,
                 only_mask_loss: bool = True,
                 token_mel_ratio: int = 2,
                 pre_lookahead_len: int = 3,
                 encoder: torch.nn.Module = None,
                 decoder: torch.nn.Module = None,
                 decoder_conf: Dict = {'in_channels': 240, 'out_channel': 80, 'spk_emb_dim': 80, 'n_spks': 1,
                                       'cfm_params': DictConfig({'sigma_min': 1e-06, 'solver': 'euler', 't_scheduler': 'cosine',
                                                                 'training_cfg_rate': 0.2, 'inference_cfg_rate': 0.7, 'reg_loss_type': 'l1'}),
                                       'decoder_params': {'channels': [256, 256], 'dropout': 0.0, 'attention_head_dim': 64,
                                                          'n_blocks': 4, 'num_mid_blocks': 12, 'num_heads': 8, 'act_fn': 'gelu'}},
                 mel_feat_conf: Dict = {'n_fft': 1024, 'num_mels': 80, 'sampling_rate': 22050,
                                        'hop_size': 256, 'win_size': 1024, 'fmin': 0, 'fmax': 8000}):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.decoder_conf = decoder_conf
        self.mel_feat_conf = mel_feat_conf
        self.vocab_size = vocab_size
        self.output_type = output_type
        self.input_frame_rate = input_frame_rate
        logging.info(f"input frame rate={self.input_frame_rate}")
        self.input_embedding = nn.Embedding(vocab_size, input_size)
        self.spk_embed_affine_layer = torch.nn.Linear(spk_embed_dim, output_size)
        self.encoder = encoder
        self.encoder_proj = torch.nn.Linear(self.encoder.output_size(), output_size)
        self.decoder = decoder
        self.only_mask_loss = only_mask_loss
        self.token_mel_ratio = token_mel_ratio
        self.pre_lookahead_len = pre_lookahead_len

        # FIXME: this was missing - just putting it in as false
        self.fp16 = False

    @torch.inference_mode()
    def inference(self,
                  token,
                  token_len,
                  prompt_token,
                  prompt_token_len,
                  prompt_feat,
                  prompt_feat_len,
                  embedding,
                  finalize):
        if self.fp16 is True:
            prompt_feat = prompt_feat.half()
            embedding = embedding.half()

        assert token.shape[0] == 1
        # xvec projection
        embedding = F.normalize(embedding, dim=1)
        embedding = self.spk_embed_affine_layer(embedding)

        # concat text and prompt_text
        token, token_len = torch.concat([prompt_token, token], dim=1), prompt_token_len + token_len
        mask = (~make_pad_mask(token_len)).unsqueeze(-1).to(embedding)
        token = self.input_embedding(torch.clamp(token, min=0)) * mask

        # text encode
        h, h_lengths = self.encoder(token, token_len)
        if finalize is False:
            h = h[:, :-self.pre_lookahead_len * self.token_mel_ratio]
        mel_len1, mel_len2 = prompt_feat.shape[1], h.shape[1] - prompt_feat.shape[1]
        h = self.encoder_proj(h)

        # get conditions
        conds = torch.zeros([1, mel_len1 + mel_len2, self.output_size], device=token.device).to(h.dtype)
        conds[:, :mel_len1] = prompt_feat
        conds = conds.transpose(1, 2)

        mask = (~make_pad_mask(torch.tensor([mel_len1 + mel_len2]))).to(h)
        feat, _ = self.decoder(
            mu=h.transpose(1, 2).contiguous(),
            mask=mask.unsqueeze(1),
            spks=embedding,
            cond=conds,
            n_timesteps=10
        )
        feat = feat[:, :, mel_len1:]
        assert feat.shape[2] == mel_len2
        return feat.float(), None  # NOTE jrm: why are they returning None here?
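The token-to-mel length mapping in MaskedDiffWithXvec.inference above is just a unit conversion: speech tokens to seconds (via input_frame_rate), seconds to samples (22050 Hz), samples to mel frames (256-sample hop). A quick numeric check of that arithmetic:

# 100 speech tokens at 50 tokens/s cover 2 s of audio;
# at 22050 Hz with a 256-sample hop that is ~172 mel frames.
token_len2, input_frame_rate = 100, 50
mel_len2 = int(token_len2 / input_frame_rate * 22050 / 256)
print(mel_len2)  # 172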
src/chatterbox/models/s3gen/flow_matching.py
ADDED
@@ -0,0 +1,228 @@
| 1 |
+
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
import threading
|
| 15 |
+
import torch
|
| 16 |
+
import torch.nn.functional as F
|
| 17 |
+
from .matcha.flow_matching import BASECFM
|
| 18 |
+
from omegaconf import OmegaConf
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
CFM_PARAMS = OmegaConf.create({
|
| 22 |
+
"sigma_min": 1e-06,
|
| 23 |
+
"solver": "euler",
|
| 24 |
+
"t_scheduler": "cosine",
|
| 25 |
+
"training_cfg_rate": 0.2,
|
| 26 |
+
"inference_cfg_rate": 0.7,
|
| 27 |
+
"reg_loss_type": "l1"
|
| 28 |
+
})
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class ConditionalCFM(BASECFM):
|
| 32 |
+
def __init__(self, in_channels, cfm_params, n_spks=1, spk_emb_dim=64, estimator: torch.nn.Module = None):
|
| 33 |
+
super().__init__(
|
| 34 |
+
n_feats=in_channels,
|
| 35 |
+
cfm_params=cfm_params,
|
| 36 |
+
n_spks=n_spks,
|
| 37 |
+
spk_emb_dim=spk_emb_dim,
|
| 38 |
+
)
|
| 39 |
+
self.t_scheduler = cfm_params.t_scheduler
|
| 40 |
+
self.training_cfg_rate = cfm_params.training_cfg_rate
|
| 41 |
+
self.inference_cfg_rate = cfm_params.inference_cfg_rate
|
| 42 |
+
in_channels = in_channels + (spk_emb_dim if n_spks > 0 else 0)
|
| 43 |
+
# Just change the architecture of the estimator here
|
| 44 |
+
self.estimator = estimator
|
| 45 |
+
self.lock = threading.Lock()
|
| 46 |
+
|
| 47 |
+
@torch.inference_mode()
|
| 48 |
+
def forward(self, mu, mask, n_timesteps, temperature=1.0, spks=None, cond=None, prompt_len=0, flow_cache=torch.zeros(1, 80, 0, 2)):
|
| 49 |
+
"""Forward diffusion
|
| 50 |
+
|
| 51 |
+
Args:
|
| 52 |
+
mu (torch.Tensor): output of encoder
|
| 53 |
+
shape: (batch_size, n_feats, mel_timesteps)
|
| 54 |
+
mask (torch.Tensor): output_mask
|
| 55 |
+
shape: (batch_size, 1, mel_timesteps)
|
| 56 |
+
n_timesteps (int): number of diffusion steps
|
| 57 |
+
temperature (float, optional): temperature for scaling noise. Defaults to 1.0.
|
| 58 |
+
spks (torch.Tensor, optional): speaker ids. Defaults to None.
|
| 59 |
+
shape: (batch_size, spk_emb_dim)
|
| 60 |
+
cond: Not used but kept for future purposes
|
| 61 |
+
|
| 62 |
+
Returns:
|
| 63 |
+
sample: generated mel-spectrogram
|
| 64 |
+
shape: (batch_size, n_feats, mel_timesteps)
|
| 65 |
+
"""
|
| 66 |
+
|
| 67 |
+
z = torch.randn_like(mu).to(mu.device).to(mu.dtype) * temperature
|
| 68 |
+
cache_size = flow_cache.shape[2]
|
| 69 |
+
# fix prompt and overlap part mu and z
|
| 70 |
+
if cache_size != 0:
|
| 71 |
+
z[:, :, :cache_size] = flow_cache[:, :, :, 0]
|
| 72 |
+
mu[:, :, :cache_size] = flow_cache[:, :, :, 1]
|
| 73 |
+
z_cache = torch.concat([z[:, :, :prompt_len], z[:, :, -34:]], dim=2)
|
| 74 |
+
mu_cache = torch.concat([mu[:, :, :prompt_len], mu[:, :, -34:]], dim=2)
|
| 75 |
+
flow_cache = torch.stack([z_cache, mu_cache], dim=-1)
|
| 76 |
+
|
| 77 |
+
t_span = torch.linspace(0, 1, n_timesteps + 1, device=mu.device, dtype=mu.dtype)
|
| 78 |
+
if self.t_scheduler == 'cosine':
|
| 79 |
+
t_span = 1 - torch.cos(t_span * 0.5 * torch.pi)
|
| 80 |
+
return self.solve_euler(z, t_span=t_span, mu=mu, mask=mask, spks=spks, cond=cond), flow_cache
|
| 81 |
+
|
| 82 |
+
def solve_euler(self, x, t_span, mu, mask, spks, cond):
|
| 83 |
+
"""
|
| 84 |
+
Fixed euler solver for ODEs.
|
| 85 |
+
Args:
|
| 86 |
+
x (torch.Tensor): random noise
|
| 87 |
+
t_span (torch.Tensor): n_timesteps interpolated
|
| 88 |
+
shape: (n_timesteps + 1,)
|
| 89 |
+
mu (torch.Tensor): output of encoder
|
| 90 |
+
shape: (batch_size, n_feats, mel_timesteps)
|
| 91 |
+
mask (torch.Tensor): output_mask
|
| 92 |
+
shape: (batch_size, 1, mel_timesteps)
|
| 93 |
+
spks (torch.Tensor, optional): speaker ids. Defaults to None.
|
| 94 |
+
shape: (batch_size, spk_emb_dim)
|
| 95 |
+
cond: Not used but kept for future purposes
|
| 96 |
+
"""
|
| 97 |
+
t, _, dt = t_span[0], t_span[-1], t_span[1] - t_span[0]
|
| 98 |
+
t = t.unsqueeze(dim=0)
|
| 99 |
+
|
| 100 |
+
# I am storing this because I can later plot it by putting a debugger here and saving it to a file
|
| 101 |
+
# Or in future might add like a return_all_steps flag
|
| 102 |
+
sol = []
|
| 103 |
+
|
| 104 |
+
# Do not use concat, it may cause memory format changed and trt infer with wrong results!
|
| 105 |
+
x_in = torch.zeros([2, 80, x.size(2)], device=x.device, dtype=x.dtype)
|
| 106 |
+
mask_in = torch.zeros([2, 1, x.size(2)], device=x.device, dtype=x.dtype)
|
| 107 |
+
mu_in = torch.zeros([2, 80, x.size(2)], device=x.device, dtype=x.dtype)
|
| 108 |
+
t_in = torch.zeros([2], device=x.device, dtype=x.dtype)
|
| 109 |
+
spks_in = torch.zeros([2, 80], device=x.device, dtype=x.dtype)
|
| 110 |
+
cond_in = torch.zeros([2, 80, x.size(2)], device=x.device, dtype=x.dtype)
|
| 111 |
+
for step in range(1, len(t_span)):
|
| 112 |
+
# Classifier-Free Guidance inference introduced in VoiceBox
|
| 113 |
+
x_in[:] = x
|
| 114 |
+
mask_in[:] = mask
|
| 115 |
+
mu_in[0] = mu
|
| 116 |
+
t_in[:] = t.unsqueeze(0)
|
| 117 |
+
spks_in[0] = spks
|
| 118 |
+
cond_in[0] = cond
|
| 119 |
+
dphi_dt = self.forward_estimator(
|
| 120 |
+
x_in, mask_in,
|
| 121 |
+
mu_in, t_in,
|
| 122 |
+
spks_in,
|
| 123 |
+
cond_in
|
| 124 |
+
)
|
| 125 |
+
dphi_dt, cfg_dphi_dt = torch.split(dphi_dt, [x.size(0), x.size(0)], dim=0)
|
| 126 |
+
dphi_dt = ((1.0 + self.inference_cfg_rate) * dphi_dt - self.inference_cfg_rate * cfg_dphi_dt)
|
| 127 |
+
x = x + dt * dphi_dt
|
| 128 |
+
t = t + dt
|
| 129 |
+
sol.append(x)
|
| 130 |
+
if step < len(t_span) - 1:
|
| 131 |
+
dt = t_span[step + 1] - t
|
| 132 |
+
|
        return sol[-1].float()

    def forward_estimator(self, x, mask, mu, t, spks, cond):
        if isinstance(self.estimator, torch.nn.Module):
            return self.estimator.forward(x, mask, mu, t, spks, cond)
        else:
            with self.lock:
                self.estimator.set_input_shape('x', (2, 80, x.size(2)))
                self.estimator.set_input_shape('mask', (2, 1, x.size(2)))
                self.estimator.set_input_shape('mu', (2, 80, x.size(2)))
                self.estimator.set_input_shape('t', (2,))
                self.estimator.set_input_shape('spks', (2, 80))
                self.estimator.set_input_shape('cond', (2, 80, x.size(2)))
                # run trt engine
                self.estimator.execute_v2([x.contiguous().data_ptr(),
                                           mask.contiguous().data_ptr(),
                                           mu.contiguous().data_ptr(),
                                           t.contiguous().data_ptr(),
                                           spks.contiguous().data_ptr(),
                                           cond.contiguous().data_ptr(),
                                           x.data_ptr()])
            return x

    def compute_loss(self, x1, mask, mu, spks=None, cond=None):
        """Computes diffusion loss

        Args:
            x1 (torch.Tensor): Target
                shape: (batch_size, n_feats, mel_timesteps)
            mask (torch.Tensor): target mask
                shape: (batch_size, 1, mel_timesteps)
            mu (torch.Tensor): output of encoder
                shape: (batch_size, n_feats, mel_timesteps)
            spks (torch.Tensor, optional): speaker embedding. Defaults to None.
                shape: (batch_size, spk_emb_dim)

        Returns:
            loss: conditional flow matching loss
            y: conditional flow
                shape: (batch_size, n_feats, mel_timesteps)
        """
        b, _, t = mu.shape

        # random timestep
        t = torch.rand([b, 1, 1], device=mu.device, dtype=mu.dtype)
        if self.t_scheduler == 'cosine':
            t = 1 - torch.cos(t * 0.5 * torch.pi)
        # sample noise p(x_0)
        z = torch.randn_like(x1)

        y = (1 - (1 - self.sigma_min) * t) * z + t * x1
        u = x1 - (1 - self.sigma_min) * z

        # during training, we randomly drop condition to trade off mode coverage and sample fidelity
        if self.training_cfg_rate > 0:
            cfg_mask = torch.rand(b, device=x1.device) > self.training_cfg_rate
            mu = mu * cfg_mask.view(-1, 1, 1)
            spks = spks * cfg_mask.view(-1, 1)
            cond = cond * cfg_mask.view(-1, 1, 1)

        pred = self.estimator(y, mask, mu, t.squeeze(), spks, cond)
        loss = F.mse_loss(pred * mask, u * mask, reduction="sum") / (torch.sum(mask) * u.shape[1])
        return loss, y


class CausalConditionalCFM(ConditionalCFM):
    def __init__(self, in_channels=240, cfm_params=CFM_PARAMS, n_spks=1, spk_emb_dim=80, estimator=None):
        super().__init__(in_channels, cfm_params, n_spks, spk_emb_dim, estimator)
        self.rand_noise = torch.randn([1, 80, 50 * 300])

    @torch.inference_mode()
    def forward(self, mu, mask, n_timesteps, temperature=1.0, spks=None, cond=None):
        """Forward diffusion

        Args:
            mu (torch.Tensor): output of encoder
                shape: (batch_size, n_feats, mel_timesteps)
            mask (torch.Tensor): output_mask
                shape: (batch_size, 1, mel_timesteps)
            n_timesteps (int): number of diffusion steps
            temperature (float, optional): temperature for scaling noise. Defaults to 1.0.
            spks (torch.Tensor, optional): speaker ids. Defaults to None.
                shape: (batch_size, spk_emb_dim)
            cond: Not used but kept for future purposes

        Returns:
            sample: generated mel-spectrogram
                shape: (batch_size, n_feats, mel_timesteps)
        """

        z = self.rand_noise[:, :, :mu.size(2)].to(mu.device).to(mu.dtype) * temperature
        # fix prompt and overlap part mu and z
        t_span = torch.linspace(0, 1, n_timesteps + 1, device=mu.device, dtype=mu.dtype)
        if self.t_scheduler == 'cosine':
            t_span = 1 - torch.cos(t_span * 0.5 * torch.pi)
        return self.solve_euler(z, t_span=t_span, mu=mu, mask=mask, spks=spks, cond=cond), None
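A minimal smoke test for this inference path, assuming a stub in place of the real decoder (the stub below is hypothetical and not part of the upload; the tensor shapes follow the input shapes registered in `forward_estimator`: `x`/`mu`/`cond` as (B, 80, T) and `spks` as (B, 80)):

import torch

class _ZeroField(torch.nn.Module):
    # Hypothetical estimator: same call signature as the s3gen decoder,
    # predicts a zero vector field, so the Euler solve returns the noise itself.
    def forward(self, x, mask, mu, t, spks, cond):
        return torch.zeros_like(x)

cfm = CausalConditionalCFM(estimator=_ZeroField())
mu = torch.randn(1, 80, 120)     # encoder output: (batch, n_feats, mel_timesteps)
mask = torch.ones(1, 1, 120)     # all frames valid
spks = torch.zeros(1, 80)        # speaker embedding
cond = torch.zeros(1, 80, 120)   # prompt conditioning
mel, _ = cfm(mu, mask, n_timesteps=10, spks=spks, cond=cond)
print(mel.shape)                 # torch.Size([1, 80, 120])

Note the fixed `rand_noise` buffer in `__init__`: reusing one pre-drawn noise sample of 50 * 300 frames makes generation repeatable across calls for the same `mu`, at the cost of capping the mel length this class can synthesize.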
src/chatterbox/models/s3gen/hifigan.py
ADDED
@@ -0,0 +1,474 @@
# jrm: adapted from CosyVoice/cosyvoice/hifigan/generator.py
# most modules should be reusable, but I found their SineGen changed a bit.

# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Kai Hu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""HIFI-GAN"""

from typing import Dict, Optional, List
import numpy as np
from scipy.signal import get_window
import torch
import torch.nn.functional as F
from torch.nn import Conv1d
from torch.nn import ConvTranspose1d
from torch.nn.utils import remove_weight_norm
from torch.nn.utils.parametrizations import weight_norm
from torch.distributions.uniform import Uniform
from torch import nn, sin, pow
from torch.nn import Parameter


class Snake(nn.Module):
    '''
    Implementation of a sine-based periodic activation function
    Shape:
        - Input: (B, C, T)
        - Output: (B, C, T), same shape as the input
    Parameters:
        - alpha - trainable parameter
    References:
        - This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
        https://arxiv.org/abs/2006.08195
    Examples:
        >>> a1 = Snake(256)
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
        '''
        Initialization.
        INPUT:
            - in_features: shape of the input
            - alpha: trainable parameter
            alpha is initialized to 1 by default, higher values = higher-frequency.
            alpha will be trained along with the rest of your model.
        '''
        super(Snake, self).__init__()
        self.in_features = in_features

        # initialize alpha
        self.alpha_logscale = alpha_logscale
        if self.alpha_logscale:  # log scale alphas initialized to zeros
            self.alpha = Parameter(torch.zeros(in_features) * alpha)
        else:  # linear scale alphas initialized to ones
            self.alpha = Parameter(torch.ones(in_features) * alpha)

        self.alpha.requires_grad = alpha_trainable

        self.no_div_by_zero = 0.000000001

    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        Snake := x + 1/a * sin^2(xa)
        '''
        alpha = self.alpha.unsqueeze(0).unsqueeze(-1)  # line up with x to [B, C, T]
        if self.alpha_logscale:
            alpha = torch.exp(alpha)
        x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2)

        return x


def get_padding(kernel_size, dilation=1):
    return int((kernel_size * dilation - dilation) / 2)


def init_weights(m, mean=0.0, std=0.01):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        m.weight.data.normal_(mean, std)


"""hifigan based generator implementation.

This code is modified from https://github.com/jik876/hifi-gan,
https://github.com/kan-bayashi/ParallelWaveGAN and
https://github.com/NVIDIA/BigVGAN
"""


class ResBlock(torch.nn.Module):
    """Residual block module in HiFiGAN/BigVGAN."""
    def __init__(
        self,
        channels: int = 512,
        kernel_size: int = 3,
        dilations: List[int] = [1, 3, 5],
    ):
        super(ResBlock, self).__init__()
        self.convs1 = nn.ModuleList()
        self.convs2 = nn.ModuleList()

        for dilation in dilations:
            self.convs1.append(
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation,
                        padding=get_padding(kernel_size, dilation)
                    )
                )
            )
            self.convs2.append(
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=1,
                        padding=get_padding(kernel_size, 1)
                    )
                )
            )
        self.convs1.apply(init_weights)
        self.convs2.apply(init_weights)
        self.activations1 = nn.ModuleList([
            Snake(channels, alpha_logscale=False)
            for _ in range(len(self.convs1))
        ])
        self.activations2 = nn.ModuleList([
            Snake(channels, alpha_logscale=False)
            for _ in range(len(self.convs2))
        ])

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        for idx in range(len(self.convs1)):
            xt = self.activations1[idx](x)
            xt = self.convs1[idx](xt)
            xt = self.activations2[idx](xt)
            xt = self.convs2[idx](xt)
            x = xt + x
        return x

    def remove_weight_norm(self):
        for idx in range(len(self.convs1)):
            remove_weight_norm(self.convs1[idx])
            remove_weight_norm(self.convs2[idx])


class SineGen(torch.nn.Module):
    """ Definition of sine generator
    SineGen(samp_rate, harmonic_num = 0,
            sine_amp = 0.1, noise_std = 0.003,
            voiced_threshold = 0,
            flag_for_pulse=False)
    samp_rate: sampling rate in Hz
    harmonic_num: number of harmonic overtones (default 0)
    sine_amp: amplitude of sine-waveform (default 0.1)
    noise_std: std of Gaussian noise (default 0.003)
    voiced_threshold: F0 threshold for U/V classification (default 0)
    flag_for_pulse: this SineGen is used inside PulseGen (default False)
    Note: when flag_for_pulse is True, the first time step of a voiced
        segment is always sin(np.pi) or cos(0)
    """

    def __init__(self, samp_rate, harmonic_num=0,
                 sine_amp=0.1, noise_std=0.003,
                 voiced_threshold=0):
        super(SineGen, self).__init__()
        self.sine_amp = sine_amp
        self.noise_std = noise_std
        self.harmonic_num = harmonic_num
        self.sampling_rate = samp_rate
        self.voiced_threshold = voiced_threshold

    def _f02uv(self, f0):
        # generate uv signal
        uv = (f0 > self.voiced_threshold).type(torch.float32)
        return uv

    @torch.no_grad()
    def forward(self, f0):
        """
        :param f0: [B, 1, sample_len], Hz
        :return: [B, 1, sample_len]
        """

        F_mat = torch.zeros((f0.size(0), self.harmonic_num + 1, f0.size(-1))).to(f0.device)
        for i in range(self.harmonic_num + 1):
            F_mat[:, i: i + 1, :] = f0 * (i + 1) / self.sampling_rate

        theta_mat = 2 * np.pi * (torch.cumsum(F_mat, dim=-1) % 1)
        u_dist = Uniform(low=-np.pi, high=np.pi)
        phase_vec = u_dist.sample(sample_shape=(f0.size(0), self.harmonic_num + 1, 1)).to(F_mat.device)
        phase_vec[:, 0, :] = 0

        # generate sine waveforms
        sine_waves = self.sine_amp * torch.sin(theta_mat + phase_vec)

        # generate uv signal
        uv = self._f02uv(f0)

        # noise: for unvoiced should be similar to sine_amp
        #        std = self.sine_amp/3 -> max value ~ self.sine_amp
        #        for voiced regions is self.noise_std
        noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
        noise = noise_amp * torch.randn_like(sine_waves)

        # first: set the unvoiced part to 0 by uv
        # then: additive noise
        sine_waves = sine_waves * uv + noise
        return sine_waves, uv, noise


class SourceModuleHnNSF(torch.nn.Module):
    """ SourceModule for hn-nsf
    SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
                 add_noise_std=0.003, voiced_threshod=0)
    sampling_rate: sampling_rate in Hz
    harmonic_num: number of harmonics above F0 (default: 0)
    sine_amp: amplitude of sine source signal (default: 0.1)
    add_noise_std: std of additive Gaussian noise (default: 0.003)
        note that amplitude of noise in unvoiced is decided
        by sine_amp
    voiced_threshod: threshold to set U/V given F0 (default: 0)
    Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
    F0_sampled (batchsize, length, 1)
    Sine_source (batchsize, length, 1)
    noise_source (batchsize, length, 1)
    uv (batchsize, length, 1)
    """

    def __init__(self, sampling_rate, upsample_scale, harmonic_num=0, sine_amp=0.1,
                 add_noise_std=0.003, voiced_threshod=0):
        super(SourceModuleHnNSF, self).__init__()

        self.sine_amp = sine_amp
        self.noise_std = add_noise_std

        # to produce sine waveforms
        self.l_sin_gen = SineGen(sampling_rate, harmonic_num,
                                 sine_amp, add_noise_std, voiced_threshod)

        # to merge source harmonics into a single excitation
        self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
        self.l_tanh = torch.nn.Tanh()

    def forward(self, x):
        """
        Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
        F0_sampled (batchsize, length, 1)
        Sine_source (batchsize, length, 1)
        noise_source (batchsize, length, 1)
        """
        # source for harmonic branch
        with torch.no_grad():
            sine_wavs, uv, _ = self.l_sin_gen(x.transpose(1, 2))
            sine_wavs = sine_wavs.transpose(1, 2)
            uv = uv.transpose(1, 2)
        sine_merge = self.l_tanh(self.l_linear(sine_wavs))

        # source for noise branch, in the same shape as uv
        noise = torch.randn_like(uv) * self.sine_amp / 3
        return sine_merge, noise, uv


class HiFTGenerator(nn.Module):
    """
    HiFTNet Generator: Neural Source Filter + ISTFTNet
    https://arxiv.org/abs/2309.09493
    """
    def __init__(
        self,
        in_channels: int = 80,
        base_channels: int = 512,
        nb_harmonics: int = 8,
        sampling_rate: int = 22050,
        nsf_alpha: float = 0.1,
        nsf_sigma: float = 0.003,
        nsf_voiced_threshold: float = 10,
        upsample_rates: List[int] = [8, 8],
        upsample_kernel_sizes: List[int] = [16, 16],
        istft_params: Dict[str, int] = {"n_fft": 16, "hop_len": 4},
        resblock_kernel_sizes: List[int] = [3, 7, 11],
        resblock_dilation_sizes: List[List[int]] = [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        source_resblock_kernel_sizes: List[int] = [7, 11],
        source_resblock_dilation_sizes: List[List[int]] = [[1, 3, 5], [1, 3, 5]],
        lrelu_slope: float = 0.1,
        audio_limit: float = 0.99,
        f0_predictor: torch.nn.Module = None,
    ):
        super(HiFTGenerator, self).__init__()

        self.out_channels = 1
        self.nb_harmonics = nb_harmonics
        self.sampling_rate = sampling_rate
        self.istft_params = istft_params
        self.lrelu_slope = lrelu_slope
        self.audio_limit = audio_limit

        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.m_source = SourceModuleHnNSF(
            sampling_rate=sampling_rate,
            upsample_scale=np.prod(upsample_rates) * istft_params["hop_len"],
            harmonic_num=nb_harmonics,
            sine_amp=nsf_alpha,
            add_noise_std=nsf_sigma,
            voiced_threshod=nsf_voiced_threshold)
        self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates) * istft_params["hop_len"])

        self.conv_pre = weight_norm(
            Conv1d(in_channels, base_channels, 7, 1, padding=3)
        )

        # Up
        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        base_channels // (2**i),
                        base_channels // (2**(i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )

        # Down
        self.source_downs = nn.ModuleList()
        self.source_resblocks = nn.ModuleList()
        downsample_rates = [1] + upsample_rates[::-1][:-1]
        downsample_cum_rates = np.cumprod(downsample_rates)
        for i, (u, k, d) in enumerate(zip(downsample_cum_rates[::-1], source_resblock_kernel_sizes, source_resblock_dilation_sizes)):
            if u == 1:
                self.source_downs.append(
                    Conv1d(istft_params["n_fft"] + 2, base_channels // (2 ** (i + 1)), 1, 1)
                )
            else:
                self.source_downs.append(
                    Conv1d(istft_params["n_fft"] + 2, base_channels // (2 ** (i + 1)), u * 2, u, padding=(u // 2))
                )

            self.source_resblocks.append(
                ResBlock(base_channels // (2 ** (i + 1)), k, d)
            )

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = base_channels // (2**(i + 1))
            for _, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
                self.resblocks.append(ResBlock(ch, k, d))

        self.conv_post = weight_norm(Conv1d(ch, istft_params["n_fft"] + 2, 7, 1, padding=3))
        self.ups.apply(init_weights)
        self.conv_post.apply(init_weights)
        self.reflection_pad = nn.ReflectionPad1d((1, 0))
        self.stft_window = torch.from_numpy(get_window("hann", istft_params["n_fft"], fftbins=True).astype(np.float32))
        self.f0_predictor = f0_predictor

    def remove_weight_norm(self):
        print('Removing weight norm...')
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()
        remove_weight_norm(self.conv_pre)
        remove_weight_norm(self.conv_post)
        self.m_source.remove_weight_norm()
        for l in self.source_downs:
            remove_weight_norm(l)
        for l in self.source_resblocks:
            l.remove_weight_norm()

    def _stft(self, x):
        spec = torch.stft(
            x,
            self.istft_params["n_fft"], self.istft_params["hop_len"], self.istft_params["n_fft"], window=self.stft_window.to(x.device),
            return_complex=True)
        spec = torch.view_as_real(spec)  # [B, F, TT, 2]
        return spec[..., 0], spec[..., 1]

    def _istft(self, magnitude, phase):
        magnitude = torch.clip(magnitude, max=1e2)
        real = magnitude * torch.cos(phase)
        img = magnitude * torch.sin(phase)
        inverse_transform = torch.istft(torch.complex(real, img), self.istft_params["n_fft"], self.istft_params["hop_len"],
                                        self.istft_params["n_fft"], window=self.stft_window.to(magnitude.device))
        return inverse_transform

    def decode(self, x: torch.Tensor, s: torch.Tensor = torch.zeros(1, 1, 0)) -> torch.Tensor:
        s_stft_real, s_stft_imag = self._stft(s.squeeze(1))
        s_stft = torch.cat([s_stft_real, s_stft_imag], dim=1)

        x = self.conv_pre(x)
        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, self.lrelu_slope)
            x = self.ups[i](x)

            if i == self.num_upsamples - 1:
                x = self.reflection_pad(x)

            # fusion
            si = self.source_downs[i](s_stft)
            si = self.source_resblocks[i](si)
            x = x + si

            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels

        x = F.leaky_relu(x)
        x = self.conv_post(x)
        magnitude = torch.exp(x[:, :self.istft_params["n_fft"] // 2 + 1, :])
        phase = torch.sin(x[:, self.istft_params["n_fft"] // 2 + 1:, :])  # actually, sin is redundant

        x = self._istft(magnitude, phase)
        x = torch.clamp(x, -self.audio_limit, self.audio_limit)
        return x

    def forward(
        self,
        batch: dict,
        device: torch.device,
    ) -> Dict[str, Optional[torch.Tensor]]:
        speech_feat = batch['speech_feat'].transpose(1, 2).to(device)
        # mel->f0
        f0 = self.f0_predictor(speech_feat)
        # f0->source
        s = self.f0_upsamp(f0[:, None]).transpose(1, 2)  # bs,n,t
        s, _, _ = self.m_source(s)
        s = s.transpose(1, 2)
        # mel+source->speech
        generated_speech = self.decode(x=speech_feat, s=s)
        return generated_speech, f0

    @torch.inference_mode()
    def inference(self, speech_feat: torch.Tensor, cache_source: torch.Tensor = torch.zeros(1, 1, 0)) -> torch.Tensor:
        # mel->f0
        f0 = self.f0_predictor(speech_feat)
        # f0->source
        s = self.f0_upsamp(f0[:, None]).transpose(1, 2)  # bs,n,t
        s, _, _ = self.m_source(s)
        s = s.transpose(1, 2)
        # use cache_source to avoid glitch
        if cache_source.shape[2] != 0:
            s[:, :, :cache_source.shape[2]] = cache_source
        generated_speech = self.decode(x=speech_feat, s=s)
        return generated_speech, s
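A sketch of the vocoder's contract (hypothetical, not part of the upload): under the default `upsample_rates=[8, 8]` and `hop_len=4` above, each mel frame maps to 8 * 8 * 4 = 256 waveform samples. The constant-F0 stub below only stands in for the repo's f0_predictor module so the shapes can be traced end to end:

import torch

class _ConstF0(torch.nn.Module):
    # Hypothetical stub: a flat 150 Hz contour, shape (B, T) from (B, 80, T) mels.
    def forward(self, mel):
        return torch.full((mel.shape[0], mel.shape[2]), 150.0)

voc = HiFTGenerator(f0_predictor=_ConstF0()).eval()
mel = torch.randn(1, 80, 100)    # 100 mel frames
wav, source = voc.inference(speech_feat=mel)
print(wav.shape)                 # torch.Size([1, 25600]) = 100 frames * 256 samples

The second return value is the NSF excitation; passing it back as `cache_source` on the next chunk is what `inference` uses to avoid glitches at streaming boundaries.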
src/chatterbox/models/s3gen/matcha/decoder.py
ADDED
@@ -0,0 +1,443 @@
import math
from typing import Optional

import torch
import torch.nn as nn
import torch.nn.functional as F
from conformer import ConformerBlock
from diffusers.models.activations import get_activation
from einops import pack, rearrange, repeat

from .transformer import BasicTransformerBlock


class SinusoidalPosEmb(torch.nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.dim = dim
        assert self.dim % 2 == 0, "SinusoidalPosEmb requires dim to be even"

    def forward(self, x, scale=1000):
        if x.ndim < 1:
            x = x.unsqueeze(0)
        device = x.device
        half_dim = self.dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, device=device).float() * -emb)
        emb = scale * x.unsqueeze(1) * emb.unsqueeze(0)
        emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
        return emb


class Block1D(torch.nn.Module):
    def __init__(self, dim, dim_out, groups=8):
        super().__init__()
        self.block = torch.nn.Sequential(
            torch.nn.Conv1d(dim, dim_out, 3, padding=1),
            torch.nn.GroupNorm(groups, dim_out),
            nn.Mish(),
        )

    def forward(self, x, mask):
        output = self.block(x * mask)
        return output * mask


class ResnetBlock1D(torch.nn.Module):
    def __init__(self, dim, dim_out, time_emb_dim, groups=8):
        super().__init__()
        self.mlp = torch.nn.Sequential(nn.Mish(), torch.nn.Linear(time_emb_dim, dim_out))

        self.block1 = Block1D(dim, dim_out, groups=groups)
        self.block2 = Block1D(dim_out, dim_out, groups=groups)

        self.res_conv = torch.nn.Conv1d(dim, dim_out, 1)

    def forward(self, x, mask, time_emb):
        h = self.block1(x, mask)
        h += self.mlp(time_emb).unsqueeze(-1)
        h = self.block2(h, mask)
        output = h + self.res_conv(x * mask)
        return output


class Downsample1D(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.conv = torch.nn.Conv1d(dim, dim, 3, 2, 1)

    def forward(self, x):
        return self.conv(x)


class TimestepEmbedding(nn.Module):
    def __init__(
        self,
        in_channels: int,
        time_embed_dim: int,
        act_fn: str = "silu",
        out_dim: int = None,
        post_act_fn: Optional[str] = None,
        cond_proj_dim=None,
    ):
        super().__init__()

        self.linear_1 = nn.Linear(in_channels, time_embed_dim)

        if cond_proj_dim is not None:
            self.cond_proj = nn.Linear(cond_proj_dim, in_channels, bias=False)
        else:
            self.cond_proj = None

        self.act = get_activation(act_fn)

        if out_dim is not None:
            time_embed_dim_out = out_dim
        else:
            time_embed_dim_out = time_embed_dim
        self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim_out)

        if post_act_fn is None:
            self.post_act = None
        else:
            self.post_act = get_activation(post_act_fn)

    def forward(self, sample, condition=None):
        if condition is not None:
            sample = sample + self.cond_proj(condition)
        sample = self.linear_1(sample)

        if self.act is not None:
            sample = self.act(sample)

        sample = self.linear_2(sample)

        if self.post_act is not None:
            sample = self.post_act(sample)
        return sample


class Upsample1D(nn.Module):
    """A 1D upsampling layer with an optional convolution.

    Parameters:
        channels (`int`):
            number of channels in the inputs and outputs.
        use_conv (`bool`, default `False`):
            option to use a convolution.
        use_conv_transpose (`bool`, default `False`):
            option to use a convolution transpose.
        out_channels (`int`, optional):
            number of output channels. Defaults to `channels`.
    """

    def __init__(self, channels, use_conv=False, use_conv_transpose=True, out_channels=None, name="conv"):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_conv_transpose = use_conv_transpose
        self.name = name

        self.conv = None
        if use_conv_transpose:
            self.conv = nn.ConvTranspose1d(channels, self.out_channels, 4, 2, 1)
        elif use_conv:
            self.conv = nn.Conv1d(self.channels, self.out_channels, 3, padding=1)

    def forward(self, inputs):
        assert inputs.shape[1] == self.channels
        if self.use_conv_transpose:
            return self.conv(inputs)

        outputs = F.interpolate(inputs, scale_factor=2.0, mode="nearest")

        if self.use_conv:
            outputs = self.conv(outputs)

        return outputs


class ConformerWrapper(ConformerBlock):
    def __init__(  # pylint: disable=useless-super-delegation
        self,
        *,
        dim,
        dim_head=64,
        heads=8,
        ff_mult=4,
        conv_expansion_factor=2,
        conv_kernel_size=31,
        attn_dropout=0,
        ff_dropout=0,
        conv_dropout=0,
        conv_causal=False,
    ):
        super().__init__(
            dim=dim,
            dim_head=dim_head,
            heads=heads,
            ff_mult=ff_mult,
            conv_expansion_factor=conv_expansion_factor,
            conv_kernel_size=conv_kernel_size,
            attn_dropout=attn_dropout,
            ff_dropout=ff_dropout,
            conv_dropout=conv_dropout,
            conv_causal=conv_causal,
        )

    def forward(
        self,
        hidden_states,
        attention_mask,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        timestep=None,
    ):
        return super().forward(x=hidden_states, mask=attention_mask.bool())


class Decoder(nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        channels=(256, 256),
        dropout=0.05,
        attention_head_dim=64,
        n_blocks=1,
        num_mid_blocks=2,
        num_heads=4,
        act_fn="snake",
        down_block_type="transformer",
        mid_block_type="transformer",
        up_block_type="transformer",
    ):
        super().__init__()
        channels = tuple(channels)
        self.in_channels = in_channels
        self.out_channels = out_channels

        self.time_embeddings = SinusoidalPosEmb(in_channels)
        time_embed_dim = channels[0] * 4
        self.time_mlp = TimestepEmbedding(
            in_channels=in_channels,
            time_embed_dim=time_embed_dim,
            act_fn="silu",
        )

        self.down_blocks = nn.ModuleList([])
        self.mid_blocks = nn.ModuleList([])
        self.up_blocks = nn.ModuleList([])

        output_channel = in_channels
        for i in range(len(channels)):  # pylint: disable=consider-using-enumerate
            input_channel = output_channel
            output_channel = channels[i]
            is_last = i == len(channels) - 1
            resnet = ResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim)
            transformer_blocks = nn.ModuleList(
                [
                    self.get_block(
                        down_block_type,
                        output_channel,
                        attention_head_dim,
                        num_heads,
                        dropout,
                        act_fn,
                    )
                    for _ in range(n_blocks)
                ]
            )
            downsample = (
                Downsample1D(output_channel) if not is_last else nn.Conv1d(output_channel, output_channel, 3, padding=1)
            )

            self.down_blocks.append(nn.ModuleList([resnet, transformer_blocks, downsample]))

        for i in range(num_mid_blocks):
            input_channel = channels[-1]
            out_channels = channels[-1]

            resnet = ResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim)

            transformer_blocks = nn.ModuleList(
                [
                    self.get_block(
                        mid_block_type,
                        output_channel,
                        attention_head_dim,
                        num_heads,
                        dropout,
                        act_fn,
                    )
                    for _ in range(n_blocks)
                ]
            )

            self.mid_blocks.append(nn.ModuleList([resnet, transformer_blocks]))

        channels = channels[::-1] + (channels[0],)
        for i in range(len(channels) - 1):
            input_channel = channels[i]
            output_channel = channels[i + 1]
            is_last = i == len(channels) - 2

            resnet = ResnetBlock1D(
                dim=2 * input_channel,
                dim_out=output_channel,
                time_emb_dim=time_embed_dim,
            )
            transformer_blocks = nn.ModuleList(
                [
                    self.get_block(
                        up_block_type,
                        output_channel,
                        attention_head_dim,
                        num_heads,
                        dropout,
                        act_fn,
                    )
                    for _ in range(n_blocks)
                ]
            )
            upsample = (
                Upsample1D(output_channel, use_conv_transpose=True)
                if not is_last
                else nn.Conv1d(output_channel, output_channel, 3, padding=1)
            )

            self.up_blocks.append(nn.ModuleList([resnet, transformer_blocks, upsample]))

        self.final_block = Block1D(channels[-1], channels[-1])
        self.final_proj = nn.Conv1d(channels[-1], self.out_channels, 1)

        self.initialize_weights()
        # nn.init.normal_(self.final_proj.weight)

    @staticmethod
    def get_block(block_type, dim, attention_head_dim, num_heads, dropout, act_fn):
        if block_type == "conformer":
            block = ConformerWrapper(
                dim=dim,
                dim_head=attention_head_dim,
                heads=num_heads,
                ff_mult=1,
                conv_expansion_factor=2,
                ff_dropout=dropout,
                attn_dropout=dropout,
                conv_dropout=dropout,
                conv_kernel_size=31,
            )
        elif block_type == "transformer":
            block = BasicTransformerBlock(
                dim=dim,
                num_attention_heads=num_heads,
                attention_head_dim=attention_head_dim,
                dropout=dropout,
                activation_fn=act_fn,
            )
        else:
            raise ValueError(f"Unknown block type {block_type}")

        return block

    def initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, nonlinearity="relu")

                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

            elif isinstance(m, nn.GroupNorm):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

            elif isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, nonlinearity="relu")

                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def forward(self, x, mask, mu, t, spks=None, cond=None):
        """Forward pass of the UNet1DConditional model.

        Args:
            x (torch.Tensor): shape (batch_size, in_channels, time)
            mask (_type_): shape (batch_size, 1, time)
            t (_type_): shape (batch_size)
            spks (_type_, optional): shape: (batch_size, condition_channels). Defaults to None.
            cond (_type_, optional): placeholder for future use. Defaults to None.

        Raises:
            ValueError: _description_
            ValueError: _description_

        Returns:
            _type_: _description_
        """

        t = self.time_embeddings(t)
        t = self.time_mlp(t)

        x = pack([x, mu], "b * t")[0]

        if spks is not None:
            spks = repeat(spks, "b c -> b c t", t=x.shape[-1])
            x = pack([x, spks], "b * t")[0]

        hiddens = []
        masks = [mask]
        for resnet, transformer_blocks, downsample in self.down_blocks:
            mask_down = masks[-1]
            x = resnet(x, mask_down, t)
            x = rearrange(x, "b c t -> b t c")
            mask_down = rearrange(mask_down, "b 1 t -> b t")
            for transformer_block in transformer_blocks:
                x = transformer_block(
                    hidden_states=x,
                    attention_mask=mask_down,
                    timestep=t,
                )
            x = rearrange(x, "b t c -> b c t")
            mask_down = rearrange(mask_down, "b t -> b 1 t")
            hiddens.append(x)  # Save hidden states for skip connections
            x = downsample(x * mask_down)
            masks.append(mask_down[:, :, ::2])

        masks = masks[:-1]
        mask_mid = masks[-1]

        for resnet, transformer_blocks in self.mid_blocks:
            x = resnet(x, mask_mid, t)
            x = rearrange(x, "b c t -> b t c")
            mask_mid = rearrange(mask_mid, "b 1 t -> b t")
            for transformer_block in transformer_blocks:
                x = transformer_block(
                    hidden_states=x,
                    attention_mask=mask_mid,
                    timestep=t,
                )
            x = rearrange(x, "b t c -> b c t")
            mask_mid = rearrange(mask_mid, "b t -> b 1 t")

        for resnet, transformer_blocks, upsample in self.up_blocks:
            mask_up = masks.pop()
            x = resnet(pack([x, hiddens.pop()], "b * t")[0], mask_up, t)
            x = rearrange(x, "b c t -> b t c")
            mask_up = rearrange(mask_up, "b 1 t -> b t")
            for transformer_block in transformer_blocks:
                x = transformer_block(
                    hidden_states=x,
                    attention_mask=mask_up,
                    timestep=t,
                )
            x = rearrange(x, "b t c -> b c t")
            mask_up = rearrange(mask_up, "b t -> b 1 t")
            x = upsample(x * mask_up)

        x = self.final_block(x, mask_up)
        output = self.final_proj(x * mask_up)

        return output * mask
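For orientation, a sketch of exercising this estimator in isolation (hypothetical, not part of the upload; `conformer`, `diffusers`, and `einops` must be importable, and the concrete sizes are assumptions). With `in_channels=240`, the packed input is x (80) plus mu (80) plus the time-broadcast speaker embedding (80), which is why `CausalConditionalCFM` defaults to 240:

import torch

est = Decoder(in_channels=240, out_channels=80, channels=(256, 256))
x = torch.randn(2, 80, 64)       # noisy mel at the current flow step
mu = torch.randn(2, 80, 64)      # encoder output
spks = torch.randn(2, 80)        # speaker embedding, broadcast over time
mask = torch.ones(2, 1, 64)
t = torch.rand(2)                # one flow-matching timestep per batch item
out = est(x, mask, mu, t, spks=spks)
print(out.shape)                 # torch.Size([2, 80, 64])

An even time length is the safe choice here: the non-final down block halves time with a stride-2 conv (and the mask with `[:, :, ::2]`), and the `ConvTranspose1d(4, 2, 1)` upsample doubles it back, so the skip connections line up.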
src/chatterbox/models/s3gen/matcha/flow_matching.py
ADDED
@@ -0,0 +1,129 @@
from abc import ABC

import torch
import torch.nn.functional as F

from .decoder import Decoder


class BASECFM(torch.nn.Module, ABC):
    def __init__(
        self,
        n_feats,
        cfm_params,
        n_spks=1,
        spk_emb_dim=128,
    ):
        super().__init__()
        self.n_feats = n_feats
        self.n_spks = n_spks
        self.spk_emb_dim = spk_emb_dim
        self.solver = cfm_params.solver
        if hasattr(cfm_params, "sigma_min"):
            self.sigma_min = cfm_params.sigma_min
        else:
            self.sigma_min = 1e-4

        self.estimator = None

    @torch.inference_mode()
    def forward(self, mu, mask, n_timesteps, temperature=1.0, spks=None, cond=None):
        """Forward diffusion

        Args:
            mu (torch.Tensor): output of encoder
                shape: (batch_size, n_feats, mel_timesteps)
            mask (torch.Tensor): output_mask
                shape: (batch_size, 1, mel_timesteps)
            n_timesteps (int): number of diffusion steps
            temperature (float, optional): temperature for scaling noise. Defaults to 1.0.
            spks (torch.Tensor, optional): speaker ids. Defaults to None.
                shape: (batch_size, spk_emb_dim)
            cond: Not used but kept for future purposes

        Returns:
            sample: generated mel-spectrogram
                shape: (batch_size, n_feats, mel_timesteps)
        """
        z = torch.randn_like(mu) * temperature
        t_span = torch.linspace(0, 1, n_timesteps + 1, device=mu.device)
        return self.solve_euler(z, t_span=t_span, mu=mu, mask=mask, spks=spks, cond=cond)

    def solve_euler(self, x, t_span, mu, mask, spks, cond):
        """
        Fixed euler solver for ODEs.
        Args:
            x (torch.Tensor): random noise
            t_span (torch.Tensor): n_timesteps interpolated
                shape: (n_timesteps + 1,)
            mu (torch.Tensor): output of encoder
                shape: (batch_size, n_feats, mel_timesteps)
            mask (torch.Tensor): output_mask
                shape: (batch_size, 1, mel_timesteps)
            spks (torch.Tensor, optional): speaker ids. Defaults to None.
                shape: (batch_size, spk_emb_dim)
            cond: Not used but kept for future purposes
        """
        t, _, dt = t_span[0], t_span[-1], t_span[1] - t_span[0]

        # I am storing this because I can later plot it by putting a debugger here and saving it to a file
        # Or in future might add like a return_all_steps flag
        sol = []

        for step in range(1, len(t_span)):
            dphi_dt = self.estimator(x, mask, mu, t, spks, cond)

            x = x + dt * dphi_dt
            t = t + dt
            sol.append(x)
            if step < len(t_span) - 1:
                dt = t_span[step + 1] - t

        return sol[-1]

    def compute_loss(self, x1, mask, mu, spks=None, cond=None):
        """Computes diffusion loss

        Args:
            x1 (torch.Tensor): Target
                shape: (batch_size, n_feats, mel_timesteps)
            mask (torch.Tensor): target mask
                shape: (batch_size, 1, mel_timesteps)
            mu (torch.Tensor): output of encoder
                shape: (batch_size, n_feats, mel_timesteps)
            spks (torch.Tensor, optional): speaker embedding. Defaults to None.
                shape: (batch_size, spk_emb_dim)

        Returns:
            loss: conditional flow matching loss
            y: conditional flow
                shape: (batch_size, n_feats, mel_timesteps)
        """
        b, _, t = mu.shape

        # random timestep
        t = torch.rand([b, 1, 1], device=mu.device, dtype=mu.dtype)
        # sample noise p(x_0)
        z = torch.randn_like(x1)

        y = (1 - (1 - self.sigma_min) * t) * z + t * x1
        u = x1 - (1 - self.sigma_min) * z

        loss = F.mse_loss(self.estimator(y, mask, mu, t.squeeze(), spks), u, reduction="sum") / (
            torch.sum(mask) * u.shape[1]
        )
        return loss, y


class CFM(BASECFM):
    def __init__(self, in_channels, out_channel, cfm_params, decoder_params, n_spks=1, spk_emb_dim=64):
        super().__init__(
            n_feats=in_channels,
            cfm_params=cfm_params,
            n_spks=n_spks,
            spk_emb_dim=spk_emb_dim,
        )

        in_channels = in_channels + (spk_emb_dim if n_spks > 1 else 0)
        # Just change the architecture of the estimator here
        self.estimator = Decoder(in_channels=in_channels, out_channels=out_channel, **decoder_params)
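Because `BASECFM` declares no abstract methods, the loss path can be checked directly (hypothetical sketch, not part of the upload; the `SimpleNamespace` stands in for `cfm_params` and the zero estimator is a stand-in for a real decoder). It builds the optimal-transport path y_t = (1 - (1 - sigma_min) * t) * x0 + t * x1 and regresses the estimator onto the constant target field u = x1 - (1 - sigma_min) * x0:

from types import SimpleNamespace
import torch

class _ZeroField(torch.nn.Module):
    # Hypothetical estimator matching the positional call in compute_loss.
    def forward(self, y, mask, mu, t, spks=None, cond=None):
        return torch.zeros_like(y)

cfm = BASECFM(n_feats=80, cfm_params=SimpleNamespace(solver="euler", sigma_min=1e-4))
cfm.estimator = _ZeroField()

x1 = torch.randn(2, 80, 50)      # target mels
mask = torch.ones(2, 1, 50)
mu = torch.randn(2, 80, 50)      # encoder output
loss, y = cfm.compute_loss(x1, mask, mu)
print(loss.item(), y.shape)      # scalar CFM loss, torch.Size([2, 80, 50])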
src/chatterbox/models/s3gen/matcha/text_encoder.py
ADDED
@@ -0,0 +1,413 @@
| 1 |
+
""" from https://github.com/jaywalnut310/glow-tts """
|
| 2 |
+
|
| 3 |
+
import math
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn as nn
|
| 7 |
+
from einops import rearrange
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def sequence_mask(length, max_length=None):
|
| 11 |
+
if max_length is None:
|
| 12 |
+
max_length = length.max()
|
| 13 |
+
x = torch.arange(max_length, dtype=length.dtype, device=length.device)
|
| 14 |
+
return x.unsqueeze(0) < length.unsqueeze(1)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class LayerNorm(nn.Module):
|
| 19 |
+
def __init__(self, channels, eps=1e-4):
|
| 20 |
+
super().__init__()
|
| 21 |
+
self.channels = channels
|
| 22 |
+
self.eps = eps
|
| 23 |
+
|
| 24 |
+
self.gamma = torch.nn.Parameter(torch.ones(channels))
|
| 25 |
+
self.beta = torch.nn.Parameter(torch.zeros(channels))
|
| 26 |
+
|
| 27 |
+
def forward(self, x):
|
| 28 |
+
n_dims = len(x.shape)
|
| 29 |
+
mean = torch.mean(x, 1, keepdim=True)
|
| 30 |
+
variance = torch.mean((x - mean) ** 2, 1, keepdim=True)
|
| 31 |
+
|
| 32 |
+
x = (x - mean) * torch.rsqrt(variance + self.eps)
|
| 33 |
+
|
| 34 |
+
shape = [1, -1] + [1] * (n_dims - 2)
|
| 35 |
+
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
|
| 36 |
+
return x
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class ConvReluNorm(nn.Module):
|
| 40 |
+
def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
|
| 41 |
+
super().__init__()
|
| 42 |
+
self.in_channels = in_channels
|
| 43 |
+
self.hidden_channels = hidden_channels
|
| 44 |
+
self.out_channels = out_channels
|
| 45 |
+
self.kernel_size = kernel_size
|
| 46 |
+
self.n_layers = n_layers
|
| 47 |
+
self.p_dropout = p_dropout
|
| 48 |
+
|
| 49 |
+
self.conv_layers = torch.nn.ModuleList()
|
| 50 |
+
self.norm_layers = torch.nn.ModuleList()
|
| 51 |
+
self.conv_layers.append(torch.nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
|
| 52 |
+
self.norm_layers.append(LayerNorm(hidden_channels))
|
| 53 |
+
self.relu_drop = torch.nn.Sequential(torch.nn.ReLU(), torch.nn.Dropout(p_dropout))
|
| 54 |
+
for _ in range(n_layers - 1):
|
| 55 |
+
self.conv_layers.append(
|
| 56 |
+
torch.nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2)
|
| 57 |
+
)
|
| 58 |
+
self.norm_layers.append(LayerNorm(hidden_channels))
|
| 59 |
+
self.proj = torch.nn.Conv1d(hidden_channels, out_channels, 1)
|
| 60 |
+
self.proj.weight.data.zero_()
|
| 61 |
+
self.proj.bias.data.zero_()
|
| 62 |
+
|
| 63 |
+
def forward(self, x, x_mask):
|
| 64 |
+
x_org = x
|
| 65 |
+
for i in range(self.n_layers):
|
| 66 |
+
x = self.conv_layers[i](x * x_mask)
|
| 67 |
+
x = self.norm_layers[i](x)
|
| 68 |
+
x = self.relu_drop(x)
|
| 69 |
+
x = x_org + self.proj(x)
|
| 70 |
+
return x * x_mask
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
class DurationPredictor(nn.Module):
|
| 74 |
+
def __init__(self, in_channels, filter_channels, kernel_size, p_dropout):
|
| 75 |
+
super().__init__()
|
| 76 |
+
self.in_channels = in_channels
|
| 77 |
+
self.filter_channels = filter_channels
|
| 78 |
+
self.p_dropout = p_dropout
|
| 79 |
+
|
| 80 |
+
self.drop = torch.nn.Dropout(p_dropout)
|
| 81 |
+
self.conv_1 = torch.nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
|
| 82 |
+
self.norm_1 = LayerNorm(filter_channels)
|
| 83 |
+
self.conv_2 = torch.nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
|
| 84 |
+
self.norm_2 = LayerNorm(filter_channels)
|
| 85 |
+
self.proj = torch.nn.Conv1d(filter_channels, 1, 1)
|
| 86 |
+
|
| 87 |
+
def forward(self, x, x_mask):
|
| 88 |
+
x = self.conv_1(x * x_mask)
|
| 89 |
+
x = torch.relu(x)
|
| 90 |
+
x = self.norm_1(x)
|
| 91 |
+
x = self.drop(x)
|
| 92 |
+
x = self.conv_2(x * x_mask)
|
| 93 |
+
x = torch.relu(x)
|
| 94 |
+
x = self.norm_2(x)
|
| 95 |
+
x = self.drop(x)
|
| 96 |
+
x = self.proj(x * x_mask)
|
| 97 |
+
return x * x_mask
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
class RotaryPositionalEmbeddings(nn.Module):
|
| 101 |
+
"""
|
| 102 |
+
## RoPE module
|
| 103 |
+
|
| 104 |
+
Rotary encoding transforms pairs of features by rotating in the 2D plane.
|
| 105 |
+
That is, it organizes the $d$ features as $\frac{d}{2}$ pairs.
|
| 106 |
+
Each pair can be considered a coordinate in a 2D plane, and the encoding will rotate it
|
| 107 |
+
by an angle depending on the position of the token.
|
| 108 |
+
"""
|
| 109 |
+
|
| 110 |
+
def __init__(self, d: int, base: int = 10_000):
|
| 111 |
+
r"""
|
| 112 |
+
* `d` is the number of features $d$
|
| 113 |
+
* `base` is the constant used for calculating $\Theta$
|
| 114 |
+
"""
|
| 115 |
+
super().__init__()
|
| 116 |
+
|
| 117 |
+
self.base = base
|
| 118 |
+
self.d = int(d)
|
| 119 |
+
self.cos_cached = None
|
| 120 |
+
self.sin_cached = None
|
| 121 |
+
|
| 122 |
+
def _build_cache(self, x: torch.Tensor):
|
| 123 |
+
r"""
|
| 124 |
+
Cache $\cos$ and $\sin$ values
|
| 125 |
+
"""
|
| 126 |
+
# Return if cache is already built
|
| 127 |
+
if self.cos_cached is not None and x.shape[0] <= self.cos_cached.shape[0]:
|
| 128 |
+
return
|
| 129 |
+
|
| 130 |
+
# Get sequence length
|
| 131 |
+
seq_len = x.shape[0]
|
| 132 |
+
|
| 133 |
+
# $\Theta = {\theta_i = 10000^{-\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$
|
| 134 |
+
theta = 1.0 / (self.base ** (torch.arange(0, self.d, 2).float() / self.d)).to(x.device)
|
| 135 |
+
|
| 136 |
+
# Create position indexes `[0, 1, ..., seq_len - 1]`
|
| 137 |
+
seq_idx = torch.arange(seq_len, device=x.device).float().to(x.device)
|
| 138 |
+
|
| 139 |
+
# Calculate the product of position index and $\theta_i$
|
| 140 |
+
idx_theta = torch.einsum("n,d->nd", seq_idx, theta)
|
| 141 |
+
|
| 142 |
+
# Concatenate so that for row $m$ we have
|
| 143 |
+
# $[m \theta_0, m \theta_1, ..., m \theta_{\frac{d}{2}}, m \theta_0, m \theta_1, ..., m \theta_{\frac{d}{2}}]$
|
| 144 |
+
idx_theta2 = torch.cat([idx_theta, idx_theta], dim=1)
|
| 145 |
+
|
| 146 |
+
# Cache them
|
| 147 |
+
self.cos_cached = idx_theta2.cos()[:, None, None, :]
|
| 148 |
+
self.sin_cached = idx_theta2.sin()[:, None, None, :]
|
| 149 |
+
|
| 150 |
+
def _neg_half(self, x: torch.Tensor):
|
| 151 |
+
# $\frac{d}{2}$
|
| 152 |
+
d_2 = self.d // 2
|
| 153 |
+
|
| 154 |
+
# Calculate $[-x^{(\frac{d}{2} + 1)}, -x^{(\frac{d}{2} + 2)}, ..., -x^{(d)}, x^{(1)}, x^{(2)}, ..., x^{(\frac{d}{2})}]$
|
| 155 |
+
return torch.cat([-x[:, :, :, d_2:], x[:, :, :, :d_2]], dim=-1)
|
| 156 |
+
|
| 157 |
+
def forward(self, x: torch.Tensor):
|
| 158 |
+
"""
|
| 159 |
+
* `x` is the Tensor at the head of a key or a query with shape `[seq_len, batch_size, n_heads, d]`
|
| 160 |
+
"""
|
| 161 |
+
# Cache $\cos$ and $\sin$ values
|
| 162 |
+
x = rearrange(x, "b h t d -> t b h d")
|
| 163 |
+
|
| 164 |
+
self._build_cache(x)
|
| 165 |
+
|
| 166 |
+
# Split the features, we can choose to apply rotary embeddings only to a partial set of features.
|
| 167 |
+
x_rope, x_pass = x[..., : self.d], x[..., self.d :]
|
| 168 |
+
|
| 169 |
+
# Calculate
|
| 170 |
+
# $[-x^{(\frac{d}{2} + 1)}, -x^{(\frac{d}{2} + 2)}, ..., -x^{(d)}, x^{(1)}, x^{(2)}, ..., x^{(\frac{d}{2})}]$
|
| 171 |
+
neg_half_x = self._neg_half(x_rope)
|
| 172 |
+
|
| 173 |
+
x_rope = (x_rope * self.cos_cached[: x.shape[0]]) + (neg_half_x * self.sin_cached[: x.shape[0]])
|
| 174 |
+
|
| 175 |
+
return rearrange(torch.cat((x_rope, x_pass), dim=-1), "t b h d -> b h t d")
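A minimal usage sketch of the module above (not part of the diff; shapes are assumed for illustration, the per-head feature size just has to be even):

import torch

rope = RotaryPositionalEmbeddings(d=32)    # d = per-head feature size, must be even
q = torch.randn(2, 4, 50, 32)              # (batch, n_heads, seq_len, d), as produced in attention()
q_rot = rope(q)                            # same shape; token positions are encoded as rotations
assert q_rot.shape == q.shape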
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
class MultiHeadAttention(nn.Module):
|
| 179 |
+
def __init__(
|
| 180 |
+
self,
|
| 181 |
+
channels,
|
| 182 |
+
out_channels,
|
| 183 |
+
n_heads,
|
| 184 |
+
heads_share=True,
|
| 185 |
+
p_dropout=0.0,
|
| 186 |
+
proximal_bias=False,
|
| 187 |
+
proximal_init=False,
|
| 188 |
+
):
|
| 189 |
+
super().__init__()
|
| 190 |
+
assert channels % n_heads == 0
|
| 191 |
+
|
| 192 |
+
self.channels = channels
|
| 193 |
+
self.out_channels = out_channels
|
| 194 |
+
self.n_heads = n_heads
|
| 195 |
+
self.heads_share = heads_share
|
| 196 |
+
self.proximal_bias = proximal_bias
|
| 197 |
+
self.p_dropout = p_dropout
|
| 198 |
+
self.attn = None
|
| 199 |
+
|
| 200 |
+
self.k_channels = channels // n_heads
|
| 201 |
+
self.conv_q = torch.nn.Conv1d(channels, channels, 1)
|
| 202 |
+
self.conv_k = torch.nn.Conv1d(channels, channels, 1)
|
| 203 |
+
self.conv_v = torch.nn.Conv1d(channels, channels, 1)
|
| 204 |
+
|
| 205 |
+
# from https://nn.labml.ai/transformers/rope/index.html
|
| 206 |
+
self.query_rotary_pe = RotaryPositionalEmbeddings(self.k_channels * 0.5)  # RoPE is applied to half of each head's channels
|
| 207 |
+
self.key_rotary_pe = RotaryPositionalEmbeddings(self.k_channels * 0.5)
|
| 208 |
+
|
| 209 |
+
self.conv_o = torch.nn.Conv1d(channels, out_channels, 1)
|
| 210 |
+
self.drop = torch.nn.Dropout(p_dropout)
|
| 211 |
+
|
| 212 |
+
torch.nn.init.xavier_uniform_(self.conv_q.weight)
|
| 213 |
+
torch.nn.init.xavier_uniform_(self.conv_k.weight)
|
| 214 |
+
if proximal_init:
|
| 215 |
+
self.conv_k.weight.data.copy_(self.conv_q.weight.data)
|
| 216 |
+
self.conv_k.bias.data.copy_(self.conv_q.bias.data)
|
| 217 |
+
torch.nn.init.xavier_uniform_(self.conv_v.weight)
|
| 218 |
+
|
| 219 |
+
def forward(self, x, c, attn_mask=None):
|
| 220 |
+
q = self.conv_q(x)
|
| 221 |
+
k = self.conv_k(c)
|
| 222 |
+
v = self.conv_v(c)
|
| 223 |
+
|
| 224 |
+
x, self.attn = self.attention(q, k, v, mask=attn_mask)
|
| 225 |
+
|
| 226 |
+
x = self.conv_o(x)
|
| 227 |
+
return x
|
| 228 |
+
|
| 229 |
+
def attention(self, query, key, value, mask=None):
|
| 230 |
+
b, d, t_s, t_t = (*key.size(), query.size(2))
|
| 231 |
+
query = rearrange(query, "b (h c) t-> b h t c", h=self.n_heads)
|
| 232 |
+
key = rearrange(key, "b (h c) t-> b h t c", h=self.n_heads)
|
| 233 |
+
value = rearrange(value, "b (h c) t-> b h t c", h=self.n_heads)
|
| 234 |
+
|
| 235 |
+
query = self.query_rotary_pe(query)
|
| 236 |
+
key = self.key_rotary_pe(key)
|
| 237 |
+
|
| 238 |
+
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.k_channels)
|
| 239 |
+
|
| 240 |
+
if self.proximal_bias:
|
| 241 |
+
assert t_s == t_t, "Proximal bias is only available for self-attention."
|
| 242 |
+
scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
|
| 243 |
+
if mask is not None:
|
| 244 |
+
scores = scores.masked_fill(mask == 0, -1e4)
|
| 245 |
+
p_attn = torch.nn.functional.softmax(scores, dim=-1)
|
| 246 |
+
p_attn = self.drop(p_attn)
|
| 247 |
+
output = torch.matmul(p_attn, value)
|
| 248 |
+
output = output.transpose(2, 3).contiguous().view(b, d, t_t)
|
| 249 |
+
return output, p_attn
|
| 250 |
+
|
| 251 |
+
@staticmethod
|
| 252 |
+
def _attention_bias_proximal(length):
|
| 253 |
+
r = torch.arange(length, dtype=torch.float32)
|
| 254 |
+
diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
|
| 255 |
+
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
class FFN(nn.Module):
|
| 259 |
+
def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0.0):
|
| 260 |
+
super().__init__()
|
| 261 |
+
self.in_channels = in_channels
|
| 262 |
+
self.out_channels = out_channels
|
| 263 |
+
self.filter_channels = filter_channels
|
| 264 |
+
self.kernel_size = kernel_size
|
| 265 |
+
self.p_dropout = p_dropout
|
| 266 |
+
|
| 267 |
+
self.conv_1 = torch.nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
|
| 268 |
+
self.conv_2 = torch.nn.Conv1d(filter_channels, out_channels, kernel_size, padding=kernel_size // 2)
|
| 269 |
+
self.drop = torch.nn.Dropout(p_dropout)
|
| 270 |
+
|
| 271 |
+
def forward(self, x, x_mask):
|
| 272 |
+
x = self.conv_1(x * x_mask)
|
| 273 |
+
x = torch.relu(x)
|
| 274 |
+
x = self.drop(x)
|
| 275 |
+
x = self.conv_2(x * x_mask)
|
| 276 |
+
return x * x_mask
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
class Encoder(nn.Module):
|
| 280 |
+
def __init__(
|
| 281 |
+
self,
|
| 282 |
+
hidden_channels,
|
| 283 |
+
filter_channels,
|
| 284 |
+
n_heads,
|
| 285 |
+
n_layers,
|
| 286 |
+
kernel_size=1,
|
| 287 |
+
p_dropout=0.0,
|
| 288 |
+
**kwargs,
|
| 289 |
+
):
|
| 290 |
+
super().__init__()
|
| 291 |
+
self.hidden_channels = hidden_channels
|
| 292 |
+
self.filter_channels = filter_channels
|
| 293 |
+
self.n_heads = n_heads
|
| 294 |
+
self.n_layers = n_layers
|
| 295 |
+
self.kernel_size = kernel_size
|
| 296 |
+
self.p_dropout = p_dropout
|
| 297 |
+
|
| 298 |
+
self.drop = torch.nn.Dropout(p_dropout)
|
| 299 |
+
self.attn_layers = torch.nn.ModuleList()
|
| 300 |
+
self.norm_layers_1 = torch.nn.ModuleList()
|
| 301 |
+
self.ffn_layers = torch.nn.ModuleList()
|
| 302 |
+
self.norm_layers_2 = torch.nn.ModuleList()
|
| 303 |
+
for _ in range(self.n_layers):
|
| 304 |
+
self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
|
| 305 |
+
self.norm_layers_1.append(LayerNorm(hidden_channels))
|
| 306 |
+
self.ffn_layers.append(
|
| 307 |
+
FFN(
|
| 308 |
+
hidden_channels,
|
| 309 |
+
hidden_channels,
|
| 310 |
+
filter_channels,
|
| 311 |
+
kernel_size,
|
| 312 |
+
p_dropout=p_dropout,
|
| 313 |
+
)
|
| 314 |
+
)
|
| 315 |
+
self.norm_layers_2.append(LayerNorm(hidden_channels))
|
| 316 |
+
|
| 317 |
+
def forward(self, x, x_mask):
|
| 318 |
+
attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
|
| 319 |
+
for i in range(self.n_layers):
|
| 320 |
+
x = x * x_mask
|
| 321 |
+
y = self.attn_layers[i](x, x, attn_mask)
|
| 322 |
+
y = self.drop(y)
|
| 323 |
+
x = self.norm_layers_1[i](x + y)
|
| 324 |
+
y = self.ffn_layers[i](x, x_mask)
|
| 325 |
+
y = self.drop(y)
|
| 326 |
+
x = self.norm_layers_2[i](x + y)
|
| 327 |
+
x = x * x_mask
|
| 328 |
+
return x
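A shape-level sketch of the encoder defined above (hyperparameters are assumed for illustration, not taken from any config in this diff):

import torch

enc = Encoder(hidden_channels=192, filter_channels=768, n_heads=2, n_layers=2, kernel_size=3, p_dropout=0.1)
x = torch.randn(1, 192, 40)      # (batch, hidden_channels, text_length)
x_mask = torch.ones(1, 1, 40)    # 1 = real token, 0 = padding
y = enc(x, x_mask)               # (1, 192, 40)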
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
class TextEncoder(nn.Module):
|
| 332 |
+
def __init__(
|
| 333 |
+
self,
|
| 334 |
+
encoder_type,
|
| 335 |
+
encoder_params,
|
| 336 |
+
duration_predictor_params,
|
| 337 |
+
n_vocab,
|
| 338 |
+
n_spks=1,
|
| 339 |
+
spk_emb_dim=128,
|
| 340 |
+
):
|
| 341 |
+
super().__init__()
|
| 342 |
+
self.encoder_type = encoder_type
|
| 343 |
+
self.n_vocab = n_vocab
|
| 344 |
+
self.n_feats = encoder_params.n_feats
|
| 345 |
+
self.n_channels = encoder_params.n_channels
|
| 346 |
+
self.spk_emb_dim = spk_emb_dim
|
| 347 |
+
self.n_spks = n_spks
|
| 348 |
+
|
| 349 |
+
self.emb = torch.nn.Embedding(n_vocab, self.n_channels)
|
| 350 |
+
torch.nn.init.normal_(self.emb.weight, 0.0, self.n_channels**-0.5)
|
| 351 |
+
|
| 352 |
+
if encoder_params.prenet:
|
| 353 |
+
self.prenet = ConvReluNorm(
|
| 354 |
+
self.n_channels,
|
| 355 |
+
self.n_channels,
|
| 356 |
+
self.n_channels,
|
| 357 |
+
kernel_size=5,
|
| 358 |
+
n_layers=3,
|
| 359 |
+
p_dropout=0.5,
|
| 360 |
+
)
|
| 361 |
+
else:
|
| 362 |
+
self.prenet = lambda x, x_mask: x
|
| 363 |
+
|
| 364 |
+
self.encoder = Encoder(
|
| 365 |
+
encoder_params.n_channels + (spk_emb_dim if n_spks > 1 else 0),
|
| 366 |
+
encoder_params.filter_channels,
|
| 367 |
+
encoder_params.n_heads,
|
| 368 |
+
encoder_params.n_layers,
|
| 369 |
+
encoder_params.kernel_size,
|
| 370 |
+
encoder_params.p_dropout,
|
| 371 |
+
)
|
| 372 |
+
|
| 373 |
+
self.proj_m = torch.nn.Conv1d(self.n_channels + (spk_emb_dim if n_spks > 1 else 0), self.n_feats, 1)
|
| 374 |
+
self.proj_w = DurationPredictor(
|
| 375 |
+
self.n_channels + (spk_emb_dim if n_spks > 1 else 0),
|
| 376 |
+
duration_predictor_params.filter_channels_dp,
|
| 377 |
+
duration_predictor_params.kernel_size,
|
| 378 |
+
duration_predictor_params.p_dropout,
|
| 379 |
+
)
|
| 380 |
+
|
| 381 |
+
def forward(self, x, x_lengths, spks=None):
|
| 382 |
+
"""Run forward pass to the transformer based encoder and duration predictor
|
| 383 |
+
|
| 384 |
+
Args:
|
| 385 |
+
x (torch.Tensor): text input
|
| 386 |
+
shape: (batch_size, max_text_length)
|
| 387 |
+
x_lengths (torch.Tensor): text input lengths
|
| 388 |
+
shape: (batch_size,)
|
| 389 |
+
spks (torch.Tensor, optional): speaker ids. Defaults to None.
|
| 390 |
+
shape: (batch_size,)
|
| 391 |
+
|
| 392 |
+
Returns:
|
| 393 |
+
mu (torch.Tensor): average output of the encoder
|
| 394 |
+
shape: (batch_size, n_feats, max_text_length)
|
| 395 |
+
logw (torch.Tensor): log duration predicted by the duration predictor
|
| 396 |
+
shape: (batch_size, 1, max_text_length)
|
| 397 |
+
x_mask (torch.Tensor): mask for the text input
|
| 398 |
+
shape: (batch_size, 1, max_text_length)
|
| 399 |
+
"""
|
| 400 |
+
x = self.emb(x) * math.sqrt(self.n_channels)
|
| 401 |
+
x = torch.transpose(x, 1, -1)
|
| 402 |
+
x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
|
| 403 |
+
|
| 404 |
+
x = self.prenet(x, x_mask)
|
| 405 |
+
if self.n_spks > 1:
|
| 406 |
+
x = torch.cat([x, spks.unsqueeze(-1).repeat(1, 1, x.shape[-1])], dim=1)
|
| 407 |
+
x = self.encoder(x, x_mask)
|
| 408 |
+
mu = self.proj_m(x) * x_mask
|
| 409 |
+
|
| 410 |
+
x_dp = torch.detach(x)
|
| 411 |
+
logw = self.proj_w(x_dp, x_mask)
|
| 412 |
+
|
| 413 |
+
return mu, logw, x_mask
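For context, the predicted log-durations returned here are typically consumed by the caller roughly as follows in Matcha-style models (illustrative only; this post-processing is not part of this file):

w = torch.exp(logw) * x_mask                                              # per-token durations in frames
y_lengths = torch.clamp_min(torch.sum(torch.ceil(w), [1, 2]), 1).long()  # total output frames per utterance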
|
src/chatterbox/models/s3gen/matcha/transformer.py
ADDED
|
@@ -0,0 +1,316 @@
| 1 |
+
from typing import Any, Dict, Optional
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
from diffusers.models.attention import (
|
| 6 |
+
GEGLU,
|
| 7 |
+
GELU,
|
| 8 |
+
AdaLayerNorm,
|
| 9 |
+
AdaLayerNormZero,
|
| 10 |
+
ApproximateGELU,
|
| 11 |
+
)
|
| 12 |
+
from diffusers.models.attention_processor import Attention
|
| 13 |
+
from diffusers.models.lora import LoRACompatibleLinear
|
| 14 |
+
from diffusers.utils.torch_utils import maybe_allow_in_graph
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class SnakeBeta(nn.Module):
|
| 18 |
+
"""
|
| 19 |
+
A modified Snake activation that uses separate trainable parameters for the frequency and magnitude of the periodic component, preceded by a linear projection.
|
| 20 |
+
Shape:
|
| 21 |
+
- Input: (B, C, T)
|
| 22 |
+
- Output: (B, C, T), same shape as the input
|
| 23 |
+
Parameters:
|
| 24 |
+
- alpha - trainable parameter that controls frequency
|
| 25 |
+
- beta - trainable parameter that controls magnitude
|
| 26 |
+
References:
|
| 27 |
+
- This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
|
| 28 |
+
https://arxiv.org/abs/2006.08195
|
| 29 |
+
Examples:
|
| 30 |
+
>>> a1 = SnakeBeta(256, 256)
|
| 31 |
+
>>> x = torch.randn(256)
|
| 32 |
+
>>> x = a1(x)
|
| 33 |
+
"""
|
| 34 |
+
|
| 35 |
+
def __init__(self, in_features, out_features, alpha=1.0, alpha_trainable=True, alpha_logscale=True):
|
| 36 |
+
"""
|
| 37 |
+
Initialization.
|
| 38 |
+
INPUT:
|
| 39 |
+
- in_features: shape of the input
|
| 40 |
+
- alpha - trainable parameter that controls frequency
|
| 41 |
+
- beta - trainable parameter that controls magnitude
|
| 42 |
+
alpha is initialized to 1 by default, higher values = higher-frequency.
|
| 43 |
+
beta is initialized to 1 by default, higher values = higher-magnitude.
|
| 44 |
+
alpha will be trained along with the rest of your model.
|
| 45 |
+
"""
|
| 46 |
+
super().__init__()
|
| 47 |
+
self.in_features = out_features if isinstance(out_features, list) else [out_features]
|
| 48 |
+
self.proj = LoRACompatibleLinear(in_features, out_features)
|
| 49 |
+
|
| 50 |
+
# initialize alpha
|
| 51 |
+
self.alpha_logscale = alpha_logscale
|
| 52 |
+
if self.alpha_logscale: # log scale alphas initialized to zeros
|
| 53 |
+
self.alpha = nn.Parameter(torch.zeros(self.in_features) * alpha)
|
| 54 |
+
self.beta = nn.Parameter(torch.zeros(self.in_features) * alpha)
|
| 55 |
+
else: # linear scale alphas initialized to ones
|
| 56 |
+
self.alpha = nn.Parameter(torch.ones(self.in_features) * alpha)
|
| 57 |
+
self.beta = nn.Parameter(torch.ones(self.in_features) * alpha)
|
| 58 |
+
|
| 59 |
+
self.alpha.requires_grad = alpha_trainable
|
| 60 |
+
self.beta.requires_grad = alpha_trainable
|
| 61 |
+
|
| 62 |
+
self.no_div_by_zero = 0.000000001
|
| 63 |
+
|
| 64 |
+
def forward(self, x):
|
| 65 |
+
"""
|
| 66 |
+
Forward pass of the function.
|
| 67 |
+
Applies the function to the input elementwise.
|
| 68 |
+
SnakeBeta ∶= x + 1/b * sin^2 (xa)
|
| 69 |
+
"""
|
| 70 |
+
x = self.proj(x)
|
| 71 |
+
if self.alpha_logscale:
|
| 72 |
+
alpha = torch.exp(self.alpha)
|
| 73 |
+
beta = torch.exp(self.beta)
|
| 74 |
+
else:
|
| 75 |
+
alpha = self.alpha
|
| 76 |
+
beta = self.beta
|
| 77 |
+
|
| 78 |
+
x = x + (1.0 / (beta + self.no_div_by_zero)) * torch.pow(torch.sin(x * alpha), 2)
|
| 79 |
+
|
| 80 |
+
return x
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
class FeedForward(nn.Module):
|
| 84 |
+
r"""
|
| 85 |
+
A feed-forward layer.
|
| 86 |
+
|
| 87 |
+
Parameters:
|
| 88 |
+
dim (`int`): The number of channels in the input.
|
| 89 |
+
dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
|
| 90 |
+
mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
|
| 91 |
+
dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
|
| 92 |
+
activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
|
| 93 |
+
final_dropout (`bool` *optional*, defaults to False): Apply a final dropout.
|
| 94 |
+
"""
|
| 95 |
+
|
| 96 |
+
def __init__(
|
| 97 |
+
self,
|
| 98 |
+
dim: int,
|
| 99 |
+
dim_out: Optional[int] = None,
|
| 100 |
+
mult: int = 4,
|
| 101 |
+
dropout: float = 0.0,
|
| 102 |
+
activation_fn: str = "geglu",
|
| 103 |
+
final_dropout: bool = False,
|
| 104 |
+
):
|
| 105 |
+
super().__init__()
|
| 106 |
+
inner_dim = int(dim * mult)
|
| 107 |
+
dim_out = dim_out if dim_out is not None else dim
|
| 108 |
+
|
| 109 |
+
if activation_fn == "gelu":
|
| 110 |
+
act_fn = GELU(dim, inner_dim)
|
| 111 |
+
if activation_fn == "gelu-approximate":
|
| 112 |
+
act_fn = GELU(dim, inner_dim, approximate="tanh")
|
| 113 |
+
elif activation_fn == "geglu":
|
| 114 |
+
act_fn = GEGLU(dim, inner_dim)
|
| 115 |
+
elif activation_fn == "geglu-approximate":
|
| 116 |
+
act_fn = ApproximateGELU(dim, inner_dim)
|
| 117 |
+
elif activation_fn == "snakebeta":
|
| 118 |
+
act_fn = SnakeBeta(dim, inner_dim)
|
| 119 |
+
|
| 120 |
+
self.net = nn.ModuleList([])
|
| 121 |
+
# project in
|
| 122 |
+
self.net.append(act_fn)
|
| 123 |
+
# project dropout
|
| 124 |
+
self.net.append(nn.Dropout(dropout))
|
| 125 |
+
# project out
|
| 126 |
+
self.net.append(LoRACompatibleLinear(inner_dim, dim_out))
|
| 127 |
+
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
|
| 128 |
+
if final_dropout:
|
| 129 |
+
self.net.append(nn.Dropout(dropout))
|
| 130 |
+
|
| 131 |
+
def forward(self, hidden_states):
|
| 132 |
+
for module in self.net:
|
| 133 |
+
hidden_states = module(hidden_states)
|
| 134 |
+
return hidden_states
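A quick sketch of the feed-forward layer above with the snakebeta activation (dimensions are assumed; it relies on the diffusers classes imported at the top of the file):

import torch

ff = FeedForward(dim=256, mult=4, activation_fn="snakebeta")
h = torch.randn(2, 50, 256)      # (batch, time, channels)
out = ff(h)                      # (2, 50, 256)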
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
@maybe_allow_in_graph
|
| 138 |
+
class BasicTransformerBlock(nn.Module):
|
| 139 |
+
r"""
|
| 140 |
+
A basic Transformer block.
|
| 141 |
+
|
| 142 |
+
Parameters:
|
| 143 |
+
dim (`int`): The number of channels in the input and output.
|
| 144 |
+
num_attention_heads (`int`): The number of heads to use for multi-head attention.
|
| 145 |
+
attention_head_dim (`int`): The number of channels in each head.
|
| 146 |
+
dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
|
| 147 |
+
cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
|
| 148 |
+
only_cross_attention (`bool`, *optional*):
|
| 149 |
+
Whether to use only cross-attention layers. In this case two cross attention layers are used.
|
| 150 |
+
double_self_attention (`bool`, *optional*):
|
| 151 |
+
Whether to use two self-attention layers. In this case no cross attention layers are used.
|
| 152 |
+
activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
|
| 153 |
+
num_embeds_ada_norm (:
|
| 154 |
+
obj: `int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`.
|
| 155 |
+
attention_bias (:
|
| 156 |
+
obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter.
|
| 157 |
+
"""
|
| 158 |
+
|
| 159 |
+
def __init__(
|
| 160 |
+
self,
|
| 161 |
+
dim: int,
|
| 162 |
+
num_attention_heads: int,
|
| 163 |
+
attention_head_dim: int,
|
| 164 |
+
dropout=0.0,
|
| 165 |
+
cross_attention_dim: Optional[int] = None,
|
| 166 |
+
activation_fn: str = "geglu",
|
| 167 |
+
num_embeds_ada_norm: Optional[int] = None,
|
| 168 |
+
attention_bias: bool = False,
|
| 169 |
+
only_cross_attention: bool = False,
|
| 170 |
+
double_self_attention: bool = False,
|
| 171 |
+
upcast_attention: bool = False,
|
| 172 |
+
norm_elementwise_affine: bool = True,
|
| 173 |
+
norm_type: str = "layer_norm",
|
| 174 |
+
final_dropout: bool = False,
|
| 175 |
+
):
|
| 176 |
+
super().__init__()
|
| 177 |
+
self.only_cross_attention = only_cross_attention
|
| 178 |
+
|
| 179 |
+
self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
|
| 180 |
+
self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
|
| 181 |
+
|
| 182 |
+
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
|
| 183 |
+
raise ValueError(
|
| 184 |
+
f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
|
| 185 |
+
f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
|
| 186 |
+
)
|
| 187 |
+
|
| 188 |
+
# Define 3 blocks. Each block has its own normalization layer.
|
| 189 |
+
# 1. Self-Attn
|
| 190 |
+
if self.use_ada_layer_norm:
|
| 191 |
+
self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
|
| 192 |
+
elif self.use_ada_layer_norm_zero:
|
| 193 |
+
self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
|
| 194 |
+
else:
|
| 195 |
+
self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
|
| 196 |
+
self.attn1 = Attention(
|
| 197 |
+
query_dim=dim,
|
| 198 |
+
heads=num_attention_heads,
|
| 199 |
+
dim_head=attention_head_dim,
|
| 200 |
+
dropout=dropout,
|
| 201 |
+
bias=attention_bias,
|
| 202 |
+
cross_attention_dim=cross_attention_dim if only_cross_attention else None,
|
| 203 |
+
upcast_attention=upcast_attention,
|
| 204 |
+
)
|
| 205 |
+
|
| 206 |
+
# 2. Cross-Attn
|
| 207 |
+
if cross_attention_dim is not None or double_self_attention:
|
| 208 |
+
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
|
| 209 |
+
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
|
| 210 |
+
# the second cross attention block.
|
| 211 |
+
self.norm2 = (
|
| 212 |
+
AdaLayerNorm(dim, num_embeds_ada_norm)
|
| 213 |
+
if self.use_ada_layer_norm
|
| 214 |
+
else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
|
| 215 |
+
)
|
| 216 |
+
self.attn2 = Attention(
|
| 217 |
+
query_dim=dim,
|
| 218 |
+
cross_attention_dim=cross_attention_dim if not double_self_attention else None,
|
| 219 |
+
heads=num_attention_heads,
|
| 220 |
+
dim_head=attention_head_dim,
|
| 221 |
+
dropout=dropout,
|
| 222 |
+
bias=attention_bias,
|
| 223 |
+
upcast_attention=upcast_attention,
|
| 224 |
+
# scale_qk=False, # uncomment this to not to use flash attention
|
| 225 |
+
) # is self-attn if encoder_hidden_states is none
|
| 226 |
+
else:
|
| 227 |
+
self.norm2 = None
|
| 228 |
+
self.attn2 = None
|
| 229 |
+
|
| 230 |
+
# 3. Feed-forward
|
| 231 |
+
self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
|
| 232 |
+
self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)
|
| 233 |
+
|
| 234 |
+
# let chunk size default to None
|
| 235 |
+
self._chunk_size = None
|
| 236 |
+
self._chunk_dim = 0
|
| 237 |
+
|
| 238 |
+
def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
|
| 239 |
+
# Sets chunk feed-forward
|
| 240 |
+
self._chunk_size = chunk_size
|
| 241 |
+
self._chunk_dim = dim
|
| 242 |
+
|
| 243 |
+
def forward(
|
| 244 |
+
self,
|
| 245 |
+
hidden_states: torch.FloatTensor,
|
| 246 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
| 247 |
+
encoder_hidden_states: Optional[torch.FloatTensor] = None,
|
| 248 |
+
encoder_attention_mask: Optional[torch.FloatTensor] = None,
|
| 249 |
+
timestep: Optional[torch.LongTensor] = None,
|
| 250 |
+
cross_attention_kwargs: Dict[str, Any] = None,
|
| 251 |
+
class_labels: Optional[torch.LongTensor] = None,
|
| 252 |
+
):
|
| 253 |
+
# Notice that normalization is always applied before the real computation in the following blocks.
|
| 254 |
+
# 1. Self-Attention
|
| 255 |
+
if self.use_ada_layer_norm:
|
| 256 |
+
norm_hidden_states = self.norm1(hidden_states, timestep)
|
| 257 |
+
elif self.use_ada_layer_norm_zero:
|
| 258 |
+
norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
|
| 259 |
+
hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
|
| 260 |
+
)
|
| 261 |
+
else:
|
| 262 |
+
norm_hidden_states = self.norm1(hidden_states)
|
| 263 |
+
|
| 264 |
+
cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
|
| 265 |
+
|
| 266 |
+
attn_output = self.attn1(
|
| 267 |
+
norm_hidden_states,
|
| 268 |
+
encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
|
| 269 |
+
attention_mask=encoder_attention_mask if self.only_cross_attention else attention_mask,
|
| 270 |
+
**cross_attention_kwargs,
|
| 271 |
+
)
|
| 272 |
+
if self.use_ada_layer_norm_zero:
|
| 273 |
+
attn_output = gate_msa.unsqueeze(1) * attn_output
|
| 274 |
+
hidden_states = attn_output + hidden_states
|
| 275 |
+
|
| 276 |
+
# 2. Cross-Attention
|
| 277 |
+
if self.attn2 is not None:
|
| 278 |
+
norm_hidden_states = (
|
| 279 |
+
self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
|
| 280 |
+
)
|
| 281 |
+
|
| 282 |
+
attn_output = self.attn2(
|
| 283 |
+
norm_hidden_states,
|
| 284 |
+
encoder_hidden_states=encoder_hidden_states,
|
| 285 |
+
attention_mask=encoder_attention_mask,
|
| 286 |
+
**cross_attention_kwargs,
|
| 287 |
+
)
|
| 288 |
+
hidden_states = attn_output + hidden_states
|
| 289 |
+
|
| 290 |
+
# 3. Feed-forward
|
| 291 |
+
norm_hidden_states = self.norm3(hidden_states)
|
| 292 |
+
|
| 293 |
+
if self.use_ada_layer_norm_zero:
|
| 294 |
+
norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
|
| 295 |
+
|
| 296 |
+
if self._chunk_size is not None:
|
| 297 |
+
# "feed_forward_chunk_size" can be used to save memory
|
| 298 |
+
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
|
| 299 |
+
raise ValueError(
|
| 300 |
+
f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
|
| 301 |
+
)
|
| 302 |
+
|
| 303 |
+
num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
|
| 304 |
+
ff_output = torch.cat(
|
| 305 |
+
[self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
|
| 306 |
+
dim=self._chunk_dim,
|
| 307 |
+
)
|
| 308 |
+
else:
|
| 309 |
+
ff_output = self.ff(norm_hidden_states)
|
| 310 |
+
|
| 311 |
+
if self.use_ada_layer_norm_zero:
|
| 312 |
+
ff_output = gate_mlp.unsqueeze(1) * ff_output
|
| 313 |
+
|
| 314 |
+
hidden_states = ff_output + hidden_states
|
| 315 |
+
|
| 316 |
+
return hidden_states
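A minimal sketch of the block above in its self-attention-only configuration (sizes are assumed for illustration):

import torch

block = BasicTransformerBlock(dim=256, num_attention_heads=4, attention_head_dim=64, activation_fn="snakebeta")
h = torch.randn(2, 50, 256)      # (batch, time, dim)
h = block(h)                     # same shape; no cross-attention since cross_attention_dim is None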
|
src/chatterbox/models/s3gen/s3gen.py
ADDED
|
@@ -0,0 +1,305 @@
| 1 |
+
# Modified from CosyVoice https://github.com/FunAudioLLM/CosyVoice
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import logging
|
| 16 |
+
|
| 17 |
+
import numpy as np
|
| 18 |
+
import torch
|
| 19 |
+
import torchaudio as ta
|
| 20 |
+
from functools import lru_cache
|
| 21 |
+
from typing import Optional
|
| 22 |
+
from omegaconf import DictConfig
|
| 23 |
+
|
| 24 |
+
from ..s3tokenizer import S3_SR, SPEECH_VOCAB_SIZE, S3Tokenizer
|
| 25 |
+
from .const import S3GEN_SR
|
| 26 |
+
from .flow import CausalMaskedDiffWithXvec
|
| 27 |
+
from .xvector import CAMPPlus
|
| 28 |
+
from .utils.mel import mel_spectrogram
|
| 29 |
+
from .f0_predictor import ConvRNNF0Predictor
|
| 30 |
+
from .hifigan import HiFTGenerator
|
| 31 |
+
from .transformer.upsample_encoder import UpsampleConformerEncoder
|
| 32 |
+
from .flow_matching import CausalConditionalCFM
|
| 33 |
+
from .decoder import ConditionalDecoder
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def drop_invalid_tokens(x):
|
| 37 |
+
assert len(x.shape) <= 2 and x.shape[0] == 1, "only batch size of one allowed for now"
|
| 38 |
+
return x[x < SPEECH_VOCAB_SIZE]
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
# TODO: global resampler cache
|
| 42 |
+
@lru_cache(100)
|
| 43 |
+
def get_resampler(src_sr, dst_sr, device):
|
| 44 |
+
return ta.transforms.Resample(src_sr, dst_sr).to(device)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class S3Token2Mel(torch.nn.Module):
|
| 48 |
+
"""
|
| 49 |
+
CosyVoice2's CFM decoder maps S3 speech tokens to mel-spectrograms.
|
| 50 |
+
|
| 51 |
+
TODO: make these modules configurable?
|
| 52 |
+
"""
|
| 53 |
+
def __init__(self):
|
| 54 |
+
super().__init__()
|
| 55 |
+
self.tokenizer = S3Tokenizer("speech_tokenizer_v2_25hz")
|
| 56 |
+
self.mel_extractor = mel_spectrogram # TODO: make it a torch module?
|
| 57 |
+
self.speaker_encoder = CAMPPlus() # use default args
|
| 58 |
+
|
| 59 |
+
encoder = UpsampleConformerEncoder(
|
| 60 |
+
output_size=512,
|
| 61 |
+
attention_heads=8,
|
| 62 |
+
linear_units=2048,
|
| 63 |
+
num_blocks=6,
|
| 64 |
+
dropout_rate=0.1,
|
| 65 |
+
positional_dropout_rate=0.1,
|
| 66 |
+
attention_dropout_rate=0.1,
|
| 67 |
+
normalize_before=True,
|
| 68 |
+
input_layer='linear',
|
| 69 |
+
pos_enc_layer_type='rel_pos_espnet',
|
| 70 |
+
selfattention_layer_type='rel_selfattn',
|
| 71 |
+
input_size=512,
|
| 72 |
+
use_cnn_module=False,
|
| 73 |
+
macaron_style=False,
|
| 74 |
+
)
|
| 75 |
+
|
| 76 |
+
estimator = ConditionalDecoder(
|
| 77 |
+
in_channels=320,
|
| 78 |
+
out_channels=80,
|
| 79 |
+
causal=True,
|
| 80 |
+
channels=[256],
|
| 81 |
+
dropout=0.0,
|
| 82 |
+
attention_head_dim=64,
|
| 83 |
+
n_blocks=4,
|
| 84 |
+
num_mid_blocks=12,
|
| 85 |
+
num_heads=8,
|
| 86 |
+
act_fn='gelu',
|
| 87 |
+
)
|
| 88 |
+
cfm_params = DictConfig({
|
| 89 |
+
"sigma_min": 1e-06,
|
| 90 |
+
"solver": 'euler',
|
| 91 |
+
"t_scheduler": 'cosine',
|
| 92 |
+
"training_cfg_rate": 0.2,
|
| 93 |
+
"inference_cfg_rate": 0.7,
|
| 94 |
+
"reg_loss_type": 'l1',
|
| 95 |
+
})
|
| 96 |
+
decoder = CausalConditionalCFM(
|
| 97 |
+
spk_emb_dim=80,
|
| 98 |
+
cfm_params=cfm_params,
|
| 99 |
+
estimator=estimator,
|
| 100 |
+
)
|
| 101 |
+
|
| 102 |
+
self.flow = CausalMaskedDiffWithXvec(
|
| 103 |
+
encoder=encoder,
|
| 104 |
+
decoder=decoder
|
| 105 |
+
)
|
| 106 |
+
|
| 107 |
+
self.resamplers = {}
|
| 108 |
+
|
| 109 |
+
@property
|
| 110 |
+
def device(self):
|
| 111 |
+
params = self.tokenizer.parameters()
|
| 112 |
+
return next(params).device
|
| 113 |
+
|
| 114 |
+
def embed_ref(
|
| 115 |
+
self,
|
| 116 |
+
ref_wav: torch.Tensor,
|
| 117 |
+
ref_sr: int,
|
| 118 |
+
device="auto",
|
| 119 |
+
ref_fade_out=True,
|
| 120 |
+
):
|
| 121 |
+
device = self.device if device == "auto" else device
|
| 122 |
+
if isinstance(ref_wav, np.ndarray):
|
| 123 |
+
ref_wav = torch.from_numpy(ref_wav).float()
|
| 124 |
+
|
| 125 |
+
if ref_wav.device != device:
|
| 126 |
+
ref_wav = ref_wav.to(device)
|
| 127 |
+
|
| 128 |
+
if len(ref_wav.shape) == 1:
|
| 129 |
+
ref_wav = ref_wav.unsqueeze(0) # (B, L)
|
| 130 |
+
|
| 131 |
+
if ref_wav.size(1) > 10 * ref_sr:
|
| 132 |
+
print("WARNING: cosydec received ref longer than 10s")
|
| 133 |
+
|
| 134 |
+
ref_wav_24 = ref_wav
|
| 135 |
+
if ref_sr != S3GEN_SR:
|
| 136 |
+
ref_wav_24 = get_resampler(ref_sr, S3GEN_SR, device)(ref_wav)
|
| 137 |
+
|
| 138 |
+
ref_mels_24 = self.mel_extractor(ref_wav_24).transpose(1, 2).to(device)
|
| 139 |
+
ref_mels_24_len = None
|
| 140 |
+
|
| 141 |
+
# Resample to 16kHz
|
| 142 |
+
ref_wav_16 = get_resampler(ref_sr, S3_SR, device)(ref_wav).to(device)
|
| 143 |
+
|
| 144 |
+
# Speaker embedding
|
| 145 |
+
ref_x_vector = self.speaker_encoder.inference(ref_wav_16)
|
| 146 |
+
|
| 147 |
+
# Tokenize 16khz reference
|
| 148 |
+
ref_speech_tokens, ref_speech_token_lens = self.tokenizer(ref_wav_16)
|
| 149 |
+
|
| 150 |
+
# Make sure mel_len = 2 * stoken_len (a mismatch happens when the input is not padded to a multiple of 40ms)
|
| 151 |
+
if ref_mels_24.shape[1] != 2 * ref_speech_tokens.shape[1]:
|
| 152 |
+
logging.warning(
|
| 153 |
+
"Reference mel length is not equal to 2 * reference token length.\n"
|
| 154 |
+
)
|
| 155 |
+
ref_speech_tokens = ref_speech_tokens[:, :ref_mels_24.shape[1] // 2]
|
| 156 |
+
ref_speech_token_lens[0] = ref_speech_tokens.shape[1]
|
| 157 |
+
|
| 158 |
+
return dict(
|
| 159 |
+
prompt_token=ref_speech_tokens.to(device),
|
| 160 |
+
prompt_token_len=ref_speech_token_lens,
|
| 161 |
+
prompt_feat=ref_mels_24,
|
| 162 |
+
prompt_feat_len=ref_mels_24_len,
|
| 163 |
+
embedding=ref_x_vector,
|
| 164 |
+
)
|
| 165 |
+
|
| 166 |
+
def forward(
|
| 167 |
+
self,
|
| 168 |
+
speech_tokens: torch.LongTensor,
|
| 169 |
+
# locally-computed ref embedding (mutex with ref_dict)
|
| 170 |
+
ref_wav: Optional[torch.Tensor],
|
| 171 |
+
ref_sr: Optional[int],
|
| 172 |
+
# pre-computed ref embedding (prod API)
|
| 173 |
+
ref_dict: Optional[dict] = None,
|
| 174 |
+
finalize: bool = False,
|
| 175 |
+
):
|
| 176 |
+
"""
|
| 177 |
+
Generate mel-spectrograms from S3 speech tokens and a reference waveform, from which the speaker timbre is inferred.
|
| 178 |
+
|
| 179 |
+
NOTE:
|
| 180 |
+
- The speaker encoder accepts 16 kHz waveform.
|
| 181 |
+
- S3TokenizerV2 accepts 16 kHz waveform.
|
| 182 |
+
- The mel-spectrogram for the reference assumes 24 kHz input signal.
|
| 183 |
+
- This function is designed for batch_size=1 only.
|
| 184 |
+
|
| 185 |
+
Args
|
| 186 |
+
----
|
| 187 |
+
- `speech_tokens`: S3 speech tokens [B=1, T]
|
| 188 |
+
- `ref_wav`: reference waveform (`torch.Tensor` with shape=[B=1, T])
|
| 189 |
+
- `ref_sr`: reference sample rate
|
| 190 |
+
- `finalize`: whether streaming is finished or not. Note that if False, the last 3 tokens will be ignored.
|
| 191 |
+
"""
|
| 192 |
+
assert (ref_wav is None) ^ (ref_dict is None), f"Must provide exactly one of ref_wav or ref_dict (got {ref_wav} and {ref_dict})"
|
| 193 |
+
|
| 194 |
+
if ref_dict is None:
|
| 195 |
+
ref_dict = self.embed_ref(ref_wav, ref_sr)
|
| 196 |
+
else:
|
| 197 |
+
# type/device casting (all values will be numpy if it's from a prod API call)
|
| 198 |
+
for rk in list(ref_dict):
|
| 199 |
+
if isinstance(ref_dict[rk], np.ndarray):
|
| 200 |
+
ref_dict[rk] = torch.from_numpy(ref_dict[rk])
|
| 201 |
+
if torch.is_tensor(ref_dict[rk]):
|
| 202 |
+
ref_dict[rk] = ref_dict[rk].to(self.device)
|
| 203 |
+
|
| 204 |
+
if len(speech_tokens.shape) == 1:
|
| 205 |
+
speech_tokens = speech_tokens.unsqueeze(0)
|
| 206 |
+
|
| 207 |
+
# assert speech_tokens.shape[0] == 1, "only batch size of one allowed for now"
|
| 208 |
+
speech_token_lens = torch.LongTensor([speech_tokens.size(1)]).to(self.device)
|
| 209 |
+
|
| 210 |
+
output_mels, _ = self.flow.inference(
|
| 211 |
+
token=speech_tokens,
|
| 212 |
+
token_len=speech_token_lens,
|
| 213 |
+
finalize=finalize,
|
| 214 |
+
**ref_dict,
|
| 215 |
+
)
|
| 216 |
+
return output_mels
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
class S3Token2Wav(S3Token2Mel):
|
| 220 |
+
"""
|
| 221 |
+
The decoder of CosyVoice2 is a concat of token-to-mel (CFM) and a mel-to-waveform (HiFiGAN) modules.
|
| 222 |
+
|
| 223 |
+
TODO: make these modules configurable?
|
| 224 |
+
"""
|
| 225 |
+
|
| 226 |
+
def __init__(self):
|
| 227 |
+
super().__init__()
|
| 228 |
+
|
| 229 |
+
f0_predictor = ConvRNNF0Predictor()
|
| 230 |
+
self.mel2wav = HiFTGenerator(
|
| 231 |
+
sampling_rate=S3GEN_SR,
|
| 232 |
+
upsample_rates=[8, 5, 3],
|
| 233 |
+
upsample_kernel_sizes=[16, 11, 7],
|
| 234 |
+
source_resblock_kernel_sizes=[7, 7, 11],
|
| 235 |
+
source_resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
|
| 236 |
+
f0_predictor=f0_predictor,
|
| 237 |
+
)
|
| 238 |
+
|
| 239 |
+
# silence out a few ms and fade audio in to reduce artifacts
|
| 240 |
+
n_trim = S3GEN_SR // 50 # 20ms = half of a frame
|
| 241 |
+
trim_fade = torch.zeros(2 * n_trim)
|
| 242 |
+
trim_fade[n_trim:] = (torch.cos(torch.linspace(torch.pi, 0, n_trim)) + 1) / 2
|
| 243 |
+
self.register_buffer("trim_fade", trim_fade, persistent=False) # (buffers get automatic device casting)
|
| 244 |
+
|
| 245 |
+
def forward(
|
| 246 |
+
self,
|
| 247 |
+
speech_tokens,
|
| 248 |
+
# locally-computed ref embedding (mutex with ref_dict)
|
| 249 |
+
ref_wav: Optional[torch.Tensor],
|
| 250 |
+
ref_sr: Optional[int],
|
| 251 |
+
# pre-computed ref embedding (prod API)
|
| 252 |
+
ref_dict: Optional[dict] = None,
|
| 253 |
+
finalize: bool = False
|
| 254 |
+
):
|
| 255 |
+
output_mels = super().forward(speech_tokens, ref_wav=ref_wav, ref_sr=ref_sr, ref_dict=ref_dict, finalize=finalize)
|
| 256 |
+
|
| 257 |
+
# TODO jrm: ignoring the speed control (mel interpolation) and the HiFTGAN caching mechanisms for now.
|
| 258 |
+
hift_cache_source = torch.zeros(1, 1, 0).to(self.device)
|
| 259 |
+
|
| 260 |
+
output_wavs, *_ = self.mel2wav.inference(speech_feat=output_mels, cache_source=hift_cache_source)
|
| 261 |
+
|
| 262 |
+
if not self.training:
|
| 263 |
+
# NOTE: ad-hoc method to reduce "spillover" from the reference clip.
|
| 264 |
+
output_wavs[:, :len(self.trim_fade)] *= self.trim_fade
|
| 265 |
+
|
| 266 |
+
return output_wavs
|
| 267 |
+
|
| 268 |
+
@torch.inference_mode()
|
| 269 |
+
def flow_inference(
|
| 270 |
+
self,
|
| 271 |
+
speech_tokens,
|
| 272 |
+
# locally-computed ref embedding (mutex with ref_dict)
|
| 273 |
+
ref_wav: Optional[torch.Tensor] = None,
|
| 274 |
+
ref_sr: Optional[int] = None,
|
| 275 |
+
# pre-computed ref embedding (prod API)
|
| 276 |
+
ref_dict: Optional[dict] = None,
|
| 277 |
+
finalize: bool = False,
|
| 278 |
+
):
|
| 279 |
+
return super().forward(speech_tokens, ref_wav=ref_wav, ref_sr=ref_sr, ref_dict=ref_dict, finalize=finalize)
|
| 280 |
+
|
| 281 |
+
@torch.inference_mode()
|
| 282 |
+
def hift_inference(self, speech_feat, cache_source: torch.Tensor = None):
|
| 283 |
+
if cache_source is None:
|
| 284 |
+
cache_source = torch.zeros(1, 1, 0).to(self.device)
|
| 285 |
+
return self.mel2wav.inference(speech_feat=speech_feat, cache_source=cache_source)
|
| 286 |
+
|
| 287 |
+
@torch.inference_mode()
|
| 288 |
+
def inference(
|
| 289 |
+
self,
|
| 290 |
+
speech_tokens,
|
| 291 |
+
# locally-computed ref embedding (mutex with ref_dict)
|
| 292 |
+
ref_wav: Optional[torch.Tensor] = None,
|
| 293 |
+
ref_sr: Optional[int] = None,
|
| 294 |
+
# pre-computed ref embedding (prod API)
|
| 295 |
+
ref_dict: Optional[dict] = None,
|
| 296 |
+
cache_source: torch.Tensor = None, # NOTE: this arg is for streaming, it can probably be removed here
|
| 297 |
+
finalize: bool = True,
|
| 298 |
+
):
|
| 299 |
+
output_mels = self.flow_inference(speech_tokens, ref_wav=ref_wav, ref_sr=ref_sr, ref_dict=ref_dict, finalize=finalize)
|
| 300 |
+
output_wavs, output_sources = self.hift_inference(output_mels, cache_source)
|
| 301 |
+
|
| 302 |
+
# NOTE: ad-hoc method to reduce "spillover" from the reference clip.
|
| 303 |
+
output_wavs[:, :len(self.trim_fade)] *= self.trim_fade
|
| 304 |
+
|
| 305 |
+
return output_wavs, output_sources
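A hypothetical end-to-end call sketch for the class above (the file name and token values are placeholders; model weights must be loaded separately before real use):

import torch
import torchaudio

model = S3Token2Wav().eval()
ref_wav, ref_sr = torchaudio.load("reference.wav")       # mono reference clip, any sample rate
speech_tokens = torch.zeros(1, 100, dtype=torch.long)    # placeholder S3 speech tokens
wavs, _ = model.inference(speech_tokens, ref_wav=ref_wav, ref_sr=ref_sr)
# wavs: (1, T) waveform at S3GEN_SR (24 kHz per the notes above)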
|
src/chatterbox/models/s3gen/transformer/__init__.py
ADDED
|
File without changes
|
src/chatterbox/models/s3gen/transformer/activation.py
ADDED
|
@@ -0,0 +1,84 @@
# Copyright (c) 2020 Johns Hopkins University (Shinji Watanabe)
#               2020 Northwestern Polytechnical University (Pengcheng Guo)
#               2020 Mobvoi Inc (Binbin Zhang)
#               2024 Alibaba Inc (Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Swish() activation function for Conformer."""

import torch
from torch import nn, sin, pow
from torch.nn import Parameter


class Swish(torch.nn.Module):
    """Construct a Swish object."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return the Swish activation: x * sigmoid(x)."""
        return x * torch.sigmoid(x)


# Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license.
# LICENSE is in incl_licenses directory.
class Snake(nn.Module):
    '''
    Implementation of a sine-based periodic activation function
    Shape:
        - Input: (B, C, T)
        - Output: (B, C, T), same shape as the input
    Parameters:
        - alpha - trainable parameter
    References:
        - This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
        https://arxiv.org/abs/2006.08195
    Examples:
        >>> a1 = Snake(256)
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
        '''
        Initialization.
        INPUT:
            - in_features: shape of the input
            - alpha: trainable parameter
            alpha is initialized to 1 by default, higher values = higher-frequency.
            alpha will be trained along with the rest of your model.
        '''
        super(Snake, self).__init__()
        self.in_features = in_features

        # initialize alpha
        self.alpha_logscale = alpha_logscale
        if self.alpha_logscale:  # log scale alphas initialized to zeros
            self.alpha = Parameter(torch.zeros(in_features) * alpha)
        else:  # linear scale alphas initialized to ones
            self.alpha = Parameter(torch.ones(in_features) * alpha)

        self.alpha.requires_grad = alpha_trainable

        self.no_div_by_zero = 0.000000001

    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        Snake := x + 1/a * sin^2 (xa)
        '''
        alpha = self.alpha.unsqueeze(0).unsqueeze(-1)  # line up with x to [B, C, T]
        if self.alpha_logscale:
            alpha = torch.exp(alpha)
        x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2)

        return x
src/chatterbox/models/s3gen/transformer/attention.py
ADDED
|
@@ -0,0 +1,330 @@
| 1 |
+
# Copyright (c) 2019 Shigeki Karita
|
| 2 |
+
# 2020 Mobvoi Inc (Binbin Zhang)
|
| 3 |
+
# 2022 Xingchen Song ([email protected])
|
| 4 |
+
# 2024 Alibaba Inc (Xiang Lyu)
|
| 5 |
+
#
|
| 6 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 7 |
+
# you may not use this file except in compliance with the License.
|
| 8 |
+
# You may obtain a copy of the License at
|
| 9 |
+
#
|
| 10 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 11 |
+
#
|
| 12 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 13 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 14 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 15 |
+
# See the License for the specific language governing permissions and
|
| 16 |
+
# limitations under the License.
|
| 17 |
+
"""Multi-Head Attention layer definition."""
|
| 18 |
+
|
| 19 |
+
import math
|
| 20 |
+
from typing import Tuple
|
| 21 |
+
|
| 22 |
+
import torch
|
| 23 |
+
from torch import nn
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class MultiHeadedAttention(nn.Module):
|
| 27 |
+
"""Multi-Head Attention layer.
|
| 28 |
+
|
| 29 |
+
Args:
|
| 30 |
+
n_head (int): The number of heads.
|
| 31 |
+
n_feat (int): The number of features.
|
| 32 |
+
dropout_rate (float): Dropout rate.
|
| 33 |
+
|
| 34 |
+
"""
|
| 35 |
+
|
| 36 |
+
def __init__(self,
|
| 37 |
+
n_head: int,
|
| 38 |
+
n_feat: int,
|
| 39 |
+
dropout_rate: float,
|
| 40 |
+
key_bias: bool = True):
|
| 41 |
+
"""Construct an MultiHeadedAttention object."""
|
| 42 |
+
super().__init__()
|
| 43 |
+
assert n_feat % n_head == 0
|
| 44 |
+
# We assume d_v always equals d_k
|
| 45 |
+
self.d_k = n_feat // n_head
|
| 46 |
+
self.h = n_head
|
| 47 |
+
self.linear_q = nn.Linear(n_feat, n_feat)
|
| 48 |
+
self.linear_k = nn.Linear(n_feat, n_feat, bias=key_bias)
|
| 49 |
+
self.linear_v = nn.Linear(n_feat, n_feat)
|
| 50 |
+
self.linear_out = nn.Linear(n_feat, n_feat)
|
| 51 |
+
self.dropout = nn.Dropout(p=dropout_rate)
|
| 52 |
+
|
| 53 |
+
def forward_qkv(
|
| 54 |
+
self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor
|
| 55 |
+
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
|
| 56 |
+
"""Transform query, key and value.
|
| 57 |
+
|
| 58 |
+
Args:
|
| 59 |
+
query (torch.Tensor): Query tensor (#batch, time1, size).
|
| 60 |
+
key (torch.Tensor): Key tensor (#batch, time2, size).
|
| 61 |
+
value (torch.Tensor): Value tensor (#batch, time2, size).
|
| 62 |
+
|
| 63 |
+
Returns:
|
| 64 |
+
torch.Tensor: Transformed query tensor, size
|
| 65 |
+
(#batch, n_head, time1, d_k).
|
| 66 |
+
torch.Tensor: Transformed key tensor, size
|
| 67 |
+
(#batch, n_head, time2, d_k).
|
| 68 |
+
torch.Tensor: Transformed value tensor, size
|
| 69 |
+
(#batch, n_head, time2, d_k).
|
| 70 |
+
|
| 71 |
+
"""
|
| 72 |
+
n_batch = query.size(0)
|
| 73 |
+
q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
|
| 74 |
+
k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
|
| 75 |
+
v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
|
| 76 |
+
q = q.transpose(1, 2) # (batch, head, time1, d_k)
|
| 77 |
+
k = k.transpose(1, 2) # (batch, head, time2, d_k)
|
| 78 |
+
v = v.transpose(1, 2) # (batch, head, time2, d_k)
|
| 79 |
+
|
| 80 |
+
return q, k, v
|
| 81 |
+
|
| 82 |
+
def forward_attention(
|
| 83 |
+
self,
|
| 84 |
+
value: torch.Tensor,
|
| 85 |
+
scores: torch.Tensor,
|
| 86 |
+
mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool)
|
| 87 |
+
) -> torch.Tensor:
|
| 88 |
+
"""Compute attention context vector.
|
| 89 |
+
|
| 90 |
+
Args:
|
| 91 |
+
value (torch.Tensor): Transformed value, size
|
| 92 |
+
(#batch, n_head, time2, d_k).
|
| 93 |
+
scores (torch.Tensor): Attention score, size
|
| 94 |
+
(#batch, n_head, time1, time2).
|
| 95 |
+
mask (torch.Tensor): Mask, size (#batch, 1, time2) or
|
| 96 |
+
(#batch, time1, time2), (0, 0, 0) means fake mask.
|
| 97 |
+
|
| 98 |
+
Returns:
|
| 99 |
+
torch.Tensor: Transformed value (#batch, time1, d_model)
|
| 100 |
+
weighted by the attention score (#batch, time1, time2).
|
| 101 |
+
|
| 102 |
+
"""
|
| 103 |
+
n_batch = value.size(0)
|
| 104 |
+
# NOTE(xcsong): When will `if mask.size(2) > 0` be True?
|
| 105 |
+
# 1. onnx(16/4) [WHY? Because we feed real cache & real mask for the
|
| 106 |
+
# 1st chunk to ease the onnx export.]
|
| 107 |
+
# 2. pytorch training
|
| 108 |
+
if mask.size(2) > 0: # time2 > 0
|
| 109 |
+
mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)
|
| 110 |
+
# For last chunk, time2 might be larger than scores.size(-1)
|
| 111 |
+
mask = mask[:, :, :, :scores.size(-1)] # (batch, 1, *, time2)
|
| 112 |
+
scores = scores.masked_fill(mask, -float('inf'))
|
| 113 |
+
attn = torch.softmax(scores, dim=-1).masked_fill(
|
| 114 |
+
mask, 0.0) # (batch, head, time1, time2)
|
| 115 |
+
# NOTE(xcsong): When will `if mask.size(2) > 0` be False?
|
| 116 |
+
# 1. onnx(16/-1, -1/-1, 16/0)
|
| 117 |
+
# 2. jit (16/-1, -1/-1, 16/0, 16/4)
|
| 118 |
+
else:
|
| 119 |
+
attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)
|
| 120 |
+
|
| 121 |
+
p_attn = self.dropout(attn)
|
| 122 |
+
x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)
|
| 123 |
+
x = (x.transpose(1, 2).contiguous().view(n_batch, -1,
|
| 124 |
+
self.h * self.d_k)
|
| 125 |
+
) # (batch, time1, d_model)
|
| 126 |
+
|
| 127 |
+
return self.linear_out(x) # (batch, time1, d_model)
|
| 128 |
+
|
| 129 |
+
def forward(
|
| 130 |
+
self,
|
| 131 |
+
query: torch.Tensor,
|
| 132 |
+
key: torch.Tensor,
|
| 133 |
+
value: torch.Tensor,
|
| 134 |
+
mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
|
| 135 |
+
pos_emb: torch.Tensor = torch.empty(0),
|
| 136 |
+
cache: torch.Tensor = torch.zeros((0, 0, 0, 0))
|
| 137 |
+
) -> Tuple[torch.Tensor, torch.Tensor]:
|
| 138 |
+
"""Compute scaled dot product attention.
|
| 139 |
+
|
| 140 |
+
Args:
|
| 141 |
+
query (torch.Tensor): Query tensor (#batch, time1, size).
|
| 142 |
+
key (torch.Tensor): Key tensor (#batch, time2, size).
|
| 143 |
+
value (torch.Tensor): Value tensor (#batch, time2, size).
|
| 144 |
+
mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
|
| 145 |
+
(#batch, time1, time2).
|
| 146 |
+
1.When applying cross attention between decoder and encoder,
|
| 147 |
+
the batch padding mask for input is in (#batch, 1, T) shape.
|
| 148 |
+
2.When applying self attention of encoder,
|
| 149 |
+
the mask is in (#batch, T, T) shape.
|
| 150 |
+
3.When applying self attention of decoder,
|
| 151 |
+
the mask is in (#batch, L, L) shape.
|
| 152 |
+
4.If the different position in decoder see different block
|
| 153 |
+
of the encoder, such as Mocha, the passed in mask could be
|
| 154 |
+
in (#batch, L, T) shape. But there is no such case in current
|
| 155 |
+
CosyVoice.
|
| 156 |
+
cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2),
|
| 157 |
+
where `cache_t == chunk_size * num_decoding_left_chunks`
|
| 158 |
+
and `head * d_k == size`
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
Returns:
|
| 162 |
+
torch.Tensor: Output tensor (#batch, time1, d_model).
|
| 163 |
+
torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2)
|
| 164 |
+
where `cache_t == chunk_size * num_decoding_left_chunks`
|
| 165 |
+
and `head * d_k == size`
|
| 166 |
+
|
| 167 |
+
"""
|
| 168 |
+
q, k, v = self.forward_qkv(query, key, value)
|
| 169 |
+
|
| 170 |
+
# NOTE(xcsong):
|
| 171 |
+
# when export onnx model, for 1st chunk, we feed
|
| 172 |
+
# cache(1, head, 0, d_k * 2) (16/-1, -1/-1, 16/0 mode)
|
| 173 |
+
# or cache(1, head, real_cache_t, d_k * 2) (16/4 mode).
|
| 174 |
+
# In all modes, `if cache.size(0) > 0` will always be `True`
|
| 175 |
+
# and we will always do splitting and
|
| 176 |
+
# concatenation (this will simplify onnx export). Note that
|
| 177 |
+
# it's OK to concat & split zero-shaped tensors(see code below).
|
| 178 |
+
# when export jit model, for 1st chunk, we always feed
|
| 179 |
+
# cache(0, 0, 0, 0) since jit supports dynamic if-branch.
|
| 180 |
+
# >>> a = torch.ones((1, 2, 0, 4))
|
| 181 |
+
# >>> b = torch.ones((1, 2, 3, 4))
|
| 182 |
+
# >>> c = torch.cat((a, b), dim=2)
|
| 183 |
+
# >>> torch.equal(b, c) # True
|
| 184 |
+
# >>> d = torch.split(a, 2, dim=-1)
|
| 185 |
+
# >>> torch.equal(d[0], d[1]) # True
|
| 186 |
+
if cache.size(0) > 0:
|
| 187 |
+
key_cache, value_cache = torch.split(cache,
|
| 188 |
+
cache.size(-1) // 2,
|
| 189 |
+
dim=-1)
|
| 190 |
+
k = torch.cat([key_cache, k], dim=2)
|
| 191 |
+
v = torch.cat([value_cache, v], dim=2)
|
| 192 |
+
# NOTE(xcsong): We do cache slicing in encoder.forward_chunk, since it's
|
| 193 |
+
# non-trivial to calculate `next_cache_start` here.
|
| 194 |
+
new_cache = torch.cat((k, v), dim=-1)
|
| 195 |
+
|
| 196 |
+
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
|
| 197 |
+
return self.forward_attention(v, scores, mask), new_cache
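A quick self-attention shape check for the layer above (sizes assumed; the default zero-sized mask and cache mean no masking and no cache):

import torch

mha = MultiHeadedAttention(n_head=8, n_feat=512, dropout_rate=0.1)
x = torch.randn(2, 50, 512)        # (batch, time, features)
out, new_cache = mha(x, x, x)      # self-attention
print(out.shape, new_cache.shape)  # torch.Size([2, 50, 512]) torch.Size([2, 8, 50, 128])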
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
class RelPositionMultiHeadedAttention(MultiHeadedAttention):
|
| 201 |
+
"""Multi-Head Attention layer with relative position encoding.
|
| 202 |
+
Paper: https://arxiv.org/abs/1901.02860
|
| 203 |
+
Args:
|
| 204 |
+
n_head (int): The number of heads.
|
| 205 |
+
n_feat (int): The number of features.
|
| 206 |
+
dropout_rate (float): Dropout rate.
|
| 207 |
+
"""
|
| 208 |
+
|
| 209 |
+
def __init__(self,
|
| 210 |
+
n_head: int,
|
| 211 |
+
n_feat: int,
|
| 212 |
+
dropout_rate: float,
|
| 213 |
+
key_bias: bool = True):
|
| 214 |
+
"""Construct an RelPositionMultiHeadedAttention object."""
|
| 215 |
+
super().__init__(n_head, n_feat, dropout_rate, key_bias)
|
| 216 |
+
# linear transformation for positional encoding
|
| 217 |
+
self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
|
| 218 |
+
# these two learnable bias are used in matrix c and matrix d
|
| 219 |
+
# as described in https://arxiv.org/abs/1901.02860 Section 3.3
|
| 220 |
+
self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
|
| 221 |
+
self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
|
| 222 |
+
torch.nn.init.xavier_uniform_(self.pos_bias_u)
|
| 223 |
+
torch.nn.init.xavier_uniform_(self.pos_bias_v)
|
| 224 |
+
|
| 225 |
+
def rel_shift(self, x: torch.Tensor) -> torch.Tensor:
|
| 226 |
+
"""Compute relative positional encoding.
|
| 227 |
+
|
| 228 |
+
Args:
|
| 229 |
+
x (torch.Tensor): Input tensor (batch, head, time1, 2*time1-1).
|
| 230 |
+
time1 means the length of query vector.
|
| 231 |
+
|
| 232 |
+
Returns:
|
| 233 |
+
torch.Tensor: Output tensor.
|
| 234 |
+
|
| 235 |
+
"""
|
| 236 |
+
zero_pad = torch.zeros((x.size()[0], x.size()[1], x.size()[2], 1),
|
| 237 |
+
device=x.device,
|
| 238 |
+
dtype=x.dtype)
|
| 239 |
+
x_padded = torch.cat([zero_pad, x], dim=-1)
|
| 240 |
+
|
| 241 |
+
x_padded = x_padded.view(x.size()[0],
|
| 242 |
+
x.size()[1],
|
| 243 |
+
x.size(3) + 1, x.size(2))
|
| 244 |
+
x = x_padded[:, :, 1:].view_as(x)[
|
| 245 |
+
:, :, :, : x.size(-1) // 2 + 1
|
| 246 |
+
] # only keep the positions from 0 to time2
|
| 247 |
+
return x
|
| 248 |
+
|
| 249 |
+
def forward(
|
| 250 |
+
self,
|
| 251 |
+
query: torch.Tensor,
|
| 252 |
+
key: torch.Tensor,
|
| 253 |
+
value: torch.Tensor,
|
| 254 |
+
mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
|
| 255 |
+
pos_emb: torch.Tensor = torch.empty(0),
|
| 256 |
+
cache: torch.Tensor = torch.zeros((0, 0, 0, 0))
|
| 257 |
+
) -> Tuple[torch.Tensor, torch.Tensor]:
|
| 258 |
+
"""Compute 'Scaled Dot Product Attention' with rel. positional encoding.
|
| 259 |
+
Args:
|
| 260 |
+
query (torch.Tensor): Query tensor (#batch, time1, size).
|
| 261 |
+
key (torch.Tensor): Key tensor (#batch, time2, size).
|
| 262 |
+
value (torch.Tensor): Value tensor (#batch, time2, size).
|
| 263 |
+
mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
|
| 264 |
+
(#batch, time1, time2), (0, 0, 0) means fake mask.
|
| 265 |
+
pos_emb (torch.Tensor): Positional embedding tensor
|
| 266 |
+
(#batch, time2, size).
|
| 267 |
+
cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2),
|
| 268 |
+
where `cache_t == chunk_size * num_decoding_left_chunks`
|
| 269 |
+
and `head * d_k == size`
|
| 270 |
+
Returns:
|
| 271 |
+
torch.Tensor: Output tensor (#batch, time1, d_model).
|
| 272 |
+
torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2)
|
| 273 |
+
where `cache_t == chunk_size * num_decoding_left_chunks`
|
| 274 |
+
and `head * d_k == size`
|
| 275 |
+
"""
|
| 276 |
+
q, k, v = self.forward_qkv(query, key, value)
|
| 277 |
+
q = q.transpose(1, 2) # (batch, time1, head, d_k)
|
| 278 |
+
|
| 279 |
+
# NOTE(xcsong):
|
| 280 |
+
# when export onnx model, for 1st chunk, we feed
|
| 281 |
+
# cache(1, head, 0, d_k * 2) (16/-1, -1/-1, 16/0 mode)
|
| 282 |
+
# or cache(1, head, real_cache_t, d_k * 2) (16/4 mode).
|
| 283 |
+
# In all modes, `if cache.size(0) > 0` will alwayse be `True`
|
| 284 |
+
# and we will always do splitting and
|
| 285 |
+
# concatnation(this will simplify onnx export). Note that
|
| 286 |
+
# it's OK to concat & split zero-shaped tensors(see code below).
|
| 287 |
+
# when export jit model, for 1st chunk, we always feed
|
| 288 |
+
# cache(0, 0, 0, 0) since jit supports dynamic if-branch.
|
| 289 |
+
# >>> a = torch.ones((1, 2, 0, 4))
|
| 290 |
+
# >>> b = torch.ones((1, 2, 3, 4))
|
| 291 |
+
# >>> c = torch.cat((a, b), dim=2)
|
| 292 |
+
# >>> torch.equal(b, c) # True
|
| 293 |
+
# >>> d = torch.split(a, 2, dim=-1)
|
| 294 |
+
# >>> torch.equal(d[0], d[1]) # True
|
| 295 |
+
if cache.size(0) > 0:
|
| 296 |
+
key_cache, value_cache = torch.split(cache,
|
| 297 |
+
cache.size(-1) // 2,
|
| 298 |
+
dim=-1)
|
| 299 |
+
k = torch.cat([key_cache, k], dim=2)
|
| 300 |
+
v = torch.cat([value_cache, v], dim=2)
|
| 301 |
+
# NOTE(xcsong): We do cache slicing in encoder.forward_chunk, since it's
|
| 302 |
+
# non-trivial to calculate `next_cache_start` here.
|
| 303 |
+
new_cache = torch.cat((k, v), dim=-1)
|
| 304 |
+
|
| 305 |
+
n_batch_pos = pos_emb.size(0)
|
| 306 |
+
p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
|
| 307 |
+
p = p.transpose(1, 2) # (batch, head, time1, d_k)
|
| 308 |
+
|
| 309 |
+
# (batch, head, time1, d_k)
|
| 310 |
+
q_with_bias_u = (q + self.pos_bias_u.to(q.device)).transpose(1, 2)
|
| 311 |
+
# (batch, head, time1, d_k)
|
| 312 |
+
q_with_bias_v = (q + self.pos_bias_v.to(q.device)).transpose(1, 2)
|
| 313 |
+
|
| 314 |
+
# compute attention score
|
| 315 |
+
# first compute matrix a and matrix c
|
| 316 |
+
# as described in https://arxiv.org/abs/1901.02860 Section 3.3
|
| 317 |
+
# (batch, head, time1, time2)
|
| 318 |
+
matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))
|
| 319 |
+
|
| 320 |
+
# compute matrix b and matrix d
|
| 321 |
+
# (batch, head, time1, time2)
|
| 322 |
+
matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
|
| 323 |
+
# NOTE(Xiang Lyu): Keep rel_shift since espnet rel_pos_emb is used
|
| 324 |
+
if matrix_ac.shape != matrix_bd.shape:
|
| 325 |
+
matrix_bd = self.rel_shift(matrix_bd)
|
| 326 |
+
|
| 327 |
+
scores = (matrix_ac + matrix_bd) / math.sqrt(
|
| 328 |
+
self.d_k) # (batch, head, time1, time2)
|
| 329 |
+
|
| 330 |
+
return self.forward_attention(v, scores, mask), new_cache
|
src/chatterbox/models/s3gen/transformer/convolution.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2020 Mobvoi Inc. (authors: Binbin Zhang, Di Wu)
|
| 2 |
+
# 2024 Alibaba Inc (Xiang Lyu)
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
# Modified from ESPnet(https://github.com/espnet/espnet)
|
| 16 |
+
"""ConvolutionModule definition."""
|
| 17 |
+
|
| 18 |
+
from typing import Tuple
|
| 19 |
+
|
| 20 |
+
import torch
|
| 21 |
+
from torch import nn
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class ConvolutionModule(nn.Module):
|
| 25 |
+
"""ConvolutionModule in Conformer model."""
|
| 26 |
+
|
| 27 |
+
def __init__(self,
|
| 28 |
+
channels: int,
|
| 29 |
+
kernel_size: int = 15,
|
| 30 |
+
activation: nn.Module = nn.ReLU(),
|
| 31 |
+
norm: str = "batch_norm",
|
| 32 |
+
causal: bool = False,
|
| 33 |
+
bias: bool = True):
|
| 34 |
+
"""Construct an ConvolutionModule object.
|
| 35 |
+
Args:
|
| 36 |
+
channels (int): The number of channels of conv layers.
|
| 37 |
+
kernel_size (int): Kernel size of conv layers.
|
| 38 |
+
causal (int): Whether use causal convolution or not
|
| 39 |
+
"""
|
| 40 |
+
super().__init__()
|
| 41 |
+
|
| 42 |
+
self.pointwise_conv1 = nn.Conv1d(
|
| 43 |
+
channels,
|
| 44 |
+
2 * channels,
|
| 45 |
+
kernel_size=1,
|
| 46 |
+
stride=1,
|
| 47 |
+
padding=0,
|
| 48 |
+
bias=bias,
|
| 49 |
+
)
|
| 50 |
+
# self.lorder is used to distinguish if it's a causal convolution,
|
| 51 |
+
# if self.lorder > 0: it's a causal convolution, the input will be
|
| 52 |
+
# padded with self.lorder frames on the left in forward.
|
| 53 |
+
# else: it's a symmetrical convolution
|
| 54 |
+
if causal:
|
| 55 |
+
padding = 0
|
| 56 |
+
self.lorder = kernel_size - 1
|
| 57 |
+
else:
|
| 58 |
+
# kernel_size should be an odd number for none causal convolution
|
| 59 |
+
assert (kernel_size - 1) % 2 == 0
|
| 60 |
+
padding = (kernel_size - 1) // 2
|
| 61 |
+
self.lorder = 0
|
| 62 |
+
self.depthwise_conv = nn.Conv1d(
|
| 63 |
+
channels,
|
| 64 |
+
channels,
|
| 65 |
+
kernel_size,
|
| 66 |
+
stride=1,
|
| 67 |
+
padding=padding,
|
| 68 |
+
groups=channels,
|
| 69 |
+
bias=bias,
|
| 70 |
+
)
|
| 71 |
+
|
| 72 |
+
assert norm in ['batch_norm', 'layer_norm']
|
| 73 |
+
if norm == "batch_norm":
|
| 74 |
+
self.use_layer_norm = False
|
| 75 |
+
self.norm = nn.BatchNorm1d(channels)
|
| 76 |
+
else:
|
| 77 |
+
self.use_layer_norm = True
|
| 78 |
+
self.norm = nn.LayerNorm(channels)
|
| 79 |
+
|
| 80 |
+
self.pointwise_conv2 = nn.Conv1d(
|
| 81 |
+
channels,
|
| 82 |
+
channels,
|
| 83 |
+
kernel_size=1,
|
| 84 |
+
stride=1,
|
| 85 |
+
padding=0,
|
| 86 |
+
bias=bias,
|
| 87 |
+
)
|
| 88 |
+
self.activation = activation
|
| 89 |
+
|
| 90 |
+
def forward(
|
| 91 |
+
self,
|
| 92 |
+
x: torch.Tensor,
|
| 93 |
+
mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
|
| 94 |
+
cache: torch.Tensor = torch.zeros((0, 0, 0)),
|
| 95 |
+
) -> Tuple[torch.Tensor, torch.Tensor]:
|
| 96 |
+
"""Compute convolution module.
|
| 97 |
+
Args:
|
| 98 |
+
x (torch.Tensor): Input tensor (#batch, time, channels).
|
| 99 |
+
mask_pad (torch.Tensor): used for batch padding (#batch, 1, time),
|
| 100 |
+
(0, 0, 0) means fake mask.
|
| 101 |
+
cache (torch.Tensor): left context cache, it is only
|
| 102 |
+
used in causal convolution (#batch, channels, cache_t),
|
| 103 |
+
(0, 0, 0) meas fake cache.
|
| 104 |
+
Returns:
|
| 105 |
+
torch.Tensor: Output tensor (#batch, time, channels).
|
| 106 |
+
"""
|
| 107 |
+
# exchange the temporal dimension and the feature dimension
|
| 108 |
+
x = x.transpose(1, 2) # (#batch, channels, time)
|
| 109 |
+
|
| 110 |
+
# mask batch padding
|
| 111 |
+
if mask_pad.size(2) > 0: # time > 0
|
| 112 |
+
x.masked_fill_(~mask_pad, 0.0)
|
| 113 |
+
|
| 114 |
+
if self.lorder > 0:
|
| 115 |
+
if cache.size(2) == 0: # cache_t == 0
|
| 116 |
+
x = nn.functional.pad(x, (self.lorder, 0), 'constant', 0.0)
|
| 117 |
+
else:
|
| 118 |
+
assert cache.size(0) == x.size(0) # equal batch
|
| 119 |
+
assert cache.size(1) == x.size(1) # equal channel
|
| 120 |
+
x = torch.cat((cache, x), dim=2)
|
| 121 |
+
assert (x.size(2) > self.lorder)
|
| 122 |
+
new_cache = x[:, :, -self.lorder:]
|
| 123 |
+
else:
|
| 124 |
+
# It's better we just return None if no cache is required,
|
| 125 |
+
# However, for JIT export, here we just fake one tensor instead of
|
| 126 |
+
# None.
|
| 127 |
+
new_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)
|
| 128 |
+
|
| 129 |
+
# GLU mechanism
|
| 130 |
+
x = self.pointwise_conv1(x) # (batch, 2*channel, dim)
|
| 131 |
+
x = nn.functional.glu(x, dim=1) # (batch, channel, dim)
|
| 132 |
+
|
| 133 |
+
# 1D Depthwise Conv
|
| 134 |
+
x = self.depthwise_conv(x)
|
| 135 |
+
if self.use_layer_norm:
|
| 136 |
+
x = x.transpose(1, 2)
|
| 137 |
+
x = self.activation(self.norm(x))
|
| 138 |
+
if self.use_layer_norm:
|
| 139 |
+
x = x.transpose(1, 2)
|
| 140 |
+
x = self.pointwise_conv2(x)
|
| 141 |
+
# mask batch padding
|
| 142 |
+
if mask_pad.size(2) > 0: # time > 0
|
| 143 |
+
x.masked_fill_(~mask_pad, 0.0)
|
| 144 |
+
|
| 145 |
+
return x.transpose(1, 2), new_cache
|
src/chatterbox/models/s3gen/transformer/embedding.py
ADDED
|
@@ -0,0 +1,294 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2020 Mobvoi Inc. (authors: Binbin Zhang, Di Wu)
|
| 2 |
+
# 2024 Alibaba Inc (Xiang Lyu)
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
# Modified from ESPnet(https://github.com/espnet/espnet)
|
| 16 |
+
"""Positonal Encoding Module."""
|
| 17 |
+
|
| 18 |
+
import math
|
| 19 |
+
from typing import Tuple, Union
|
| 20 |
+
|
| 21 |
+
import torch
|
| 22 |
+
import torch.nn.functional as F
|
| 23 |
+
import numpy as np
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class PositionalEncoding(torch.nn.Module):
|
| 27 |
+
"""Positional encoding.
|
| 28 |
+
|
| 29 |
+
:param int d_model: embedding dim
|
| 30 |
+
:param float dropout_rate: dropout rate
|
| 31 |
+
:param int max_len: maximum input length
|
| 32 |
+
|
| 33 |
+
PE(pos, 2i) = sin(pos/(10000^(2i/dmodel)))
|
| 34 |
+
PE(pos, 2i+1) = cos(pos/(10000^(2i/dmodel)))
|
| 35 |
+
"""
|
| 36 |
+
|
| 37 |
+
def __init__(self,
|
| 38 |
+
d_model: int,
|
| 39 |
+
dropout_rate: float,
|
| 40 |
+
max_len: int = 5000,
|
| 41 |
+
reverse: bool = False):
|
| 42 |
+
"""Construct an PositionalEncoding object."""
|
| 43 |
+
super().__init__()
|
| 44 |
+
self.d_model = d_model
|
| 45 |
+
self.xscale = math.sqrt(self.d_model)
|
| 46 |
+
self.dropout = torch.nn.Dropout(p=dropout_rate)
|
| 47 |
+
self.max_len = max_len
|
| 48 |
+
|
| 49 |
+
self.pe = torch.zeros(self.max_len, self.d_model)
|
| 50 |
+
position = torch.arange(0, self.max_len,
|
| 51 |
+
dtype=torch.float32).unsqueeze(1)
|
| 52 |
+
div_term = torch.exp(
|
| 53 |
+
torch.arange(0, self.d_model, 2, dtype=torch.float32) *
|
| 54 |
+
-(math.log(10000.0) / self.d_model))
|
| 55 |
+
self.pe[:, 0::2] = torch.sin(position * div_term)
|
| 56 |
+
self.pe[:, 1::2] = torch.cos(position * div_term)
|
| 57 |
+
self.pe = self.pe.unsqueeze(0)
|
| 58 |
+
|
| 59 |
+
def forward(self,
|
| 60 |
+
x: torch.Tensor,
|
| 61 |
+
offset: Union[int, torch.Tensor] = 0) \
|
| 62 |
+
-> Tuple[torch.Tensor, torch.Tensor]:
|
| 63 |
+
"""Add positional encoding.
|
| 64 |
+
|
| 65 |
+
Args:
|
| 66 |
+
x (torch.Tensor): Input. Its shape is (batch, time, ...)
|
| 67 |
+
offset (int, torch.tensor): position offset
|
| 68 |
+
|
| 69 |
+
Returns:
|
| 70 |
+
torch.Tensor: Encoded tensor. Its shape is (batch, time, ...)
|
| 71 |
+
torch.Tensor: for compatibility to RelPositionalEncoding
|
| 72 |
+
"""
|
| 73 |
+
|
| 74 |
+
self.pe = self.pe.to(x.device)
|
| 75 |
+
pos_emb = self.position_encoding(offset, x.size(1), False)
|
| 76 |
+
x = x * self.xscale + pos_emb
|
| 77 |
+
return self.dropout(x), self.dropout(pos_emb)
|
| 78 |
+
|
| 79 |
+
def position_encoding(self,
|
| 80 |
+
offset: Union[int, torch.Tensor],
|
| 81 |
+
size: int,
|
| 82 |
+
apply_dropout: bool = True) -> torch.Tensor:
|
| 83 |
+
""" For getting encoding in a streaming fashion
|
| 84 |
+
|
| 85 |
+
Attention!!!!!
|
| 86 |
+
we apply dropout only once at the whole utterance level in a none
|
| 87 |
+
streaming way, but will call this function several times with
|
| 88 |
+
increasing input size in a streaming scenario, so the dropout will
|
| 89 |
+
be applied several times.
|
| 90 |
+
|
| 91 |
+
Args:
|
| 92 |
+
offset (int or torch.tensor): start offset
|
| 93 |
+
size (int): required size of position encoding
|
| 94 |
+
|
| 95 |
+
Returns:
|
| 96 |
+
torch.Tensor: Corresponding encoding
|
| 97 |
+
"""
|
| 98 |
+
# How to subscript a Union type:
|
| 99 |
+
# https://github.com/pytorch/pytorch/issues/69434
|
| 100 |
+
if isinstance(offset, int):
|
| 101 |
+
assert offset + size <= self.max_len
|
| 102 |
+
pos_emb = self.pe[:, offset:offset + size]
|
| 103 |
+
elif isinstance(offset, torch.Tensor) and offset.dim() == 0: # scalar
|
| 104 |
+
assert offset + size <= self.max_len
|
| 105 |
+
pos_emb = self.pe[:, offset:offset + size]
|
| 106 |
+
else: # for batched streaming decoding on GPU
|
| 107 |
+
assert torch.max(offset) + size <= self.max_len
|
| 108 |
+
index = offset.unsqueeze(1) + \
|
| 109 |
+
torch.arange(0, size).to(offset.device) # B X T
|
| 110 |
+
flag = index > 0
|
| 111 |
+
# remove negative offset
|
| 112 |
+
index = index * flag
|
| 113 |
+
pos_emb = F.embedding(index, self.pe[0]) # B X T X d_model
|
| 114 |
+
|
| 115 |
+
if apply_dropout:
|
| 116 |
+
pos_emb = self.dropout(pos_emb)
|
| 117 |
+
return pos_emb
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
class RelPositionalEncoding(PositionalEncoding):
|
| 121 |
+
"""Relative positional encoding module.
|
| 122 |
+
See : Appendix B in https://arxiv.org/abs/1901.02860
|
| 123 |
+
Args:
|
| 124 |
+
d_model (int): Embedding dimension.
|
| 125 |
+
dropout_rate (float): Dropout rate.
|
| 126 |
+
max_len (int): Maximum input length.
|
| 127 |
+
"""
|
| 128 |
+
|
| 129 |
+
def __init__(self, d_model: int, dropout_rate: float, max_len: int = 5000):
|
| 130 |
+
"""Initialize class."""
|
| 131 |
+
super().__init__(d_model, dropout_rate, max_len, reverse=True)
|
| 132 |
+
|
| 133 |
+
def forward(self,
|
| 134 |
+
x: torch.Tensor,
|
| 135 |
+
offset: Union[int, torch.Tensor] = 0) \
|
| 136 |
+
-> Tuple[torch.Tensor, torch.Tensor]:
|
| 137 |
+
"""Compute positional encoding.
|
| 138 |
+
Args:
|
| 139 |
+
x (torch.Tensor): Input tensor (batch, time, `*`).
|
| 140 |
+
Returns:
|
| 141 |
+
torch.Tensor: Encoded tensor (batch, time, `*`).
|
| 142 |
+
torch.Tensor: Positional embedding tensor (1, time, `*`).
|
| 143 |
+
"""
|
| 144 |
+
self.pe = self.pe.to(x.device)
|
| 145 |
+
x = x * self.xscale
|
| 146 |
+
pos_emb = self.position_encoding(offset, x.size(1), False)
|
| 147 |
+
return self.dropout(x), self.dropout(pos_emb)
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
class WhisperPositionalEncoding(PositionalEncoding):
|
| 151 |
+
""" Sinusoids position encoding used in openai-whisper.encoder
|
| 152 |
+
"""
|
| 153 |
+
|
| 154 |
+
def __init__(self, d_model: int, dropout_rate: float, max_len: int = 1500):
|
| 155 |
+
super().__init__(d_model, dropout_rate, max_len)
|
| 156 |
+
self.xscale = 1.0
|
| 157 |
+
log_timescale_increment = np.log(10000) / (d_model // 2 - 1)
|
| 158 |
+
inv_timescales = torch.exp(-log_timescale_increment *
|
| 159 |
+
torch.arange(d_model // 2))
|
| 160 |
+
scaled_time = torch.arange(max_len)[:, np.newaxis] * \
|
| 161 |
+
inv_timescales[np.newaxis, :]
|
| 162 |
+
pe = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1)
|
| 163 |
+
delattr(self, "pe")
|
| 164 |
+
self.register_buffer("pe", pe.unsqueeze(0))
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
class LearnablePositionalEncoding(PositionalEncoding):
|
| 168 |
+
""" Learnable position encoding used in openai-whisper.decoder
|
| 169 |
+
"""
|
| 170 |
+
|
| 171 |
+
def __init__(self, d_model: int, dropout_rate: float, max_len: int = 448):
|
| 172 |
+
super().__init__(d_model, dropout_rate, max_len)
|
| 173 |
+
# NOTE(xcsong): overwrite self.pe & self.xscale
|
| 174 |
+
self.pe = torch.nn.Parameter(torch.empty(1, max_len, d_model))
|
| 175 |
+
self.xscale = 1.0
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
class NoPositionalEncoding(torch.nn.Module):
|
| 179 |
+
""" No position encoding
|
| 180 |
+
"""
|
| 181 |
+
|
| 182 |
+
def __init__(self, d_model: int, dropout_rate: float):
|
| 183 |
+
super().__init__()
|
| 184 |
+
self.d_model = d_model
|
| 185 |
+
self.dropout = torch.nn.Dropout(p=dropout_rate)
|
| 186 |
+
|
| 187 |
+
def forward(self,
|
| 188 |
+
x: torch.Tensor,
|
| 189 |
+
offset: Union[int, torch.Tensor] = 0) \
|
| 190 |
+
-> Tuple[torch.Tensor, torch.Tensor]:
|
| 191 |
+
""" Just return zero vector for interface compatibility
|
| 192 |
+
"""
|
| 193 |
+
pos_emb = torch.zeros(1, x.size(1), self.d_model).to(x.device)
|
| 194 |
+
return self.dropout(x), pos_emb
|
| 195 |
+
|
| 196 |
+
def position_encoding(self, offset: Union[int, torch.Tensor],
|
| 197 |
+
size: int) -> torch.Tensor:
|
| 198 |
+
return torch.zeros(1, size, self.d_model)
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
class EspnetRelPositionalEncoding(torch.nn.Module):
|
| 202 |
+
"""Relative positional encoding module (new implementation).
|
| 203 |
+
|
| 204 |
+
Details can be found in https://github.com/espnet/espnet/pull/2816.
|
| 205 |
+
|
| 206 |
+
See : Appendix B in https://arxiv.org/abs/1901.02860
|
| 207 |
+
|
| 208 |
+
Args:
|
| 209 |
+
d_model (int): Embedding dimension.
|
| 210 |
+
dropout_rate (float): Dropout rate.
|
| 211 |
+
max_len (int): Maximum input length.
|
| 212 |
+
|
| 213 |
+
"""
|
| 214 |
+
|
| 215 |
+
def __init__(self, d_model: int, dropout_rate: float, max_len: int = 5000):
|
| 216 |
+
"""Construct an PositionalEncoding object."""
|
| 217 |
+
super(EspnetRelPositionalEncoding, self).__init__()
|
| 218 |
+
self.d_model = d_model
|
| 219 |
+
self.xscale = math.sqrt(self.d_model)
|
| 220 |
+
self.dropout = torch.nn.Dropout(p=dropout_rate)
|
| 221 |
+
self.pe = None
|
| 222 |
+
self.extend_pe(torch.tensor(0.0).expand(1, max_len))
|
| 223 |
+
|
| 224 |
+
def extend_pe(self, x: torch.Tensor):
|
| 225 |
+
"""Reset the positional encodings."""
|
| 226 |
+
if self.pe is not None:
|
| 227 |
+
# self.pe contains both positive and negative parts
|
| 228 |
+
# the length of self.pe is 2 * input_len - 1
|
| 229 |
+
if self.pe.size(1) >= x.size(1) * 2 - 1:
|
| 230 |
+
if self.pe.dtype != x.dtype or self.pe.device != x.device:
|
| 231 |
+
self.pe = self.pe.to(dtype=x.dtype, device=x.device)
|
| 232 |
+
return
|
| 233 |
+
# Suppose `i` means to the position of query vecotr and `j` means the
|
| 234 |
+
# position of key vector. We use position relative positions when keys
|
| 235 |
+
# are to the left (i>j) and negative relative positions otherwise (i<j).
|
| 236 |
+
pe_positive = torch.zeros(x.size(1), self.d_model)
|
| 237 |
+
pe_negative = torch.zeros(x.size(1), self.d_model)
|
| 238 |
+
position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
|
| 239 |
+
div_term = torch.exp(
|
| 240 |
+
torch.arange(0, self.d_model, 2, dtype=torch.float32)
|
| 241 |
+
* -(math.log(10000.0) / self.d_model)
|
| 242 |
+
)
|
| 243 |
+
pe_positive[:, 0::2] = torch.sin(position * div_term)
|
| 244 |
+
pe_positive[:, 1::2] = torch.cos(position * div_term)
|
| 245 |
+
pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)
|
| 246 |
+
pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)
|
| 247 |
+
|
| 248 |
+
# Reserve the order of positive indices and concat both positive and
|
| 249 |
+
# negative indices. This is used to support the shifting trick
|
| 250 |
+
# as in https://arxiv.org/abs/1901.02860
|
| 251 |
+
pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)
|
| 252 |
+
pe_negative = pe_negative[1:].unsqueeze(0)
|
| 253 |
+
pe = torch.cat([pe_positive, pe_negative], dim=1)
|
| 254 |
+
self.pe = pe.to(device=x.device, dtype=x.dtype)
|
| 255 |
+
|
| 256 |
+
def forward(self, x: torch.Tensor, offset: Union[int, torch.Tensor] = 0) \
|
| 257 |
+
-> Tuple[torch.Tensor, torch.Tensor]:
|
| 258 |
+
"""Add positional encoding.
|
| 259 |
+
|
| 260 |
+
Args:
|
| 261 |
+
x (torch.Tensor): Input tensor (batch, time, `*`).
|
| 262 |
+
|
| 263 |
+
Returns:
|
| 264 |
+
torch.Tensor: Encoded tensor (batch, time, `*`).
|
| 265 |
+
|
| 266 |
+
"""
|
| 267 |
+
self.extend_pe(x)
|
| 268 |
+
x = x * self.xscale
|
| 269 |
+
pos_emb = self.position_encoding(size=x.size(1), offset=offset)
|
| 270 |
+
return self.dropout(x), self.dropout(pos_emb)
|
| 271 |
+
|
| 272 |
+
def position_encoding(self,
|
| 273 |
+
offset: Union[int, torch.Tensor],
|
| 274 |
+
size: int) -> torch.Tensor:
|
| 275 |
+
""" For getting encoding in a streaming fashion
|
| 276 |
+
|
| 277 |
+
Attention!!!!!
|
| 278 |
+
we apply dropout only once at the whole utterance level in a none
|
| 279 |
+
streaming way, but will call this function several times with
|
| 280 |
+
increasing input size in a streaming scenario, so the dropout will
|
| 281 |
+
be applied several times.
|
| 282 |
+
|
| 283 |
+
Args:
|
| 284 |
+
offset (int or torch.tensor): start offset
|
| 285 |
+
size (int): required size of position encoding
|
| 286 |
+
|
| 287 |
+
Returns:
|
| 288 |
+
torch.Tensor: Corresponding encoding
|
| 289 |
+
"""
|
| 290 |
+
pos_emb = self.pe[
|
| 291 |
+
:,
|
| 292 |
+
self.pe.size(1) // 2 - size + 1: self.pe.size(1) // 2 + size,
|
| 293 |
+
]
|
| 294 |
+
return pos_emb
|
src/chatterbox/models/s3gen/transformer/encoder_layer.py
ADDED
|
@@ -0,0 +1,236 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu)
|
| 2 |
+
# 2022 Xingchen Song ([email protected])
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
# Modified from ESPnet(https://github.com/espnet/espnet)
|
| 16 |
+
"""Encoder self-attention layer definition."""
|
| 17 |
+
|
| 18 |
+
from typing import Optional, Tuple
|
| 19 |
+
|
| 20 |
+
import torch
|
| 21 |
+
from torch import nn
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class TransformerEncoderLayer(nn.Module):
|
| 25 |
+
"""Encoder layer module.
|
| 26 |
+
|
| 27 |
+
Args:
|
| 28 |
+
size (int): Input dimension.
|
| 29 |
+
self_attn (torch.nn.Module): Self-attention module instance.
|
| 30 |
+
`MultiHeadedAttention` or `RelPositionMultiHeadedAttention`
|
| 31 |
+
instance can be used as the argument.
|
| 32 |
+
feed_forward (torch.nn.Module): Feed-forward module instance.
|
| 33 |
+
`PositionwiseFeedForward`, instance can be used as the argument.
|
| 34 |
+
dropout_rate (float): Dropout rate.
|
| 35 |
+
normalize_before (bool):
|
| 36 |
+
True: use layer_norm before each sub-block.
|
| 37 |
+
False: to use layer_norm after each sub-block.
|
| 38 |
+
"""
|
| 39 |
+
|
| 40 |
+
def __init__(
|
| 41 |
+
self,
|
| 42 |
+
size: int,
|
| 43 |
+
self_attn: torch.nn.Module,
|
| 44 |
+
feed_forward: torch.nn.Module,
|
| 45 |
+
dropout_rate: float,
|
| 46 |
+
normalize_before: bool = True,
|
| 47 |
+
):
|
| 48 |
+
"""Construct an EncoderLayer object."""
|
| 49 |
+
super().__init__()
|
| 50 |
+
self.self_attn = self_attn
|
| 51 |
+
self.feed_forward = feed_forward
|
| 52 |
+
self.norm1 = nn.LayerNorm(size, eps=1e-12)
|
| 53 |
+
self.norm2 = nn.LayerNorm(size, eps=1e-12)
|
| 54 |
+
self.dropout = nn.Dropout(dropout_rate)
|
| 55 |
+
self.size = size
|
| 56 |
+
self.normalize_before = normalize_before
|
| 57 |
+
|
| 58 |
+
def forward(
|
| 59 |
+
self,
|
| 60 |
+
x: torch.Tensor,
|
| 61 |
+
mask: torch.Tensor,
|
| 62 |
+
pos_emb: torch.Tensor,
|
| 63 |
+
mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
|
| 64 |
+
att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),
|
| 65 |
+
cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),
|
| 66 |
+
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
|
| 67 |
+
"""Compute encoded features.
|
| 68 |
+
|
| 69 |
+
Args:
|
| 70 |
+
x (torch.Tensor): (#batch, time, size)
|
| 71 |
+
mask (torch.Tensor): Mask tensor for the input (#batch, time,time),
|
| 72 |
+
(0, 0, 0) means fake mask.
|
| 73 |
+
pos_emb (torch.Tensor): just for interface compatibility
|
| 74 |
+
to ConformerEncoderLayer
|
| 75 |
+
mask_pad (torch.Tensor): does not used in transformer layer,
|
| 76 |
+
just for unified api with conformer.
|
| 77 |
+
att_cache (torch.Tensor): Cache tensor of the KEY & VALUE
|
| 78 |
+
(#batch=1, head, cache_t1, d_k * 2), head * d_k == size.
|
| 79 |
+
cnn_cache (torch.Tensor): Convolution cache in conformer layer
|
| 80 |
+
(#batch=1, size, cache_t2), not used here, it's for interface
|
| 81 |
+
compatibility to ConformerEncoderLayer.
|
| 82 |
+
Returns:
|
| 83 |
+
torch.Tensor: Output tensor (#batch, time, size).
|
| 84 |
+
torch.Tensor: Mask tensor (#batch, time, time).
|
| 85 |
+
torch.Tensor: att_cache tensor,
|
| 86 |
+
(#batch=1, head, cache_t1 + time, d_k * 2).
|
| 87 |
+
torch.Tensor: cnn_cahce tensor (#batch=1, size, cache_t2).
|
| 88 |
+
|
| 89 |
+
"""
|
| 90 |
+
residual = x
|
| 91 |
+
if self.normalize_before:
|
| 92 |
+
x = self.norm1(x)
|
| 93 |
+
x_att, new_att_cache = self.self_attn(x, x, x, mask, pos_emb=pos_emb, cache=att_cache)
|
| 94 |
+
x = residual + self.dropout(x_att)
|
| 95 |
+
if not self.normalize_before:
|
| 96 |
+
x = self.norm1(x)
|
| 97 |
+
|
| 98 |
+
residual = x
|
| 99 |
+
if self.normalize_before:
|
| 100 |
+
x = self.norm2(x)
|
| 101 |
+
x = residual + self.dropout(self.feed_forward(x))
|
| 102 |
+
if not self.normalize_before:
|
| 103 |
+
x = self.norm2(x)
|
| 104 |
+
|
| 105 |
+
fake_cnn_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)
|
| 106 |
+
return x, mask, new_att_cache, fake_cnn_cache
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
class ConformerEncoderLayer(nn.Module):
|
| 110 |
+
"""Encoder layer module.
|
| 111 |
+
Args:
|
| 112 |
+
size (int): Input dimension.
|
| 113 |
+
self_attn (torch.nn.Module): Self-attention module instance.
|
| 114 |
+
`MultiHeadedAttention` or `RelPositionMultiHeadedAttention`
|
| 115 |
+
instance can be used as the argument.
|
| 116 |
+
feed_forward (torch.nn.Module): Feed-forward module instance.
|
| 117 |
+
`PositionwiseFeedForward` instance can be used as the argument.
|
| 118 |
+
feed_forward_macaron (torch.nn.Module): Additional feed-forward module
|
| 119 |
+
instance.
|
| 120 |
+
`PositionwiseFeedForward` instance can be used as the argument.
|
| 121 |
+
conv_module (torch.nn.Module): Convolution module instance.
|
| 122 |
+
`ConvlutionModule` instance can be used as the argument.
|
| 123 |
+
dropout_rate (float): Dropout rate.
|
| 124 |
+
normalize_before (bool):
|
| 125 |
+
True: use layer_norm before each sub-block.
|
| 126 |
+
False: use layer_norm after each sub-block.
|
| 127 |
+
"""
|
| 128 |
+
|
| 129 |
+
def __init__(
|
| 130 |
+
self,
|
| 131 |
+
size: int,
|
| 132 |
+
self_attn: torch.nn.Module,
|
| 133 |
+
feed_forward: Optional[nn.Module] = None,
|
| 134 |
+
feed_forward_macaron: Optional[nn.Module] = None,
|
| 135 |
+
conv_module: Optional[nn.Module] = None,
|
| 136 |
+
dropout_rate: float = 0.1,
|
| 137 |
+
normalize_before: bool = True,
|
| 138 |
+
):
|
| 139 |
+
"""Construct an EncoderLayer object."""
|
| 140 |
+
super().__init__()
|
| 141 |
+
self.self_attn = self_attn
|
| 142 |
+
self.feed_forward = feed_forward
|
| 143 |
+
self.feed_forward_macaron = feed_forward_macaron
|
| 144 |
+
self.conv_module = conv_module
|
| 145 |
+
self.norm_ff = nn.LayerNorm(size, eps=1e-12) # for the FNN module
|
| 146 |
+
self.norm_mha = nn.LayerNorm(size, eps=1e-12) # for the MHA module
|
| 147 |
+
if feed_forward_macaron is not None:
|
| 148 |
+
self.norm_ff_macaron = nn.LayerNorm(size, eps=1e-12)
|
| 149 |
+
self.ff_scale = 0.5
|
| 150 |
+
else:
|
| 151 |
+
self.ff_scale = 1.0
|
| 152 |
+
if self.conv_module is not None:
|
| 153 |
+
self.norm_conv = nn.LayerNorm(size, eps=1e-12) # for the CNN module
|
| 154 |
+
self.norm_final = nn.LayerNorm(
|
| 155 |
+
size, eps=1e-12) # for the final output of the block
|
| 156 |
+
self.dropout = nn.Dropout(dropout_rate)
|
| 157 |
+
self.size = size
|
| 158 |
+
self.normalize_before = normalize_before
|
| 159 |
+
|
| 160 |
+
def forward(
|
| 161 |
+
self,
|
| 162 |
+
x: torch.Tensor,
|
| 163 |
+
mask: torch.Tensor,
|
| 164 |
+
pos_emb: torch.Tensor,
|
| 165 |
+
mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
|
| 166 |
+
att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),
|
| 167 |
+
cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),
|
| 168 |
+
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
|
| 169 |
+
"""Compute encoded features.
|
| 170 |
+
|
| 171 |
+
Args:
|
| 172 |
+
x (torch.Tensor): (#batch, time, size)
|
| 173 |
+
mask (torch.Tensor): Mask tensor for the input (#batch, time,time),
|
| 174 |
+
(0, 0, 0) means fake mask.
|
| 175 |
+
pos_emb (torch.Tensor): positional encoding, must not be None
|
| 176 |
+
for ConformerEncoderLayer.
|
| 177 |
+
mask_pad (torch.Tensor): batch padding mask used for conv module.
|
| 178 |
+
(#batch, 1,time), (0, 0, 0) means fake mask.
|
| 179 |
+
att_cache (torch.Tensor): Cache tensor of the KEY & VALUE
|
| 180 |
+
(#batch=1, head, cache_t1, d_k * 2), head * d_k == size.
|
| 181 |
+
cnn_cache (torch.Tensor): Convolution cache in conformer layer
|
| 182 |
+
(#batch=1, size, cache_t2)
|
| 183 |
+
Returns:
|
| 184 |
+
torch.Tensor: Output tensor (#batch, time, size).
|
| 185 |
+
torch.Tensor: Mask tensor (#batch, time, time).
|
| 186 |
+
torch.Tensor: att_cache tensor,
|
| 187 |
+
(#batch=1, head, cache_t1 + time, d_k * 2).
|
| 188 |
+
torch.Tensor: cnn_cahce tensor (#batch, size, cache_t2).
|
| 189 |
+
"""
|
| 190 |
+
|
| 191 |
+
# whether to use macaron style
|
| 192 |
+
if self.feed_forward_macaron is not None:
|
| 193 |
+
residual = x
|
| 194 |
+
if self.normalize_before:
|
| 195 |
+
x = self.norm_ff_macaron(x)
|
| 196 |
+
x = residual + self.ff_scale * self.dropout(
|
| 197 |
+
self.feed_forward_macaron(x))
|
| 198 |
+
if not self.normalize_before:
|
| 199 |
+
x = self.norm_ff_macaron(x)
|
| 200 |
+
|
| 201 |
+
# multi-headed self-attention module
|
| 202 |
+
residual = x
|
| 203 |
+
if self.normalize_before:
|
| 204 |
+
x = self.norm_mha(x)
|
| 205 |
+
x_att, new_att_cache = self.self_attn(x, x, x, mask, pos_emb,
|
| 206 |
+
att_cache)
|
| 207 |
+
x = residual + self.dropout(x_att)
|
| 208 |
+
if not self.normalize_before:
|
| 209 |
+
x = self.norm_mha(x)
|
| 210 |
+
|
| 211 |
+
# convolution module
|
| 212 |
+
# Fake new cnn cache here, and then change it in conv_module
|
| 213 |
+
new_cnn_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)
|
| 214 |
+
if self.conv_module is not None:
|
| 215 |
+
residual = x
|
| 216 |
+
if self.normalize_before:
|
| 217 |
+
x = self.norm_conv(x)
|
| 218 |
+
x, new_cnn_cache = self.conv_module(x, mask_pad, cnn_cache)
|
| 219 |
+
x = residual + self.dropout(x)
|
| 220 |
+
|
| 221 |
+
if not self.normalize_before:
|
| 222 |
+
x = self.norm_conv(x)
|
| 223 |
+
|
| 224 |
+
# feed forward module
|
| 225 |
+
residual = x
|
| 226 |
+
if self.normalize_before:
|
| 227 |
+
x = self.norm_ff(x)
|
| 228 |
+
|
| 229 |
+
x = residual + self.ff_scale * self.dropout(self.feed_forward(x))
|
| 230 |
+
if not self.normalize_before:
|
| 231 |
+
x = self.norm_ff(x)
|
| 232 |
+
|
| 233 |
+
if self.conv_module is not None:
|
| 234 |
+
x = self.norm_final(x)
|
| 235 |
+
|
| 236 |
+
return x, mask, new_att_cache, new_cnn_cache
|
src/chatterbox/models/s3gen/transformer/positionwise_feed_forward.py
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2019 Shigeki Karita
|
| 2 |
+
# 2020 Mobvoi Inc (Binbin Zhang)
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""Positionwise feed forward layer definition."""
|
| 16 |
+
|
| 17 |
+
import torch
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class PositionwiseFeedForward(torch.nn.Module):
|
| 21 |
+
"""Positionwise feed forward layer.
|
| 22 |
+
|
| 23 |
+
FeedForward are appied on each position of the sequence.
|
| 24 |
+
The output dim is same with the input dim.
|
| 25 |
+
|
| 26 |
+
Args:
|
| 27 |
+
idim (int): Input dimenstion.
|
| 28 |
+
hidden_units (int): The number of hidden units.
|
| 29 |
+
dropout_rate (float): Dropout rate.
|
| 30 |
+
activation (torch.nn.Module): Activation function
|
| 31 |
+
"""
|
| 32 |
+
|
| 33 |
+
def __init__(
|
| 34 |
+
self,
|
| 35 |
+
idim: int,
|
| 36 |
+
hidden_units: int,
|
| 37 |
+
dropout_rate: float,
|
| 38 |
+
activation: torch.nn.Module = torch.nn.ReLU(),
|
| 39 |
+
):
|
| 40 |
+
"""Construct a PositionwiseFeedForward object."""
|
| 41 |
+
super(PositionwiseFeedForward, self).__init__()
|
| 42 |
+
self.w_1 = torch.nn.Linear(idim, hidden_units)
|
| 43 |
+
self.activation = activation
|
| 44 |
+
self.dropout = torch.nn.Dropout(dropout_rate)
|
| 45 |
+
self.w_2 = torch.nn.Linear(hidden_units, idim)
|
| 46 |
+
|
| 47 |
+
def forward(self, xs: torch.Tensor) -> torch.Tensor:
|
| 48 |
+
"""Forward function.
|
| 49 |
+
|
| 50 |
+
Args:
|
| 51 |
+
xs: input tensor (B, L, D)
|
| 52 |
+
Returns:
|
| 53 |
+
output tensor, (B, L, D)
|
| 54 |
+
"""
|
| 55 |
+
return self.w_2(self.dropout(self.activation(self.w_1(xs))))
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class MoEFFNLayer(torch.nn.Module):
|
| 59 |
+
"""
|
| 60 |
+
Mixture of expert with Positionwise feed forward layer
|
| 61 |
+
See also figure 1 in https://arxiv.org/pdf/2305.15663.pdf
|
| 62 |
+
The output dim is same with the input dim.
|
| 63 |
+
|
| 64 |
+
Modified from https://github.com/Lightning-AI/lit-gpt/pull/823
|
| 65 |
+
https://github.com/mistralai/mistral-src/blob/b46d6/moe_one_file_ref.py#L203-L219
|
| 66 |
+
Args:
|
| 67 |
+
n_expert: number of expert.
|
| 68 |
+
n_expert_per_token: The actual number of experts used for each frame
|
| 69 |
+
idim (int): Input dimenstion.
|
| 70 |
+
hidden_units (int): The number of hidden units.
|
| 71 |
+
dropout_rate (float): Dropout rate.
|
| 72 |
+
activation (torch.nn.Module): Activation function
|
| 73 |
+
"""
|
| 74 |
+
|
| 75 |
+
def __init__(
|
| 76 |
+
self,
|
| 77 |
+
n_expert: int,
|
| 78 |
+
n_expert_per_token: int,
|
| 79 |
+
idim: int,
|
| 80 |
+
hidden_units: int,
|
| 81 |
+
dropout_rate: float,
|
| 82 |
+
activation: torch.nn.Module = torch.nn.ReLU(),
|
| 83 |
+
):
|
| 84 |
+
super(MoEFFNLayer, self).__init__()
|
| 85 |
+
self.gate = torch.nn.Linear(idim, n_expert, bias=False)
|
| 86 |
+
self.experts = torch.nn.ModuleList(
|
| 87 |
+
PositionwiseFeedForward(idim, hidden_units, dropout_rate,
|
| 88 |
+
activation) for _ in range(n_expert))
|
| 89 |
+
self.n_expert_per_token = n_expert_per_token
|
| 90 |
+
|
| 91 |
+
def forward(self, xs: torch.Tensor) -> torch.Tensor:
|
| 92 |
+
"""Foward function.
|
| 93 |
+
Args:
|
| 94 |
+
xs: input tensor (B, L, D)
|
| 95 |
+
Returns:
|
| 96 |
+
output tensor, (B, L, D)
|
| 97 |
+
|
| 98 |
+
"""
|
| 99 |
+
B, L, D = xs.size(
|
| 100 |
+
) # batch size, sequence length, embedding dimension (idim)
|
| 101 |
+
xs = xs.view(-1, D) # (B*L, D)
|
| 102 |
+
router = self.gate(xs) # (B*L, n_expert)
|
| 103 |
+
logits, indices = torch.topk(
|
| 104 |
+
router, self.n_expert_per_token
|
| 105 |
+
) # probs:(B*L, n_expert), indices: (B*L, n_expert)
|
| 106 |
+
weights = torch.nn.functional.softmax(
|
| 107 |
+
logits, dim=1,
|
| 108 |
+
dtype=torch.float).to(dtype=xs.dtype) # (B*L, n_expert_per_token)
|
| 109 |
+
output = torch.zeros_like(xs) # (B*L, D)
|
| 110 |
+
for i, expert in enumerate(self.experts):
|
| 111 |
+
mask = indices == i
|
| 112 |
+
batch_idx, ith_expert = torch.where(mask)
|
| 113 |
+
output[batch_idx] += weights[batch_idx, ith_expert, None] * expert(
|
| 114 |
+
xs[batch_idx])
|
| 115 |
+
return output.view(B, L, D)
|
src/chatterbox/models/s3gen/transformer/subsampling.py
ADDED
|
@@ -0,0 +1,383 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu)
|
| 2 |
+
# 2024 Alibaba Inc (Xiang Lyu)
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
# Modified from ESPnet(https://github.com/espnet/espnet)
|
| 16 |
+
"""Subsampling layer definition."""
|
| 17 |
+
|
| 18 |
+
from typing import Tuple, Union
|
| 19 |
+
|
| 20 |
+
import torch
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class BaseSubsampling(torch.nn.Module):
|
| 24 |
+
|
| 25 |
+
def __init__(self):
|
| 26 |
+
super().__init__()
|
| 27 |
+
self.right_context = 0
|
| 28 |
+
self.subsampling_rate = 1
|
| 29 |
+
|
| 30 |
+
def position_encoding(self, offset: Union[int, torch.Tensor],
|
| 31 |
+
size: int) -> torch.Tensor:
|
| 32 |
+
return self.pos_enc.position_encoding(offset, size)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class EmbedinigNoSubsampling(BaseSubsampling):
|
| 36 |
+
"""Embedding input without subsampling
|
| 37 |
+
"""
|
| 38 |
+
|
| 39 |
+
def __init__(self, idim: int, odim: int, dropout_rate: float,
|
| 40 |
+
pos_enc_class: torch.nn.Module):
|
| 41 |
+
super().__init__()
|
| 42 |
+
self.embed = torch.nn.Embedding(idim, odim)
|
| 43 |
+
self.pos_enc = pos_enc_class
|
| 44 |
+
|
| 45 |
+
def forward(
|
| 46 |
+
self,
|
| 47 |
+
x: torch.Tensor,
|
| 48 |
+
x_mask: torch.Tensor,
|
| 49 |
+
offset: Union[int, torch.Tensor] = 0
|
| 50 |
+
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
|
| 51 |
+
"""Input x.
|
| 52 |
+
|
| 53 |
+
Args:
|
| 54 |
+
x (torch.Tensor): Input tensor (#batch, time, idim).
|
| 55 |
+
x_mask (torch.Tensor): Input mask (#batch, 1, time).
|
| 56 |
+
|
| 57 |
+
Returns:
|
| 58 |
+
torch.Tensor: linear input tensor (#batch, time', odim),
|
| 59 |
+
where time' = time .
|
| 60 |
+
torch.Tensor: linear input mask (#batch, 1, time'),
|
| 61 |
+
where time' = time .
|
| 62 |
+
|
| 63 |
+
"""
|
| 64 |
+
x = self.embed(x)
|
| 65 |
+
x, pos_emb = self.pos_enc(x, offset)
|
| 66 |
+
return x, pos_emb, x_mask
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
class LinearNoSubsampling(BaseSubsampling):
|
| 70 |
+
"""Linear transform the input without subsampling
|
| 71 |
+
|
| 72 |
+
Args:
|
| 73 |
+
idim (int): Input dimension.
|
| 74 |
+
odim (int): Output dimension.
|
| 75 |
+
dropout_rate (float): Dropout rate.
|
| 76 |
+
|
| 77 |
+
"""
|
| 78 |
+
|
| 79 |
+
def __init__(self, idim: int, odim: int, dropout_rate: float,
|
| 80 |
+
pos_enc_class: torch.nn.Module):
|
| 81 |
+
"""Construct an linear object."""
|
| 82 |
+
super().__init__()
|
| 83 |
+
self.out = torch.nn.Sequential(
|
| 84 |
+
torch.nn.Linear(idim, odim),
|
| 85 |
+
torch.nn.LayerNorm(odim, eps=1e-5),
|
| 86 |
+
torch.nn.Dropout(dropout_rate),
|
| 87 |
+
)
|
| 88 |
+
self.pos_enc = pos_enc_class
|
| 89 |
+
self.right_context = 0
|
| 90 |
+
self.subsampling_rate = 1
|
| 91 |
+
|
| 92 |
+
def forward(
|
| 93 |
+
self,
|
| 94 |
+
x: torch.Tensor,
|
| 95 |
+
x_mask: torch.Tensor,
|
| 96 |
+
offset: Union[int, torch.Tensor] = 0
|
| 97 |
+
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
|
| 98 |
+
"""Input x.
|
| 99 |
+
|
| 100 |
+
Args:
|
| 101 |
+
x (torch.Tensor): Input tensor (#batch, time, idim).
|
| 102 |
+
x_mask (torch.Tensor): Input mask (#batch, 1, time).
|
| 103 |
+
|
| 104 |
+
Returns:
|
| 105 |
+
torch.Tensor: linear input tensor (#batch, time', odim),
|
| 106 |
+
where time' = time .
|
| 107 |
+
torch.Tensor: linear input mask (#batch, 1, time'),
|
| 108 |
+
where time' = time .
|
| 109 |
+
|
| 110 |
+
"""
|
| 111 |
+
x = self.out(x)
|
| 112 |
+
x, pos_emb = self.pos_enc(x, offset)
|
| 113 |
+
return x, pos_emb, x_mask
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
class Conv1dSubsampling2(BaseSubsampling):
|
| 117 |
+
"""Convolutional 1D subsampling (to 1/2 length).
|
| 118 |
+
It is designed for Whisper, ref:
|
| 119 |
+
https://github.com/openai/whisper/blob/main/whisper/model.py
|
| 120 |
+
|
| 121 |
+
Args:
|
| 122 |
+
idim (int): Input dimension.
|
| 123 |
+
odim (int): Output dimension.
|
| 124 |
+
dropout_rate (float): Dropout rate.
|
| 125 |
+
|
| 126 |
+
"""
|
| 127 |
+
|
| 128 |
+
def __init__(self, idim: int, odim: int, dropout_rate: float,
|
| 129 |
+
pos_enc_class: torch.nn.Module):
|
| 130 |
+
"""Construct an Conv1dSubsampling2 object."""
|
| 131 |
+
super().__init__()
|
| 132 |
+
self.conv = torch.nn.Sequential(
|
| 133 |
+
torch.nn.Conv1d(idim, odim, kernel_size=3, padding=1),
|
| 134 |
+
torch.nn.GELU(),
|
| 135 |
+
torch.nn.Conv1d(odim, odim, kernel_size=3, stride=2, padding=1),
|
| 136 |
+
torch.nn.GELU(),
|
| 137 |
+
)
|
| 138 |
+
self.pos_enc = pos_enc_class
|
| 139 |
+
# The right context for every conv layer is computed by:
|
| 140 |
+
# (kernel_size - 1) * frame_rate_of_this_layer
|
| 141 |
+
self.subsampling_rate = 2
|
| 142 |
+
# 4 = (3 - 1) * 1 + (3 - 1) * 1
|
| 143 |
+
self.right_context = 4
|
| 144 |
+
|
| 145 |
+
def forward(
|
| 146 |
+
self,
|
| 147 |
+
x: torch.Tensor,
|
| 148 |
+
x_mask: torch.Tensor,
|
| 149 |
+
offset: Union[int, torch.Tensor] = 0
|
| 150 |
+
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
|
| 151 |
+
"""Subsample x.
|
| 152 |
+
|
| 153 |
+
Args:
|
| 154 |
+
x (torch.Tensor): Input tensor (#batch, time, idim).
|
| 155 |
+
x_mask (torch.Tensor): Input mask (#batch, 1, time).
|
| 156 |
+
|
| 157 |
+
Returns:
|
| 158 |
+
torch.Tensor: Subsampled tensor (#batch, time', odim),
|
| 159 |
+
where time' = time // 2.
|
| 160 |
+
torch.Tensor: Subsampled mask (#batch, 1, time'),
|
| 161 |
+
where time' = time // 2.
|
| 162 |
+
torch.Tensor: positional encoding
|
| 163 |
+
|
| 164 |
+
"""
|
| 165 |
+
time = x.size(1)
|
| 166 |
+
x = x.transpose(1, 2) # (b, f, t)
|
| 167 |
+
x = self.conv(x)
|
| 168 |
+
x = x.transpose(1, 2) # (b, t, f)
|
| 169 |
+
x, pos_emb = self.pos_enc(x, offset)
|
| 170 |
+
return x, pos_emb, x_mask[:, :, (time + 1) % 2::2]
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
class Conv2dSubsampling4(BaseSubsampling):
|
| 174 |
+
"""Convolutional 2D subsampling (to 1/4 length).
|
| 175 |
+
|
| 176 |
+
Args:
|
| 177 |
+
idim (int): Input dimension.
|
| 178 |
+
odim (int): Output dimension.
|
| 179 |
+
dropout_rate (float): Dropout rate.
|
| 180 |
+
|
| 181 |
+
"""
|
| 182 |
+
|
| 183 |
+
def __init__(self, idim: int, odim: int, dropout_rate: float,
|
| 184 |
+
pos_enc_class: torch.nn.Module):
|
| 185 |
+
"""Construct an Conv2dSubsampling4 object."""
|
| 186 |
+
super().__init__()
|
| 187 |
+
        self.conv = torch.nn.Sequential(
            torch.nn.Conv2d(1, odim, 3, 2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(odim, odim, 3, 2),
            torch.nn.ReLU(),
        )
        self.out = torch.nn.Sequential(
            torch.nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim))
        self.pos_enc = pos_enc_class
        # The right context for every conv layer is computed by:
        # (kernel_size - 1) * frame_rate_of_this_layer
        self.subsampling_rate = 4
        # 6 = (3 - 1) * 1 + (3 - 1) * 2
        self.right_context = 6

    def forward(
            self,
            x: torch.Tensor,
            x_mask: torch.Tensor,
            offset: Union[int, torch.Tensor] = 0
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Subsample x.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: Subsampled tensor (#batch, time', odim),
                where time' = time // 4.
            torch.Tensor: Subsampled mask (#batch, 1, time'),
                where time' = time // 4.
            torch.Tensor: positional encoding

        """
        x = x.unsqueeze(1)  # (b, c=1, t, f)
        x = self.conv(x)
        b, c, t, f = x.size()
        x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))
        x, pos_emb = self.pos_enc(x, offset)
        return x, pos_emb, x_mask[:, :, 2::2][:, :, 2::2]


class Conv2dSubsampling6(BaseSubsampling):
    """Convolutional 2D subsampling (to 1/6 length).
    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.
        pos_enc (torch.nn.Module): Custom position encoding layer.
    """

    def __init__(self, idim: int, odim: int, dropout_rate: float,
                 pos_enc_class: torch.nn.Module):
        """Construct a Conv2dSubsampling6 object."""
        super().__init__()
        self.conv = torch.nn.Sequential(
            torch.nn.Conv2d(1, odim, 3, 2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(odim, odim, 5, 3),
            torch.nn.ReLU(),
        )
        self.linear = torch.nn.Linear(odim * (((idim - 1) // 2 - 2) // 3),
                                      odim)
        self.pos_enc = pos_enc_class
        # 10 = (3 - 1) * 1 + (5 - 1) * 2
        self.subsampling_rate = 6
        self.right_context = 10

    def forward(
            self,
            x: torch.Tensor,
            x_mask: torch.Tensor,
            offset: Union[int, torch.Tensor] = 0
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Subsample x.
        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: Subsampled tensor (#batch, time', odim),
                where time' = time // 6.
            torch.Tensor: Subsampled mask (#batch, 1, time'),
                where time' = time // 6.
            torch.Tensor: positional encoding
        """
        x = x.unsqueeze(1)  # (b, c, t, f)
        x = self.conv(x)
        b, c, t, f = x.size()
        x = self.linear(x.transpose(1, 2).contiguous().view(b, t, c * f))
        x, pos_emb = self.pos_enc(x, offset)
        return x, pos_emb, x_mask[:, :, 2::2][:, :, 4::3]


class Conv2dSubsampling8(BaseSubsampling):
    """Convolutional 2D subsampling (to 1/8 length).

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.

    """

    def __init__(self, idim: int, odim: int, dropout_rate: float,
                 pos_enc_class: torch.nn.Module):
        """Construct a Conv2dSubsampling8 object."""
        super().__init__()
        self.conv = torch.nn.Sequential(
            torch.nn.Conv2d(1, odim, 3, 2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(odim, odim, 3, 2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(odim, odim, 3, 2),
            torch.nn.ReLU(),
        )
        self.linear = torch.nn.Linear(
            odim * ((((idim - 1) // 2 - 1) // 2 - 1) // 2), odim)
        self.pos_enc = pos_enc_class
        self.subsampling_rate = 8
        # 14 = (3 - 1) * 1 + (3 - 1) * 2 + (3 - 1) * 4
        self.right_context = 14

    def forward(
            self,
            x: torch.Tensor,
            x_mask: torch.Tensor,
            offset: Union[int, torch.Tensor] = 0
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Subsample x.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: Subsampled tensor (#batch, time', odim),
                where time' = time // 8.
            torch.Tensor: Subsampled mask (#batch, 1, time'),
                where time' = time // 8.
            torch.Tensor: positional encoding
        """
        x = x.unsqueeze(1)  # (b, c, t, f)
        x = self.conv(x)
        b, c, t, f = x.size()
        x = self.linear(x.transpose(1, 2).contiguous().view(b, t, c * f))
        x, pos_emb = self.pos_enc(x, offset)
        return x, pos_emb, x_mask[:, :, 2::2][:, :, 2::2][:, :, 2::2]


class LegacyLinearNoSubsampling(BaseSubsampling):
    """Linear transform the input without subsampling

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.

    """

    def __init__(self, idim: int, odim: int, dropout_rate: float,
                 pos_enc_class: torch.nn.Module):
        """Construct a linear object."""
        super().__init__()
        self.out = torch.nn.Sequential(
            torch.nn.Linear(idim, odim),
            torch.nn.LayerNorm(odim, eps=1e-5),
            torch.nn.Dropout(dropout_rate),
            torch.nn.ReLU(),
        )
        self.pos_enc = pos_enc_class
        self.right_context = 0
        self.subsampling_rate = 1

    def forward(
            self,
            x: torch.Tensor,
            x_mask: torch.Tensor,
            offset: Union[int, torch.Tensor] = 0
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Input x.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: linear input tensor (#batch, time', odim),
                where time' = time.
            torch.Tensor: linear input mask (#batch, 1, time'),
                where time' = time.

        """
        x = self.out(x)
        x, pos_emb = self.pos_enc(x, offset)
        return x, pos_emb, x_mask
src/chatterbox/models/s3gen/transformer/upsample_encoder.py
ADDED
@@ -0,0 +1,318 @@
# Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu)
#               2022 Xingchen Song ([email protected])
#               2024 Alibaba Inc (Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from ESPnet(https://github.com/espnet/espnet)
"""Encoder definition."""
from typing import Tuple

import torch
from torch import nn
from torch.nn import functional as F

from .convolution import ConvolutionModule
from .encoder_layer import ConformerEncoderLayer
from .positionwise_feed_forward import PositionwiseFeedForward
from ..utils.class_utils import (
    COSYVOICE_EMB_CLASSES,
    COSYVOICE_SUBSAMPLE_CLASSES,
    COSYVOICE_ATTENTION_CLASSES,
    COSYVOICE_ACTIVATION_CLASSES,
)
from ..utils.mask import make_pad_mask
from ..utils.mask import add_optional_chunk_mask


class Upsample1D(nn.Module):
    """A 1D upsampling layer with an optional convolution.

    Parameters:
        channels (`int`):
            number of channels in the inputs and outputs.
        use_conv (`bool`, default `False`):
            option to use a convolution.
        use_conv_transpose (`bool`, default `False`):
            option to use a convolution transpose.
        out_channels (`int`, optional):
            number of output channels. Defaults to `channels`.
    """

    def __init__(self, channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels
        self.stride = stride
        # In this mode, first repeat interpolate, then conv with stride=1
        self.conv = nn.Conv1d(self.channels, self.out_channels, stride * 2 + 1, stride=1, padding=0)

    def forward(self, inputs: torch.Tensor, input_lengths: torch.Tensor):
        outputs = F.interpolate(inputs, scale_factor=float(self.stride), mode="nearest")
        outputs = F.pad(outputs, (self.stride * 2, 0), value=0.0)
        outputs = self.conv(outputs)
        return outputs, input_lengths * self.stride


class PreLookaheadLayer(nn.Module):
    def __init__(self, channels: int, pre_lookahead_len: int = 1):
        super().__init__()
        self.channels = channels
        self.pre_lookahead_len = pre_lookahead_len
        self.conv1 = nn.Conv1d(
            channels, channels,
            kernel_size=pre_lookahead_len + 1,
            stride=1, padding=0,
        )
        self.conv2 = nn.Conv1d(
            channels, channels,
            kernel_size=3, stride=1, padding=0,
        )

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """
        inputs: (batch_size, seq_len, channels)
        """
        outputs = inputs.transpose(1, 2).contiguous()
        # look ahead
        outputs = F.pad(outputs, (0, self.pre_lookahead_len), mode='constant', value=0.0)
        outputs = F.leaky_relu(self.conv1(outputs))
        # outputs
        outputs = F.pad(outputs, (2, 0), mode='constant', value=0.0)
        outputs = self.conv2(outputs)
        outputs = outputs.transpose(1, 2).contiguous()

        # residual connection
        outputs = outputs + inputs
        return outputs


class UpsampleConformerEncoder(torch.nn.Module):

    def __init__(
        self,
        input_size: int = 512,
        output_size: int = 512,
        attention_heads: int = 8,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        attention_dropout_rate: float = 0.1,
        input_layer: str = "linear",
        pos_enc_layer_type: str = "rel_pos_espnet",
        normalize_before: bool = True,
        static_chunk_size: int = 0,
        use_dynamic_chunk: bool = False,
        global_cmvn: torch.nn.Module = None,
        use_dynamic_left_chunk: bool = False,
        positionwise_conv_kernel_size: int = 1,
        macaron_style: bool = False,
        selfattention_layer_type: str = "rel_selfattn",
        activation_type: str = "swish",
        use_cnn_module: bool = False,
        cnn_module_kernel: int = 15,
        causal: bool = False,
        cnn_module_norm: str = "batch_norm",
        key_bias: bool = True,
        gradient_checkpointing: bool = False,
    ):
        """
        Args:
            input_size (int): input dim
            output_size (int): dimension of attention
            attention_heads (int): the number of heads of multi head attention
            linear_units (int): the hidden units number of position-wise feed
                forward
            num_blocks (int): the number of decoder blocks
            dropout_rate (float): dropout rate
            attention_dropout_rate (float): dropout rate in attention
            positional_dropout_rate (float): dropout rate after adding
                positional encoding
            input_layer (str): input layer type.
                optional [linear, conv2d, conv2d6, conv2d8]
            pos_enc_layer_type (str): Encoder positional encoding layer type.
                optional [abs_pos, scaled_abs_pos, rel_pos, no_pos]
            normalize_before (bool):
                True: use layer_norm before each sub-block of a layer.
                False: use layer_norm after each sub-block of a layer.
            static_chunk_size (int): chunk size for static chunk training and
                decoding
            use_dynamic_chunk (bool): whether use dynamic chunk size for
                training or not, you can only use fixed chunk(chunk_size > 0)
                or dynamic chunk size(use_dynamic_chunk = True)
            global_cmvn (Optional[torch.nn.Module]): Optional GlobalCMVN module
            use_dynamic_left_chunk (bool): whether use dynamic left chunk in
                dynamic chunk training
            key_bias: whether use bias in attention.linear_k, False for whisper models.
            gradient_checkpointing: rerunning a forward-pass segment for each
                checkpointed segment during backward.
        """
        super().__init__()
        self._output_size = output_size

        self.global_cmvn = global_cmvn
        self.embed = COSYVOICE_SUBSAMPLE_CLASSES[input_layer](
            input_size,
            output_size,
            dropout_rate,
            COSYVOICE_EMB_CLASSES[pos_enc_layer_type](output_size,
                                                      positional_dropout_rate),
        )

        self.normalize_before = normalize_before
        self.after_norm = torch.nn.LayerNorm(output_size, eps=1e-5)
        self.static_chunk_size = static_chunk_size
        self.use_dynamic_chunk = use_dynamic_chunk
        self.use_dynamic_left_chunk = use_dynamic_left_chunk
        self.gradient_checkpointing = gradient_checkpointing
        activation = COSYVOICE_ACTIVATION_CLASSES[activation_type]()
        # self-attention module definition
        encoder_selfattn_layer_args = (
            attention_heads,
            output_size,
            attention_dropout_rate,
            key_bias,
        )
        # feed-forward module definition
        positionwise_layer_args = (
            output_size,
            linear_units,
            dropout_rate,
            activation,
        )
        # convolution module definition
        convolution_layer_args = (output_size, cnn_module_kernel, activation,
                                  cnn_module_norm, causal)
        self.pre_lookahead_layer = PreLookaheadLayer(channels=512, pre_lookahead_len=3)
        self.encoders = torch.nn.ModuleList([
            ConformerEncoderLayer(
                output_size,
                COSYVOICE_ATTENTION_CLASSES[selfattention_layer_type](
                    *encoder_selfattn_layer_args),
                PositionwiseFeedForward(*positionwise_layer_args),
                PositionwiseFeedForward(
                    *positionwise_layer_args) if macaron_style else None,
                ConvolutionModule(
                    *convolution_layer_args) if use_cnn_module else None,
                dropout_rate,
                normalize_before,
            ) for _ in range(num_blocks)
        ])
        self.up_layer = Upsample1D(channels=512, out_channels=512, stride=2)
        self.up_embed = COSYVOICE_SUBSAMPLE_CLASSES[input_layer](
            input_size,
            output_size,
            dropout_rate,
            COSYVOICE_EMB_CLASSES[pos_enc_layer_type](output_size,
                                                      positional_dropout_rate),
        )
        self.up_encoders = torch.nn.ModuleList([
            ConformerEncoderLayer(
                output_size,
                COSYVOICE_ATTENTION_CLASSES[selfattention_layer_type](
                    *encoder_selfattn_layer_args),
                PositionwiseFeedForward(*positionwise_layer_args),
                PositionwiseFeedForward(
                    *positionwise_layer_args) if macaron_style else None,
                ConvolutionModule(
                    *convolution_layer_args) if use_cnn_module else None,
                dropout_rate,
                normalize_before,
            ) for _ in range(4)
        ])

    def output_size(self) -> int:
        return self._output_size

    def forward(
        self,
        xs: torch.Tensor,
        xs_lens: torch.Tensor,
        decoding_chunk_size: int = 0,
        num_decoding_left_chunks: int = -1,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Embed positions in tensor.

        Args:
            xs: padded input tensor (B, T, D)
            xs_lens: input length (B)
            decoding_chunk_size: decoding chunk size for dynamic chunk
                0: default for training, use random dynamic chunk.
                <0: for decoding, use full chunk.
                >0: for decoding, use fixed chunk size as set.
            num_decoding_left_chunks: number of left chunks, this is for decoding,
                the chunk size is decoding_chunk_size.
                >=0: use num_decoding_left_chunks
                <0: use all left chunks
        Returns:
            encoder output tensor xs, and subsampled masks
            xs: padded output tensor (B, T' ~= T/subsample_rate, D)
            masks: torch.Tensor batch padding mask after subsample
                (B, 1, T' ~= T/subsample_rate)
        NOTE(xcsong):
            We pass the `__call__` method of the modules instead of `forward` to the
            checkpointing API because `__call__` attaches all the hooks of the module.
            https://discuss.pytorch.org/t/any-different-between-model-input-and-model-forward-input/3690/2
        """
        T = xs.size(1)
        masks = ~make_pad_mask(xs_lens, T).unsqueeze(1)  # (B, 1, T)
        if self.global_cmvn is not None:
            xs = self.global_cmvn(xs)
        xs, pos_emb, masks = self.embed(xs, masks)
        mask_pad = masks  # (B, 1, T/subsample_rate)
        chunk_masks = add_optional_chunk_mask(xs, masks,
                                              self.use_dynamic_chunk,
                                              self.use_dynamic_left_chunk,
                                              decoding_chunk_size,
                                              self.static_chunk_size,
                                              num_decoding_left_chunks)
        # lookahead + conformer encoder
        xs = self.pre_lookahead_layer(xs)
        xs = self.forward_layers(xs, chunk_masks, pos_emb, mask_pad)

        # upsample + conformer encoder
        xs = xs.transpose(1, 2).contiguous()
        xs, xs_lens = self.up_layer(xs, xs_lens)
        xs = xs.transpose(1, 2).contiguous()
        T = xs.size(1)
        masks = ~make_pad_mask(xs_lens, T).unsqueeze(1)  # (B, 1, T)
        xs, pos_emb, masks = self.up_embed(xs, masks)
        mask_pad = masks  # (B, 1, T/subsample_rate)
        chunk_masks = add_optional_chunk_mask(xs, masks,
                                              self.use_dynamic_chunk,
                                              self.use_dynamic_left_chunk,
                                              decoding_chunk_size,
                                              self.static_chunk_size * self.up_layer.stride,
                                              num_decoding_left_chunks)
        xs = self.forward_up_layers(xs, chunk_masks, pos_emb, mask_pad)

        if self.normalize_before:
            xs = self.after_norm(xs)
        # Here we assume the mask is not changed in encoder layers, so just
        # return the masks before encoder layers, and the masks will be used
        # for cross attention with decoder later
        return xs, masks

    def forward_layers(self, xs: torch.Tensor, chunk_masks: torch.Tensor,
                       pos_emb: torch.Tensor,
                       mask_pad: torch.Tensor) -> torch.Tensor:
        for layer in self.encoders:
            xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad)
        return xs

    def forward_up_layers(self, xs: torch.Tensor, chunk_masks: torch.Tensor,
                          pos_emb: torch.Tensor,
                          mask_pad: torch.Tensor) -> torch.Tensor:
        for layer in self.up_encoders:
            xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad)
        return xs
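
UpsampleConformerEncoder runs the usual conformer stack at token rate, then doubles the frame rate with Upsample1D before a second, shorter stack. A minimal sketch (not part of the uploaded file, toy shapes) of what the upsampling step alone does:

import torch

up = Upsample1D(channels=512, out_channels=512, stride=2)
feats = torch.randn(1, 512, 50)            # (batch, channels, time)
lengths = torch.tensor([50])
out, out_lengths = up(feats, lengths)
# nearest-neighbour interpolation followed by a causally padded Conv1d keeps the length exact:
# out: (1, 512, 100), out_lengths: tensor([100])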
src/chatterbox/models/s3gen/utils/class_utils.py
ADDED
@@ -0,0 +1,71 @@
# Copyright [2023-11-28] <[email protected], Xingchen Song>
#            2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..transformer.activation import Swish
from ..transformer.subsampling import (
    LinearNoSubsampling,
    EmbedinigNoSubsampling,
    Conv1dSubsampling2,
    Conv2dSubsampling4,
    Conv2dSubsampling6,
    Conv2dSubsampling8,
)
from ..transformer.embedding import (
    PositionalEncoding,
    RelPositionalEncoding,
    WhisperPositionalEncoding,
    LearnablePositionalEncoding,
    NoPositionalEncoding)
from ..transformer.attention import (MultiHeadedAttention,
                                     RelPositionMultiHeadedAttention)
from ..transformer.embedding import EspnetRelPositionalEncoding
from ..transformer.subsampling import LegacyLinearNoSubsampling


COSYVOICE_ACTIVATION_CLASSES = {
    "hardtanh": torch.nn.Hardtanh,
    "tanh": torch.nn.Tanh,
    "relu": torch.nn.ReLU,
    "selu": torch.nn.SELU,
    "swish": getattr(torch.nn, "SiLU", Swish),
    "gelu": torch.nn.GELU,
}

COSYVOICE_SUBSAMPLE_CLASSES = {
    "linear": LinearNoSubsampling,
    "linear_legacy": LegacyLinearNoSubsampling,
    "embed": EmbedinigNoSubsampling,
    "conv1d2": Conv1dSubsampling2,
    "conv2d": Conv2dSubsampling4,
    "conv2d6": Conv2dSubsampling6,
    "conv2d8": Conv2dSubsampling8,
    'paraformer_dummy': torch.nn.Identity
}

COSYVOICE_EMB_CLASSES = {
    "embed": PositionalEncoding,
    "abs_pos": PositionalEncoding,
    "rel_pos": RelPositionalEncoding,
    "rel_pos_espnet": EspnetRelPositionalEncoding,
    "no_pos": NoPositionalEncoding,
    "abs_pos_whisper": WhisperPositionalEncoding,
    "embed_learnable_pe": LearnablePositionalEncoding,
}

COSYVOICE_ATTENTION_CLASSES = {
    "selfattn": MultiHeadedAttention,
    "rel_selfattn": RelPositionMultiHeadedAttention,
}
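
These dictionaries are plain registries, so an encoder can be wired up from config strings; UpsampleConformerEncoder builds its input layer this way. A minimal sketch (not part of the uploaded file; the argument values are illustrative and mirror the call in upsample_encoder.py):

pos_enc = COSYVOICE_EMB_CLASSES["rel_pos_espnet"](512, 0.1)            # (output_size, positional_dropout_rate)
embed = COSYVOICE_SUBSAMPLE_CLASSES["linear"](512, 512, 0.1, pos_enc)  # (input_size, output_size, dropout, pos_enc)
attn = COSYVOICE_ATTENTION_CLASSES["rel_selfattn"](8, 512, 0.1, True)  # (heads, size, dropout, key_bias)
act = COSYVOICE_ACTIVATION_CLASSES["swish"]()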
src/chatterbox/models/s3gen/utils/mask.py
ADDED
@@ -0,0 +1,193 @@
# Copyright (c) 2019 Shigeki Karita
#               2020 Mobvoi Inc (Binbin Zhang)
#               2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging  # used by the warning in add_optional_chunk_mask below

import torch

'''
def subsequent_mask(
        size: int,
        device: torch.device = torch.device("cpu"),
) -> torch.Tensor:
    """Create mask for subsequent steps (size, size).

    This mask is used only in decoder which works in an auto-regressive mode.
    This means the current step could only do attention with its left steps.

    In encoder, fully attention is used when streaming is not necessary and
    the sequence is not long. In this case, no attention mask is needed.

    When streaming is needed, chunk-based attention is used in encoder. See
    subsequent_chunk_mask for the chunk-based attention mask.

    Args:
        size (int): size of mask
        str device (str): "cpu" or "cuda" or torch.Tensor.device
        dtype (torch.device): result dtype

    Returns:
        torch.Tensor: mask

    Examples:
        >>> subsequent_mask(3)
        [[1, 0, 0],
         [1, 1, 0],
         [1, 1, 1]]
    """
    ret = torch.ones(size, size, device=device, dtype=torch.bool)
    return torch.tril(ret)
'''


def subsequent_chunk_mask(
        size: int,
        chunk_size: int,
        num_left_chunks: int = -1,
        device: torch.device = torch.device("cpu"),
) -> torch.Tensor:
    """Create mask for subsequent steps (size, size) with chunk size,
       this is for streaming encoder

    Args:
        size (int): size of mask
        chunk_size (int): size of chunk
        num_left_chunks (int): number of left chunks
            <0: use full chunk
            >=0: use num_left_chunks
        device (torch.device): "cpu" or "cuda" or torch.Tensor.device

    Returns:
        torch.Tensor: mask

    Examples:
        >>> subsequent_chunk_mask(4, 2)
        [[1, 1, 0, 0],
         [1, 1, 0, 0],
         [1, 1, 1, 1],
         [1, 1, 1, 1]]
    """
    # NOTE this modified implementation meets onnx export requirements, but it doesn't support num_left_chunks
    # actually this is not needed after we have inference cache implemented, will remove it later
    pos_idx = torch.arange(size, device=device)
    block_value = (torch.div(pos_idx, chunk_size, rounding_mode='trunc') + 1) * chunk_size
    ret = pos_idx.unsqueeze(0) < block_value.unsqueeze(1)
    return ret


def add_optional_chunk_mask(xs: torch.Tensor,
                            masks: torch.Tensor,
                            use_dynamic_chunk: bool,
                            use_dynamic_left_chunk: bool,
                            decoding_chunk_size: int,
                            static_chunk_size: int,
                            num_decoding_left_chunks: int,
                            enable_full_context: bool = True):
    """ Apply optional mask for encoder.

    Args:
        xs (torch.Tensor): padded input, (B, L, D), L for max length
        mask (torch.Tensor): mask for xs, (B, 1, L)
        use_dynamic_chunk (bool): whether to use dynamic chunk or not
        use_dynamic_left_chunk (bool): whether to use dynamic left chunk for
            training.
        decoding_chunk_size (int): decoding chunk size for dynamic chunk, it's
            0: default for training, use random dynamic chunk.
            <0: for decoding, use full chunk.
            >0: for decoding, use fixed chunk size as set.
        static_chunk_size (int): chunk size for static chunk training/decoding
            if it's greater than 0, if use_dynamic_chunk is true,
            this parameter will be ignored
        num_decoding_left_chunks: number of left chunks, this is for decoding,
            the chunk size is decoding_chunk_size.
            >=0: use num_decoding_left_chunks
            <0: use all left chunks
        enable_full_context (bool):
            True: chunk size is either [1, 25] or full context(max_len)
            False: chunk size ~ U[1, 25]

    Returns:
        torch.Tensor: chunk mask of the input xs.
    """
    # Whether to use chunk mask or not
    if use_dynamic_chunk:
        max_len = xs.size(1)
        if decoding_chunk_size < 0:
            chunk_size = max_len
            num_left_chunks = -1
        elif decoding_chunk_size > 0:
            chunk_size = decoding_chunk_size
            num_left_chunks = num_decoding_left_chunks
        else:
            # chunk size is either [1, 25] or full context(max_len).
            # Since we use 4 times subsampling and allow up to 1s(100 frames)
            # delay, the maximum frame is 100 / 4 = 25.
            chunk_size = torch.randint(1, max_len, (1, )).item()
            num_left_chunks = -1
            if chunk_size > max_len // 2 and enable_full_context:
                chunk_size = max_len
            else:
                chunk_size = chunk_size % 25 + 1
                if use_dynamic_left_chunk:
                    max_left_chunks = (max_len - 1) // chunk_size
                    num_left_chunks = torch.randint(0, max_left_chunks,
                                                    (1, )).item()
        chunk_masks = subsequent_chunk_mask(xs.size(1), chunk_size,
                                            num_left_chunks,
                                            xs.device)  # (L, L)
        chunk_masks = chunk_masks.unsqueeze(0)  # (1, L, L)
        chunk_masks = masks & chunk_masks  # (B, L, L)
    elif static_chunk_size > 0:
        num_left_chunks = num_decoding_left_chunks
        chunk_masks = subsequent_chunk_mask(xs.size(1), static_chunk_size,
                                            num_left_chunks,
                                            xs.device)  # (L, L)
        chunk_masks = chunk_masks.unsqueeze(0)  # (1, L, L)
        chunk_masks = masks & chunk_masks  # (B, L, L)
    else:
        chunk_masks = masks
    assert chunk_masks.dtype == torch.bool
    if (chunk_masks.sum(dim=-1) == 0).sum().item() != 0:
        logging.warning('get chunk_masks all false at some timestep, force set to true, make sure they are masked in future computation!')
        chunk_masks[chunk_masks.sum(dim=-1) == 0] = True
    return chunk_masks


def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
    """Make mask tensor containing indices of padded part.

    See description of make_non_pad_mask.

    Args:
        lengths (torch.Tensor): Batch of lengths (B,).
    Returns:
        torch.Tensor: Mask tensor containing indices of padded part.

    Examples:
        >>> lengths = [5, 3, 2]
        >>> make_pad_mask(lengths)
        masks = [[0, 0, 0, 0, 0],
                 [0, 0, 0, 1, 1],
                 [0, 0, 1, 1, 1]]
    """
    batch_size = lengths.size(0)
    max_len = max_len if max_len > 0 else lengths.max().item()
    seq_range = torch.arange(0,
                             max_len,
                             dtype=torch.int64,
                             device=lengths.device)
    seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
    seq_length_expand = lengths.unsqueeze(-1)
    mask = seq_range_expand >= seq_length_expand
    return mask
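
A minimal sketch (not part of the uploaded file) reproducing the two docstring examples; both functions return boolean tensors:

import torch

make_pad_mask(torch.tensor([5, 3, 2]))
# [[F, F, F, F, F],
#  [F, F, F, T, T],
#  [F, F, T, T, T]]        True marks padded positions

subsequent_chunk_mask(4, 2)
# [[T, T, F, F],
#  [T, T, F, F],
#  [T, T, T, T],
#  [T, T, T, T]]           each frame attends up to the end of its own chunk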
src/chatterbox/models/s3gen/utils/mel.py
ADDED
@@ -0,0 +1,81 @@
"""mel-spectrogram extraction in Matcha-TTS"""
from librosa.filters import mel as librosa_mel_fn
import torch
import numpy as np


# NOTE: they declared these global vars
mel_basis = {}
hann_window = {}


def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
    return torch.log(torch.clamp(x, min=clip_val) * C)


def spectral_normalize_torch(magnitudes):
    output = dynamic_range_compression_torch(magnitudes)
    return output

"""
feat_extractor: !name:matcha.utils.audio.mel_spectrogram
    n_fft: 1920
    num_mels: 80
    sampling_rate: 24000
    hop_size: 480
    win_size: 1920
    fmin: 0
    fmax: 8000
    center: False

"""

def mel_spectrogram(y, n_fft=1920, num_mels=80, sampling_rate=24000, hop_size=480, win_size=1920,
                    fmin=0, fmax=8000, center=False):
    """Copied from https://github.com/shivammehta25/Matcha-TTS/blob/main/matcha/utils/audio.py
    Set default values according to Cosyvoice's config.
    """

    if isinstance(y, np.ndarray):
        y = torch.tensor(y).float()

    if len(y.shape) == 1:
        y = y[None, ]

    if torch.min(y) < -1.0:
        print("min value is ", torch.min(y))
    if torch.max(y) > 1.0:
        print("max value is ", torch.max(y))

    global mel_basis, hann_window  # pylint: disable=global-statement,global-variable-not-assigned
    if f"{str(fmax)}_{str(y.device)}" not in mel_basis:
        mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
        mel_basis[str(fmax) + "_" + str(y.device)] = torch.from_numpy(mel).float().to(y.device)
        hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)

    y = torch.nn.functional.pad(
        y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode="reflect"
    )
    y = y.squeeze(1)

    spec = torch.view_as_real(
        torch.stft(
            y,
            n_fft,
            hop_length=hop_size,
            win_length=win_size,
            window=hann_window[str(y.device)],
            center=center,
            pad_mode="reflect",
            normalized=False,
            onesided=True,
            return_complex=True,
        )
    )

    spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9))

    spec = torch.matmul(mel_basis[str(fmax) + "_" + str(y.device)], spec)
    spec = spectral_normalize_torch(spec)

    return spec
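
With the CosyVoice defaults above (24 kHz, hop 480, center=False), the output has 80 mel bins and roughly one frame per 20 ms of audio. A minimal sketch (not part of the uploaded file, toy input):

import torch

wav = torch.rand(24000) * 2 - 1     # 1 s of audio at 24 kHz, values in [-1, 1]
mel = mel_spectrogram(wav)          # (1, 80, 50): ~24000 / 480 frames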
src/chatterbox/models/s3gen/xvector.py
ADDED
@@ -0,0 +1,428 @@
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)
# Modified from 3D-Speaker (https://github.com/alibaba-damo-academy/3D-Speaker)


from collections import OrderedDict
import torch
import torch.nn.functional as F
import torch.utils.checkpoint as cp
import torchaudio.compliance.kaldi as Kaldi


def pad_list(xs, pad_value):
    """Perform padding for the list of tensors.

    Args:
        xs (List): List of Tensors [(T_1, `*`), (T_2, `*`), ..., (T_B, `*`)].
        pad_value (float): Value for padding.

    Returns:
        Tensor: Padded tensor (B, Tmax, `*`).

    Examples:
        >>> x = [torch.ones(4), torch.ones(2), torch.ones(1)]
        >>> x
        [tensor([1., 1., 1., 1.]), tensor([1., 1.]), tensor([1.])]
        >>> pad_list(x, 0)
        tensor([[1., 1., 1., 1.],
                [1., 1., 0., 0.],
                [1., 0., 0., 0.]])

    """
    n_batch = len(xs)
    max_len = max(x.size(0) for x in xs)
    pad = xs[0].new(n_batch, max_len, *xs[0].size()[1:]).fill_(pad_value)

    for i in range(n_batch):
        pad[i, : xs[i].size(0)] = xs[i]

    return pad


def extract_feature(audio):
    features = []
    feature_times = []
    feature_lengths = []
    for au in audio:
        feature = Kaldi.fbank(au.unsqueeze(0), num_mel_bins=80)
        feature = feature - feature.mean(dim=0, keepdim=True)
        features.append(feature)
        feature_times.append(au.shape[0])
        feature_lengths.append(feature.shape[0])
    # padding for batch inference
    features_padded = pad_list(features, pad_value=0)
    # features = torch.cat(features)
    return features_padded, feature_lengths, feature_times


class BasicResBlock(torch.nn.Module):
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicResBlock, self).__init__()
        self.conv1 = torch.nn.Conv2d(
            in_planes, planes, kernel_size=3, stride=(stride, 1), padding=1, bias=False
        )
        self.bn1 = torch.nn.BatchNorm2d(planes)
        self.conv2 = torch.nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = torch.nn.BatchNorm2d(planes)

        self.shortcut = torch.nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = torch.nn.Sequential(
                torch.nn.Conv2d(
                    in_planes,
                    self.expansion * planes,
                    kernel_size=1,
                    stride=(stride, 1),
                    bias=False,
                ),
                torch.nn.BatchNorm2d(self.expansion * planes),
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out


class FCM(torch.nn.Module):
    def __init__(self, block=BasicResBlock, num_blocks=[2, 2], m_channels=32, feat_dim=80):
        super(FCM, self).__init__()
        self.in_planes = m_channels
        self.conv1 = torch.nn.Conv2d(1, m_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = torch.nn.BatchNorm2d(m_channels)

        self.layer1 = self._make_layer(block, m_channels, num_blocks[0], stride=2)
        self.layer2 = self._make_layer(block, m_channels, num_blocks[0], stride=2)

        self.conv2 = torch.nn.Conv2d(
            m_channels, m_channels, kernel_size=3, stride=(2, 1), padding=1, bias=False
        )
        self.bn2 = torch.nn.BatchNorm2d(m_channels)
        self.out_channels = m_channels * (feat_dim // 8)

    def _make_layer(self, block, planes, num_blocks, stride):
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return torch.nn.Sequential(*layers)

    def forward(self, x):
        x = x.unsqueeze(1)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = F.relu(self.bn2(self.conv2(out)))

        shape = out.shape
        out = out.reshape(shape[0], shape[1] * shape[2], shape[3])
        return out


def get_nonlinear(config_str, channels):
    nonlinear = torch.nn.Sequential()
    for name in config_str.split("-"):
        if name == "relu":
            nonlinear.add_module("relu", torch.nn.ReLU(inplace=True))
        elif name == "prelu":
            nonlinear.add_module("prelu", torch.nn.PReLU(channels))
        elif name == "batchnorm":
            nonlinear.add_module("batchnorm", torch.nn.BatchNorm1d(channels))
        elif name == "batchnorm_":
            nonlinear.add_module("batchnorm", torch.nn.BatchNorm1d(channels, affine=False))
        else:
            raise ValueError("Unexpected module ({}).".format(name))
    return nonlinear


def statistics_pooling(x, dim=-1, keepdim=False, unbiased=True, eps=1e-2):
    mean = x.mean(dim=dim)
    std = x.std(dim=dim, unbiased=unbiased)
    stats = torch.cat([mean, std], dim=-1)
    if keepdim:
        stats = stats.unsqueeze(dim=dim)
    return stats


class StatsPool(torch.nn.Module):
    def forward(self, x):
        return statistics_pooling(x)


class TDNNLayer(torch.nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        bias=False,
        config_str="batchnorm-relu",
    ):
        super(TDNNLayer, self).__init__()
        if padding < 0:
            assert (
                kernel_size % 2 == 1
            ), "Expect equal paddings, but got even kernel size ({})".format(kernel_size)
            padding = (kernel_size - 1) // 2 * dilation
        self.linear = torch.nn.Conv1d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias,
        )
        self.nonlinear = get_nonlinear(config_str, out_channels)

    def forward(self, x):
        x = self.linear(x)
        x = self.nonlinear(x)
        return x


class CAMLayer(torch.nn.Module):
    def __init__(
        self, bn_channels, out_channels, kernel_size, stride, padding, dilation, bias, reduction=2
    ):
        super(CAMLayer, self).__init__()
        self.linear_local = torch.nn.Conv1d(
            bn_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias,
        )
        self.linear1 = torch.nn.Conv1d(bn_channels, bn_channels // reduction, 1)
        self.relu = torch.nn.ReLU(inplace=True)
        self.linear2 = torch.nn.Conv1d(bn_channels // reduction, out_channels, 1)
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        y = self.linear_local(x)
        context = x.mean(-1, keepdim=True) + self.seg_pooling(x)
        context = self.relu(self.linear1(context))
        m = self.sigmoid(self.linear2(context))
        return y * m

    def seg_pooling(self, x, seg_len=100, stype="avg"):
        if stype == "avg":
            seg = F.avg_pool1d(x, kernel_size=seg_len, stride=seg_len, ceil_mode=True)
        elif stype == "max":
            seg = F.max_pool1d(x, kernel_size=seg_len, stride=seg_len, ceil_mode=True)
        else:
            raise ValueError("Wrong segment pooling type.")
        shape = seg.shape
        seg = seg.unsqueeze(-1).expand(*shape, seg_len).reshape(*shape[:-1], -1)
        seg = seg[..., : x.shape[-1]]
        return seg


class CAMDenseTDNNLayer(torch.nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        bn_channels,
        kernel_size,
        stride=1,
        dilation=1,
        bias=False,
        config_str="batchnorm-relu",
        memory_efficient=False,
    ):
        super(CAMDenseTDNNLayer, self).__init__()
        assert kernel_size % 2 == 1, "Expect equal paddings, but got even kernel size ({})".format(
            kernel_size
        )
        padding = (kernel_size - 1) // 2 * dilation
        self.memory_efficient = memory_efficient
        self.nonlinear1 = get_nonlinear(config_str, in_channels)
        self.linear1 = torch.nn.Conv1d(in_channels, bn_channels, 1, bias=False)
        self.nonlinear2 = get_nonlinear(config_str, bn_channels)
        self.cam_layer = CAMLayer(
            bn_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias,
        )

    def bn_function(self, x):
        return self.linear1(self.nonlinear1(x))

    def forward(self, x):
        if self.training and self.memory_efficient:
            x = cp.checkpoint(self.bn_function, x)
        else:
            x = self.bn_function(x)
        x = self.cam_layer(self.nonlinear2(x))
        return x


class CAMDenseTDNNBlock(torch.nn.ModuleList):
    def __init__(
        self,
        num_layers,
        in_channels,
        out_channels,
        bn_channels,
        kernel_size,
        stride=1,
        dilation=1,
        bias=False,
        config_str="batchnorm-relu",
        memory_efficient=False,
    ):
        super(CAMDenseTDNNBlock, self).__init__()
        for i in range(num_layers):
            layer = CAMDenseTDNNLayer(
                in_channels=in_channels + i * out_channels,
                out_channels=out_channels,
                bn_channels=bn_channels,
                kernel_size=kernel_size,
                stride=stride,
                dilation=dilation,
                bias=bias,
                config_str=config_str,
                memory_efficient=memory_efficient,
            )
            self.add_module("tdnnd%d" % (i + 1), layer)

    def forward(self, x):
        for layer in self:
            x = torch.cat([x, layer(x)], dim=1)
        return x


class TransitLayer(torch.nn.Module):
    def __init__(self, in_channels, out_channels, bias=True, config_str="batchnorm-relu"):
        super(TransitLayer, self).__init__()
        self.nonlinear = get_nonlinear(config_str, in_channels)
        self.linear = torch.nn.Conv1d(in_channels, out_channels, 1, bias=bias)

    def forward(self, x):
        x = self.nonlinear(x)
        x = self.linear(x)
        return x


class DenseLayer(torch.nn.Module):
    def __init__(self, in_channels, out_channels, bias=False, config_str="batchnorm-relu"):
        super(DenseLayer, self).__init__()
        self.linear = torch.nn.Conv1d(in_channels, out_channels, 1, bias=bias)
        self.nonlinear = get_nonlinear(config_str, out_channels)

    def forward(self, x):
        if len(x.shape) == 2:
            x = self.linear(x.unsqueeze(dim=-1)).squeeze(dim=-1)
        else:
            x = self.linear(x)
        x = self.nonlinear(x)
        return x

# @tables.register("model_classes", "CAMPPlus")
class CAMPPlus(torch.nn.Module):
    def __init__(
        self,
        feat_dim=80,
        embedding_size=192,
        growth_rate=32,
        bn_size=4,
        init_channels=128,
        config_str="batchnorm-relu",
        memory_efficient=True,
        output_level="segment",
        **kwargs,
    ):
        super().__init__()

        self.head = FCM(feat_dim=feat_dim)
        channels = self.head.out_channels
        self.output_level = output_level

        self.xvector = torch.nn.Sequential(
            OrderedDict(
                [
                    (
                        "tdnn",
                        TDNNLayer(
                            channels,
                            init_channels,
                            5,
                            stride=2,
                            dilation=1,
                            padding=-1,
                            config_str=config_str,
                        ),
                    ),
                ]
            )
        )
        channels = init_channels
        for i, (num_layers, kernel_size, dilation) in enumerate(
            zip((12, 24, 16), (3, 3, 3), (1, 2, 2))
        ):
            block = CAMDenseTDNNBlock(
                num_layers=num_layers,
                in_channels=channels,
                out_channels=growth_rate,
                bn_channels=bn_size * growth_rate,
                kernel_size=kernel_size,
                dilation=dilation,
                config_str=config_str,
                memory_efficient=memory_efficient,
            )
            self.xvector.add_module("block%d" % (i + 1), block)
            channels = channels + num_layers * growth_rate
            self.xvector.add_module(
                "transit%d" % (i + 1),
                TransitLayer(channels, channels // 2, bias=False, config_str=config_str),
            )
            channels //= 2

        self.xvector.add_module("out_nonlinear", get_nonlinear(config_str, channels))

        if self.output_level == "segment":
            self.xvector.add_module("stats", StatsPool())
            self.xvector.add_module(
                "dense", DenseLayer(channels * 2, embedding_size, config_str="batchnorm_")
            )
        else:
            assert (
                self.output_level == "frame"
            ), "`output_level` should be set to 'segment' or 'frame'. "

        for m in self.modules():
            if isinstance(m, (torch.nn.Conv1d, torch.nn.Linear)):
                torch.nn.init.kaiming_normal_(m.weight.data)
                if m.bias is not None:
                    torch.nn.init.zeros_(m.bias)

    def forward(self, x):
        x = x.permute(0, 2, 1)  # (B,T,F) => (B,F,T)
        x = self.head(x)
        x = self.xvector(x)
        if self.output_level == "frame":
            x = x.transpose(1, 2)
        return x

    def inference(self, audio_list):
        speech, speech_lengths, speech_times = extract_feature(audio_list)
        results = self.forward(speech.to(torch.float32))
        return results
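
CAMPPlus consumes 80-dim fbank frames and, at the default "segment" output level, pools them into a single 192-dim speaker embedding per utterance. A minimal sketch (not part of the uploaded file, toy random input):

import torch

model = CAMPPlus(feat_dim=80, embedding_size=192).eval()
feats = torch.randn(1, 200, 80)        # (batch, frames, feat_dim) fbank features
with torch.no_grad():
    emb = model(feats)                 # (1, 192) speaker embedding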
src/chatterbox/models/s3tokenizer/__init__.py
ADDED
@@ -0,0 +1,30 @@
from .s3tokenizer import (
    S3_SR,
    S3_HOP,
    S3_TOKEN_HOP,
    S3_TOKEN_RATE,
    SPEECH_VOCAB_SIZE,
    S3Tokenizer,
)


SOS = SPEECH_VOCAB_SIZE
EOS = SPEECH_VOCAB_SIZE + 1


def drop_invalid_tokens(x):
    """Drop SoS and EoS"""
    assert len(x.shape) == 1 or (len(x.shape) == 2 and x.shape[0] == 1), "only batch size of one allowed for now"
    if SOS in x:
        s = (x == SOS).nonzero(as_tuple=True)[0].squeeze(0) + 1
    else:
        s = 0

    if EOS in x:
        e = (x == EOS).nonzero(as_tuple=True)[0].squeeze(0)
    else:
        e = None

    x = x[s: e]
    return x
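
A minimal sketch (not part of the uploaded file) of what drop_invalid_tokens does to a decoded speech-token sequence:

import torch

tokens = torch.tensor([SOS, 104, 2051, 17, EOS])
drop_invalid_tokens(tokens)            # tensor([ 104, 2051,   17])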
src/chatterbox/models/s3tokenizer/s3tokenizer.py
ADDED
@@ -0,0 +1,168 @@
| 1 |
+
from typing import List, Tuple
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import librosa
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn.functional as F
|
| 7 |
+
from s3tokenizer.utils import padding
|
| 8 |
+
from s3tokenizer.model_v2 import (
|
| 9 |
+
S3TokenizerV2,
|
| 10 |
+
ModelConfig,
|
| 11 |
+
)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# Sampling rate of the inputs to S3TokenizerV2
|
| 15 |
+
S3_SR = 16_000
|
| 16 |
+
S3_HOP = 160 # 100 frames/sec
|
| 17 |
+
S3_TOKEN_HOP = 640 # 25 tokens/sec
|
| 18 |
+
S3_TOKEN_RATE = 25
|
| 19 |
+
SPEECH_VOCAB_SIZE = 6561
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class S3Tokenizer(S3TokenizerV2):
|
| 23 |
+
"""
|
| 24 |
+
s3tokenizer.S3TokenizerV2 with the following changes:
|
| 25 |
+
- a more integrated `forward`
|
| 26 |
+
- compute `log_mel_spectrogram` using `_mel_filters` and `window` in `register_buffers`
|
| 27 |
+
"""
|
| 28 |
+
|
| 29 |
+
ignore_state_dict_missing = ("_mel_filters", "window")
|
| 30 |
+
|
| 31 |
+
def __init__(
|
| 32 |
+
self,
|
| 33 |
+
name: str="speech_tokenizer_v2_25hz",
|
| 34 |
+
config: ModelConfig = ModelConfig()
|
| 35 |
+
):
|
| 36 |
+
super().__init__(name)
|
| 37 |
+
|
| 38 |
+
self.n_fft = 400
|
| 39 |
+
_mel_filters = librosa.filters.mel(
|
| 40 |
+
sr=S3_SR,
|
| 41 |
+
n_fft=self.n_fft,
|
| 42 |
+
n_mels=config.n_mels
|
| 43 |
+
)
|
| 44 |
+
self.register_buffer(
|
| 45 |
+
"_mel_filters",
|
| 46 |
+
torch.FloatTensor(_mel_filters),
|
| 47 |
+
)
|
| 48 |
+
|
| 49 |
+
self.register_buffer(
|
| 50 |
+
"window",
|
| 51 |
+
torch.hann_window(self.n_fft),
|
| 52 |
+
)
|
| 53 |
+
|
| 54 |
+
def pad(self, wavs, sr) -> List[torch.Tensor]:
|
| 55 |
+
"""
|
| 56 |
+
Given a list of wavs with the same `sample_rate`, pad them so that the length is multiple of 40ms (S3 runs at 25 token/sec).
|
| 57 |
+
"""
|
| 58 |
+
processed_wavs = []
|
| 59 |
+
for wav in wavs:
|
| 60 |
+
if isinstance(wav, np.ndarray):
|
| 61 |
+
wav = torch.from_numpy(wav)
|
| 62 |
+
if wav.dim() == 1:
|
| 63 |
+
wav = wav.unsqueeze(0)
|
| 64 |
+
|
| 65 |
+
n_tokens = (wav.shape[1] / sr) * S3_TOKEN_RATE
|
| 66 |
+
n_tokens = np.ceil(n_tokens)
|
| 67 |
+
intended_wav_len = n_tokens * (sr / S3_TOKEN_RATE)
|
| 68 |
+
intended_wav_len = int(intended_wav_len)
|
| 69 |
+
wav = torch.nn.functional.pad(
|
| 70 |
+
wav,
|
| 71 |
+
(0, intended_wav_len - wav.shape[-1]),
|
| 72 |
+
mode="constant",
|
| 73 |
+
value=0
|
| 74 |
+
)
|
| 75 |
+
processed_wavs.append(wav)
|
| 76 |
+
return processed_wavs
|
| 77 |
+
|
| 78 |
+
def _prepare_audio(self, wavs):
|
| 79 |
+
"""Prepare a list of audios for s3tokenizer processing."""
|
| 80 |
+
processed_wavs = []
|
| 81 |
+
for wav in wavs:
|
| 82 |
+
if isinstance(wav, np.ndarray):
|
| 83 |
+
wav = torch.from_numpy(wav)
|
| 84 |
+
if wav.dim() == 1:
|
| 85 |
+
wav = wav.unsqueeze(0)
|
| 86 |
+
|
| 87 |
+
processed_wavs.append(wav)
|
| 88 |
+
return processed_wavs
|
| 89 |
+
|
| 90 |
+
@torch.no_grad()
|
| 91 |
+
def forward(
|
| 92 |
+
self,
|
| 93 |
+
wavs: torch.Tensor,
|
| 94 |
+
accelerator: 'Accelerator'=None,
|
| 95 |
+
max_len: int=None,
|
| 96 |
+
) -> Tuple[torch.Tensor, torch.LongTensor]:
|
| 97 |
+
"""
|
| 98 |
+
NOTE: mel-spec has a hop size of 160 points (100 frame/sec).
|
| 99 |
+
FIXME: this class inherits `nn.Module` but doesn't accept `torch.Tensor` and handles a list of wavs one by one, which is unexpected.
|
| 100 |
+
|
| 101 |
+
Args
|
| 102 |
+
----
|
| 103 |
+
- `wavs`: 16 kHz speech audio
|
| 104 |
+
- `max_len` max length to truncate the output sequence to (25 token/sec).
|
| 105 |
+
NOTE: please pad the waveform if longer sequence is needed.
|
| 106 |
+
"""
|
| 107 |
+
processed_wavs = self._prepare_audio(wavs)
|
| 108 |
+
mels, mel_lens = [], []
|
| 109 |
+
for wav in processed_wavs:
|
| 110 |
+
wav = wav.to(self.device)
|
| 111 |
+
mel = self.log_mel_spectrogram(wav) # [B=1, F, T]
|
| 112 |
+
if max_len is not None:
|
| 113 |
+
mel = mel[..., :max_len * 4] # num_mel_frames = 4 * num_tokens
|
| 114 |
+
mels.append(mel.squeeze(0))
|
| 115 |
+
|
| 116 |
+
mels, mel_lens = padding(mels)
|
| 117 |
+
if accelerator is None:
|
| 118 |
+
tokenizer = self
|
| 119 |
+
else:
|
| 120 |
+
tokenizer = accelerator.unwrap_model(self)
|
| 121 |
+
|
| 122 |
+
speech_tokens, speech_token_lens = tokenizer.quantize(mels, mel_lens.to(self.device))
|
| 123 |
+
return (
|
| 124 |
+
speech_tokens.long().detach(),
|
| 125 |
+
speech_token_lens.long().detach(),
|
| 126 |
+
)
|
| 127 |
+
|
| 128 |
+
def log_mel_spectrogram(
|
| 129 |
+
self,
|
| 130 |
+
audio: torch.Tensor,
|
| 131 |
+
padding: int = 0,
|
| 132 |
+
):
|
| 133 |
+
"""
|
| 134 |
+
Compute the log-Mel spectrogram of
|
| 135 |
+
|
| 136 |
+
Parameters
|
| 137 |
+
----------
|
| 138 |
+
audio: torch.Tensor, shape = (*)
|
| 139 |
+
The path to audio or either a NumPy array or Tensor containing the
|
| 140 |
+
audio waveform in 16 kHz
|
| 141 |
+
|
| 142 |
+
padding: int
|
| 143 |
+
Number of zero samples to pad to the right
|
| 144 |
+
|
| 145 |
+
Returns
|
| 146 |
+
-------
|
| 147 |
+
torch.Tensor, shape = (128, n_frames)
|
| 148 |
+
A Tensor that contains the Mel spectrogram
|
| 149 |
+
"""
|
| 150 |
+
if not torch.is_tensor(audio):
|
| 151 |
+
audio = torch.from_numpy(audio)
|
| 152 |
+
|
| 153 |
+
audio = audio.to(self.device)
|
| 154 |
+
if padding > 0:
|
| 155 |
+
audio = F.pad(audio, (0, padding))
|
| 156 |
+
stft = torch.stft(
|
| 157 |
+
audio, self.n_fft, S3_HOP,
|
| 158 |
+
window=self.window.to(self.device),
|
| 159 |
+
return_complex=True
|
| 160 |
+
)
|
| 161 |
+
magnitudes = stft[..., :-1].abs()**2
|
| 162 |
+
|
| 163 |
+
mel_spec = self._mel_filters.to(self.device) @ magnitudes
|
| 164 |
+
|
| 165 |
+
log_spec = torch.clamp(mel_spec, min=1e-10).log10()
|
| 166 |
+
log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
|
| 167 |
+
log_spec = (log_spec + 4.0) / 4.0
|
| 168 |
+
return log_spec
|
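For illustration (not one of the uploaded files): `pad` rounds each waveform up to a whole number of tokens, i.e. a multiple of sr / 25 samples (640 samples, or 40 ms, at 16 kHz). A quick numeric check of that arithmetic with an assumed waveform length:

import numpy as np

sr = 16_000                                   # S3_SR
wav_len = 52_300                              # ~3.27 s, an arbitrary example length
n_tokens = int(np.ceil(wav_len / sr * 25))    # 82 tokens at 25 tokens/sec
intended_len = int(n_tokens * (sr / 25))      # 52_480 samples
print(n_tokens, intended_len, intended_len - wav_len)  # 82 52480 180 -> pad with 180 zero samples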
src/chatterbox/models/t3/__init__.py
ADDED
@@ -0,0 +1 @@
from .t3 import T3
src/chatterbox/models/t3/inference/alignment_stream_analyzer.py
ADDED
@@ -0,0 +1,154 @@
# Copyright (c) 2025 Resemble AI
# Author: John Meade, Jeremy Hsu
# MIT License
import logging
import torch
from dataclasses import dataclass
from types import MethodType


logger = logging.getLogger(__name__)


@dataclass
class AlignmentAnalysisResult:
    # was this frame detected as being part of a noisy beginning chunk with potential hallucinations?
    false_start: bool
    # was this frame detected as being part of a long tail with potential hallucinations?
    long_tail: bool
    # was this frame detected as repeating existing text content?
    repetition: bool
    # was the alignment position of this frame too far from the previous frame?
    discontinuity: bool
    # has inference reached the end of the text tokens? e.g., this remains false if inference stops early
    complete: bool
    # approximate position in the text token sequence. Can be used for generating online timestamps.
    position: int


class AlignmentStreamAnalyzer:
    def __init__(self, tfmr, queue, text_tokens_slice, alignment_layer_idx=9, eos_idx=0):
        """
        Some transformer TTS models implicitly solve text-speech alignment in one or more of their self-attention
        activation maps. This module exploits this to perform online integrity checks while streaming.
        A hook is injected into the specified attention layer, and heuristics are used to determine alignment
        position, repetition, etc.

        NOTE: currently requires no queues.
        """
        # self.queue = queue
        self.text_tokens_slice = (i, j) = text_tokens_slice
        self.eos_idx = eos_idx
        self.alignment = torch.zeros(0, j - i)
        # self.alignment_bin = torch.zeros(0, j - i)
        self.curr_frame_pos = 0
        self.text_position = 0

        self.started = False
        self.started_at = None

        self.complete = False
        self.completed_at = None

        # Using `output_attentions=True` is incompatible with optimized attention kernels, so
        # using it for all layers slows things down too much. We can apply it to just one layer
        # by intercepting the kwargs and adding a forward hook (credit: jrm)
        self.last_aligned_attn = None
        self._add_attention_spy(tfmr, alignment_layer_idx)

    def _add_attention_spy(self, tfmr, alignment_layer_idx):
        """
        Adds a forward hook to a specific attention layer to collect outputs.
        Using `output_attentions=True` is incompatible with optimized attention kernels, so
        using it for all layers slows things down too much.
        (credit: jrm)
        """

        def attention_forward_hook(module, input, output):
            """
            See `LlamaAttention.forward`; the output is a 3-tuple: `attn_output, attn_weights, past_key_value`.
            NOTE:
            - When `output_attentions=True`, `LlamaSdpaAttention.forward` calls `LlamaAttention.forward`.
            - The attention weights have shape [B, H, T0, T0] for the 0th step, and [B, H, 1, T0+i] for the i-th step thereafter.
            """
            step_attention = output[1].cpu()  # (B, 16, N, N)
            self.last_aligned_attn = step_attention[0].mean(0)  # (N, N)

        target_layer = tfmr.layers[alignment_layer_idx].self_attn
        hook_handle = target_layer.register_forward_hook(attention_forward_hook)

        # Backup original forward
        original_forward = target_layer.forward

        def patched_forward(self, *args, **kwargs):
            kwargs['output_attentions'] = True
            return original_forward(*args, **kwargs)

        # TODO: how to unpatch it?
        target_layer.forward = MethodType(patched_forward, target_layer)

    def step(self, logits):
        """
        Emits an AlignmentAnalysisResult into the output queue, and potentially modifies the logits to force an EOS.
        """
        # extract approximate alignment matrix chunk (1 frame at a time after the first chunk)
        aligned_attn = self.last_aligned_attn  # (N, N)
        i, j = self.text_tokens_slice
        if self.curr_frame_pos == 0:
            # first chunk has conditioning info, text tokens, and BOS token
            A_chunk = aligned_attn[j:, i:j].clone().cpu()  # (T, S)
        else:
            # subsequent chunks have 1 frame due to KV-caching
            A_chunk = aligned_attn[:, i:j].clone().cpu()  # (1, S)

        # TODO: monotonic masking; could have issue b/c spaces are often skipped.
        A_chunk[:, self.curr_frame_pos + 1:] = 0

        self.alignment = torch.cat((self.alignment, A_chunk), dim=0)

        A = self.alignment
        T, S = A.shape

        # update position
        cur_text_posn = A_chunk[-1].argmax()
        discontinuity = not (-4 < cur_text_posn - self.text_position < 7)  # NOTE: very lenient!
        if not discontinuity:
            self.text_position = cur_text_posn

        # Hallucinations at the start of speech show up as activations at the bottom of the attention maps!
        # To mitigate this, we just wait until there are no activations far off-diagonal in the last 2 tokens,
        # and there are some strong activations in the first few tokens.
        false_start = (not self.started) and (A[-2:, -2:].max() > 0.1 or A[:, :4].max() < 0.5)
        self.started = not false_start
        if self.started and self.started_at is None:
            self.started_at = T

        # Is generation likely complete?
        self.complete = self.complete or self.text_position >= S - 3
        if self.complete and self.completed_at is None:
            self.completed_at = T

        # NOTE: EOS is rarely assigned activations, and the second-last token is often punctuation, so use the last 3 tokens.
        # NOTE: due to the false-start behaviour, we need to make sure we skip activations for the first few tokens.
        last_text_token_duration = A[15:, -3:].sum()

        # Activations for the final token that last too long are likely hallucinations.
        long_tail = self.complete and (A[self.completed_at:, -3:].sum(dim=0).max() >= 10)  # 400ms

        # If there are activations in previous tokens after generation has completed, assume this is a repetition error.
        repetition = self.complete and (A[self.completed_at:, :-5].max(dim=1).values.sum() > 5)

        # If a bad ending is detected, force emit EOS by modifying logits
        # NOTE: this means logits may be inconsistent with latents!
        if long_tail or repetition:
            logger.warn(f"forcing EOS token, {long_tail=}, {repetition=}")
            # (±2**15 is safe for all dtypes >= 16bit)
            logits = -(2**15) * torch.ones_like(logits)
            logits[..., self.eos_idx] = 2**15

        # Suppress EoS to prevent early termination
        if cur_text_posn < S - 3:  # FIXME: arbitrary
            logits[..., self.eos_idx] = -2**15

        self.curr_frame_pos += 1
        return logits
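For illustration (not one of the uploaded files): `_add_attention_spy` combines a forward hook with a per-layer `output_attentions=True` override so that only one attention layer pays the slow-path cost. A generic sketch of that pattern; the function and variable names here are illustrative, not from the diff:

from types import MethodType

def spy_on_attention(layer, store):
    """Capture attention weights from a single attention module."""
    def hook(module, inputs, output):
        # output is assumed to be (attn_output, attn_weights, ...) as in LlamaAttention
        store["attn"] = output[1].detach().cpu()

    handle = layer.register_forward_hook(hook)

    original_forward = layer.forward
    def patched_forward(self, *args, **kwargs):
        kwargs["output_attentions"] = True   # only this layer returns weights
        return original_forward(*args, **kwargs)
    layer.forward = MethodType(patched_forward, layer)

    return handle  # keep the handle so the hook can be removed later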
src/chatterbox/models/t3/inference/t3_hf_backend.py
ADDED
@@ -0,0 +1,116 @@
from typing import Optional

import torch
from torch import nn as nn
from transformers import LlamaConfig, LlamaModel, LlamaPreTrainedModel, GenerationMixin
from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions


class T3HuggingfaceBackend(LlamaPreTrainedModel, GenerationMixin):
    """
    Override some HuggingFace interface methods so we can use the standard `generate` method with our
    custom embedding / logit layers.

    NOTE: need to extend "*PreTrainedModel" to avoid re-initializing weights!
    """

    def __init__(
        self,
        config: LlamaConfig,
        llama: LlamaModel,
        *,
        speech_enc,
        speech_head,
        latents_queue=None,
        logits_queue=None,
        alignment_stream_analyzer: 'AlignmentStreamAnalyzer' = None,
    ):
        super().__init__(config)
        self.model = llama
        self.speech_enc = speech_enc
        self.speech_head = speech_head
        self._added_cond = False
        self.alignment_stream_analyzer = alignment_stream_analyzer

    @torch.inference_mode()
    def prepare_inputs_for_generation(
        self, input_ids: torch.Tensor, decoder_cond: torch.Tensor, use_cache: bool, past_key_values=None,
        # This argument was introduced in some recent version of transformers (>=4.29.1)
        cache_position=None
    ):
        """
        This is a method used by huggingface's generate() method.
        Overridden here to apply our custom speech token embedding layer.

        :param input_ids: (B, S) int64 tensors of input tokens.
        :param decoder_cond: (B, T, C) float32 tensor of conditioning (prefixed to <input_embeds>)
        """

        # Make use of the kv cache: only the last input ID is new, we trim away all the ones before
        if not use_cache:
            past_key_values = None
        if past_key_values is not None:
            input_ids = input_ids[:, -1:]

        # custom speech token embedding layer
        inputs_embeds = self.speech_enc(input_ids)

        # prefix decoder conditioning if applicable
        if not self._added_cond:
            assert past_key_values is not None  # should be first step
            if decoder_cond.size(0) != inputs_embeds.size(0):
                decoder_cond = decoder_cond.expand(inputs_embeds.size(0), -1, -1)
            inputs_embeds = torch.cat([decoder_cond, inputs_embeds], dim=1)
            self._added_cond = True

        return {
            "inputs_embeds": inputs_embeds,
            "past_key_values": past_key_values,
            "use_cache": use_cache,
        }

    @torch.inference_mode()
    def forward(
        self,
        inputs_embeds: torch.Tensor,
        past_key_values: Optional[torch.Tensor] = None,
        use_cache=True,
        output_attentions=False,
        output_hidden_states=True,
        return_dict=True,
    ):
        """
        This is a method used by huggingface's generate() method.
        Overridden here to apply our custom layer norm and speech logit projection layers.

        :param inputs_embeds: (B, S, C) float32 tensor of conditioning inputs. If past key values are given,
            S should be 1.
        """
        is_large_input = inputs_embeds.size(1) != 1
        has_cache = past_key_values is not None and len(past_key_values) > 0
        assert not (is_large_input and has_cache)
        assert return_dict
        assert output_hidden_states

        tfmr_out = self.model(
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
        )
        hidden_states = tfmr_out.hidden_states[-1]  # (B, seq, dim)

        logits = self.speech_head(hidden_states)
        # assert inputs_embeds.size(0) == 1  # (disabled for CFG)

        # NOTE: hallucination handler may modify logits to force emit an EOS token
        # logits = self.alignment_stream_analyzer.step(logits)

        return CausalLMOutputWithCrossAttentions(
            logits=logits,
            past_key_values=tfmr_out.past_key_values,
            hidden_states=tfmr_out.hidden_states,
            attentions=tfmr_out.attentions,
        )
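For illustration (not one of the uploaded files): `prepare_inputs_for_generation` relies on the standard KV-cache convention that once a cache exists, only the newest token id needs to be embedded and fed to the transformer. A toy version of the trimming step:

import torch

input_ids = torch.tensor([[5, 9, 2, 7]])   # (B, S) speech-token ids accumulated so far
past_key_values = object()                 # stand-in for a non-empty HF cache
if past_key_values is not None:
    input_ids = input_ids[:, -1:]          # keep only the last, not-yet-cached token
print(input_ids)                           # tensor([[7]])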
src/chatterbox/models/t3/llama_configs.py
ADDED
@@ -0,0 +1,37 @@
LLAMA_520M_CONFIG_DICT = dict(
    # Arbitrary small number that won't cause problems when loading.
    # These params are unused due to custom input layers.
    vocab_size=8,
    # default params needed for loading most pretrained 1B weights
    max_position_embeddings=131072,
    hidden_size=1024,
    intermediate_size=4096,
    num_hidden_layers=30,
    num_attention_heads=16,
    attn_implementation="sdpa",
    head_dim=64,
    tie_word_embeddings=False,
    hidden_act="silu",
    attention_bias=False,
    attention_dropout=0.0,
    initializer_range=0.02,
    mlp_bias=False,
    model_type="llama",
    num_key_value_heads=16,
    pretraining_tp=1,
    rms_norm_eps=1e-05,
    rope_scaling=dict(
        factor=8.0,
        high_freq_factor=4.0,
        low_freq_factor=1.0,
        original_max_position_embeddings=8192,
        rope_type="llama3"
    ),
    rope_theta=500000.0,
    torch_dtype="bfloat16",
    use_cache=True,
)

LLAMA_CONFIGS = {
    "Llama_520M": LLAMA_520M_CONFIG_DICT,
}
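For illustration (not one of the uploaded files): the dict above is shaped so it can be splatted straight into a `transformers.LlamaConfig`; the tiny `vocab_size` is harmless because T3 supplies its own embedding and output heads. A sketch assuming the repo's `src/` directory is on `PYTHONPATH` and `transformers` is installed:

from transformers import LlamaConfig
from chatterbox.models.t3.llama_configs import LLAMA_CONFIGS

cfg = LlamaConfig(**LLAMA_CONFIGS["Llama_520M"])
print(cfg.hidden_size, cfg.num_hidden_layers, cfg.num_attention_heads)  # 1024 30 16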