|
|
|
|
|
import os |
|
from pathlib import Path |
|
|
|
import jinja2 |
|
from iso639 import Lang |
|
|
|
from piper.generate import get_all_models, get_de_models |
|
from kokoro.v019.generate_v_0_19 import generate_kokoro_v019 |
|
from kokoro.v10.generate_v_10 import generate_kokoro_v_10 |
|
from kokoro.v11_zh.generate_v_11_zh import generate_kokoro_v_11 |
|
from matcha.zh.generate_matcha_icefall_zh import generate_icefall_zh_matcha |
|
|
|
|
|
def get_sherpa_onnx_version():
    """Return the sherpa-onnx version to generate links for.

    The SHERPA_ONNX_VERSION environment variable overrides the
    built-in default.
    """
    version = os.environ.get("SHERPA_ONNX_VERSION")
    return "1.12.1" if version is None else version
|
|
|
|
|
def get_chinese_mixed_english():
    """Return two markdown bullet lists for the Chinese+English kokoro models.

    The first list is meant for SUMMARY.md (links prefixed with the
    Chinese-English directory); the second is meant for the section's own
    index.md (links relative to that directory).
    """
    names = (
        "kokoro-multi-lang-v1_0",
        "kokoro-multi-lang-v1_1",
    )
    first = "\n".join(f" - [{name}](Chinese-English/{name}.md)" for name in names)
    second = "\n".join(f" - [{name}]({name}.md)" for name in names)

    # Ensure the output directory for the linked pages exists.
    Path("./book/src/Chinese-English").mkdir(parents=True, exist_ok=True)

    return first, second
|
|
|
|
|
def get_android_apk(m):
    """Return the markdown "Android APK" section for the given piper model.

    The section contains a per-ABI download table with links to the
    huggingface release assets and to the hf-mirror copies for users
    in China.
    """
    v = get_sherpa_onnx_version()
    # Primary release location and its mirror.
    base = f"https://huggingface.co/csukuangfj/sherpa-onnx-apk/resolve/main/tts-engine-new/{v}"
    base_cn = (
        f"https://hf-mirror.com/csukuangfj/sherpa-onnx-apk/blob/main/tts-engine-new/{v}"
    )

    model_dir = f"vits-piper-{m.lang}-{m.name}-{m.kind}"

    def apk_filename(abi):
        # APK file name shared by both download locations.
        return f"sherpa-onnx-{v}-{abi}-{m.lang[:2]}-tts-engine-{model_dir}.apk"

    abis = ("arm64-v8a", "armeabi-v7a", "x86_64", "x86")
    apk = {abi: f"{base}/{apk_filename(abi)}" for abi in abis}
    apk_cn = {abi: f"{base_cn}/{apk_filename(abi)}" for abi in abis}

    s = f"""

## Android APK

The following table shows the Android TTS Engine APK with this model
for [sherpa-onnx](https://github.com/k2-fsa/sherpa-onnx) v{v}

| ABI | URL | 中国镜像|
|---|---|---|
|arm64-v8a|[Download]({apk['arm64-v8a']})|[下载]({apk_cn['arm64-v8a']})|
|armeabi-v7a|[Download]({apk['armeabi-v7a']})|[下载]({apk_cn['armeabi-v7a']})|
|x86_64|[Download]({apk['x86_64']})|[下载]({apk_cn['x86_64']})|
|x86|[Download]({apk['x86']})|[下载]({apk_cn['x86']})|

> If you don't know what ABI is, you probably need to select `arm64-v8a`.

The source code for the APK can be found at

<https://github.com/k2-fsa/sherpa-onnx/tree/master/android/SherpaOnnxTtsEngine>

Please refer to the [documentation](https://k2-fsa.github.io/sherpa/onnx/android/index.html)
for how to build the APK from source code.

More Android APKs can be found at

<https://k2-fsa.github.io/sherpa/onnx/tts/apk-engine.html>

"""
    return s
|
|
|
|
|
def get_c_api(m):
    """Return the markdown "C API" section for the given piper model.

    Renders the C example template with this model's file paths and
    embeds it together with build/run instructions.
    """
    model_dir = f"vits-piper-{m.lang}-{m.name}-{m.kind}"
    context = {
        "model": f"{model_dir}/{m.model_name}",
        "data_dir": f"{model_dir}/espeak-ng-data",
        "tokens": f"{model_dir}/tokens.txt",
        "text": m.text,
        "sid": 0,
    }

    with open("./piper/templates/c-api-example.c.in") as f:
        raw = f.read()

    # Fill the C example template with this model's paths.
    template = jinja2.Environment().from_string(raw).render(**context)

    s = f"""
## C API

You can use the following code to play with `{model_dir}` with C API.

```c
{template}
```


In the following, we describe how to compile and run the above C example.

### Use shared library (dynamic link)

```bash
cd /tmp
git clone https://github.com/k2-fsa/sherpa-onnx
cd sherpa-onnx
mkdir build-shared
cd build-shared

cmake \\
  -DSHERPA_ONNX_ENABLE_C_API=ON \\
  -DCMAKE_BUILD_TYPE=Release \\
  -DBUILD_SHARED_LIBS=ON \\
  -DCMAKE_INSTALL_PREFIX=/tmp/sherpa-onnx/shared \\
  ..

make
make install
```

You can find required header file and library files inside ``/tmp/sherpa-onnx/shared``.

Assume you have saved the above example file as `/tmp/test-piper.c`.
Then you can compile it with the following command:

```bash
gcc \\
  -I /tmp/sherpa-onnx/shared/include \\
  -L /tmp/sherpa-onnx/shared/lib \\
  -lsherpa-onnx-c-api \\
  -lonnxruntime \\
  -o /tmp/test-piper \\
  /tmp/test-piper.c
```

Now you can run
```bash
cd /tmp

# Assume you have downloaded the model and extracted it to /tmp
./test-piper
```

> You probably need to run
> ```bash
> # For Linux
> export LD_LIBRARY_PATH=/tmp/sherpa-onnx/shared/lib:$LD_LIBRARY_PATH
>
> # For macOS
> export DYLD_LIBRARY_PATH=/tmp/sherpa-onnx/shared/lib:$DYLD_LIBRARY_PATH
> ```
> before you run `/tmp/test-piper`.

### Use static library (static link)

Please see the documentation at

<https://k2-fsa.github.io/sherpa/onnx/c-api/index.html>
"""

    return s
|
|
|
|
|
def get_python_api(m):
    """Return the markdown "Python API" section for the given piper model.

    Renders the Python example template with this model's file paths and
    embeds it together with install/download instructions.
    """
    model_dir = f"vits-piper-{m.lang}-{m.name}-{m.kind}"
    context = {
        "model": f"{model_dir}/{m.model_name}",
        "data_dir": f"{model_dir}/espeak-ng-data",
        "tokens": f"{model_dir}/tokens.txt",
        "text": m.text,
        "sid": 0,
    }

    with open("./piper/templates/python-api.py.in") as f:
        raw = f.read()

    # Fill the Python example template with this model's paths.
    template = jinja2.Environment().from_string(raw).render(**context)

    s = f"""
## Python API

Assume you have installed `sherpa-onnx` via
```bash
pip install sherpa-onnx
```
and you have downloaded the model from

<https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/{model_dir}.tar.bz2>

You can use the following code to play with `{model_dir}`

```python
{template}
```

"""

    return s
|
|
|
|
|
def process_one_model(m):
    """Write the markdown page for a single piper model.

    Creates ./book/src/<language>/<model-dir>.md containing the model
    info table, download link, Android APK table, C/Python API examples,
    and per-speaker audio samples.
    """
    lang = Lang(m.lang[:2]).name

    # Normalize the iso639 names to the directory names used by the book.
    if "Modern Greek" in lang:
        lang = "Greek"

    if "Nepali" in lang:
        lang = "Nepali"

    if "Swahili" in lang:
        lang = "Swahili"

    d = f"vits-piper-{m.lang}-{m.name}-{m.kind}"
    # Fix: header table previously said "Andriod APK".
    s = f"""
# {d}

||||
|---|---|---|
|[Info about this model](#info-about-this-model)| [Samples](#samples)|[Android APK](#android-apk)|
|[C API](#c-api)|[Python API](#python-api)||

## Info about this model

"""

    if m.url:
        s += f"""
This model is converted from <{m.url}>

| Number of speakers | Sample rate|
|--------------------|------------|
| {m.ns} | {m.sr} |

"""
    s += f"""

Model download address

<https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/{d}.tar.bz2>

"""

    s += get_android_apk(m)
    s += get_c_api(m)
    s += get_python_api(m)

    # Put each sentence of the sample text on its own line.
    text = ".\n ".join(m.text.split("."))
    s += f"""
## Samples

For the following text:

{text}

sample audios for different speakers are listed below:


"""
    # One audio player per speaker id.
    for i in range(m.ns):
        s += f"\n### Speaker {i}\n"
        s += f"""\n<audio controls>
      <source src="/sherpa/onnx/tts/all/piper/mp3/{m.lang}/{d}/{i}.mp3" type="audio/mp3">
    </audio>\n\n"""

    Path(f"./book/src/{lang}").mkdir(parents=True, exist_ok=True)

    with open(f"./book/src/{lang}/{d}.md", "w") as f:
        f.write(s)
|
|
|
|
|
|
|
def process_piper(models: list, is_root=True):
    """Return a sorted markdown bullet list linking the given piper models.

    When is_root is True the links are prefixed with the language
    directory (for SUMMARY.md); otherwise they are relative (for the
    language's own index.md).
    """
    lang = Lang(models[0].lang[:2]).name

    # Normalize the iso639 names to the directory names used by the book.
    for needle, short_name in (
        ("Modern Greek", "Greek"),
        ("Nepali", "Nepali"),
        ("Swahili", "Swahili"),
    ):
        if needle in lang:
            lang = short_name

    entries = []
    for m in models:
        d = f"vits-piper-{m.lang}-{m.name}-{m.kind}"
        entry = f" - [{d}]({lang}/{d}.md)" if is_root else f" - [{d}]({d}.md)"
        entries.append(entry)

    return "\n".join(sorted(entries))
|
|
|
|
|
def _write_piper_index(lang_name, models):
    """Write ./book/src/<lang_name>/index.md listing the piper models of one language."""
    Path(f"./book/src/{lang_name}").mkdir(parents=True, exist_ok=True)
    s = f"""
# {lang_name}

This section lists text to speech models for {lang_name}.\n
{process_piper(models, False)}
"""
    with open(f"./book/src/{lang_name}/index.md", "w") as f:
        f.write(f"{s}\n")


def main():
    """Generate the mdbook sources: SUMMARY.md, per-language index pages,
    per-model pages, and the kokoro/matcha sections."""
    all_model_list = get_all_models()
    for m in all_model_list:
        # The download command is not shown on the generated pages.
        m.cmd = ""

    def by_lang(code):
        # All piper models whose locale code matches exactly.
        return [m for m in all_model_list if m.lang == code]

    en_gb = by_lang("en_GB")
    en_us = by_lang("en_US")
    de_de = by_lang("de_DE")
    ar = by_lang("ar_JO")
    ca = by_lang("ca_ES")
    cs = by_lang("cs_CZ")
    cy = by_lang("cy_GB")
    da = by_lang("da_DK")
    el = by_lang("el_GR")
    es = by_lang("es_ES") + by_lang("es_MX")
    fa = by_lang("fa_IR")
    fi = by_lang("fi_FI")
    fr = by_lang("fr_FR")
    hu = by_lang("hu_HU")
    is_is = by_lang("is_IS")
    it = by_lang("it_IT")
    ka = by_lang("ka_GE")
    kk = by_lang("kk_KZ")
    lb = by_lang("lb_LU")
    lv = by_lang("lv_LV")
    ml = by_lang("ml_IN")
    ne = by_lang("ne_NP")
    nl = by_lang("nl_BE") + by_lang("nl_NL")
    no = by_lang("no_NO")
    pl = by_lang("pl_PL")
    pt = by_lang("pt_BR") + by_lang("pt_PT")
    ro = by_lang("ro_RO")
    ru = by_lang("ru_RU")
    sk = by_lang("sk_SK")
    sl = by_lang("sl_SI")
    sr = by_lang("sr_RS")
    sv = by_lang("sv_SE")
    sw = by_lang("sw_CD")
    tr = by_lang("tr_TR")
    uk = by_lang("uk_UA")
    vi = by_lang("vi_VN")

    # One page per model.
    for m in all_model_list:
        process_one_model(m)

    # English additionally lists the kokoro model first.
    english = process_piper(en_us + en_gb).split("\n")
    english.insert(0, " - [kokoro-en-v0_19](English/kokoro-en-v0_19.md)")
    english = "\n".join(english)

    chinese = " - [matcha-icefall-zh-baker](Chinese/matcha-icefall-zh-baker.md)"

    s = f"""
# Summary

- [TTS models](./index.md)
- [Chinese+English](./Chinese-English/index.md)\n{get_chinese_mixed_english()[0]}\n
- [Arabic](./Arabic/index.md)\n{process_piper(ar)}
- [Catalan](./Catalan/index.md)\n{process_piper(ca)}
- [Chinese](./Chinese/index.md)\n{chinese}
- [Czech](./Czech/index.md)\n{process_piper(cs)}
- [Danish](./Danish/index.md)\n{process_piper(da)}
- [Dutch](./Dutch/index.md)\n{process_piper(nl)}
- [English](./English/index.md)\n{english}
- [Finnish](./Finnish/index.md)\n{process_piper(fi)}
- [French](./French/index.md)\n{process_piper(fr)}
- [Georgian](./Georgian/index.md)\n{process_piper(ka)}
- [German](./German/index.md)\n{process_piper(de_de)}
- [Greek](./Greek/index.md)\n{process_piper(el)}
- [Hungarian](./Hungarian/index.md)\n{process_piper(hu)}
- [Icelandic](./Icelandic/index.md)\n{process_piper(is_is)}
- [Italian](./Italian/index.md)\n{process_piper(it)}
- [Kazakh](./Kazakh/index.md)\n{process_piper(kk)}
- [Latvian](./Latvian/index.md)\n{process_piper(lv)}
- [Luxembourgish](./Luxembourgish/index.md)\n{process_piper(lb)}
- [Malayalam](./Malayalam/index.md)\n{process_piper(ml)}
- [Nepali](./Nepali/index.md)\n{process_piper(ne)}
- [Norwegian](./Norwegian/index.md)\n{process_piper(no)}
- [Persian](./Persian/index.md)\n{process_piper(fa)}
- [Polish](./Polish/index.md)\n{process_piper(pl)}
- [Portuguese](./Portuguese/index.md)\n{process_piper(pt)}
- [Romanian](./Romanian/index.md)\n{process_piper(ro)}
- [Russian](./Russian/index.md)\n{process_piper(ru)}
- [Serbian](./Serbian/index.md)\n{process_piper(sr)}
- [Slovak](./Slovak/index.md)\n{process_piper(sk)}
- [Slovenian](./Slovenian/index.md)\n{process_piper(sl)}
- [Spanish](./Spanish/index.md)\n{process_piper(es)}
- [Swahili](./Swahili/index.md)\n{process_piper(sw)}
- [Swedish](./Swedish/index.md)\n{process_piper(sv)}
- [Turkish](./Turkish/index.md)\n{process_piper(tr)}
- [Ukrainian](./Ukrainian/index.md)\n{process_piper(uk)}
- [Vietnamese](./Vietnamese/index.md)\n{process_piper(vi)}
- [Welsh](./Welsh/index.md)\n{process_piper(cy)}
"""

    Path("./book/src").mkdir(parents=True, exist_ok=True)
    with open("./book/src/SUMMARY.md", "w") as f:
        f.write(f"{s}\n")

    # All languages whose index page follows the standard template.
    for lang_name, models in [
        ("Arabic", ar),
        ("Catalan", ca),
        ("Czech", cs),
        ("Greek", el),
        ("Hungarian", hu),
        ("Icelandic", is_is),
        ("Italian", it),
        ("Kazakh", kk),
        ("Latvian", lv),
        ("Luxembourgish", lb),
        ("Malayalam", ml),
        ("Nepali", ne),
        ("Norwegian", no),
        ("Persian", fa),
        ("Polish", pl),
        ("Portuguese", pt),
        ("Romanian", ro),
        ("Russian", ru),
        ("Serbian", sr),
        ("Slovak", sk),
        ("Slovenian", sl),
        ("Welsh", cy),
        ("Danish", da),
        ("Dutch", nl),
        ("Finnish", fi),
        ("French", fr),
        ("Georgian", ka),
        ("German", de_de),
        ("Spanish", es),
        ("Swahili", sw),
        ("Swedish", sv),
        ("Turkish", tr),
        ("Ukrainian", uk),
        ("Vietnamese", vi),
    ]:
        _write_piper_index(lang_name, models)

    # Chinese, English, and Chinese+English deviate from the standard
    # template, so they are written explicitly.
    Path("./book/src/Chinese").mkdir(parents=True, exist_ok=True)
    with open("./book/src/Chinese/index.md", "w") as f:
        s = f"""
# Chinese

This section lists text to speech models for Chinese.\n

- [matcha-icefall-zh-baker ](matcha-icefall-zh-baker.md)
"""
        f.write(f"{s}\n")

    Path("./book/src/English").mkdir(parents=True, exist_ok=True)
    with open("./book/src/English/index.md", "w") as f:
        s = f"""
# English

This section lists text to speech models for English.\n

- [kokoro-en-v0_19](kokoro-en-v0_19.md)
{process_piper(en_us + en_gb, False)}
"""
        f.write(f"{s}\n")

    # get_chinese_mixed_english() creates the Chinese-English directory.
    with open("./book/src/Chinese-English/index.md", "w") as f:
        s = f"""
# Chinese+English

This section lists text to speech models for Chinese+English.\n
{get_chinese_mixed_english()[1]}
"""
        f.write(f"{s}\n")

    generate_kokoro_v019()
    generate_kokoro_v_10()
    generate_kokoro_v_11()
    generate_icefall_zh_matcha()
|
|
|
|
|
# Allow this module to be executed directly as a script.
if __name__ == "__main__":
    main()
|
|