hexgrad committed (verified)
Commit d4182d8 · 1 Parent(s): f231e90

Upload app.py

Files changed (1)
  1. app.py +16 -16
app.py CHANGED
@@ -180,6 +180,22 @@ def resolve_voices(voice, warn=True):
     voices = [v for v in voices if v in VOICES['cpu']]
     return voices if voices else ['af']
 
+def get_vocab():
+    _pad = "$"
+    _punctuation = ';:,.!?¡¿—…"«»“” '
+    _letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
+    _letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ"
+    symbols = [_pad] + list(_punctuation) + list(_letters) + list(_letters_ipa)
+    dicts = {}
+    for i in range(len((symbols))):
+        dicts[symbols[i]] = i
+    return dicts
+
+VOCAB = get_vocab()
+
+def tokenize(ps):
+    return [i for i in map(VOCAB.get, ps) if i is not None]
+
 def phonemize(text, voice, norm=True):
     lang = resolve_voices(voice)[0][0]
     if norm:
@@ -212,22 +228,6 @@ def length_to_mask(lengths):
     mask = torch.gt(mask+1, lengths.unsqueeze(1))
     return mask
 
-def get_vocab():
-    _pad = "$"
-    _punctuation = ';:,.!?¡¿—…"«»“” '
-    _letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
-    _letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ"
-    symbols = [_pad] + list(_punctuation) + list(_letters) + list(_letters_ipa)
-    dicts = {}
-    for i in range(len((symbols))):
-        dicts[symbols[i]] = i
-    return dicts
-
-VOCAB = get_vocab()
-
-def tokenize(ps):
-    return [i for i in map(VOCAB.get, ps) if i is not None]
-
 SAMPLE_RATE = 24000
 
 @torch.no_grad()
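
For readers following the move above: the relocated helpers build a symbol-to-id table and map a phoneme string to integer ids. A minimal, self-contained sketch of that pattern follows; the trimmed symbol set, sample input, and printed ids are illustrative and not taken from app.py.

# Simplified sketch of the get_vocab/tokenize pattern moved in this commit.
# The symbol table is trimmed for brevity; app.py's full table also covers IPA letters.
_pad = "$"
_punctuation = ';:,.!?¡¿ '
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
symbols = [_pad] + list(_punctuation) + list(_letters)

# Position in the symbol list doubles as the token id; the pad symbol gets id 0.
vocab = {s: i for i, s in enumerate(symbols)}

def tokenize(ps):
    # Characters missing from the table (e.g. the IPA omitted here) are silently
    # dropped, mirroring the `if i is not None` filter in app.py's tokenize.
    return [vocab[c] for c in ps if c in vocab]

print(tokenize('Hello, world!'))  # [17, 40, 47, 47, 50, 3, 9, 58, 50, 53, 47, 39, 5]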