yama committed on
Commit
5293389
·
1 Parent(s): 8873cb0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +139 -42
app.py CHANGED
@@ -26,12 +26,108 @@ import wave
26
  import contextlib
27
  from transformers import pipeline
28
  import psutil
29
- import shutil
30
 
31
  whisper_models = ["tiny", "base", "small", "medium", "large-v1", "large-v2"]
32
  source_languages = {
33
  "en": "English",
 
 
 
 
 
 
34
  "ja": "Japanese",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35
  }
36
 
37
  source_language_list = [key[0] for key in source_languages.items()]
@@ -46,8 +142,6 @@ pipe = pipeline(
46
  chunk_length_s=30,
47
  device=device,
48
  )
49
- if os.path.exists('output'):
50
- shutil.rmtree('output')
51
  os.makedirs('output', exist_ok=True)
52
  pipe.model.config.forced_decoder_ids = pipe.tokenizer.get_decoder_prompt_ids(language=lang, task="transcribe")
53
 
@@ -349,42 +443,45 @@ with demo:
349
  with gr.Column():
350
  download_transcript.render()
351
  transcription_df.render()
352
-
353
- with gr.Tab("Whisper Transcribe Japanese Audio"):
354
- gr.Markdown(f'''
355
- <div>
356
- <h1 style='text-align: center'>Whisper Transcribe Japanese Audio</h1>
357
- </div>
358
- Transcribe long-form microphone or audio inputs with the click of a button! The fine-tuned
359
- checkpoint <a href='https://huggingface.co/{MODEL_NAME}' target='_blank'><b>{MODEL_NAME}</b></a> to transcribe audio files of arbitrary length.
360
- ''')
361
- microphone = gr.inputs.Audio(source="microphone", type="filepath", optional=True)
362
- upload = gr.inputs.Audio(source="upload", type="filepath", optional=True)
363
- transcribe_btn = gr.Button("Transcribe Audio")
364
- text_output = gr.Textbox()
365
- with gr.Row():
366
- gr.Markdown('''
367
- ### You can test by following examples:
368
- ''')
369
- examples = gr.Examples(examples=
370
- ["sample1.wav",
371
- "sample2.wav",
372
- ],
373
- label="Examples", inputs=[upload])
374
- transcribe_btn.click(transcribe, [microphone, upload], outputs=text_output)
375
-
376
- with gr.Tab("Whisper Transcribe Japanese YouTube"):
377
- gr.Markdown(f'''
378
- <div>
379
- <h1 style='text-align: center'>Whisper Transcribe Japanese YouTube</h1>
380
- </div>
381
- Transcribe long-form YouTube videos with the click of a button! The fine-tuned checkpoint:
382
- <a href='https://huggingface.co/{MODEL_NAME}' target='_blank'><b>{MODEL_NAME}</b></a> to transcribe audio files of arbitrary length.
383
- ''')
384
- youtube_link = gr.Textbox(label="Youtube url", lines=1, interactive=True)
385
- yt_transcribe_btn = gr.Button("Transcribe YouTube")
386
- text_output2 = gr.Textbox()
387
- html_output = gr.Markdown()
388
- yt_transcribe_btn.click(yt_transcribe, [youtube_link], outputs=[html_output, text_output2])
389
-
390
- demo.launch(debug=True)
 
 
 
 
26
  import contextlib
27
  from transformers import pipeline
28
  import psutil
 
29
 
30
  whisper_models = ["tiny", "base", "small", "medium", "large-v1", "large-v2"]
31
  source_languages = {
32
  "en": "English",
33
+ "zh": "Chinese",
34
+ "de": "German",
35
+ "es": "Spanish",
36
+ "ru": "Russian",
37
+ "ko": "Korean",
38
+ "fr": "French",
39
  "ja": "Japanese",
40
+ "pt": "Portuguese",
41
+ "tr": "Turkish",
42
+ "pl": "Polish",
43
+ "ca": "Catalan",
44
+ "nl": "Dutch",
45
+ "ar": "Arabic",
46
+ "sv": "Swedish",
47
+ "it": "Italian",
48
+ "id": "Indonesian",
49
+ "hi": "Hindi",
50
+ "fi": "Finnish",
51
+ "vi": "Vietnamese",
52
+ "he": "Hebrew",
53
+ "uk": "Ukrainian",
54
+ "el": "Greek",
55
+ "ms": "Malay",
56
+ "cs": "Czech",
57
+ "ro": "Romanian",
58
+ "da": "Danish",
59
+ "hu": "Hungarian",
60
+ "ta": "Tamil",
61
+ "no": "Norwegian",
62
+ "th": "Thai",
63
+ "ur": "Urdu",
64
+ "hr": "Croatian",
65
+ "bg": "Bulgarian",
66
+ "lt": "Lithuanian",
67
+ "la": "Latin",
68
+ "mi": "Maori",
69
+ "ml": "Malayalam",
70
+ "cy": "Welsh",
71
+ "sk": "Slovak",
72
+ "te": "Telugu",
73
+ "fa": "Persian",
74
+ "lv": "Latvian",
75
+ "bn": "Bengali",
76
+ "sr": "Serbian",
77
+ "az": "Azerbaijani",
78
+ "sl": "Slovenian",
79
+ "kn": "Kannada",
80
+ "et": "Estonian",
81
+ "mk": "Macedonian",
82
+ "br": "Breton",
83
+ "eu": "Basque",
84
+ "is": "Icelandic",
85
+ "hy": "Armenian",
86
+ "ne": "Nepali",
87
+ "mn": "Mongolian",
88
+ "bs": "Bosnian",
89
+ "kk": "Kazakh",
90
+ "sq": "Albanian",
91
+ "sw": "Swahili",
92
+ "gl": "Galician",
93
+ "mr": "Marathi",
94
+ "pa": "Punjabi",
95
+ "si": "Sinhala",
96
+ "km": "Khmer",
97
+ "sn": "Shona",
98
+ "yo": "Yoruba",
99
+ "so": "Somali",
100
+ "af": "Afrikaans",
101
+ "oc": "Occitan",
102
+ "ka": "Georgian",
103
+ "be": "Belarusian",
104
+ "tg": "Tajik",
105
+ "sd": "Sindhi",
106
+ "gu": "Gujarati",
107
+ "am": "Amharic",
108
+ "yi": "Yiddish",
109
+ "lo": "Lao",
110
+ "uz": "Uzbek",
111
+ "fo": "Faroese",
112
+ "ht": "Haitian creole",
113
+ "ps": "Pashto",
114
+ "tk": "Turkmen",
115
+ "nn": "Nynorsk",
116
+ "mt": "Maltese",
117
+ "sa": "Sanskrit",
118
+ "lb": "Luxembourgish",
119
+ "my": "Myanmar",
120
+ "bo": "Tibetan",
121
+ "tl": "Tagalog",
122
+ "mg": "Malagasy",
123
+ "as": "Assamese",
124
+ "tt": "Tatar",
125
+ "haw": "Hawaiian",
126
+ "ln": "Lingala",
127
+ "ha": "Hausa",
128
+ "ba": "Bashkir",
129
+ "jw": "Javanese",
130
+ "su": "Sundanese",
131
  }
132
 
133
  source_language_list = [key[0] for key in source_languages.items()]
 
142
  chunk_length_s=30,
143
  device=device,
144
  )
 
 
145
  os.makedirs('output', exist_ok=True)
146
  pipe.model.config.forced_decoder_ids = pipe.tokenizer.get_decoder_prompt_ids(language=lang, task="transcribe")
147
 
 
443
  with gr.Column():
444
  download_transcript.render()
445
  transcription_df.render()
446
+ system_info.render()
447
+ gr.Markdown(
448
+ '''<center><img src='https://visitor-badge.glitch.me/badge?page_id=WhisperDiarizationSpeakers' alt='visitor badge'><a href="https://opensource.org/licenses/Apache-2.0"><img src='https://img.shields.io/badge/License-Apache_2.0-blue.svg' alt='License: Apache 2.0'></center>''')
449
+
450
+ # with gr.Tab("Whisper Transcribe Japanese Audio"):
451
+ # gr.Markdown(f'''
452
+ # <div>
453
+ # <h1 style='text-align: center'>Whisper Transcribe Japanese Audio</h1>
454
+ # </div>
455
+ # Transcribe long-form microphone or audio inputs with the click of a button! The fine-tuned
456
+ # checkpoint <a href='https://huggingface.co/{MODEL_NAME}' target='_blank'><b>{MODEL_NAME}</b></a> to transcribe audio files of arbitrary length.
457
+ # ''')
458
+ # microphone = gr.inputs.Audio(source="microphone", type="filepath", optional=True)
459
+ # upload = gr.inputs.Audio(source="upload", type="filepath", optional=True)
460
+ # transcribe_btn = gr.Button("Transcribe Audio")
461
+ # text_output = gr.Textbox()
462
+ # with gr.Row():
463
+ # gr.Markdown('''
464
+ # ### You can test by following examples:
465
+ # ''')
466
+ # examples = gr.Examples(examples=
467
+ # ["sample1.wav",
468
+ # "sample2.wav",
469
+ # ],
470
+ # label="Examples", inputs=[upload])
471
+ # transcribe_btn.click(transcribe, [microphone, upload], outputs=text_output)
472
+ #
473
+ # with gr.Tab("Whisper Transcribe Japanese YouTube"):
474
+ # gr.Markdown(f'''
475
+ # <div>
476
+ # <h1 style='text-align: center'>Whisper Transcribe Japanese YouTube</h1>
477
+ # </div>
478
+ # Transcribe long-form YouTube videos with the click of a button! The fine-tuned checkpoint:
479
+ # <a href='https://huggingface.co/{MODEL_NAME}' target='_blank'><b>{MODEL_NAME}</b></a> to transcribe audio files of arbitrary length.
480
+ # ''')
481
+ # youtube_link = gr.Textbox(label="Youtube url", lines=1, interactive=True)
482
+ # yt_transcribe_btn = gr.Button("Transcribe YouTube")
483
+ # text_output2 = gr.Textbox()
484
+ # html_output = gr.Markdown()
485
+ # yt_transcribe_btn.click(yt_transcribe, [youtube_link], outputs=[html_output, text_output2])
486
+
487
+ demo.launch(debug=True)