yama committed
Commit 8e28b8d · Parent: fe81c29

Update app.py

Files changed (1):
  1. app.py +195 -155
app.py CHANGED
@@ -30,104 +30,104 @@ import psutil
 whisper_models = ["tiny", "base", "small", "medium", "large-v1", "large-v2"]
 source_languages = {
     "en": "English",
-    # "zh": "Chinese",
-    # "de": "German",
-    # "es": "Spanish",
-    # "ru": "Russian",
-    # "ko": "Korean",
-    # "fr": "French",
+    "zh": "Chinese",
+    "de": "German",
+    "es": "Spanish",
+    "ru": "Russian",
+    "ko": "Korean",
+    "fr": "French",
     "ja": "Japanese",
-    # "pt": "Portuguese",
-    # "tr": "Turkish",
-    # "pl": "Polish",
-    # "ca": "Catalan",
-    # "nl": "Dutch",
-    # "ar": "Arabic",
-    # "sv": "Swedish",
-    # "it": "Italian",
-    # "id": "Indonesian",
-    # "hi": "Hindi",
-    # "fi": "Finnish",
-    # "vi": "Vietnamese",
-    # "he": "Hebrew",
-    # "uk": "Ukrainian",
-    # "el": "Greek",
-    # "ms": "Malay",
-    # "cs": "Czech",
-    # "ro": "Romanian",
-    # "da": "Danish",
-    # "hu": "Hungarian",
-    # "ta": "Tamil",
-    # "no": "Norwegian",
-    # "th": "Thai",
-    # "ur": "Urdu",
-    # "hr": "Croatian",
-    # "bg": "Bulgarian",
-    # "lt": "Lithuanian",
-    # "la": "Latin",
-    # "mi": "Maori",
-    # "ml": "Malayalam",
-    # "cy": "Welsh",
-    # "sk": "Slovak",
-    # "te": "Telugu",
-    # "fa": "Persian",
-    # "lv": "Latvian",
-    # "bn": "Bengali",
-    # "sr": "Serbian",
-    # "az": "Azerbaijani",
-    # "sl": "Slovenian",
-    # "kn": "Kannada",
-    # "et": "Estonian",
-    # "mk": "Macedonian",
-    # "br": "Breton",
-    # "eu": "Basque",
-    # "is": "Icelandic",
-    # "hy": "Armenian",
-    # "ne": "Nepali",
-    # "mn": "Mongolian",
-    # "bs": "Bosnian",
-    # "kk": "Kazakh",
-    # "sq": "Albanian",
-    # "sw": "Swahili",
-    # "gl": "Galician",
-    # "mr": "Marathi",
-    # "pa": "Punjabi",
-    # "si": "Sinhala",
-    # "km": "Khmer",
-    # "sn": "Shona",
-    # "yo": "Yoruba",
-    # "so": "Somali",
-    # "af": "Afrikaans",
-    # "oc": "Occitan",
-    # "ka": "Georgian",
-    # "be": "Belarusian",
-    # "tg": "Tajik",
-    # "sd": "Sindhi",
-    # "gu": "Gujarati",
-    # "am": "Amharic",
-    # "yi": "Yiddish",
-    # "lo": "Lao",
-    # "uz": "Uzbek",
-    # "fo": "Faroese",
-    # "ht": "Haitian creole",
-    # "ps": "Pashto",
-    # "tk": "Turkmen",
-    # "nn": "Nynorsk",
-    # "mt": "Maltese",
-    # "sa": "Sanskrit",
-    # "lb": "Luxembourgish",
-    # "my": "Myanmar",
-    # "bo": "Tibetan",
-    # "tl": "Tagalog",
-    # "mg": "Malagasy",
-    # "as": "Assamese",
-    # "tt": "Tatar",
-    # "haw": "Hawaiian",
-    # "ln": "Lingala",
-    # "ha": "Hausa",
-    # "ba": "Bashkir",
-    # "jw": "Javanese",
-    # "su": "Sundanese",
+    "pt": "Portuguese",
+    "tr": "Turkish",
+    "pl": "Polish",
+    "ca": "Catalan",
+    "nl": "Dutch",
+    "ar": "Arabic",
+    "sv": "Swedish",
+    "it": "Italian",
+    "id": "Indonesian",
+    "hi": "Hindi",
+    "fi": "Finnish",
+    "vi": "Vietnamese",
+    "he": "Hebrew",
+    "uk": "Ukrainian",
+    "el": "Greek",
+    "ms": "Malay",
+    "cs": "Czech",
+    "ro": "Romanian",
+    "da": "Danish",
+    "hu": "Hungarian",
+    "ta": "Tamil",
+    "no": "Norwegian",
+    "th": "Thai",
+    "ur": "Urdu",
+    "hr": "Croatian",
+    "bg": "Bulgarian",
+    "lt": "Lithuanian",
+    "la": "Latin",
+    "mi": "Maori",
+    "ml": "Malayalam",
+    "cy": "Welsh",
+    "sk": "Slovak",
+    "te": "Telugu",
+    "fa": "Persian",
+    "lv": "Latvian",
+    "bn": "Bengali",
+    "sr": "Serbian",
+    "az": "Azerbaijani",
+    "sl": "Slovenian",
+    "kn": "Kannada",
+    "et": "Estonian",
+    "mk": "Macedonian",
+    "br": "Breton",
+    "eu": "Basque",
+    "is": "Icelandic",
+    "hy": "Armenian",
+    "ne": "Nepali",
+    "mn": "Mongolian",
+    "bs": "Bosnian",
+    "kk": "Kazakh",
+    "sq": "Albanian",
+    "sw": "Swahili",
+    "gl": "Galician",
+    "mr": "Marathi",
+    "pa": "Punjabi",
+    "si": "Sinhala",
+    "km": "Khmer",
+    "sn": "Shona",
+    "yo": "Yoruba",
+    "so": "Somali",
+    "af": "Afrikaans",
+    "oc": "Occitan",
+    "ka": "Georgian",
+    "be": "Belarusian",
+    "tg": "Tajik",
+    "sd": "Sindhi",
+    "gu": "Gujarati",
+    "am": "Amharic",
+    "yi": "Yiddish",
+    "lo": "Lao",
+    "uz": "Uzbek",
+    "fo": "Faroese",
+    "ht": "Haitian creole",
+    "ps": "Pashto",
+    "tk": "Turkmen",
+    "nn": "Nynorsk",
+    "mt": "Maltese",
+    "sa": "Sanskrit",
+    "lb": "Luxembourgish",
+    "my": "Myanmar",
+    "bo": "Tibetan",
+    "tl": "Tagalog",
+    "mg": "Malagasy",
+    "as": "Assamese",
+    "tt": "Tatar",
+    "haw": "Hawaiian",
+    "ln": "Lingala",
+    "ha": "Hausa",
+    "ba": "Bashkir",
+    "jw": "Javanese",
+    "su": "Sundanese",
 }
 
 source_language_list = [key[0] for key in source_languages.items()]
@@ -357,7 +357,7 @@ video_in = gr.Video(label="Video file", mirror_webcam=False)
 youtube_url_in = gr.Textbox(label="Youtube url", lines=1, interactive=True)
 df_init = pd.DataFrame(columns=['Start', 'End', 'Speaker', 'Text'])
 memory = psutil.virtual_memory()
-selected_source_lang = gr.Dropdown(choices=source_language_list, type="value", value="ja",
+selected_source_lang = gr.Dropdown(choices=source_language_list, type="value", value="en",
                                    label="Spoken language in video", interactive=True)
 selected_whisper_model = gr.Dropdown(choices=whisper_models, type="value", value="base", label="Selected Whisper model",
                                      interactive=True)
@@ -374,67 +374,107 @@ demo = gr.Blocks(title=title)
 demo.encrypt = False
 
 with demo:
-    # gr.Markdown('''
-    # <div>
-    # <h1 style='text-align: center'>Whisper speaker diarization</h1>
-    # This space uses Whisper models from <a href='https://github.com/openai/whisper' target='_blank'><b>OpenAI</b></a> with <a href='https://github.com/guillaumekln/faster-whisper' target='_blank'><b>CTranslate2</b></a> which is a fast inference engine for Transformer models to recognize the speech (4 times faster than original openai model with same accuracy)
-    # and ECAPA-TDNN model from <a href='https://github.com/speechbrain/speechbrain' target='_blank'><b>SpeechBrain</b></a> to encode and clasify speakers
-    # </div>
-    # ''')
-    #
-    # with gr.Row():
-    #     gr.Markdown('''
-    #     ### Transcribe youtube link using OpenAI Whisper
-    #     ##### 1. Using Open AI's Whisper model to seperate audio into segments and generate transcripts.
-    #     ##### 2. Generating speaker embeddings for each segments.
-    #     ##### 3. Applying agglomerative clustering on the embeddings to identify the speaker for each segment.
-    #     ''')
-
-    with gr.Row():
+    with gr.Tab("Whisper speaker diarization"):
         gr.Markdown('''
-        ### You can test by following examples:
+        <div>
+        <h1 style='text-align: center'>Whisper speaker diarization</h1>
+        This space uses Whisper models from <a href='https://github.com/openai/whisper' target='_blank'><b>OpenAI</b></a> with <a href='https://github.com/guillaumekln/faster-whisper' target='_blank'><b>CTranslate2</b></a> which is a fast inference engine for Transformer models to recognize the speech (4 times faster than original openai model with same accuracy)
+        and ECAPA-TDNN model from <a href='https://github.com/speechbrain/speechbrain' target='_blank'><b>SpeechBrain</b></a> to encode and clasify speakers
+        </div>
+        ''')
+
+        with gr.Row():
+            gr.Markdown('''
+            ### Transcribe youtube link using OpenAI Whisper
+            ##### 1. Using Open AI's Whisper model to seperate audio into segments and generate transcripts.
+            ##### 2. Generating speaker embeddings for each segments.
+            ##### 3. Applying agglomerative clustering on the embeddings to identify the speaker for each segment.
         ''')
-    examples = gr.Examples(examples=
-        ["https://www.youtube.com/watch?v=j7BfEzAFuYc&t=32s",
-         "https://www.youtube.com/watch?v=-UX0X45sYe4",
-         "https://www.youtube.com/watch?v=7minSgqi-Gw"],
-        label="Examples", inputs=[youtube_url_in])
-
-    with gr.Row():
-        with gr.Column():
-            youtube_url_in.render()
-            download_youtube_btn = gr.Button("Download Youtube video")
-            download_youtube_btn.click(get_youtube, [youtube_url_in], [video_in])
-            print(video_in)
-
-    with gr.Row():
-        with gr.Column():
-            video_in.render()
-        with gr.Column():
-            gr.Markdown('''
-            ##### Here you can start the transcription process.
-            ##### Please select the source language for transcription.
-            ##### You can select a range of assumed numbers of speakers.
+
+        with gr.Row():
+            gr.Markdown('''
+            ### You can test by following examples:
             ''')
-            selected_source_lang.render()
-            selected_whisper_model.render()
-            number_speakers.render()
-            transcribe_btn = gr.Button("Transcribe audio and diarization")
-            transcribe_btn.click(speech_to_text,
-                                 [video_in, selected_source_lang, selected_whisper_model, number_speakers],
-                                 [transcription_df, system_info, download_transcript]
-                                 )
-
-    with gr.Row():
-        gr.Markdown('''
-        ##### Here you will get transcription output
-        ##### ''')
+        examples = gr.Examples(examples=
+            ["https://www.youtube.com/watch?v=j7BfEzAFuYc&t=32s",
+             "https://www.youtube.com/watch?v=-UX0X45sYe4",
+             "https://www.youtube.com/watch?v=7minSgqi-Gw"],
+            label="Examples", inputs=[youtube_url_in])
 
-    with gr.Row():
-        with gr.Column():
-            download_transcript.render()
-            transcription_df.render()
-            # system_info.render()
+        with gr.Row():
+            with gr.Column():
+                youtube_url_in.render()
+                download_youtube_btn = gr.Button("Download Youtube video")
+                download_youtube_btn.click(get_youtube, [youtube_url_in], [
+                    video_in])
+                print(video_in)
 
+        with gr.Row():
+            with gr.Column():
+                video_in.render()
+            with gr.Column():
+                gr.Markdown('''
+                ##### Here you can start the transcription process.
+                ##### Please select the source language for transcription.
+                ##### You can select a range of assumed numbers of speakers.
+                ''')
+                selected_source_lang.render()
+                selected_whisper_model.render()
+                number_speakers.render()
+                transcribe_btn = gr.Button("Transcribe audio and diarization")
+                transcribe_btn.click(speech_to_text,
+                                     [video_in, selected_source_lang, selected_whisper_model, number_speakers],
+                                     [transcription_df, system_info, download_transcript]
+                                     )
+
+        with gr.Row():
+            gr.Markdown('''
+            ##### Here you will get transcription output
+            ##### ''')
+
+        with gr.Row():
+            with gr.Column():
+                download_transcript.render()
+                transcription_df.render()
+                system_info.render()
+                gr.Markdown(
+                    '''<center><img src='https://visitor-badge.glitch.me/badge?page_id=WhisperDiarizationSpeakers' alt='visitor badge'><a href="https://opensource.org/licenses/Apache-2.0"><img src='https://img.shields.io/badge/License-Apache_2.0-blue.svg' alt='License: Apache 2.0'></center>''')
+
+    with gr.Tab("Whisper Transcribe Japanese Audio"):
+        gr.Markdown(f'''
+        <div>
+        <h1 style='text-align: center'>Whisper Transcribe Japanese Audio</h1>
+        </div>
+        Transcribe long-form microphone or audio inputs with the click of a button! The fine-tuned
+        checkpoint <a href='https://huggingface.co/{MODEL_NAME}' target='_blank'><b>{MODEL_NAME}</b></a> to transcribe audio files of arbitrary length.
+        ''')
+        microphone = gr.inputs.Audio(source="microphone", type="filepath", optional=True)
+        upload = gr.inputs.Audio(source="upload", type="filepath", optional=True)
+        transcribe_btn = gr.Button("Transcribe Audio")
+        text_output = gr.Textbox()
+        with gr.Row():
+            gr.Markdown('''
+            ### You can test by following examples:
+            ''')
+        examples = gr.Examples(examples=
+            ["sample1.wav",
+             "sample2.wav",
+             ],
+            label="Examples", inputs=[upload])
+        transcribe_btn.click(transcribe, [microphone, upload], outputs=text_output)
+
+    with gr.Tab("Whisper Transcribe Japanese YouTube"):
+        gr.Markdown(f'''
+        <div>
+        <h1 style='text-align: center'>Whisper Transcribe Japanese YouTube</h1>
+        </div>
+        Transcribe long-form YouTube videos with the click of a button! The fine-tuned checkpoint:
+        <a href='https://huggingface.co/{MODEL_NAME}' target='_blank'><b>{MODEL_NAME}</b></a> to transcribe audio files of arbitrary length.
+        ''')
+        youtube_link = gr.Textbox(label="Youtube url", lines=1, interactive=True)
+        yt_transcribe_btn = gr.Button("Transcribe YouTube")
+        text_output2 = gr.Textbox()
+        html_output = gr.Markdown()
+        yt_transcribe_btn.click(yt_transcribe, [youtube_link], outputs=[html_output, text_output2])
 
 demo.launch(debug=True)
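For context on the first two hunks: the commit re-enables every previously commented-out Whisper source language and flips the dropdown default from `"ja"` to `"en"`. Below is a minimal sketch of how the language table feeds the dropdown, assuming Gradio 3.x and trimming the dict for brevity. Note that the app's `[key[0] for key in source_languages.items()]` iterates `(code, name)` pairs and keeps element 0 of each, i.e. it just collects the keys, so `list(source_languages)` is an equivalent, more idiomatic spelling.

```python
import gradio as gr

# Trimmed to three entries for brevity; the real table lists ~100 languages.
source_languages = {
    "en": "English",
    "ja": "Japanese",
    "zh": "Chinese",
}

# Equivalent to the app's `[key[0] for key in source_languages.items()]`:
# element 0 of each (code, name) item is the language code, i.e. the dict key.
source_language_list = list(source_languages)

# As in app.py, the component is created up front and .render()ed inside the
# Blocks layout later; value="en" is the new default introduced by this commit.
selected_source_lang = gr.Dropdown(choices=source_language_list, type="value",
                                   value="en", label="Spoken language in video",
                                   interactive=True)
```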
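The rewritten layout relies on two Gradio Blocks patterns: `gr.Tab` containers, which give each tool its own tab, and `Button.click(fn, inputs, outputs)`, which wires a callback to UI components. The new tabs also use the legacy `gr.inputs.Audio` namespace, which Gradio 3.x still accepts but deprecates in favor of `gr.Audio`. Below is a stripped-down, runnable sketch of the same wiring, assuming Gradio 3.x; the stub callback is a hypothetical stand-in for the app's real `transcribe`/`yt_transcribe` functions.

```python
import gradio as gr

def transcribe_stub(audio_path):
    # Hypothetical stand-in for the app's transcription function.
    return f"would transcribe: {audio_path}"

with gr.Blocks(title="Whisper speaker diarization") as demo:
    with gr.Tab("Whisper speaker diarization"):
        gr.Markdown("Diarization UI renders here.")
    with gr.Tab("Whisper Transcribe Japanese Audio"):
        # gr.Audio is the Gradio 3.x spelling of the legacy gr.inputs.Audio.
        upload = gr.Audio(source="upload", type="filepath")
        transcribe_btn = gr.Button("Transcribe Audio")
        text_output = gr.Textbox()
        # click(fn, inputs, outputs) runs fn on the current input values and
        # routes its return value into the output component.
        transcribe_btn.click(transcribe_stub, [upload], outputs=text_output)

demo.launch(debug=True)
```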