# import whisper
from faster_whisper import WhisperModel
import datetime
import subprocess
import gradio as gr
from pathlib import Path
import pandas as pd
import re
import time
import os
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import silhouette_score

from pytube import YouTube
import yt_dlp
import torch
import pyannote.audio
from pyannote.audio.pipelines.speaker_verification import PretrainedSpeakerEmbedding
from pyannote.audio import Audio
from pyannote.core import Segment

from gpuinfo import GPUInfo

import wave
import contextlib
from transformers import pipeline
import psutil
import openai
import tempfile

whisper_models = ["tiny", "base", "small", "medium", "large-v1", "large-v2"]
source_languages = {
    "en": "English",
    "ja": "Japanese",
}

source_language_list = list(source_languages.keys())

MODEL_NAME = "vumichien/whisper-medium-jp"
lang = "ja"

device = 0 if torch.cuda.is_available() else "cpu"
pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device,
)
os.makedirs('output', exist_ok=True)
# Pin the decoder prompt so the pipeline always transcribes in the configured language
pipe.model.config.forced_decoder_ids = pipe.tokenizer.get_decoder_prompt_ids(language=lang, task="transcribe")

# ECAPA-TDNN speaker-embedding model from SpeechBrain; it returns a 192-dimensional
# vector per clip (matching the embeddings array allocated below)
embedding_model = PretrainedSpeakerEmbedding(
    "speechbrain/spkrec-ecapa-voxceleb",
    device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))


def transcribe(microphone, file_upload):
    warn_output = ""
    if (microphone is not None) and (file_upload is not None):
        warn_output = (
            "WARNING: You've uploaded an audio file and used the microphone. "
            "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n"
        )

    elif (microphone is None) and (file_upload is None):
        return "ERROR: You have to either use the microphone or upload an audio file"

    file = microphone if microphone is not None else file_upload

    text = pipe(file)["text"]

    return warn_output + text


def _return_yt_html_embed(yt_url):
    video_id = yt_url.split("?v=")[-1]
    HTML_str = (
        f'<center> <iframe width="500" height="320" src="https://www.youtube.com/embed/{video_id}"> </iframe>'
        " </center>"
    )
    return HTML_str


def yt_transcribe(yt_url):
    # yt = YouTube(yt_url)
    # stream = yt.streams.filter(only_audio=True)[0]
    # stream.download(filename="audio.mp3")
    html_embed_str = _return_yt_html_embed(yt_url)

    ydl_opts = {
        'format': 'bestaudio/best',  # audio-only is enough; the postprocessor extracts mp3
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
            'preferredquality': '192',
        }],
        'outtmpl': 'audio.%(ext)s',
    }

    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
        ydl.download([yt_url])

    text = pipe("audio.mp3")["text"]
    return html_embed_str, text


def convert_time(secs):
    return datetime.timedelta(seconds=round(secs))
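
# Example: str(convert_time(3661.4)) == "1:01:01" (rounded to the nearest whole second)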


def get_youtube(video_url):
    # yt = YouTube(video_url)
    # abs_video_path = yt.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first().download()

    ydl_opts = {
        'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best',
    }

    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
        info = ydl.extract_info(video_url, download=True)
        abs_video_path = ydl.prepare_filename(info)

    print("Successfully downloaded video")
    print(abs_video_path)
    return abs_video_path


def speech_to_text(video_file_path, selected_source_lang, whisper_model, num_speakers):
    """
    # Transcribe youtube link using OpenAI Whisper
    1. Using Open AI's Whisper model to seperate audio into segments and generate transcripts.
    2. Generating speaker embeddings for each segments.
    3. Applying agglomerative clustering on the embeddings to identify the speaker for each segment.

    Speech Recognition is based on models from OpenAI Whisper https://github.com/openai/whisper
    Speaker diarization model and pipeline from by https://github.com/pyannote/pyannote-audio
    """

    # model = whisper.load_model(whisper_model)
    # model = WhisperModel(whisper_model, device="cuda", compute_type="int8_float16")
    model = WhisperModel(whisper_model, compute_type="int8")
    time_start = time.time()
    if video_file_path is None:
        raise ValueError("Error: no video input")
    print(video_file_path)

    try:
        # Read the input and convert it to 16 kHz mono WAV
        _, file_ending = os.path.splitext(f'{video_file_path}')
        print(f'file ending is {file_ending}')
        audio_file = video_file_path.replace(file_ending, ".wav")
        print("starting conversion to wav")
        # subprocess avoids the shell-quoting pitfalls of os.system; -y overwrites an existing wav
        subprocess.run(['ffmpeg', '-y', '-i', video_file_path, '-ar', '16000', '-ac', '1',
                        '-c:a', 'pcm_s16le', audio_file], check=True)

        # Get duration
        with contextlib.closing(wave.open(audio_file, 'r')) as f:
            frames = f.getnframes()
            rate = f.getframerate()
            duration = frames / float(rate)
        print(f"conversion to wav ready, duration of audio file: {duration}")

        # Transcribe audio
        options = dict(language=selected_source_lang, beam_size=5, best_of=5)
        transcribe_options = dict(task="transcribe", **options)
        segments_raw, info = model.transcribe(audio_file, **transcribe_options)

        # Convert back to the original OpenAI segment format
        segments = []
        for segment_chunk in segments_raw:
            segments.append({
                "start": segment_chunk.start,
                "end": segment_chunk.end,
                "text": segment_chunk.text,
            })
        print("transcribe audio done with faster-whisper")
    except Exception as e:
        raise RuntimeError("Error converting video to audio") from e

    try:
        # Create a speaker embedding for each transcript segment
        audio = Audio()

        def segment_embedding(segment):
            start = segment["start"]
            # Whisper overshoots the end timestamp in the last segment
            end = min(duration, segment["end"])
            clip = Segment(start, end)
            waveform, sample_rate = audio.crop(audio_file, clip)
            # Add a batch dimension: the model expects (batch, channels, samples)
            return embedding_model(waveform[None])

        embeddings = np.zeros(shape=(len(segments), 192))
        for i, segment in enumerate(segments):
            embeddings[i] = segment_embedding(segment)
        embeddings = np.nan_to_num(embeddings)
        print(f'Embedding shape: {embeddings.shape}')

        if num_speakers == 0:
            # Search for the speaker count that maximizes the silhouette score
            # (capped by the number of segments, since clustering needs n_clusters <= n_samples)
            score_num_speakers = {}

            for n_speakers in range(2, min(10, len(segments) - 1) + 1):
                clustering = AgglomerativeClustering(n_speakers).fit(embeddings)
                score = silhouette_score(embeddings, clustering.labels_, metric='euclidean')
                score_num_speakers[n_speakers] = score
            best_num_speaker = max(score_num_speakers, key=score_num_speakers.get)
            print(f"The best number of speakers: {best_num_speaker} with {score_num_speakers[best_num_speaker]:.4f} score")
        else:
            best_num_speaker = num_speakers

        # Assign speaker label
        clustering = AgglomerativeClustering(best_num_speaker).fit(embeddings)
        labels = clustering.labels_
        for i in range(len(segments)):
            segments[i]["speaker"] = 'SPEAKER ' + str(labels[i] + 1)

        # Make output
        objects = {
            'Start': [],
            'End': [],
            'Speaker': [],
            'Text': []
        }
        text = ''
        for (i, segment) in enumerate(segments):
            if i == 0 or segments[i - 1]["speaker"] != segment["speaker"]:
                objects['Start'].append(str(convert_time(segment["start"])))
                objects['Speaker'].append(segment["speaker"])
                if i != 0:
                    objects['End'].append(str(convert_time(segments[i - 1]["end"])))
                    objects['Text'].append(text)
                    text = ''
            text += segment["text"] + ' '
        objects['End'].append(str(convert_time(segments[-1]["end"])))
        objects['Text'].append(text)

        time_end = time.time()
        time_diff = time_end - time_start
        memory = psutil.virtual_memory()
        gpu_utilization, gpu_memory = GPUInfo.gpu_usage()
        gpu_utilization = gpu_utilization[0] if len(gpu_utilization) > 0 else 0
        gpu_memory = gpu_memory[0] if len(gpu_memory) > 0 else 0
        system_info = f"""
        *Memory: {memory.total / (1024 * 1024 * 1024):.2f}GB, used: {memory.percent}%, available: {memory.available / (1024 * 1024 * 1024):.2f}GB.* 
        *Processing time: {time_diff:.5} seconds.*
        *GPU Utilization: {gpu_utilization}%, GPU Memory: {gpu_memory}MiB.*
        """
        save_path = "output/transcript_result.csv"
        df_results = pd.DataFrame(objects)
        df_results.to_csv(save_path)
        return df_results, system_info, save_path

    except Exception as e:
        raise RuntimeError("Error running inference with local model") from e
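
# A minimal sketch of calling speech_to_text directly, outside the Gradio UI.
# "meeting.mp4" is a hypothetical local file; num_speakers=0 triggers the
# automatic silhouette-score search for the speaker count:
#
#   df, system_info_md, csv_path = speech_to_text("meeting.mp4", "en", "base", 0)
#   print(df.head())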


def create_transcription_summary(openai_key, prompt):
    openai.api_key = openai_key
    system_template = prompt

    with open("output/transcript_result.csv", "r") as file:
        transcript_text = file.read()

    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": system_template},
            {"role": "user", "content": transcript_text}
        ]
    )
    transcript_summary = completion.choices[0].message.content
    return transcript_summary
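
# A minimal sketch of direct use, assuming output/transcript_result.csv already
# exists and the key is valid (this script targets the pre-1.0 openai SDK, whose
# openai.ChatCompletion.create API is used above). "sk-..." is a placeholder:
#
#   summary = create_transcription_summary("sk-...", "Summarize this transcript in Markdown.")
#   print(summary)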


# ---- Gradio Layout -----
# Inspiration from https://huggingface.co/spaces/RASMUS/Whisper-youtube-crosslingual-subtitles
video_in = gr.Video(label="Video file", mirror_webcam=False)
youtube_url_in = gr.Textbox(label="Youtube url", lines=1, interactive=True)
df_init = pd.DataFrame(columns=['Start', 'End', 'Speaker', 'Text'])
memory = psutil.virtual_memory()
selected_source_lang = gr.Dropdown(choices=source_language_list, type="value", value="en",
                                   label="Spoken language in video", interactive=True)
selected_whisper_model = gr.Dropdown(choices=whisper_models, type="value", value="base", label="Selected Whisper model",
                                     interactive=True)
number_speakers = gr.Number(precision=0, value=0,
                            label="Input the number of speakers for better results. If the value is 0, the model will automatically find the best number of speakers",
                            interactive=True)
system_info = gr.Markdown(
    f"*Memory: {memory.total / (1024 * 1024 * 1024):.2f}GB, used: {memory.percent}%, available: {memory.available / (1024 * 1024 * 1024):.2f}GB*")
download_transcript = gr.File(label="Download transcript")
transcription_df = gr.DataFrame(value=df_init, label="Transcription dataframe", row_count=(0, "dynamic"), max_rows=10,
                                wrap=True, overflow_row_behaviour='paginate')
openai_key_in = gr.Textbox(lines=1, label="openai_key", type="password")
openai_prompt_in = gr.TextArea(label="openai_prompt", value="""音声の文字起こしが渡されます。

この音声のサマリーをMarkdown形式で作成してください。サマリーは、以下のような形式で書いてください。
- 会議の目的
- 会議の内容
- 会議の結果""")
openai_summary_out = gr.Textbox(label="openai_summary")
save_path = "output/transcript_result.csv"

title = "Whisper speaker diarization"
demo = gr.Blocks(title=title)
demo.encrypt = False

with demo:
    with gr.Tab("Whisper speaker diarization"):
        gr.Markdown('''
            <div>
            <h1 style='text-align: center'>Whisper speaker diarization</h1>
            This space uses Whisper models from <a href='https://github.com/openai/whisper' target='_blank'><b>OpenAI</b></a> with <a href='https://github.com/guillaumekln/faster-whisper' target='_blank'><b>CTranslate2</b></a>, a fast inference engine for Transformer models, to recognize speech (about 4x faster than the original OpenAI implementation with the same accuracy),
            and the ECAPA-TDNN model from <a href='https://github.com/speechbrain/speechbrain' target='_blank'><b>SpeechBrain</b></a> to encode and classify speakers
            </div>
        ''')

        with gr.Row():
            gr.Markdown('''
            ### Transcribe a YouTube link using OpenAI Whisper
            ##### 1. Use OpenAI's Whisper model to separate the audio into segments and generate transcripts.
            ##### 2. Generate speaker embeddings for each segment.
            ##### 3. Apply agglomerative clustering on the embeddings to identify the speaker for each segment.
            ''')

        with gr.Row():
            gr.Markdown('''
                ### You can test with the following examples:
                ''')
        examples = gr.Examples(examples=
                               ["https://www.youtube.com/watch?v=N251e97Awh4",
                                "https://www.youtube.com/watch?v=-UX0X45sYe4",
                                "https://www.youtube.com/watch?v=REMyAsPC2So"],
                               label="Examples", inputs=[youtube_url_in])

        with gr.Row():
            with gr.Column():
                youtube_url_in.render()
                download_youtube_btn = gr.Button("Download Youtube video")
                download_youtube_btn.click(get_youtube, [youtube_url_in], [video_in])

        with gr.Row():
            with gr.Column():
                video_in.render()
                with gr.Column():
                    gr.Markdown('''
                    ##### Here you can start the transcription process.
                    ##### Please select the source language for transcription.
                    ##### You can set the assumed number of speakers (0 = detect automatically).
                    ''')
                selected_source_lang.render()
                selected_whisper_model.render()
                number_speakers.render()
                transcribe_btn = gr.Button("Transcribe audio and diarization")
                transcribe_btn.click(speech_to_text,
                                     [video_in, selected_source_lang, selected_whisper_model, number_speakers],
                                     [transcription_df, system_info, download_transcript]
                                     )

        with gr.Row():
            gr.Markdown('''
            ##### Here you will get the transcription output
            ''')

        with gr.Row():
            with gr.Column():
                download_transcript.render()
                transcription_df.render()
                # system_info.render()
                # gr.Markdown(
                #     '''<center><img src='https://visitor-badge.glitch.me/badge?page_id=WhisperDiarizationSpeakers' alt='visitor badge'><a href="https://opensource.org/licenses/Apache-2.0"><img src='https://img.shields.io/badge/License-Apache_2.0-blue.svg' alt='License: Apache 2.0'></center>''')

        with gr.Row():
            with gr.Column():
                gr.Markdown('''
                From here, you can analyze the completed transcription using ChatGPT.
                Feel free to change the prompt as needed.
                Depending on the prompt, you can generate a summary of the conversation or an action-item list.
                ''')
                openai_key_in.render()
                openai_prompt_in.render()
                openai_summary_btn = gr.Button("Evaluate and analyze transcription content")
                openai_summary_btn.click(create_transcription_summary,
                                         [openai_key_in, openai_prompt_in],
                                         [openai_summary_out]
                                         )

        with gr.Row():
            with gr.Column():
                openai_summary_out.render()
                system_info.render()
                gr.Markdown(
                    '''<center><img src='https://visitor-badge.glitch.me/badge?page_id=WhisperDiarizationSpeakers' alt='visitor badge'><a href="https://opensource.org/licenses/Apache-2.0"><img src='https://img.shields.io/badge/License-Apache_2.0-blue.svg' alt='License: Apache 2.0'></center>''')


    # with gr.Tab("Whisper Transcribe Japanese Audio"):
    #     gr.Markdown(f'''
    #           <div>
    #           <h1 style='text-align: center'>Whisper Transcribe Japanese Audio</h1>
    #           </div>
    #           Transcribe long-form microphone or audio inputs with the click of a button! The fine-tuned
    #           checkpoint <a href='https://huggingface.co/{MODEL_NAME}' target='_blank'><b>{MODEL_NAME}</b></a> to transcribe audio files of arbitrary length.
    #       ''')
    #     microphone = gr.inputs.Audio(source="microphone", type="filepath", optional=True)
    #     upload = gr.inputs.Audio(source="upload", type="filepath", optional=True)
    #     transcribe_btn = gr.Button("Transcribe Audio")
    #     text_output = gr.Textbox()
    #     with gr.Row():
    #         gr.Markdown('''
    #             ### You can test by following examples:
    #             ''')
    #     examples = gr.Examples(examples=
    #                            ["sample1.wav",
    #                             "sample2.wav",
    #                             ],
    #                            label="Examples", inputs=[upload])
    #     transcribe_btn.click(transcribe, [microphone, upload], outputs=text_output)
    #
    # with gr.Tab("Whisper Transcribe Japanese YouTube"):
    #     gr.Markdown(f'''
    #           <div>
    #           <h1 style='text-align: center'>Whisper Transcribe Japanese YouTube</h1>
    #           </div>
    #             Transcribe long-form YouTube videos with the click of a button! The fine-tuned checkpoint:
    #             <a href='https://huggingface.co/{MODEL_NAME}' target='_blank'><b>{MODEL_NAME}</b></a> to transcribe audio files of arbitrary length.
    #         ''')
    #     youtube_link = gr.Textbox(label="Youtube url", lines=1, interactive=True)
    #     yt_transcribe_btn = gr.Button("Transcribe YouTube")
    #     text_output2 = gr.Textbox()
    #     html_output = gr.Markdown()
    #     yt_transcribe_btn.click(yt_transcribe, [youtube_link], outputs=[html_output, text_output2])

demo.launch(debug=True)