Update app.py
app.py CHANGED
@@ -6,7 +6,7 @@ from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq
 import os
 
 # Load model and processor (assuming Whisper model for transcription)
-model_name = "danhtran2mind/Vi-Whisper-
+model_name = "danhtran2mind/Vi-Whisper-Tiny-finetuning"  # Replace with your model if different
 processor = AutoProcessor.from_pretrained(model_name)
 model = AutoModelForSpeechSeq2Seq.from_pretrained(model_name)
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -49,9 +49,9 @@ iface = gr.Interface(
     fn=transcribe_audio,
     inputs=gr.Audio(type="filepath", label="Upload Audio File"),
     outputs=gr.Textbox(label="Transcription Result", lines=5),
-    title="Vietnamese Whisper-
+    title="Vietnamese Whisper-Tiny finetuning",
     description="Upload an audio file (e.g., WAV, MP3) to transcribe its content using a speech-to-text model.",
-    examples=load_examples("assets/Vi-Whisper-
+    examples=load_examples("assets/Vi-Whisper-Tiny-finetuning")
 )
 
 # Launch the app
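For context, the hunks above reference a `transcribe_audio` function and a `load_examples` helper that are not shown in this diff. Below is a minimal, self-contained sketch of how they might be implemented, based only on the names and arguments visible in the diff; the function bodies and the `librosa` dependency are assumptions, not the repository's actual code.

```python
# Hypothetical sketch of the helpers assumed by the diff; the real app.py may differ.
import os

import librosa
import torch
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor

model_name = "danhtran2mind/Vi-Whisper-Tiny-finetuning"
processor = AutoProcessor.from_pretrained(model_name)
model = AutoModelForSpeechSeq2Seq.from_pretrained(model_name)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


def transcribe_audio(audio_path: str) -> str:
    """Transcribe one audio file with the Whisper-style model loaded above."""
    # Whisper checkpoints expect 16 kHz mono audio.
    speech, _ = librosa.load(audio_path, sr=16000, mono=True)
    inputs = processor(speech, sampling_rate=16000, return_tensors="pt")
    input_features = inputs.input_features.to(device)
    with torch.no_grad():
        predicted_ids = model.generate(input_features)
    return processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]


def load_examples(example_dir: str) -> list[list[str]]:
    """Collect audio files from a directory as Gradio example rows."""
    if not os.path.isdir(example_dir):
        return []
    audio_ext = (".wav", ".mp3", ".flac", ".ogg")
    return [
        [os.path.join(example_dir, name)]
        for name in sorted(os.listdir(example_dir))
        if name.lower().endswith(audio_ext)
    ]
```

Since the interface has a single `gr.Audio` input, each example row only needs one file path, which is why `load_examples` returns a list of one-element lists.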