Commit b912ddb · Parent: d690b2a
Add progress bar
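At a glance: transcribe() gains a progress=gr.Progress() parameter, reports coarse stages ("Starting", "Resampling", "Transcribing"), and swaps the plain chunk loop for progress.tqdm(chunks, ...) so the Space shows per-chunk progress.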
app.py CHANGED

```diff
@@ -14,7 +14,7 @@ model_id = "facebook/mms-1b-all"
 processor = AutoProcessor.from_pretrained(model_id)
 model = Wav2Vec2ForCTC.from_pretrained(model_id)
 
-def transcribe(audio_file_mic=None, audio_file_upload=None, language="English (eng)"):
+def transcribe(audio_file_mic=None, audio_file_upload=None, language="English (eng)", progress=gr.Progress()):
     if audio_file_mic:
         audio_file = audio_file_mic
     elif audio_file_upload:
@@ -22,10 +22,12 @@ def transcribe(audio_file_mic=None, audio_file_upload=None, language="English (e
     else:
         return "Please upload an audio file or record one"
 
+    progress(0, desc="Starting")
+
     # Make sure audio is 16kHz
     speech, sample_rate = librosa.load(audio_file)
     if sample_rate != 16000:
-
+        progress(1, desc="Resampling")
         speech = librosa.resample(speech, orig_sr=sample_rate, target_sr=16000)
 
     # Cut speech into chunks
@@ -38,7 +40,8 @@ def transcribe(audio_file_mic=None, audio_file_upload=None, language="English (e
     model.load_adapter(language_code)
 
     transcriptions = []
-    for chunk in chunks:
+    progress(2, desc="Transcribing")
+    for chunk in progress.tqdm(chunks, desc="Transcribing"):
         inputs = processor(chunk, sampling_rate=16_000, return_tensors="pt")
 
         with torch.no_grad():
```
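For context on the pattern this commit relies on: when a `gr.Progress` instance is the default value of a function parameter, Gradio swaps in a live tracker on each call; the tracker is callable (`progress(fraction, desc=...)`) and its `tqdm()` method wraps any iterable so the bar advances once per iteration. A minimal, self-contained sketch of the same pattern, with illustrative names (`slow_transcribe`, `demo`) that are not part of app.py:

```python
import time

import gradio as gr


def slow_transcribe(text, progress=gr.Progress()):
    # Gradio sees the gr.Progress default and injects a live tracker at call time.
    progress(0, desc="Starting")
    chunks = text.split()
    results = []
    # progress.tqdm advances the bar once per item as the iterable is consumed.
    for chunk in progress.tqdm(chunks, desc="Transcribing"):
        time.sleep(0.2)  # stand-in for per-chunk model inference
        results.append(chunk.upper())
    return " ".join(results)


demo = gr.Interface(fn=slow_transcribe, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch()
```

One caveat worth noting: called directly, `progress()` reads a float as a 0-to-1 fraction (a tuple is read as `(index, total)`), so the literal `progress(1, ...)` and `progress(2, ...)` calls in this commit mark the bar at or past full; the visible per-chunk movement comes from `progress.tqdm`.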