Pijush2023 committed
Commit 3f58796 · verified · 1 Parent(s): 22a93ba

Update app.py

Files changed (1)
  1. app.py +30 -23
app.py CHANGED
@@ -1,32 +1,39 @@
 import gradio as gr
 import numpy as np
-from gradio_client import Client
+import torch
+from transformers import pipeline, AutoModelForSpeechSeq2Seq, AutoProcessor
 
-client = Client("Pijush2023/voitex07122024")
+model_id = 'openai/whisper-large-v3'
+device = "cuda:0" if torch.cuda.is_available() else "cpu"
+torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
+model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, torch_dtype=torch_dtype).to(device)
+processor = AutoProcessor.from_pretrained(model_id)
+
+pipe_asr = pipeline("automatic-speech-recognition", model=model, tokenizer=processor.tokenizer, feature_extractor=processor.feature_extractor, max_new_tokens=128, chunk_length_s=15, batch_size=16, torch_dtype=torch_dtype, device=device, return_timestamps=True)
+
+def transcribe_function(new_chunk, state):
+    try:
+        sr, y = new_chunk[0], new_chunk[1]
+    except TypeError:
+        print(f"Error chunk structure: {type(new_chunk)}, content: {new_chunk}")
+        return state, "", None
+
+    y = y.astype(np.float32) / np.max(np.abs(y))
 
-def transcribe_audio_from_api(new_chunk, state):
-    sr, y = new_chunk
-    y_list = y.tolist()  # Convert NumPy array to list for JSON serialization
-    new_chunk_serialized = {"sampling_rate": sr, "array": y_list}
-
-    # Update the state with the new chunk
     if state is not None:
-        state += new_chunk_serialized["array"]
+        state = np.concatenate([state, y])
     else:
-        state = new_chunk_serialized["array"]
-
-    chunk_to_send = {"sampling_rate": sr, "array": state}
-
-    result = client.predict(
-        new_chunk=chunk_to_send,
-        api_name="/SAMLOne_real_time"
-    )
-
-    return state, result[1]  # Return the updated state and transcribed text
+        state = y
 
-with gr.Blocks() as frontend:
-    gr.Markdown("# Voice to Text Transcription (Frontend)")
+    result = pipe_asr({"array": state, "sampling_rate": sr}, return_timestamps=False)
 
+    full_text = result.get("text", "")
+
+    return state, full_text
+
+with gr.Blocks() as demo:
+    gr.Markdown("# Voice to Text Transcription")
+
     state = gr.State(None)
 
     with gr.Row():
@@ -35,6 +42,6 @@ with gr.Blocks() as frontend:
     with gr.Column():
         output_text = gr.Textbox(label="Transcription")
 
-    audio_input.stream(transcribe_audio_from_api, inputs=[audio_input, state], outputs=[state, output_text])
+    audio_input.stream(transcribe_function, inputs=[audio_input, state], outputs=[state, output_text], api_name="SAMLOne_real_time")
 
-frontend.launch()
+demo.launch(show_error=True)
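
For context, a minimal smoke test of the new transcribe_function could look like the sketch below. It is not part of this commit: it assumes the model and pipe_asr pipeline defined above are already loaded, feeds two synthetic one-second chunks in the (sampling_rate, numpy_array) form that a streaming gr.Audio component passes in, and checks that the accumulated state grows across calls.

# Hypothetical smoke test (not in the commit): exercise the streaming-state
# accumulation in transcribe_function with two synthetic audio chunks.
import numpy as np

sr = 16000  # assumed sampling rate for the synthetic chunks
chunk1 = (sr, np.random.randint(-32768, 32767, size=sr, dtype=np.int16))
chunk2 = (sr, np.random.randint(-32768, 32767, size=sr, dtype=np.int16))

state, text = transcribe_function(chunk1, None)   # first chunk seeds the state
state, text = transcribe_function(chunk2, state)  # second chunk is concatenated onto it

assert state.dtype == np.float32   # chunks are normalized to float32 before transcription
assert state.shape[0] == 2 * sr    # state now holds both one-second chunks
print(text)                        # transcription of all audio accumulated so far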