thewh1teagle committed
Commit 24a8315 · 0 Parent(s)
Files changed (6):
  1. .gitattributes +1 -0
  2. Dockerfile +24 -0
  3. README.md +10 -0
  4. app.py +155 -0
  5. example1.wav +3 -0
  6. requirements.txt +441 -0
.gitattributes ADDED
@@ -0,0 +1 @@
+ *.wav filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,24 @@
+ FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim
+
+ RUN useradd -m -u 1000 user
+
+ RUN apt-get update && apt-get install -y git ffmpeg && rm -rf /var/lib/apt/lists/*
+
+ WORKDIR /app
+
+ # Use system Python environment (recommended for containers)
+ ENV UV_SYSTEM_PYTHON=1
+
+ COPY requirements.txt .
+ RUN uv pip install --no-cache -r requirements.txt
+
+ COPY . .
+
+ # Set up the non-root user
+ RUN chown -R user:user /app
+ USER user
+ ENV HOME=/app
+
+ EXPOSE 7860
+
+ CMD ["uv", "run", "app.py", "--model", "thewh1teagle/whisper-heb-ipa"]
README.md ADDED
@@ -0,0 +1,10 @@
+ ---
+ title: Transcribe Hebrew Speech into IPA
+ emoji: 🎙️
+ colorFrom: red
+ colorTo: blue
+ sdk: docker
+ sdk_version: "5.45.0"
+ app_file: app.py
+ pinned: false
+ ---
app.py ADDED
@@ -0,0 +1,155 @@
+
+ """
+ Usage:
+ wget https://github.com/thewh1teagle/phonikud-chatterbox/releases/download/asset-files-v1/female1.wav -O example1.wav
+
+ # Run with default HF model
+ uv run app.py
+
+ # Or run with a local checkpoint
+ uv run app.py --model ./whisper-heb-ipa/checkpoint-600
+
+ # Or with Whisper small
+ uv run app.py --model openai/whisper-small
+
+ # Or with thewh1teagle/whisper-heb-ipa
+ uv run app.py --model thewh1teagle/whisper-heb-ipa
+ """
+
+
+ import torch
+ from transformers import pipeline
+ import gradio as gr
+ import argparse
+ from pydub import AudioSegment
+ from pydub.effects import normalize
+ import tempfile
+ import os
+
+ def main():
+     parser = argparse.ArgumentParser(description="Whisper Transcription Demo")
+     parser.add_argument(
+         "--model",
+         type=str,
+         default="openai/whisper-small",
+         help="Model name or path for Whisper (default: openai/whisper-small)"
+     )
+     args = parser.parse_args()
+
+     MODEL_NAME = args.model
+     BATCH_SIZE = 8
+
+     device = 0 if torch.cuda.is_available() else "cpu"
+
+     pipe = pipeline(
+         task="automatic-speech-recognition",
+         model=MODEL_NAME,
+         chunk_length_s=30,
+         device=device,
+     )
+
+     def normalize_audio(file_path):
+         """Normalize audio using pydub to improve transcription quality."""
+         try:
+             # Load audio file
+             audio = AudioSegment.from_file(file_path)
+
+             # Normalize the audio (adjusts volume to an optimal level)
+             normalized_audio = normalize(audio)
+
+             # Create a temporary file for the normalized audio
+             with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_file:
+                 normalized_audio.export(temp_file.name, format="wav")
+                 return temp_file.name
+         except Exception as e:
+             print(f"Warning: Audio normalization failed: {e}")
+             # Return the original file if normalization fails
+             return file_path
+
+     def transcribe(file, task):
+         # Normalize the audio before transcription
+         normalized_file = normalize_audio(file)
+
+         try:
+             outputs = pipe(normalized_file, batch_size=BATCH_SIZE, generate_kwargs={"task": task})
+             text = outputs["text"]
+             return text
+         finally:
+             # Clean up the temporary normalized file if one was created
+             if normalized_file != file and os.path.exists(normalized_file):
+                 try:
+                     os.unlink(normalized_file)
+                 except Exception as e:
+                     print(f"Warning: Could not delete temporary file {normalized_file}: {e}")
+
+     demo = gr.Blocks(
+         css="""
+         .large-textbox textarea {
+             font-size: 20px !important;
+             line-height: 1.6 !important;
+         }
+         """
+     )
+
+     mic_transcribe = gr.Interface(
+         fn=transcribe,
+         inputs=[
+             gr.Audio(sources=["microphone", "upload"], type="filepath"),
+             gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
+         ],
+         outputs=gr.Textbox(
+             label="Transcription",
+             lines=6,
+             max_lines=15,
+             min_width=400,
+             show_copy_button=True,
+             placeholder="Transcribed text will appear here...",
+             elem_classes=["large-textbox"]
+         ),
+         theme="huggingface",
+         title="Whisper Demo: Transcribe Audio",
+         description=(
+             "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the"
+             f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
+             " of arbitrary length."
+         ),
+         allow_flagging="never",
+     )
+
+     file_transcribe = gr.Interface(
+         fn=transcribe,
+         inputs=[
+             gr.Audio(sources=["upload"], label="Audio file", type="filepath"),
+             gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
+         ],
+         outputs=gr.Textbox(
+             label="Transcription",
+             lines=6,
+             max_lines=15,
+             min_width=400,
+             show_copy_button=True,
+             placeholder="Transcribed text will appear here...",
+             elem_classes=["large-textbox"]
+         ),
+         theme="huggingface",
+         title="Whisper Demo: Transcribe Audio",
+         description=(
+             "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the"
+             f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
+             " of arbitrary length."
+         ),
+         examples=[
+             ["./example1.wav", "transcribe"],
+         ],
+         cache_examples=True,
+         allow_flagging="never",
+     )
+
+     with demo:
+         gr.TabbedInterface([file_transcribe, mic_transcribe], ["Transcribe Audio File", "Transcribe Microphone"])
+
+     demo.launch(server_name="0.0.0.0", server_port=7860)
+
+
+ if __name__ == "__main__":
+     main()
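Aside: stripped of the Gradio UI and the pydub normalization step, the app's core is a stock 🤗 Transformers ASR pipeline. A minimal standalone sketch of that path, assuming the model id from the Dockerfile CMD and the bundled example1.wav (illustrative; not part of this commit):

import torch
from transformers import pipeline

# Same pipeline configuration app.py builds, minus the UI.
device = 0 if torch.cuda.is_available() else "cpu"
pipe = pipeline(
    task="automatic-speech-recognition",
    model="thewh1teagle/whisper-heb-ipa",  # model id passed via the Dockerfile CMD
    chunk_length_s=30,
    device=device,
)

# "transcribe" keeps output in the source representation (IPA for this fine-tune);
# "translate" would target English instead.
result = pipe("example1.wav", generate_kwargs={"task": "transcribe"})
print(result["text"])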
example1.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c0ec1ccd0339360929e254fd1d530154f9f307b1fc5af2878726f6ebf245673d
+ size 976974
requirements.txt ADDED
@@ -0,0 +1,441 @@
+ # This file was autogenerated by uv via the following command:
+ #    uv export --no-hashes --no-emit-project
+ absl-py==2.3.1
+     # via tensorboard
+ accelerate==1.10.1
+     # via whisper-heb-ipa
+ aiofiles==24.1.0
+     # via gradio
+ aiohappyeyeballs==2.6.1
+     # via aiohttp
+ aiohttp==3.12.15
+     # via fsspec
+ aiosignal==1.4.0
+     # via aiohttp
+ annotated-types==0.7.0
+     # via pydantic
+ anyio==4.10.0
+     # via
+     #   gradio
+     #   httpx
+     #   starlette
+ attrs==25.3.0
+     # via aiohttp
+ audioop-lts==0.2.2 ; python_full_version >= '3.13'
+     # via
+     #   gradio
+     #   standard-aifc
+     #   standard-sunau
+ audioread==3.0.1
+     # via librosa
+ brotli==1.1.0
+     # via gradio
+ certifi==2025.8.3
+     # via
+     #   httpcore
+     #   httpx
+     #   requests
+     #   sentry-sdk
+ cffi==2.0.0
+     # via soundfile
+ charset-normalizer==3.4.3
+     # via requests
+ click==8.2.1
+     # via
+     #   jiwer
+     #   typer
+     #   uvicorn
+     #   wandb
+ colorama==0.4.6 ; sys_platform == 'win32'
+     # via
+     #   click
+     #   tqdm
+ datasets==4.1.0
+     # via
+     #   evaluate
+     #   whisper-heb-ipa
+ decorator==5.2.1
+     # via librosa
+ dill==0.4.0
+     # via
+     #   datasets
+     #   evaluate
+     #   multiprocess
+ evaluate==0.4.5
+     # via whisper-heb-ipa
+ fastapi==0.116.2
+     # via gradio
+ ffmpy==0.6.1
+     # via gradio
+ filelock==3.19.1
+     # via
+     #   datasets
+     #   huggingface-hub
+     #   torch
+     #   transformers
+ frozenlist==1.7.0
+     # via
+     #   aiohttp
+     #   aiosignal
+ fsspec==2025.9.0
+     # via
+     #   datasets
+     #   evaluate
+     #   gradio-client
+     #   huggingface-hub
+     #   torch
+ gitdb==4.0.12
+     # via gitpython
+ gitpython==3.1.45
+     # via wandb
+ gradio==5.46.0
+     # via whisper-heb-ipa
+ gradio-client==1.13.0
+     # via gradio
+ groovy==0.1.2
+     # via gradio
+ grpcio==1.75.0
+     # via tensorboard
+ h11==0.16.0
+     # via
+     #   httpcore
+     #   uvicorn
+ hf-xet==1.1.10 ; platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'
+     # via huggingface-hub
+ httpcore==1.0.9
+     # via httpx
+ httpx==0.28.1
+     # via
+     #   gradio
+     #   gradio-client
+     #   safehttpx
+ huggingface-hub==0.35.0
+     # via
+     #   accelerate
+     #   datasets
+     #   evaluate
+     #   gradio
+     #   gradio-client
+     #   tokenizers
+     #   transformers
+     #   whisper-heb-ipa
+ idna==3.10
+     # via
+     #   anyio
+     #   httpx
+     #   requests
+     #   yarl
+ jinja2==3.1.6
+     # via
+     #   gradio
+     #   torch
+ jiwer==4.0.0
+     # via whisper-heb-ipa
+ joblib==1.5.2
+     # via
+     #   librosa
+     #   scikit-learn
+ lazy-loader==0.4
+     # via librosa
+ librosa==0.11.0
+     # via whisper-heb-ipa
+ llvmlite==0.44.0
+     # via numba
+ markdown==3.9
+     # via tensorboard
+ markdown-it-py==4.0.0
+     # via rich
+ markupsafe==3.0.2
+     # via
+     #   gradio
+     #   jinja2
+     #   werkzeug
+ mdurl==0.1.2
+     # via markdown-it-py
+ more-itertools==10.8.0
+     # via openai-whisper
+ mpmath==1.3.0
+     # via sympy
+ msgpack==1.1.1
+     # via librosa
+ multidict==6.6.4
+     # via
+     #   aiohttp
+     #   yarl
+ multiprocess==0.70.16
+     # via
+     #   datasets
+     #   evaluate
+ networkx==3.5
+     # via torch
+ numba==0.61.2
+     # via
+     #   librosa
+     #   openai-whisper
+ numpy==2.2.6
+     # via
+     #   accelerate
+     #   datasets
+     #   evaluate
+     #   gradio
+     #   librosa
+     #   numba
+     #   openai-whisper
+     #   pandas
+     #   scikit-learn
+     #   scipy
+     #   soundfile
+     #   soxr
+     #   tensorboard
+     #   transformers
+ nvidia-cublas-cu12==12.8.4.1 ; platform_machine == 'x86_64' and sys_platform == 'linux'
+     # via
+     #   nvidia-cudnn-cu12
+     #   nvidia-cusolver-cu12
+     #   torch
+ nvidia-cuda-cupti-cu12==12.8.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
+     # via torch
+ nvidia-cuda-nvrtc-cu12==12.8.93 ; platform_machine == 'x86_64' and sys_platform == 'linux'
+     # via torch
+ nvidia-cuda-runtime-cu12==12.8.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
+     # via torch
+ nvidia-cudnn-cu12==9.10.2.21 ; platform_machine == 'x86_64' and sys_platform == 'linux'
+     # via torch
+ nvidia-cufft-cu12==11.3.3.83 ; platform_machine == 'x86_64' and sys_platform == 'linux'
+     # via torch
+ nvidia-cufile-cu12==1.13.1.3 ; platform_machine == 'x86_64' and sys_platform == 'linux'
+     # via torch
+ nvidia-curand-cu12==10.3.9.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
+     # via torch
+ nvidia-cusolver-cu12==11.7.3.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
+     # via torch
+ nvidia-cusparse-cu12==12.5.8.93 ; platform_machine == 'x86_64' and sys_platform == 'linux'
+     # via
+     #   nvidia-cusolver-cu12
+     #   torch
+ nvidia-cusparselt-cu12==0.7.1 ; platform_machine == 'x86_64' and sys_platform == 'linux'
+     # via torch
+ nvidia-nccl-cu12==2.27.3 ; platform_machine == 'x86_64' and sys_platform == 'linux'
+     # via torch
+ nvidia-nvjitlink-cu12==12.8.93 ; platform_machine == 'x86_64' and sys_platform == 'linux'
+     # via
+     #   nvidia-cufft-cu12
+     #   nvidia-cusolver-cu12
+     #   nvidia-cusparse-cu12
+     #   torch
+ nvidia-nvtx-cu12==12.8.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
+     # via torch
+ openai-whisper==20250625
+     # via whisper-heb-ipa
+ orjson==3.11.3
+     # via gradio
+ packaging==25.0
+     # via
+     #   accelerate
+     #   datasets
+     #   evaluate
+     #   gradio
+     #   gradio-client
+     #   huggingface-hub
+     #   lazy-loader
+     #   pooch
+     #   tensorboard
+     #   transformers
+     #   wandb
+ pandas==2.3.2
+     # via
+     #   datasets
+     #   evaluate
+     #   gradio
+     #   whisper-heb-ipa
+ pillow==11.3.0
+     # via
+     #   gradio
+     #   tensorboard
+ platformdirs==4.4.0
+     # via
+     #   pooch
+     #   wandb
+ pooch==1.8.2
+     # via librosa
+ propcache==0.3.2
+     # via
+     #   aiohttp
+     #   yarl
+ protobuf==6.32.1
+     # via
+     #   tensorboard
+     #   wandb
+ psutil==7.0.0
+     # via accelerate
+ pyarrow==21.0.0
+     # via datasets
+ pycparser==2.23 ; implementation_name != 'PyPy'
+     # via cffi
+ pydantic==2.11.9
+     # via
+     #   fastapi
+     #   gradio
+     #   wandb
+ pydantic-core==2.33.2
+     # via pydantic
+ pydub==0.25.1
+     # via
+     #   gradio
+     #   whisper-heb-ipa
+ pygments==2.19.2
+     # via rich
+ python-dateutil==2.9.0.post0
+     # via pandas
+ python-multipart==0.0.20
+     # via gradio
+ pytz==2025.2
+     # via pandas
+ pyyaml==6.0.2
+     # via
+     #   accelerate
+     #   datasets
+     #   gradio
+     #   huggingface-hub
+     #   transformers
+     #   wandb
+ rapidfuzz==3.14.1
+     # via jiwer
+ regex==2025.9.1
+     # via
+     #   tiktoken
+     #   transformers
+ requests==2.32.5
+     # via
+     #   datasets
+     #   evaluate
+     #   huggingface-hub
+     #   pooch
+     #   tiktoken
+     #   transformers
+     #   wandb
+ rich==14.1.0
+     # via typer
+ ruff==0.13.0
+     # via gradio
+ safehttpx==0.1.6
+     # via gradio
+ safetensors==0.6.2
+     # via
+     #   accelerate
+     #   transformers
+ scikit-learn==1.7.2
+     # via librosa
+ scipy==1.16.2
+     # via
+     #   librosa
+     #   scikit-learn
+ semantic-version==2.10.0
+     # via gradio
+ sentry-sdk==2.38.0
+     # via wandb
+ setuptools==80.9.0
+     # via
+     #   tensorboard
+     #   torch
+     #   triton
+ shellingham==1.5.4
+     # via typer
+ six==1.17.0
+     # via python-dateutil
+ smmap==5.0.2
+     # via gitdb
+ sniffio==1.3.1
+     # via anyio
+ soundfile==0.13.1
+     # via librosa
+ soxr==1.0.0
+     # via librosa
+ standard-aifc==3.13.0 ; python_full_version >= '3.13'
+     # via librosa
+ standard-chunk==3.13.0 ; python_full_version >= '3.13'
+     # via standard-aifc
+ standard-sunau==3.13.0 ; python_full_version >= '3.13'
+     # via librosa
+ starlette==0.48.0
+     # via
+     #   fastapi
+     #   gradio
+ sympy==1.14.0
+     # via torch
+ tensorboard==2.20.0
+     # via whisper-heb-ipa
+ tensorboard-data-server==0.7.2
+     # via tensorboard
+ threadpoolctl==3.6.0
+     # via scikit-learn
+ tiktoken==0.11.0
+     # via openai-whisper
+ tokenizers==0.22.0
+     # via transformers
+ tomlkit==0.13.3
+     # via gradio
+ torch==2.8.0
+     # via
+     #   accelerate
+     #   datasets
+     #   openai-whisper
+     #   torchaudio
+ torchaudio==2.8.0
+     # via whisper-heb-ipa
+ torchcodec==0.7.0
+     # via datasets
+ tqdm==4.67.1
+     # via
+     #   datasets
+     #   evaluate
+     #   huggingface-hub
+     #   openai-whisper
+     #   transformers
+ transformers==4.56.1
+     # via whisper-heb-ipa
+ triton==3.4.0 ; (platform_machine == 'x86_64' and sys_platform == 'linux') or sys_platform == 'linux2'
+     # via
+     #   openai-whisper
+     #   torch
+ typer==0.17.4
+     # via gradio
+ typing-extensions==4.15.0
+     # via
+     #   aiosignal
+     #   anyio
+     #   fastapi
+     #   gradio
+     #   gradio-client
+     #   grpcio
+     #   huggingface-hub
+     #   librosa
+     #   pydantic
+     #   pydantic-core
+     #   starlette
+     #   torch
+     #   typer
+     #   typing-inspection
+     #   wandb
+ typing-inspection==0.4.1
+     # via pydantic
+ tzdata==2025.2
+     # via pandas
+ urllib3==2.5.0
+     # via
+     #   requests
+     #   sentry-sdk
+ uvicorn==0.35.0
+     # via gradio
+ wandb==0.21.4
+     # via whisper-heb-ipa
+ websockets==15.0.1
+     # via gradio-client
+ werkzeug==3.1.3
+     # via tensorboard
+ xxhash==3.5.0
+     # via
+     #   datasets
+     #   evaluate
+ yarl==1.20.1
+     # via aiohttp