rbgo committed
Commit a8a8f61 · verified · 1 Parent(s): 30c775e

Update app.py

Files changed (1)
  app.py +367 -131
app.py CHANGED
@@ -1,152 +1,388 @@
  # ---------------------------------------------------------------
- # app.py – “TTS Showcase” (static-audio-only Streamlit demo)
  # ---------------------------------------------------------------
  import os
- import streamlit as st

- # ---------- 1. Page-wide settings ----------
- st.set_page_config(
-     page_title="🔊 TTS Showcase",
-     page_icon="🎧",
-     layout="wide"
- )
-
- # ---------- 2. Demo metadata ----------
  MODELS = {
-     "nari-labs/Dia-1.6B" : "Dia-1.6B",
-     "hexgrad/Kokoro-82M" : "Kokoro 82M",
-     "sesame/csm-1b" : "CSM 1B",
-     "SparkAudio/Spark-TTS-0.5B" : "Spark-TTS 0.5B",
-     "canopylabs/orpheus-3b-0.1-ft" : "Orpheus3b-0.1-ft",
-     "SWivid/F5-TTS" : "F5-TTS",
-     "Zyphra/Zonos-v0.1-transformer" : "Zonos v0.1",
-     "coqui/XTTS-v2" : "XTTS-v2",
-     "HKUSTAudio/Llasa-3B" : "Llasa 3B",
-     "amphion/MaskGCT" : "MaskGCT",
-     "OuteAI/Llama-OuteTTS-1.0-1B" : "Llama-OuteTTS-1.0-1B",
-     "ByteDance/MegaTTS3" : "MegaTTS 3"
  }

  # Folder that contains subfolders with the audio clips
- SAMPLES_DIR = "samples"   # <- change if yours is different
- CLIP_NAME = "generated-audio.wav"   # <- your agreed filename

- # ---------- 3. Light CSS glow-up ----------
- st.markdown(
-     """
-     <style>
-     /* Wider central column & soft grey background */
-     .block-container { padding-top: 2rem; }
-     body { background: #f5f7fa; }
-
-     /* Simple card look */
-     .tts-card {
-         background: #ffffff;
-         border-radius: 12px;
-         padding: 1.2rem 1rem;
-         box-shadow: 0 2px 8px rgba(0,0,0,.04);
-         margin-bottom: 1.5rem;
-     }
-     .tts-title {
-         font-weight: 600;
-         font-size: 1.05rem;
-         margin-bottom: .5rem;
-     }
-     audio { width: 100%; }   /* Full-width players */
-     </style>
-     """,
-     unsafe_allow_html=True
- )
-
- st.markdown(
-     """
-     <style>
-     /* (-- existing styles here --) */
-
-     /* ---------- Inferless banner ---------- */
-     #inferless-banner{
-         display:flex;
-         align-items:center;
-         gap:.5rem;
-         margin-top:2rem;
-         font-size:.85rem;
-         color:#555;
-         opacity:.8;
-     }
-     #inferless-banner img{
-         height:24px;          /* 👈 nice & small */
-         width:24px;
-         object-fit:contain;
-         border-radius:4px;    /* optional: soft corners */
-     }
-     .inferless-text{
-         letter-spacing:.2px;
-         font-weight:500;
-     }
-     </style>
-     """,
-     unsafe_allow_html=True
- )
-
-
- st.markdown(
-     """
-     <div id="inferless-banner">
-         <img src="https://i.tracxn.com/logo/company/1678863153264_9e6a9a4d-b955-42b3-895e-b94ade13c997.jpeg?format=webp&height=120&width=120" alt="Inferless Logo">
-         <div class="inferless-text">Powered by Inferless</div>
      </div>
-     """,
-     unsafe_allow_html=True
- )

- # ---------- 4. Header & optional quick-filter ----------
- st.title("🎙️ Open-Source Text to Speech Model Gallery")

- with st.expander("ℹ️ About this demo", expanded=True):
-     st.write(
-         """
-         * 12 popular TTS checkpoints, each with a single **_pre-synthesised_** sample
-         """
-     )

- filter_text = st.text_input(
-     "Filter models… (e.g. “coqui” or “3B”)",
-     placeholder="Search Model",
-     label_visibility="collapsed"
- ).lower().strip()

- # ---------- 5. Render cards in a responsive 3-column grid ----------
- COLS_PER_ROW = 3
- cols = st.columns(COLS_PER_ROW)

- def repo_to_slug(repo: str) -> str:
-     """huggingface/xxx -> huggingface_xxx (for folder naming)."""
-     return repo.replace("/", "_")

- visible_models = [
-     (repo, nice_name)
-     for repo, nice_name in MODELS.items()
-     if filter_text in repo.lower() or filter_text in nice_name.lower()
- ]
-
- if not visible_models:
-     st.warning("No models match that filter.")
- else:
-     for idx, (repo, display_name) in enumerate(visible_models):
-         with cols[idx % COLS_PER_ROW]:
-             with st.container():
-                 st.markdown("<div class='tts-card'>", unsafe_allow_html=True)
-                 st.markdown(f"<div class='tts-title'>🎧 {display_name}</div>", unsafe_allow_html=True)
-
-                 # Resolved path: samples/<repo-as-slug>/generated-audio.wav
-                 audio_path = os.path.join(SAMPLES_DIR, repo_to_slug(repo), CLIP_NAME)
-                 if os.path.isfile(audio_path):
-                     st.audio(audio_path)
-                 else:
-                     st.error("Sample clip not found 🤷‍♂️")

-                 st.markdown("</div>", unsafe_allow_html=True)

  # ---------------------------------------------------------------
+ # app.py – "TTS Showcase" (Gradio Implementation)
  # ---------------------------------------------------------------
  import os
+ import gradio as gr

+ # ---------- 1. Demo metadata ----------
  MODELS = {
+     "nari-labs/Dia-1.6B": "Dia-1.6B",
+     "hexgrad/Kokoro-82M": "Kokoro-82M",
+     "sesame/csm-1b": "csm-1b",
+     "SparkAudio/Spark-TTS-0.5B": "Spark-TTS-0.5B",
+     "canopylabs/orpheus-3b-0.1-ft": "Orpheus-3b-0.1-ft",
+     "SWivid/F5-TTS": "F5-TTS",
+     "Zyphra/Zonos-v0.1-transformer": "Zonos-v0.1-transformer",
+     "coqui/XTTS-v2": "XTTS-v2",
+     "HKUSTAudio/Llasa-3B": "Llasa-3B",
+     "amphion/MaskGCT": "MaskGCT",
+     "ByteDance/MegaTTS3": "MegaTTS3",
+     "OuteAI/Llama-OuteTTS-1.0-1B": "Llama-OuteTTS-1.0-1B"
+ }
+
+ # Performance ratings for each model
+ MODEL_RATINGS = {
+     "nari-labs/Dia-1.6B": {"naturalness": "Good", "intelligibility": "Moderate", "controllability": "Good"},
+     "hexgrad/Kokoro-82M": {"naturalness": "Good", "intelligibility": "Excellent", "controllability": "Moderate"},
+     "sesame/csm-1b": {"naturalness": "Excellent", "intelligibility": "Excellent", "controllability": "Good"},
+     "SparkAudio/Spark-TTS-0.5B": {"naturalness": "Excellent", "intelligibility": "Excellent", "controllability": "Moderate"},
+     "canopylabs/orpheus-3b-0.1-ft": {"naturalness": "Excellent", "intelligibility": "Excellent", "controllability": "Moderate"},
+     "SWivid/F5-TTS": {"naturalness": "Excellent", "intelligibility": "Excellent", "controllability": "Good"},
+     "Zyphra/Zonos-v0.1-transformer": {"naturalness": "Good", "intelligibility": "Moderate", "controllability": "Excellent"},
+     "coqui/XTTS-v2": {"naturalness": "Good", "intelligibility": "Excellent", "controllability": "Moderate"},
+     "HKUSTAudio/Llasa-3B": {"naturalness": "Excellent", "intelligibility": "Good", "controllability": "Moderate"},
+     "amphion/MaskGCT": {"naturalness": "Good", "intelligibility": "Excellent", "controllability": "Moderate"},
+     "OuteAI/Llama-OuteTTS-1.0-1B": {"naturalness": "Moderate", "intelligibility": "Moderate", "controllability": "Moderate"},
+     "ByteDance/MegaTTS3": {"naturalness": "Good", "intelligibility": "Good", "controllability": "Moderate"}
+ }
+
+ # Model descriptions for better understanding
+ MODEL_DESCRIPTIONS = {
+     "nari-labs/Dia-1.6B": "Expressive conversational voice with moderate quality",
+     "hexgrad/Kokoro-82M": "Lightweight powerhouse with excellent clarity",
+     "sesame/csm-1b": "High-quality synthesis with excellent naturalness",
+     "SparkAudio/Spark-TTS-0.5B": "Efficient model with excellent performance",
+     "canopylabs/orpheus-3b-0.1-ft": "Fine-tuned large model with superior quality",
+     "SWivid/F5-TTS": "Advanced flow-based synthesis with top ratings",
+     "Zyphra/Zonos-v0.1-transformer": "Highly controllable transformer-based model",
+     "coqui/XTTS-v2": "Multi-lingual excellence with proven performance",
+     "HKUSTAudio/Llasa-3B": "Large-scale audio synthesis model",
+     "amphion/MaskGCT": "Masked generative modeling approach",
+     "OuteAI/Llama-OuteTTS-1.0-1B": "LLM-based TTS with moderate performance",
+     "ByteDance/MegaTTS3": "Industrial-grade TTS solution"
  }

  # Folder that contains subfolders with the audio clips
+ SAMPLES_DIR = "samples"
+ CLIP_NAME = "generated-audio.wav"

+ # Test prompt used for evaluation
+ TEST_PROMPT = "Hello, this is a universal test sentence. Can the advanced Zylophonic system clearly articulate this and express a hint of excitement? The quick brown fox certainly hopes so!"
+
+ def repo_to_slug(repo: str) -> str:
+     """Convert huggingface/xxx to huggingface_xxx for folder naming."""
+     return repo.replace("/", "_")
+
+ def get_rating_emoji(rating: str) -> str:
+     """Convert rating to emoji."""
+     if rating == "Excellent":
+         return "🟢"
+     elif rating == "Good":
+         return "🟡"
+     else:
+         return "🟠"
+
+ def get_audio_path(repo: str) -> str:
+     """Get the audio file path for a given repository."""
+     audio_path = os.path.join(SAMPLES_DIR, repo_to_slug(repo), CLIP_NAME)
+     return audio_path if os.path.isfile(audio_path) else None
+
+ def filter_models(search_term: str):
+     """Filter models based on search term."""
+     if not search_term.strip():
+         return list(MODELS.keys())
+
+     search_lower = search_term.lower().strip()
+     return [
+         repo for repo, name in MODELS.items()
+         if search_lower in repo.lower() or search_lower in name.lower()
+     ]
+
+ def create_model_card(repo: str) -> str:
+     """Create a formatted model card with ratings and description."""
+     display_name = MODELS[repo]
+     description = MODEL_DESCRIPTIONS.get(repo, "High-quality TTS model")
+     ratings = MODEL_RATINGS.get(repo, {})
+
+     card_html = f"""
+     <div class="model-card" style="border: 1px solid #ddd; border-radius: 12px; padding: 20px; margin: 10px 0; background: white;">
+         <h3 style="color: #2c3e50; margin-top: 0;">🎤 {display_name}</h3>
+         <div style="display: flex; gap: 15px; margin: 15px 0;">
+             <span style="color: #888;"><strong style="color: #888;">Naturalness:</strong> {get_rating_emoji(ratings.get('naturalness', 'Moderate'))} {ratings.get('naturalness', 'Moderate')}</span>
+             <span style="color: #888;"><strong style="color: #888;">Intelligibility:</strong> {get_rating_emoji(ratings.get('intelligibility', 'Moderate'))} {ratings.get('intelligibility', 'Moderate')}</span>
+             <span style="color: #888;"><strong style="color: #888;">Controllability:</strong> {get_rating_emoji(ratings.get('controllability', 'Moderate'))} {ratings.get('controllability', 'Moderate')}</span>
+         </div>
+
+         <p style="font-size: 0.9em; color: #888; margin: 5px 0;">Repository: <code style="color: #888;">{repo}</code></p>
      </div>
+     """
+     return card_html
+
+ # ---------- 2. Custom CSS ----------
+ custom_css = """
+ #title {
+     text-align: center;
+     background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
+     color: white;
+     padding: 2rem;
+     border-radius: 15px;
+     margin-bottom: 2rem;
+ }
+
+ #intro-section {
+     background: #f8f9fa;
+     color: #2c3e50;
+     padding: 1.5rem;
+     border-radius: 10px;
+     margin: 1rem 0;
+     border-left: 4px solid #667eea;
+ }

+ #intro-section h2,
+ #intro-section h3 {
+     color: #2c3e50;
+ }

+ #intro-section p {
+     color: #34495e;
+ }

+ #intro-section ul li {
+     color: #34495e;
+ }

+ #intro-section .mission-text {
+     color: #667eea !important;
+     font-weight: bold;
+     text-align: center;
+ }

+ #intro-section strong {
+     color: #2c3e50 !important;
+ }

+ #intro-section em {
+     color: #2c3e50 !important;
+ }

+ #intro-section .mission-text strong {
+     color: #667eea !important;
+ }

+ #test-prompt {
+     background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
+     color: white;
+     padding: 1.5rem;
+     border-radius: 10px;
+     text-align: center;
+     margin: 1rem 0;
+ }
+
+ .model-grid {
+     display: grid;
+     grid-template-columns: repeat(auto-fit, minmax(400px, 1fr));
+     gap: 1rem;
+     margin: 1rem 0;
+ }
+
+ #footer {
+     text-align: center;
+     padding: 2rem;
+     color: #666;
+     border-top: 1px solid #eee;
+     margin-top: 2rem;
+ }

+ /* make all the text in our white-background cards dark */
+ .model-grid .gr-html * {
+     color: #2c3e50 !important;
+ }
+
+ .model-card {
+     background: white;
+     color: #2c3e50 !important;
+     border: 1px solid #ddd;
+     border-radius: 12px;
+     padding: 20px;
+     margin: 10px 0;
+ }

+ """
+
+ # ---------- 3. Main Gradio Interface ----------
+ def create_interface():
+     with gr.Blocks(css=custom_css, title="🎙️ TTS Model Gallery", theme=gr.themes.Soft()) as demo:
+
+         # Header Section
+         gr.HTML("""
+         <div id="title">
+             <h1>🎙️ Open-Source Text-to-Speech Model Gallery</h1>
+         </div>
+         """)
+
+         # Introduction Section
+         gr.HTML("""
+         <div id="intro-section">
+             <h3>🔬 Our Exciting Quest</h3>
+             <p>We're on a thrilling journey to help developers discover the perfect TTS models for their innovative audio projects!
+             We've put these 12 cutting-edge models through their paces using a scientifically designed universal test prompt.</p>
+
+             <p><strong>Featured TTS Engines:</strong></p>
+             <ul>
+                 <li>🎭 <strong>Dia-1.6B</strong> - Expressive conversational voice</li>
+                 <li>🎪 <strong>Kokoro-82M</strong> - Lightweight powerhouse</li>
+                 <li>🎨 <strong>F5-TTS</strong> - Advanced flow-based synthesis</li>
+                 <li>🎵 <strong>XTTS-v2</strong> - Multi-lingual excellence</li>
+                 <li>🎼 <strong>MaskGCT</strong> - Masked generative modeling</li>
+                 <li>🎤 <strong>Llasa-3B</strong> - Large-scale audio synthesis</li>
+                 <li><em>...and 6 more incredible models!</em></li>
+             </ul>
+
+         </div>
+         """)
+
+         # Test Prompt Section
+         # gr.HTML(f"""
+         # <div id="test-prompt">
+         #     <h3>🎯 Universal Test Prompt</h3>
+         #     <p style="font-style: italic; font-size: 1.1em;">"{TEST_PROMPT}"</p>
+         #     <p style="font-size: 0.9em; opacity: 0.9;">
+         #         Carefully crafted to test naturalness, intelligibility, and technical pronunciation across all models
+         #     </p>
+         # </div>
+         # """)
+
+         # Evaluation Criteria
+         with gr.Row():
+             with gr.Column():
+                 gr.HTML("""
+                 <div style="text-align: center; padding: 1rem; background: rgba(102, 126, 234, 0.1); border-radius: 8px;">
+                     <div style="font-size: 2rem;">🎭</div>
+                     <strong>Naturalness</strong><br>
+                     <small>Human-like quality & emotional expression</small>
+                 </div>
+                 """)
+             with gr.Column():
+                 gr.HTML("""
+                 <div style="text-align: center; padding: 1rem; background: rgba(102, 126, 234, 0.1); border-radius: 8px;">
+                     <div style="font-size: 2rem;">🗣️</div>
+                     <strong>Intelligibility</strong><br>
+                     <small>Clarity & pronunciation accuracy</small>
+                 </div>
+                 """)
+             with gr.Column():
+                 gr.HTML("""
+                 <div style="text-align: center; padding: 1rem; background: rgba(102, 126, 234, 0.1); border-radius: 8px;">
+                     <div style="font-size: 2rem;">🎛️</div>
+                     <strong>Controllability</strong><br>
+                     <small>Tone, pace & parameter flexibility</small>
+                 </div>
+                 """)
+
+         gr.Markdown("---")
+
+         # Search and Filter Section
+         with gr.Row():
+             search_box = gr.Textbox(
+                 label="🔍 Search Models",
+                 placeholder="Filter by name or family (e.g., 'F5', 'TTS', '3B')",
+                 value="",
+                 scale=3
+             )
+             clear_btn = gr.Button("Clear", scale=1)
+
+         # Model Gallery Section
+         gr.Markdown("## 🎧 Model Gallery")
+
+         # Create model cards and audio players
+         model_components = []
+
+         for repo, display_name in MODELS.items():
+             with gr.Group():
+                 # Model information card
+                 model_info = gr.HTML(create_model_card(repo))
+
+                 # Audio player
+                 audio_path = get_audio_path(repo)
+                 if audio_path:
+                     audio_player = gr.Audio(
+                         value=audio_path,
+                         label=f"🎵 {display_name} Audio Sample",
+                         interactive=False
+                     )
+                 else:
+                     audio_player = gr.HTML(f"<p style='color: red;'>🤷‍♂️ Audio sample not found for {display_name}</p>")
+
+                 model_components.append((repo, model_info, audio_player))
+
+         # Search functionality
+         def update_visibility(search_term):
+             filtered_repos = filter_models(search_term)
+             updates = []
+
+             for repo, model_info, audio_player in model_components:
+                 visible = repo in filtered_repos
+                 updates.extend([
+                     gr.update(visible=visible),  # model_info
+                     gr.update(visible=visible)   # audio_player
+                 ])
+
+             return updates
+
+         # Connect search functionality
+         search_box.change(
+             fn=update_visibility,
+             inputs=[search_box],
+             outputs=[comp for repo, model_info, audio_player in model_components for comp in [model_info, audio_player]]
+         )
+
+         clear_btn.click(
+             fn=lambda: "",
+             outputs=[search_box]
+         )
+
+         # Methodology Section
+         with gr.Accordion("📋 Detailed Evaluation Methodology", open=False):
+             gr.Markdown("""
+             ### Test Prompt
+
+             `Hello, this is a universal test sentence. Can the advanced Zylophonic system clearly articulate this and express a hint of excitement? The quick brown fox certainly hopes so!`
+
+             ### Model Evaluation Criteria:
+
+             🎭 **Naturalness (Human-like Quality)**
+             - Prosody and rhythm patterns
+             - Emotional expression capability
+             - Voice texture and warmth
+             - Natural breathing and pauses
+
+             🗣️ **Intelligibility (Clarity & Accuracy)**
+             - Word pronunciation precision
+             - Consonant and vowel clarity
+             - Sentence comprehensibility
+             - Technical term handling
+
+             🎛️ **Controllability (Flexibility)**
+             - Parameter responsiveness
+             - Tone modification capability
+             - Speed and pitch control
+             - Customization potential
+
+             ### Key Insights:
+             - Smaller models (82M-500M) can excel in specific scenarios
+             - Larger models (1B-3B+) offer more versatility but require more resources
+             - Architecture matters as much as parameter count
+             - Training data quality significantly impacts output quality
+             """)
+
+         # Footer
+         # gr.HTML("""
+         # <div id="footer">
+         #     <p><strong>🚀 Ready to deploy your own TTS model?</strong></p>
+         #     <p>This demo showcases the power of open-source TTS technology. Each model offers unique strengths for different applications.</p>
+         #     <p><em>Built with ❤️ using Gradio • All models are open-source and available on Hugging Face</em></p>
+         #     <p>⚡ Powered by Inferless</p>
+         # </div>
+         # """)
+
+     return demo

+ # ---------- 4. Launch the application ----------
+ if __name__ == "__main__":
+     demo = create_interface()
+     demo.launch(
+         share=True,
+         inbrowser=True,
+         show_error=True
+     )