Update app.py
Browse files
app.py
CHANGED
|
@@ -1,7 +1,7 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
import pandas as pd
|
| 3 |
|
| 4 |
-
banner_url = "
|
| 5 |
BANNER = f'<div style="display: flex; justify-content: space-around;"><img src="{banner_url}" alt="Banner" style="width: 20vw; min-width: 300px; max-width: 600px;"> </div>'
|
| 6 |
|
| 7 |
INTRODUCTION_TEXT = """
|
|
@@ -11,13 +11,8 @@ INTRODUCTION_TEXT = """
|
|
| 11 |
\nFor more detailed analysis such as models' robustness, speaker adaptation, model efficiency and memory usage, please check our paper.
|
| 12 |
"""
|
| 13 |
|
| 14 |
-
CITATION_BUTTON_TEXT = """
|
| 15 |
-
|
| 16 |
-
author = {???},
|
| 17 |
-
year = ???,
|
| 18 |
-
publisher = {???},
|
| 19 |
-
howpublished = "???"
|
| 20 |
-
}
|
| 21 |
"""
|
| 22 |
|
| 23 |
METRICS_TAB_TEXT = METRICS_TAB_TEXT = """
|
|
@@ -25,7 +20,7 @@ METRICS_TAB_TEXT = METRICS_TAB_TEXT = """
|
|
| 25 |
We report both the Word Error Rate (WER) and Character Error Rate (CER) metrics.
|
| 26 |
## Reproduction
|
| 27 |
The Open Universal Arabic ASR Leaderboard will be a continuous benchmark project.
|
| 28 |
-
\nWe open-source the
|
| 29 |
\nPlease launch a discussion in our GitHub repo to let us know if you want to learn about the performance of a new model.
|
| 30 |
|
| 31 |
## Benchmark datasets
|
|
@@ -110,4 +105,4 @@ with gr.Blocks() as demo:
|
|
| 110 |
show_copy_button=True,
|
| 111 |
)
|
| 112 |
|
| 113 |
-
demo.launch()
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
import pandas as pd
|
| 3 |
|
| 4 |
+
banner_url = "file/banner.png"
|
| 5 |
BANNER = f'<div style="display: flex; justify-content: space-around;"><img src="{banner_url}" alt="Banner" style="width: 20vw; min-width: 300px; max-width: 600px;"> </div>'
|
| 6 |
|
| 7 |
INTRODUCTION_TEXT = """
|
|
|
|
| 11 |
\nFor more detailed analysis such as models' robustness, speaker adaptation, model efficiency and memory usage, please check our paper.
|
| 12 |
"""
|
| 13 |
|
| 14 |
+
CITATION_BUTTON_TEXT = """
|
| 15 |
+
???
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
"""
|
| 17 |
|
| 18 |
METRICS_TAB_TEXT = METRICS_TAB_TEXT = """
|
|
|
|
| 20 |
We report both the Word Error Rate (WER) and Character Error Rate (CER) metrics.
|
| 21 |
## Reproduction
|
| 22 |
The Open Universal Arabic ASR Leaderboard will be a continuous benchmark project.
|
| 23 |
+
\nWe open-source the evaluation scripts at our GitHub repo.
|
| 24 |
\nPlease launch a discussion in our GitHub repo to let us know if you want to learn about the performance of a new model.
|
| 25 |
|
| 26 |
## Benchmark datasets
|
|
|
|
| 105 |
show_copy_button=True,
|
| 106 |
)
|
| 107 |
|
| 108 |
+
demo.launch(allowed_paths=["banner.png"])
|