Steveeeeeeen (HF staff) committed
Commit 69dc62d · verified · 1 parent: f4c0d35

Upload 4 files

Files changed (4)
  1. app.py +118 -0
  2. constants.py +119 -0
  3. init.py +92 -0
  4. utils_display.py +64 -0
app.py ADDED
@@ -0,0 +1,118 @@
+ import gradio as gr
+ import pandas as pd
+ import json
+ from constants import BANNER, INTRODUCTION_TEXT, CITATION_TEXT, METRICS_TAB_TEXT, DIR_OUTPUT_REQUESTS, LEADERBOARD_CSS
+ from init import is_model_on_hub, upload_file, load_all_info_from_dataset_hub
+ from utils_display import AutoEvalColumn, fields, make_clickable_model, styled_error, styled_message
+ from datetime import datetime, timezone
+
+ LAST_UPDATED = "Nov 22nd 2024"
+
+ column_names = {
+     "model": "Model",
+     "Average WER ⬇️": "Average WER ⬇️",
+     "RTFx": "RTFx ⬆️️",
+     "Bulgarian_female": "Bulgarian female",
+     "Bulgarian_male": "Bulgarian male",
+     "Catalan_female": "Catalan female",
+     "Chinese_female": "Chinese female",
+     "Chinese_male": "Chinese male",
+     "Eastern_European_male": "Eastern European male",
+     "European_male": "European male",
+     "French_female": "French female",
+     "Ghanain_English_female": "Ghanaian English female",
+     "Indian_English_female": "Indian English female",
+     "Indian_English_male": "Indian English male",
+     "Indonesian_female": "Indonesian female",
+     "Irish_English_female": "Irish English female",
+     "Irish_English_male": "Irish English male",
+     "Israeli_male": "Israeli male",
+     "Italian_female": "Italian female",
+     "Jamaican_English_female": "Jamaican English female",
+     "Jamaican_English_male": "Jamaican English male",
+     "Kenyan_English_female": "Kenyan English female",
+     "Kenyan_English_male": "Kenyan English male",
+     "Latin_American_female": "Latin American female",
+     "Latin_American_male": "Latin American male",
+     "Lithuanian_male": "Lithuanian male",
+     "Mainstream_US_English_female": "Mainstream US English female",
+     "Mainstream_US_English_male": "Mainstream US English male",
+     "Nigerian_English_female": "Nigerian English female",
+     "Nigerian_English_male": "Nigerian English male",
+     "Romanian_female": "Romanian female",
+     "Scottish_English_male": "Scottish English male",
+     "Southern_British_English_male": "Southern British English male",
+     "Spanish_female": "Spanish female",
+     "Spanish_male": "Spanish male",
+     "Vietnamese_female": "Vietnamese female",
+     "Vietnamese_male": "Vietnamese male",
+ }
+
+ eval_queue_repo, requested_models, csv_results = load_all_info_from_dataset_hub()
+
+ if not csv_results.exists():
+     raise Exception(f"CSV file {csv_results} does not exist locally")
+
+ # Load the results CSV and parse its columns
+ original_df = pd.read_csv(csv_results)
+
+ # Format the columns: leave strings untouched, round numbers to two decimals
+ def formatter(x):
+     return x if isinstance(x, str) else round(x, 2)
+
+ for col in original_df.columns:
+     if col == "model":
+         original_df[col] = original_df[col].apply(make_clickable_model)
+     else:
+         original_df[col] = original_df[col].apply(formatter)  # For numerical values
+
+ original_df.rename(columns=column_names, inplace=True)
+ original_df.sort_values(by="Average WER ⬇️", inplace=True)
+
+ COLS = [c.name for c in fields(AutoEvalColumn)]
+ TYPES = [c.type for c in fields(AutoEvalColumn)]
+
+
+ with gr.Blocks(css=LEADERBOARD_CSS) as demo:
+     # gr.HTML(BANNER, elem_id="banner")
+     # Write a header with the title
+     gr.Markdown("<h1>🤗 Open Automatic Speech Recognition on EdAcc Dataset</h1>", elem_classes="markdown-text")
+     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
+
+     with gr.Tabs(elem_classes="tab-buttons") as tabs:
+         with gr.TabItem("🏅 Leaderboard", elem_id="od-benchmark-tab-table", id=0):
+             # Add column filter dropdown
+             column_filter = gr.Dropdown(
+                 choices=["All"] + [v for k, v in column_names.items() if k != "model"],
+                 label="Filter by column",
+                 multiselect=True,
+                 value=["All"],
+                 elem_id="column-filter",
+             )
+
+             leaderboard_table = gr.components.Dataframe(
+                 value=original_df,
+                 datatype=TYPES,
+                 elem_id="leaderboard-table",
+                 interactive=False,
+                 visible=True,
+             )
+
+             # Update table columns when the filter changes
+             def update_table(cols):
+                 if "All" in cols:
+                     return gr.Dataframe(value=original_df)
+                 selected_cols = ["Model"] + cols  # Always include the Model column
+                 return gr.Dataframe(value=original_df[selected_cols])
+
+             column_filter.change(
+                 fn=update_table,
+                 inputs=[column_filter],
+                 outputs=[leaderboard_table],
+             )
+
+ demo.launch(ssr_mode=False)
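For a quick sanity check of the formatting pipeline above without the private results CSV, here is a minimal sketch run against an invented in-memory frame. The model id and scores are placeholders, and the linkifying lambda inlines what `make_clickable_model` produces:

```python
# Minimal sketch of app.py's formatting pipeline on invented data.
import pandas as pd

df = pd.DataFrame({
    "model": ["openai/whisper-large-v3"],  # placeholder model id
    "Average WER ⬇️": [12.3456],
    "RTFx": [145.678],
})

# Linkify the model column (what make_clickable_model does), then round
# the metric columns and rename to the display headers, as app.py does.
df["model"] = df["model"].apply(
    lambda m: f'<a target="_blank" href="https://huggingface.co/{m}">{m}</a>'
)
for col in ("Average WER ⬇️", "RTFx"):
    df[col] = df[col].round(2)
df = df.rename(columns={"model": "Model", "RTFx": "RTFx ⬆️️"})
df = df.sort_values(by="Average WER ⬇️")
print(df)
```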
constants.py ADDED
@@ -0,0 +1,119 @@
+ from pathlib import Path
+
+ # Directory where model requests are stored
+ DIR_OUTPUT_REQUESTS = Path("requested_models")
+ EVAL_REQUESTS_PATH = Path("eval_requests")
+
+ ##########################
+ #    Text definitions    #
+ ##########################
+
+ banner_url = "https://huggingface.co/datasets/reach-vb/random-images/resolve/main/asr_leaderboard.png"
+ BANNER = f'<div style="display: flex; justify-content: space-around;"><img src="{banner_url}" alt="Banner" style="width: 40vw; min-width: 300px; max-width: 600px;"> </div>'
+
+ TITLE = "<html> <head> <style> h1 {text-align: center;} </style> </head> <body> <h1> 🤗 Open Automatic Speech Recognition Leaderboard </h1> </body> </html>"
+
+ INTRODUCTION_TEXT = "📐 Results on the [EdAcc Dataset](https://huggingface.co/datasets/edinburghcstr/edacc), split by accent and gender. \
+ \nWe report the Average [WER](https://huggingface.co/spaces/evaluate-metric/wer) (⬇️ lower is better) and [RTFx](https://github.com/NVIDIA/DeepLearningExamples/blob/master/Kaldi/SpeechRecognition/README.md#metrics) (⬆️ higher is better)."
+
+
+ CITATION_TEXT = """@misc{open-asr-leaderboard,
+     title = {Open Automatic Speech Recognition Leaderboard},
+     author = {Srivastav, Vaibhav and Majumdar, Somshubra and Koluguri, Nithin and Moumen, Adel and Gandhi, Sanchit and others},
+     year = 2023,
+     publisher = {Hugging Face},
+     howpublished = "\\url{https://huggingface.co/spaces/hf-audio/open_asr_leaderboard}"
+ }
+ """
+
+ METRICS_TAB_TEXT = """
+ Here you will find details about the speech recognition metrics and datasets reported in our leaderboard.
+
+ ## Metrics
+
+ Models are evaluated jointly using the Word Error Rate (WER) and Inverse Real Time Factor (RTFx) metrics. The WER metric
+ is used to assess the accuracy of a system, and the RTFx the inference speed. Models are ranked in the leaderboard based
+ on their WER, lowest to highest.
+
+ Crucially, the WER and RTFx values are computed for the same inference run using a single script. The implication of this is two-fold:
+ 1. The WER and RTFx values are coupled: for a given WER, one can expect to achieve the corresponding RTFx. This allows the user to trade off lower WER for higher RTFx should they wish.
+ 2. The WER and RTFx values are averaged over all audios in the benchmark (in the order of thousands of audios).
+
+ For details on reproducing the benchmark numbers, refer to the [Open ASR GitHub repository](https://github.com/huggingface/open_asr_leaderboard#evaluate-a-model).
+
+ ### Word Error Rate (WER)
+
+ Word Error Rate is used to measure the **accuracy** of automatic speech recognition systems. It calculates the percentage
+ of words in the system's output that differ from the reference (correct) transcript. **A lower WER value indicates higher accuracy**.
+
+ Take the following example:
+
+ | Reference:  | the | cat | sat     | on  | the | mat |
+ |-------------|-----|-----|---------|-----|-----|-----|
+ | Prediction: | the | cat | **sit** | on  | the |     |
+ | Label:      | ✅  | ✅  | S       | ✅  | ✅  | D   |
+
+ Here, we have:
+ * 1 substitution ("sit" instead of "sat")
+ * 0 insertions
+ * 1 deletion ("mat" is missing)
+
+ This gives 2 errors in total. To get our word error rate, we divide the total number of errors (substitutions + insertions + deletions) by the total number of words in our
+ reference (N), which for this example is 6:
+
+ ```
+ WER = (S + I + D) / N = (1 + 0 + 1) / 6 = 0.333
+ ```
+
+ This gives a WER of 0.33, or 33%. For a fair comparison, we calculate **zero-shot** (i.e. pre-trained models only) *normalised WER* for all the model checkpoints, meaning punctuation and casing are removed from the references and predictions. You can find the evaluation code on our [GitHub repository](https://github.com/huggingface/open_asr_leaderboard). To read more about how the WER is computed, refer to the [Audio Transformers Course](https://huggingface.co/learn/audio-course/chapter5/evaluation).
+
+ ### Inverse Real Time Factor (RTFx)
+
+ Inverse Real Time Factor is a measure of the **latency** of automatic speech recognition systems, i.e. how long it takes a
+ model to process a given amount of speech. It is defined as:
+ ```
+ RTFx = (number of seconds of audio inferred) / (compute time in seconds)
+ ```
+
+ Therefore, an RTFx of 1 means a system processes speech as fast as it's spoken, while an RTFx of 2 means it takes half the time.
+ Thus, **a higher RTFx value indicates lower latency**.
+
+ ## How to reproduce our results
+
+ The ASR Leaderboard will be a continued effort to benchmark open source/access speech recognition models where possible.
+ Along with the Leaderboard we're open-sourcing the codebase used for running these evaluations.
+ For more details head over to our repo at: https://github.com/huggingface/open_asr_leaderboard
+
+ P.S. We'd love to know which other models you'd like us to benchmark next. Contributions are more than welcome! ♥️
+
+ ## Benchmark datasets
+
+ Evaluating Speech Recognition systems is a hard problem. We use the multi-dataset benchmarking strategy proposed in the
+ [ESB paper](https://arxiv.org/abs/2210.13352) to obtain robust evaluation scores for each model.
+
+ ESB is a benchmark for evaluating the performance of a single automatic speech recognition (ASR) system across a broad
+ set of speech datasets. It comprises eight English speech recognition datasets, capturing a broad range of domains,
+ acoustic conditions, speaker styles, and transcription requirements. As such, it gives a better indication of how
+ a model is likely to perform on downstream ASR compared to evaluating it on one dataset alone.
+
+ The ESB score is calculated as a macro-average of the WER scores across the ESB datasets. The models in the leaderboard
+ are ranked based on their average WER scores, from lowest to highest.
+
+ | Dataset | Domain | Speaking Style | Train (h) | Dev (h) | Test (h) | Transcriptions | License |
+ |---------|--------|----------------|-----------|---------|----------|----------------|---------|
+ | [LibriSpeech](https://huggingface.co/datasets/librispeech_asr) | Audiobook | Narrated | 960 | 11 | 11 | Normalised | CC-BY-4.0 |
+ | [VoxPopuli](https://huggingface.co/datasets/facebook/voxpopuli) | European Parliament | Oratory | 523 | 5 | 5 | Punctuated | CC0 |
+ | [TED-LIUM](https://huggingface.co/datasets/LIUM/tedlium) | TED talks | Oratory | 454 | 2 | 3 | Normalised | CC-BY-NC-ND 3.0 |
+ | [GigaSpeech](https://huggingface.co/datasets/speechcolab/gigaspeech) | Audiobook, podcast, YouTube | Narrated, spontaneous | 2500 | 12 | 40 | Punctuated | apache-2.0 |
+ | [SPGISpeech](https://huggingface.co/datasets/kensho/spgispeech) | Financial meetings | Oratory, spontaneous | 4900 | 100 | 100 | Punctuated & Cased | User Agreement |
+ | [Earnings-22](https://huggingface.co/datasets/revdotcom/earnings22) | Financial meetings | Oratory, spontaneous | 105 | 5 | 5 | Punctuated & Cased | CC-BY-SA-4.0 |
+ | [AMI](https://huggingface.co/datasets/edinburghcstr/ami) | Meetings | Spontaneous | 78 | 9 | 9 | Punctuated & Cased | CC-BY-4.0 |
+
+ For more details on the individual datasets and how models are evaluated to give the ESB score, refer to the [ESB paper](https://arxiv.org/abs/2210.13352).
+ """
+
+ LEADERBOARD_CSS = """
+ #leaderboard-table th .header-content {
+     white-space: nowrap;
+ }
+ """
init.py ADDED
@@ -0,0 +1,92 @@
+ import os
+ from constants import EVAL_REQUESTS_PATH
+ from pathlib import Path
+ from huggingface_hub import HfApi, Repository
+
+ TOKEN_HUB = os.environ.get("TOKEN_HUB", None)
+ QUEUE_REPO = os.environ.get("QUEUE_REPO", None)
+ QUEUE_PATH = os.environ.get("QUEUE_PATH", None)
+
+ hf_api = HfApi(
+     endpoint="https://huggingface.co",
+     token=TOKEN_HUB,
+ )
+
+ def load_all_info_from_dataset_hub():
+     eval_queue_repo = None
+     requested_models = None
+
+     passed = True
+     if TOKEN_HUB is None:
+         passed = False
+     else:
+         print("Pulling evaluation requests and results.")
+
+         eval_queue_repo = Repository(
+             local_dir=QUEUE_PATH,
+             clone_from=QUEUE_REPO,
+             use_auth_token=TOKEN_HUB,
+             repo_type="dataset",
+         )
+         eval_queue_repo.git_pull()
+
+         # Local directory where the dataset repo is cloned + folder with eval requests
+         directory = Path(QUEUE_PATH) / EVAL_REQUESTS_PATH
+         requested_models = get_all_requested_models(directory)
+         requested_models = [p.stem for p in requested_models]
+         # Local directory where the dataset repo is cloned
+         csv_results = get_csv_with_results(QUEUE_PATH)
+         if csv_results is None:
+             passed = False
+     if not passed:
+         raise ValueError("No Hugging Face token provided or results CSV not found; cannot load evaluation requests and results.")
+
+     return eval_queue_repo, requested_models, csv_results
+
+
+ def upload_file(requested_model_name, path_or_fileobj):
+     dest_repo_file = str(Path(EVAL_REQUESTS_PATH) / path_or_fileobj.name)
+     hf_api.upload_file(
+         path_or_fileobj=path_or_fileobj,
+         path_in_repo=dest_repo_file,
+         repo_id=QUEUE_REPO,
+         token=TOKEN_HUB,
+         repo_type="dataset",
+         commit_message=f"Add {requested_model_name} to eval queue",
+     )
+
+ def get_all_requested_models(directory):
+     directory = Path(directory)
+     all_requested_models = list(directory.glob("*.txt"))
+     return all_requested_models
+
+ def get_csv_with_results(directory):
+     directory = Path(directory)
+     all_csv_files = list(directory.glob("*.csv"))
+     latest = [f for f in all_csv_files if f.stem.endswith("latest")]
+     if len(latest) != 1:
+         return None
+     return latest[0]
+
+
+ def is_model_on_hub(model_name, revision="main"):
+     # Returns (True, None) if the model exists on the Hub,
+     # otherwise (False, <error message>).
+     try:
+         model_name = model_name.replace(" ", "")
+         author = model_name.split("/")[0]
+         model_id = model_name.split("/")[1]
+         if len(author) == 0 or len(model_id) == 0:
+             return False, "is not a valid model name. Please use the format `author/model_name`."
+     except Exception:
+         return False, "is not a valid model name. Please use the format `author/model_name`."
+
+     try:
+         models = list(hf_api.list_models(author=author, search=model_id))
+         matched = [m for m in models if m.modelId == model_name]
+         if len(matched) != 1:
+             return False, "was not found on the hub!"
+         else:
+             return True, None
+     except Exception as e:
+         print(f"Could not get the model from the hub: {e}")
+         return False, "was not found on the hub!"
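A hypothetical local smoke test for these helpers; every value below is a placeholder, and the environment variables must be set before importing init.py, since the module reads them at import time:

```python
# Hypothetical smoke test for init.py; all values are placeholders.
import os

# init.py reads these at import time, so set them before importing it.
os.environ["TOKEN_HUB"] = "hf_xxx"                  # a Hugging Face access token
os.environ["QUEUE_REPO"] = "my-org/asr-eval-queue"  # dataset repo with requests + results CSV
os.environ["QUEUE_PATH"] = "eval_queue_local"       # local clone directory

from init import load_all_info_from_dataset_hub, is_model_on_hub

repo, requested_models, csv_results = load_all_info_from_dataset_hub()
print(csv_results)  # a *.csv whose stem ends in "latest", e.g. results_latest.csv

ok, err = is_model_on_hub("openai/whisper-large-v3")
print(ok, err)  # (True, None) if the model exists on the Hub
```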
utils_display.py ADDED
@@ -0,0 +1,64 @@
+ from dataclasses import dataclass
+
+ # These classes hold the user-facing column names, to avoid having to change them
+ # all around the code when a modification is needed
+ @dataclass
+ class ColumnContent:
+     name: str
+     type: str
+
+ def fields(raw_class):
+     # Return the class-level ColumnContent attributes, skipping dunders
+     return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
+
+ @dataclass(frozen=True)
+ class AutoEvalColumn:  # Auto evals column
+     model = ColumnContent("Model", "markdown")
+     avg_wer = ColumnContent("Average WER ⬇️", "number")
+     rtf = ColumnContent("RTFx ⬆️️", "number")
+     bulgarian_female = ColumnContent("Bulgarian female", "number")
+     bulgarian_male = ColumnContent("Bulgarian male", "number")
+     catalan_female = ColumnContent("Catalan female", "number")
+     chinese_female = ColumnContent("Chinese female", "number")
+     chinese_male = ColumnContent("Chinese male", "number")
+     eastern_european_male = ColumnContent("Eastern European male", "number")
+     european_male = ColumnContent("European male", "number")
+     french_female = ColumnContent("French female", "number")
+     ghanaian_english_female = ColumnContent("Ghanaian English female", "number")
+     indian_english_female = ColumnContent("Indian English female", "number")
+     indian_english_male = ColumnContent("Indian English male", "number")
+     indonesian_female = ColumnContent("Indonesian female", "number")
+     irish_english_female = ColumnContent("Irish English female", "number")
+     irish_english_male = ColumnContent("Irish English male", "number")
+     israeli_male = ColumnContent("Israeli male", "number")
+     italian_female = ColumnContent("Italian female", "number")
+     jamaican_english_female = ColumnContent("Jamaican English female", "number")
+     jamaican_english_male = ColumnContent("Jamaican English male", "number")
+     kenyan_english_female = ColumnContent("Kenyan English female", "number")
+     kenyan_english_male = ColumnContent("Kenyan English male", "number")
+     latin_american_female = ColumnContent("Latin American female", "number")
+     latin_american_male = ColumnContent("Latin American male", "number")
+     lithuanian_male = ColumnContent("Lithuanian male", "number")
+     mainstream_us_english_female = ColumnContent("Mainstream US English female", "number")
+     mainstream_us_english_male = ColumnContent("Mainstream US English male", "number")
+     nigerian_english_female = ColumnContent("Nigerian English female", "number")
+     nigerian_english_male = ColumnContent("Nigerian English male", "number")
+     romanian_female = ColumnContent("Romanian female", "number")
+     scottish_english_male = ColumnContent("Scottish English male", "number")
+     southern_british_english_male = ColumnContent("Southern British English male", "number")
+     spanish_female = ColumnContent("Spanish female", "number")
+     spanish_male = ColumnContent("Spanish male", "number")
+     vietnamese_female = ColumnContent("Vietnamese female", "number")
+     vietnamese_male = ColumnContent("Vietnamese male", "number")
+
+ def make_clickable_model(model_name):
+     link = f"https://huggingface.co/{model_name}"
+     return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline; text-decoration-style: dotted;">{model_name}</a>'
+
+ def styled_error(error):
+     return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"
+
+ def styled_warning(warn):
+     return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>"
+
+ def styled_message(message):
+     return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>"