Mohammadawad1 committed
Commit c6d4452 · verified · 1 parent: 9dc6458

Update my_dataset.py

Files changed (1)
  1. my_dataset.py +102 -102
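
For reference, a repository that ships a loading script like this is normally consumed through `datasets.load_dataset`. A minimal usage sketch, not part of the commit: the repo id is taken from the URLs inside the script below, `"default"` is the only builder config the script defines, and `trust_remote_code=True` is what recent `datasets` releases require before running a repo's script.

from datasets import load_dataset

# Hypothetical usage sketch. Note that after this commit the script body is
# fully commented out, so this call would fail until the loader is restored.
ds = load_dataset(
    "Mohammadawad1/my-dataset-test-v2",
    "default",               # the only config in BUILDER_CONFIGS
    trust_remote_code=True,  # allow executing the repo's my_dataset.py
)
print(ds)
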
my_dataset.py CHANGED
@@ -1,123 +1,123 @@
-import os
-import csv
-import datasets
-from tqdm import tqdm  # show progress while reading the metadata files

-_DESCRIPTION = "A speech dataset designed for automatic speech recognition (ASR), structured like Mozilla Common Voice."
-_CITATION = "No citation available yet."

-class MyDatasetConfig(datasets.BuilderConfig):
-    def __init__(self, **kwargs):
-        super(MyDatasetConfig, self).__init__(**kwargs)

-class MyDataset(datasets.GeneratorBasedBuilder):
-    DEFAULT_WRITER_BATCH_SIZE = 1000  # same as Common Voice

-    BUILDER_CONFIGS = [
-        MyDatasetConfig(
-            name="default",
-            version=datasets.Version("1.0.0"),
-            description=_DESCRIPTION,
-        ),
-    ]

-    def _info(self):
-        features = datasets.Features({
-            "client_id": datasets.Value("string"),
-            "path": datasets.Value("string"),
-            "audio": datasets.features.Audio(sampling_rate=16000),
-            "text": datasets.Value("string"),
-            "up_votes": datasets.Value("int64"),
-            "down_votes": datasets.Value("int64"),
-            "age": datasets.Value("string"),
-            "gender": datasets.Value("string"),
-            "accent": datasets.Value("string"),
-            "locale": datasets.Value("string"),
-            "segment": datasets.Value("string"),
-            "variant": datasets.Value("string"),
-        })

-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            supervised_keys=None,
-            citation=_CITATION,
-            version=self.config.version,
-        )

-    def _split_generators(self, dl_manager):
-        urls = {
-            "train_audio": "https://huggingface.co/datasets/Mohammadawad1/my-dataset-test-v2/resolve/main/data/train_audio.tar",
-            "train_metadata": "https://huggingface.co/datasets/Mohammadawad1/my-dataset-test-v2/resolve/main/data/train_metadata.csv",
-            "validation_audio": "https://huggingface.co/datasets/Mohammadawad1/my-dataset-test-v2/resolve/main/data/validation_audio.tar",
-            "validation_metadata": "https://huggingface.co/datasets/Mohammadawad1/my-dataset-test-v2/resolve/main/data/validation_metadata.csv",
-            "test_audio": "https://huggingface.co/datasets/Mohammadawad1/my-dataset-test-v2/resolve/main/data/test_audio.tar",
-            "test_metadata": "https://huggingface.co/datasets/Mohammadawad1/my-dataset-test-v2/resolve/main/data/test_metadata.csv",
-        }
-        splits = ["train", "validation", "test"]
-        split_generators = []

-        for split in splits:
-            audio_url = urls.get(f"{split}_audio")
-            metadata_url = urls.get(f"{split}_metadata")

-            if audio_url and metadata_url:
-                audio_tar_path = dl_manager.download(audio_url)
-                metadata_path = dl_manager.download(metadata_url)

-                split_generators.append(
-                    datasets.SplitGenerator(
-                        name=getattr(datasets.Split, split.upper()),
-                        gen_kwargs={
-                            "archives": dl_manager.iter_archive(audio_tar_path),
-                            "metadata_path": metadata_path,
-                        },
-                    )
-                )
-        return split_generators


-    def _generate_examples(self, archives, metadata_path):
-        metadata = {}
-        data_fields = list(self._info().features.keys())

-        with open(metadata_path, encoding="utf-8-sig") as f:
-            reader = csv.DictReader(f, delimiter=",", quoting=csv.QUOTE_NONE)
-            reader.fieldnames = [name.strip().replace('"', '') for name in reader.fieldnames]

-            for row in tqdm(reader, desc="Loading metadata..."):
-                row = {k.replace('"', ''): v.replace('"', '') for k, v in row.items()}
-                if not row["file_name"].endswith(".wav"):
-                    row["file_name"] += ".wav"
-                if not row["file_name"].startswith("clips/"):
-                    row["file_name"] = "clips/" + row["file_name"]

-                if "accents" in row:
-                    row["accent"] = row["accents"]
-                    del row["accents"]

-                for field in data_fields:
-                    if field not in row:
-                        if field in ["up_votes", "down_votes"]:
-                            row[field] = 0
-                        else:
-                            row[field] = ""

-                metadata[row["file_name"]] = row

-        for i, audio_archive in enumerate(archives):
-            for path_in_tar, file_obj in audio_archive:
-                _, filename = os.path.split(path_in_tar)
-                if filename in metadata:
-                    example = dict(metadata[filename])

-                    # read only the first 100 bytes to check the file contains data
-                    sample_bytes = file_obj.read(100)
-                    print(f"Read {len(sample_bytes)} bytes from {path_in_tar}")

-                    # seek back to the start for the real read after the check
-                    file_obj.seek(0)

-                    example["audio"] = {"path": path_in_tar, "bytes": file_obj.read()}
-                    example["path"] = path_in_tar
-                    yield path_in_tar, example
 
+# import os
+# import csv
+# import datasets
+# from tqdm import tqdm  # show progress while reading the metadata files

+# _DESCRIPTION = "A speech dataset designed for automatic speech recognition (ASR), structured like Mozilla Common Voice."
+# _CITATION = "No citation available yet."

+# class MyDatasetConfig(datasets.BuilderConfig):
+#     def __init__(self, **kwargs):
+#         super(MyDatasetConfig, self).__init__(**kwargs)

+# class MyDataset(datasets.GeneratorBasedBuilder):
+#     DEFAULT_WRITER_BATCH_SIZE = 1000  # same as Common Voice

+#     BUILDER_CONFIGS = [
+#         MyDatasetConfig(
+#             name="default",
+#             version=datasets.Version("1.0.0"),
+#             description=_DESCRIPTION,
+#         ),
+#     ]

+#     def _info(self):
+#         features = datasets.Features({
+#             "client_id": datasets.Value("string"),
+#             "path": datasets.Value("string"),
+#             "audio": datasets.features.Audio(sampling_rate=16000),
+#             "text": datasets.Value("string"),
+#             "up_votes": datasets.Value("int64"),
+#             "down_votes": datasets.Value("int64"),
+#             "age": datasets.Value("string"),
+#             "gender": datasets.Value("string"),
+#             "accent": datasets.Value("string"),
+#             "locale": datasets.Value("string"),
+#             "segment": datasets.Value("string"),
+#             "variant": datasets.Value("string"),
+#         })

+#         return datasets.DatasetInfo(
+#             description=_DESCRIPTION,
+#             features=features,
+#             supervised_keys=None,
+#             citation=_CITATION,
+#             version=self.config.version,
+#         )

+#     def _split_generators(self, dl_manager):
+#         urls = {
+#             "train_audio": "https://huggingface.co/datasets/Mohammadawad1/my-dataset-test-v2/resolve/main/data/train_audio.tar",
+#             "train_metadata": "https://huggingface.co/datasets/Mohammadawad1/my-dataset-test-v2/resolve/main/data/train_metadata.csv",
+#             "validation_audio": "https://huggingface.co/datasets/Mohammadawad1/my-dataset-test-v2/resolve/main/data/validation_audio.tar",
+#             "validation_metadata": "https://huggingface.co/datasets/Mohammadawad1/my-dataset-test-v2/resolve/main/data/validation_metadata.csv",
+#             "test_audio": "https://huggingface.co/datasets/Mohammadawad1/my-dataset-test-v2/resolve/main/data/test_audio.tar",
+#             "test_metadata": "https://huggingface.co/datasets/Mohammadawad1/my-dataset-test-v2/resolve/main/data/test_metadata.csv",
+#         }
+#         splits = ["train", "validation", "test"]
+#         split_generators = []

+#         for split in splits:
+#             audio_url = urls.get(f"{split}_audio")
+#             metadata_url = urls.get(f"{split}_metadata")

+#             if audio_url and metadata_url:
+#                 audio_tar_path = dl_manager.download(audio_url)
+#                 metadata_path = dl_manager.download(metadata_url)

+#                 split_generators.append(
+#                     datasets.SplitGenerator(
+#                         name=getattr(datasets.Split, split.upper()),
+#                         gen_kwargs={
+#                             "archives": dl_manager.iter_archive(audio_tar_path),
+#                             "metadata_path": metadata_path,
+#                         },
+#                     )
+#                 )
+#         return split_generators


+#     def _generate_examples(self, archives, metadata_path):
+#         metadata = {}
+#         data_fields = list(self._info().features.keys())

+#         with open(metadata_path, encoding="utf-8-sig") as f:
+#             reader = csv.DictReader(f, delimiter=",", quoting=csv.QUOTE_NONE)
+#             reader.fieldnames = [name.strip().replace('"', '') for name in reader.fieldnames]

+#             for row in tqdm(reader, desc="Loading metadata..."):
+#                 row = {k.replace('"', ''): v.replace('"', '') for k, v in row.items()}
+#                 if not row["file_name"].endswith(".wav"):
+#                     row["file_name"] += ".wav"
+#                 if not row["file_name"].startswith("clips/"):
+#                     row["file_name"] = "clips/" + row["file_name"]

+#                 if "accents" in row:
+#                     row["accent"] = row["accents"]
+#                     del row["accents"]

+#                 for field in data_fields:
+#                     if field not in row:
+#                         if field in ["up_votes", "down_votes"]:
+#                             row[field] = 0
+#                         else:
+#                             row[field] = ""

+#                 metadata[row["file_name"]] = row

+#         for i, audio_archive in enumerate(archives):
+#             for path_in_tar, file_obj in audio_archive:
+#                 _, filename = os.path.split(path_in_tar)
+#                 if filename in metadata:
+#                     example = dict(metadata[filename])

+#                     # read only the first 100 bytes to check the file contains data
+#                     sample_bytes = file_obj.read(100)
+#                     print(f"Read {len(sample_bytes)} bytes from {path_in_tar}")

+#                     # seek back to the start for the real read after the check
+#                     file_obj.seek(0)

+#                     example["audio"] = {"path": path_in_tar, "bytes": file_obj.read()}
+#                     example["path"] = path_in_tar
+#                     yield path_in_tar, example
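
One detail worth flagging in the disabled loader: `_generate_examples` stores every metadata row under a `clips/`-prefixed key (`row["file_name"] = "clips/" + row["file_name"]`) but then tests the bare basename (`if filename in metadata`), so no tar member can ever match and every split comes back empty. A minimal sketch of a consistent lookup, assuming the tar members are meant to match those `clips/<name>.wav` keys; `iter_examples` is a hypothetical helper, not part of the script:

import os

def iter_examples(archives, metadata):
    """Yield (key, example) pairs, matching tar members against metadata rows."""
    for audio_archive in archives:
        for path_in_tar, file_obj in audio_archive:
            # The metadata dict is keyed by "clips/<basename>.wav", so
            # normalize the tar member path to that form before looking it up.
            key = "clips/" + os.path.basename(path_in_tar)
            row = metadata.get(key)
            if row is None:
                continue  # no metadata row for this member; skip it
            example = dict(row)
            example["audio"] = {"path": path_in_tar, "bytes": file_obj.read()}
            example["path"] = path_in_tar
            yield path_in_tar, example

With the keys aligned like this, the 100-byte debug read, the `print`, and the `seek(0)` rewind in the original loop become unnecessary; a single `file_obj.read()` is enough.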