from datasets import load_dataset, concatenate_datasets, DatasetDict
import pandas as pd

# Load the existing corpus from the Hugging Face Hub.
current_dataset = load_dataset("muzaffercky/kurdish-kurmanji-voice-corpus")

# Load the newly prepared recordings with the generic "audiofolder" loader,
# which builds an audio dataset from the files under ./prepare_data.
new_dataset = load_dataset("audiofolder", data_dir="./prepare_data")

# metadata.csv maps each audio file to its transcription and source URL.
metadata_df = pd.read_csv("metadata.csv", sep=";")
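
# Expected shape of metadata.csv (semicolon-separated); the data row below is
# a hypothetical placeholder illustrating the format, not a real entry:
#
#   file_name;transcription;source
#   <root>/<folder>/<clip>.mp3;<Kurmanji sentence>;<source URL>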


def add_transcription(example):
    # Recover the relative path ("root/folder/file") used as the key in
    # metadata.csv from the absolute path stored by the audiofolder loader.
    path = example["audio"]["path"]
    *_, root_folder, folder, filename = path.split("/")
    full_name = f"{root_folder}/{folder}/{filename}"

    matching_rows = metadata_df[metadata_df["file_name"] == full_name]
    if matching_rows.empty:
        raise KeyError(f"No metadata row found for {full_name}")

    example["transcription"] = matching_rows["transcription"].values[0]
    example["url"] = matching_rows["source"].values[0]
    print(example["transcription"])  # crude progress logging during .map()

    return example
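
# Quick sanity check on a single row before mapping the whole dataset,
# assuming the audiofolder loader produced its default "train" split:
#
#   add_transcription(new_dataset["train"][0])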


def merge_datasets(current: DatasetDict, new: DatasetDict) -> DatasetDict:
    """Merge two DatasetDicts split by split.

    Splits present in both are concatenated; splits present in only one
    are carried over unchanged.
    """
    keys = set(current.keys()) | set(new.keys())
    updated_splits = {}
    for key in keys:
        if key in current and key in new:
            updated_splits[key] = concatenate_datasets([current[key], new[key]])
        elif key in current:
            updated_splits[key] = current[key]
        else:
            updated_splits[key] = new[key]
    return DatasetDict(updated_splits)
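
# Note: concatenate_datasets requires both splits to share identical features,
# so the new data must end up with the same columns as the Hub dataset; the
# map/remove_columns step below takes care of that.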

# Attach transcriptions and drop the "label" column the audiofolder loader
# derives from directory names; it carries no meaning for this corpus.
new_dataset = new_dataset.map(add_transcription).remove_columns("label")

updated_dataset = merge_datasets(current_dataset, new_dataset)

# Push the merged corpus back to the Hub under the same repository name.
updated_dataset.push_to_hub("muzaffercky/kurdish-kurmanji-voice-corpus")
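
# push_to_hub requires an authenticated session. A minimal sketch, assuming
# the access token is available in the HF_TOKEN environment variable:
#
#   import os
#   from huggingface_hub import login
#   login(token=os.environ["HF_TOKEN"])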