import argparse
import json
import os

import numpy as np
import pandas as pd
import soundfile as sf
from tqdm import tqdm
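
# Third-party dependencies: numpy, pandas, soundfile and tqdm
# (e.g. `pip install numpy pandas soundfile tqdm`).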


def generate_split_jsonl(data_path: str, split_name: str, tags: np.ndarray, binary: np.ndarray):
    """
    Reads {split_name}.tsv, processes every listed .mp3 file, and generates
    MTT.{out_split_name}.jsonl (the "valid" split is written out as "val").
    Includes error handling, a progress bar, and bitrate estimation for MP3s.
    """
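    # Expected layout under data_path (inferred from how paths are built
    # below): {split}.tsv with two tab-separated columns (clip index,
    # relative mp3 path) and an mp3/ directory holding the audio files.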
    mt_dir = data_path
    tsv_path = os.path.join(mt_dir, f'{split_name}.tsv')
    # MTT names its validation split "valid"; the output files use "val".
    out_split_name = "val" if split_name == "valid" else split_name
    out_path = os.path.join(mt_dir, f'MTT.{out_split_name}.jsonl')
    # Append to the shared failure log that main() resets at startup.
    fail_log_path = os.path.join(mt_dir, 'processing_failures.log')

    df = pd.read_csv(tsv_path, sep='\t', header=None, names=['idx', 'title'])

    failed_count = 0
    failed_records = []

print(f"Processing split: {split_name}") |
|
with open(out_path, 'w', encoding='utf-8') as fw: |
|
|
|
        for _, row in tqdm(df.iterrows(), total=df.shape[0], desc=f"-> Generating MTT.{out_split_name}.jsonl"):
            try:
                i = int(row['idx'])
                title = row['title']
                audio_path = os.path.join(mt_dir, 'mp3', title)

                # Probe the headers with soundfile; this raises for unreadable
                # or corrupted audio, which the except clause below records.
                info = sf.info(audio_path)
                duration = info.frames / info.samplerate
                num_samples = info.frames
                sample_rate = info.samplerate
                channels = info.channels

                # soundfile does not report bitrate, so estimate the average
                # bitrate (bits per second) from file size and duration.
                bitrate = None
                if duration > 0:
                    try:
                        file_size_bytes = os.path.getsize(audio_path)
                        bitrate = int((file_size_bytes * 8) / duration)
                    except OSError:
                        # File size unavailable; leave bitrate as None.
                        pass

                # Bit depth is only well-defined for PCM subtypes; lossy
                # formats such as MP3 intentionally stay None.
                bit_depth = None
                if hasattr(info, 'subtype') and info.subtype and info.subtype.startswith('PCM_'):
                    try:
                        bit_depth = int(info.subtype.split('_', 1)[1])
                    except (ValueError, IndexError):
                        pass

                # binary[i] is the multi-hot tag vector for clip i; use it as
                # a boolean mask to select the matching tag names.
                labels = tags[binary[i].astype(bool)].tolist()

                record = {
                    "audio_path": audio_path,
                    "label": labels,
                    "duration": duration,
                    "sample_rate": sample_rate,
                    "num_samples": num_samples,
                    "bit_depth": bit_depth,
                    "bitrate": bitrate,
                    "channels": channels
                }
                fw.write(json.dumps(record, ensure_ascii=False) + "\n")
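                # Each written line is one self-contained JSON object, e.g.
                # (illustrative values only):
                # {"audio_path": ".../mp3/f/clip_0001.mp3", "label": ["guitar", "slow"],
                #  "duration": 29.1, "sample_rate": 16000, ...}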
            except Exception as e:
                # Log the failure and keep going; one bad file must not abort
                # the whole split. Read the title from `row` since `title` may
                # not have been assigned yet when the error occurred.
                failed_count += 1
                failed_records.append(f"File: {row.get('title', '<unknown>')}, Error: {e}")
                continue

print(f"Successfully generated {out_path}") |
|
|
|
|
|
    if failed_count > 0:
        print(f"Skipped {failed_count} corrupted or problematic files for split '{split_name}'.")
        with open(fail_log_path, 'a', encoding='utf-8') as f_fail:
            f_fail.write(f"--- Failures for split: {split_name} ({failed_count} files) ---\n")
            for record in failed_records:
                f_fail.write(record + "\n")
            f_fail.write("\n")


def main():
    parser = argparse.ArgumentParser(
        description="Generate JSONL files for the MTT dataset splits (train/valid/test) from .mp3 files.")
    parser.add_argument(
        "data_path",
        help="Root directory of the MTT dataset, containing annotations, tags, labels, and tsv splits.")
    args = parser.parse_args()

    mt_dir = args.data_path

    # Start from a clean failure log; generate_split_jsonl() appends to this
    # same file for every split.
    fail_log_path = os.path.join(mt_dir, 'processing_failures.log')
    if os.path.exists(fail_log_path):
        os.remove(fail_log_path)
        print(f"Removed old log file: {fail_log_path}")

    # tags.npy holds the tag vocabulary; binary_label.npy is the clip-by-tag
    # multi-hot matrix indexed by the tsv's clip index column.
    try:
        tags = np.load(os.path.join(mt_dir, 'tags.npy'))
        binary = np.load(os.path.join(mt_dir, 'binary_label.npy'))
    except FileNotFoundError as e:
        print(f"Error: Could not find required .npy file. {e}")
        return

    for split in ['train', 'valid', 'test']:
        generate_split_jsonl(args.data_path, split, tags, binary)

print("\nProcessing complete.") |
|
if os.path.exists(fail_log_path): |
|
print(f"A log of all failed files has been saved to: {fail_log_path}") |
|
|
|
if __name__ == "__main__":
    main()
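
# Example invocation (the script name here is illustrative; use whatever this
# file is saved as):
#   python generate_mtt_jsonl.py /path/to/MTT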