MTT / convert2jsonl.py

#!/usr/bin/env python3
import os
import json
import argparse
import numpy as np
import pandas as pd
import soundfile as sf
from tqdm import tqdm


def generate_split_jsonl(data_path: str, split_name: str, tags: np.ndarray, binary: np.ndarray):
    """
    Reads {split_name}.tsv, processes all .mp3 files, and generates MTT.{split_name}.jsonl.
    Includes error handling, a progress bar, and bitrate calculation for MP3s.
    """
    mt_dir = data_path
    tsv_path = os.path.join(mt_dir, f'{split_name}.tsv')
    if split_name == "valid":
        out_split_name = "val"
    else:
        out_split_name = split_name
    out_path = os.path.join(mt_dir, f'MTT.{out_split_name}.jsonl')
    # All splits append to the single shared failure log that main() cleans up
    fail_log_path = os.path.join(mt_dir, 'processing_failures.log')
    # Read index and filenames
    df = pd.read_csv(tsv_path, sep='\t', header=None, names=['idx', 'title'])
    failed_count = 0
    failed_records = []
    print(f"Processing split: {split_name}")
    with open(out_path, 'w', encoding='utf-8') as fw:
        # Use tqdm for a progress bar
        for _, row in tqdm(df.iterrows(), total=df.shape[0], desc=f"-> Generating {split_name}.jsonl"):
            try:
                i = int(row['idx'])
                title = row['title']  # e.g., "48/948.low.mp3"
                audio_path = os.path.join(mt_dir, 'mp3', title)
                # Read audio metadata (soundfile supports .mp3)
                info = sf.info(audio_path)
                duration = info.frames / info.samplerate
                num_samples = info.frames
                sample_rate = info.samplerate
                channels = info.channels
                # Calculate the bitrate, which is more meaningful than bit depth
                # for compressed formats like MP3
                bitrate = None
                # Check that the duration is valid to avoid division by zero
                if duration > 0:
                    try:
                        file_size_bytes = os.path.getsize(audio_path)
                        # Bitrate in bits per second (bps)
                        bitrate = int((file_size_bytes * 8) / duration)
                    except OSError:
                        # The file may be missing or another OS-level error occurred
                        pass
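                # Illustrative arithmetic (hypothetical numbers): a 120,000-byte
                # file lasting 30.0 s yields (120000 * 8) / 30.0 = 32000 bps (~32 kbps).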
                # Infer the bit depth from the subtype; this stays None for MP3
                bit_depth = None
                if hasattr(info, 'subtype') and info.subtype and info.subtype.startswith('PCM_'):
                    try:
                        bit_depth = int(info.subtype.split('_', 1)[1])
                    except (ValueError, IndexError):
                        pass  # Could not parse the bit depth from the subtype
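                # For reference: a 16-bit PCM WAV reports subtype 'PCM_16'
                # -> bit_depth = 16; MP3s report an MPEG subtype instead,
                # so bit_depth remains None.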
                # Get the list of labels for this sample
                labels = tags[binary[i].astype(bool)].tolist()
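                # Illustrative example (hypothetical tags): with
                # tags = ['guitar', 'piano', 'slow'] and binary[i] = [1, 0, 1],
                # the boolean mask selects labels = ['guitar', 'slow'].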
                # Assemble the JSON object and write one record per line
                record = {
                    "audio_path": audio_path,
                    "label": labels,
                    "duration": duration,
                    "sample_rate": sample_rate,
                    "num_samples": num_samples,
                    "bit_depth": bit_depth,  # null for MP3 files
                    "bitrate": bitrate,
                    "channels": channels
                }
                fw.write(json.dumps(record, ensure_ascii=False) + "\n")
            except Exception as e:
                # On any error, log the file and skip it. Use row['title'] here:
                # the local `title` may be unbound if the failure happened
                # before it was assigned.
                failed_count += 1
                failed_records.append(f"File: {row['title']}, Error: {e}")
                continue
print(f"Successfully generated {out_path}")
# After the loop, report and log any failures
if failed_count > 0:
print(f"Skipped {failed_count} corrupted or problematic files for split '{split_name}'.")
# Append failures to fail.txt
with open(fail_log_path, 'a', encoding='utf-8') as f_fail:
f_fail.write(f"--- Failures for split: {split_name} ({failed_count} files) ---\n")
for record in failed_records:
f_fail.write(record + "\n")
f_fail.write("\n")


def main():
    parser = argparse.ArgumentParser(
        description="Generate JSONL files for the MTT dataset splits (train/valid/test) of .mp3 files.")
    parser.add_argument(
        "data_path",
        help="Root directory of the MTT dataset, containing tags.npy, binary_label.npy, "
             "the per-split .tsv files, and the mp3/ folder.")
    args = parser.parse_args()
    mt_dir = args.data_path
    # Use a single failure log so it only needs to be cleaned once, not per split
    fail_log_path = os.path.join(mt_dir, 'processing_failures.log')
    if os.path.exists(fail_log_path):
        os.remove(fail_log_path)
        print(f"Removed old log file: {fail_log_path}")
    try:
        # Load the tag names and the binary label matrix
        tags = np.load(os.path.join(mt_dir, 'tags.npy'))
        binary = np.load(os.path.join(mt_dir, 'binary_label.npy'))
    except FileNotFoundError as e:
        print(f"Error: could not find a required .npy file. {e}")
        return
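    # Assumed shapes: tags is a 1-D array of tag strings and binary is an
    # (n_clips, n_tags) 0/1 matrix, indexed by the 'idx' column of each .tsv.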
    # Generate a JSONL file for each split
    for split in ['train', 'valid', 'test']:
        generate_split_jsonl(args.data_path, split, tags, binary)
    print("\nProcessing complete.")
    if os.path.exists(fail_log_path):
        print(f"A log of all failed files has been saved to: {fail_log_path}")


if __name__ == "__main__":
    main()
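# Example invocation (assuming the dataset layout described above):
#   python convert2jsonl.py /path/to/MTT
# This writes MTT.train.jsonl, MTT.val.jsonl, and MTT.test.jsonl into /path/to/MTT.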