---
license: cc-by-4.0
---
Coming Soon


This script merges multiple tokenized speech datasets stored in memmap format into a single, larger training dataset.
```python
import numpy as np
import os

def merge_memmap_datasets(dataset_dirs, output_dir):
    # Ensure the output directory exists
    os.makedirs(output_dir, exist_ok=True)

    # Dataset splits to be merged
    splits = ['train', 'val']

    for split in splits:
        shapes = []
        seq_len = None
        total_samples = 0

        # Collect shapes of all datasets and check sequence length consistency
        for dataset_dir in dataset_dirs:
            shape_path = os.path.join(dataset_dir, f'{split}_input_ids_shape.npy')
            if not os.path.exists(shape_path):
                print(f"Warning: {split}_input_ids_shape.npy not found in {dataset_dir}, skipping this dataset.")
                continue
            shape = np.load(shape_path)
            print(f"Loaded shape of {split} data from {dataset_dir}: {shape}")
            shape = tuple(shape)
            shapes.append((dataset_dir, shape))
            total_samples += shape[0]
            if seq_len is None:
                seq_len = shape[1]
            elif seq_len != shape[1]:
                print(f"Error: Sequence length mismatch in {split} data from {dataset_dir}.")
                return

        if total_samples == 0:
            print(f"Error: No valid {split} data found for merging.")
            continue

        new_shape = (total_samples, seq_len)

        # Create new memmap file
        output_memmap_path = os.path.join(output_dir, f'{split}_input_ids.memmap')
        output_memmap = np.memmap(
            output_memmap_path, dtype='int32', mode='w+', shape=new_shape
        )

        # Copy data from each dataset to the new memmap file
        start_idx = 0
        for dataset_dir, shape in shapes:
            memmap_path = os.path.join(dataset_dir, f'{split}_input_ids.memmap')
            data = np.memmap(
                memmap_path, dtype='int32', mode='r', shape=shape
            )
            end_idx = start_idx + shape[0]
            output_memmap[start_idx:end_idx, :] = data[:]
            print(f"Merged {split} data from {dataset_dir} into positions {start_idx}:{end_idx}")
            start_idx = end_idx
            del data  # Free memory

        # Flush data to disk, then release the memmap handle
        output_memmap.flush()
        del output_memmap

        # Save the new shape file
        np.save(os.path.join(output_dir, f'{split}_input_ids_shape.npy'), new_shape)

        print(f"Completed merging {split} data. New shape: {new_shape}")

if __name__ == "__main__":
    dataset_dirs = [
        'libriheavy_tts_1',
        'libriheavy_tts_2',
        'libriheavy_tts_3',
        'libriheavy_tts_4'
    ]
    output_dir = 'libriheavy_tts_all'
    merge_memmap_datasets(dataset_dirs, output_dir)
```
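
To sanity-check the merged output, the files can be reopened read-only using the saved shape file. This is a minimal sketch assuming the naming scheme produced by the script above; `libriheavy_tts_all` and the `train` split are just the example values from its `__main__` block.
```python
import os
import numpy as np

output_dir = 'libriheavy_tts_all'  # example output directory from the merge script above
split = 'train'

# Reload the shape saved by the merge script, then open the memmap read-only
shape = tuple(np.load(os.path.join(output_dir, f'{split}_input_ids_shape.npy')))
data = np.memmap(
    os.path.join(output_dir, f'{split}_input_ids.memmap'),
    dtype='int32', mode='r', shape=shape
)

print(f"{split}: {shape[0]} sequences of length {shape[1]}")
print("First 10 tokens of the first sequence:", data[0, :10])
```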