HKUST-Audio committed (verified) · Commit 3ea8073 · 1 Parent(s): 2e88840

Update README.md

Files changed (1)
  1. README.md +81 -1
README.md CHANGED
@@ -1,4 +1,84 @@
 ---
 license: cc-by-4.0
 ---
-Coming Soon
+Coming Soon
+
+
+
+This script merges tokenized speech datasets stored in memmap format, combining the input datasets into larger training datasets.
+```python
+import numpy as np
+import os
+
+def merge_memmap_datasets(dataset_dirs, output_dir):
+    # Ensure the output directory exists
+    os.makedirs(output_dir, exist_ok=True)
+
+    # Dataset splits to be merged
+    splits = ['train', 'val']
+
+    for split in splits:
+        shapes = []
+        seq_len = None
+        total_samples = 0
+
+        # Collect the shapes of all datasets and check sequence-length consistency
+        for dataset_dir in dataset_dirs:
+            shape_path = os.path.join(dataset_dir, f'{split}_input_ids_shape.npy')
+            if not os.path.exists(shape_path):
+                print(f"Warning: {split}_input_ids_shape.npy not found in {dataset_dir}, skipping this dataset.")
+                continue
+            shape = np.load(shape_path)
+            print(f"Loaded shape of {split} data from {dataset_dir}: {shape}")
+            shape = tuple(shape)
+            shapes.append((dataset_dir, shape))
+            total_samples += shape[0]
+            if seq_len is None:
+                seq_len = shape[1]
+            elif seq_len != shape[1]:
+                print(f"Error: Sequence length mismatch in {split} data from {dataset_dir}.")
+                return
+
+        if total_samples == 0:
+            print(f"Error: No valid {split} data found for merging.")
+            continue
+
+        new_shape = (total_samples, seq_len)
+
+        # Create the new memmap file
+        output_memmap_path = os.path.join(output_dir, f'{split}_input_ids.memmap')
+        output_memmap = np.memmap(
+            output_memmap_path, dtype='int32', mode='w+', shape=new_shape
+        )
+
+        # Copy data from each dataset into the new memmap file
+        start_idx = 0
+        for dataset_dir, shape in shapes:
+            memmap_path = os.path.join(dataset_dir, f'{split}_input_ids.memmap')
+            data = np.memmap(
+                memmap_path, dtype='int32', mode='r', shape=shape
+            )
+            end_idx = start_idx + shape[0]
+            output_memmap[start_idx:end_idx, :] = data[:]
+            print(f"Merged {split} data from {dataset_dir} into positions {start_idx}:{end_idx}")
+            start_idx = end_idx
+            del data  # Release the source memmap
+
+        # Release the output memmap; deletion flushes pending writes to disk
+        del output_memmap
+
+        # Save the new shape file
+        np.save(os.path.join(output_dir, f'{split}_input_ids_shape.npy'), new_shape)
+
+        print(f"Completed merging {split} data. New shape: {new_shape}")
+
+if __name__ == "__main__":
+    dataset_dirs = [
+        'libriheavy_tts_1',
+        'libriheavy_tts_2',
+        'libriheavy_tts_3',
+        'libriheavy_tts_4'
+    ]
+    output_dir = 'libriheavy_tts_all'
+    merge_memmap_datasets(dataset_dirs, output_dir)
+```
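
After merging, the output can be sanity-checked by mapping it back in. Below is a minimal read-back sketch (illustrative, not part of this commit), assuming the `libriheavy_tts_all` output directory and the `int32` dtype used by the script above:

```python
import os
import numpy as np

# Load the merged 'train' split back from disk and inspect it.
output_dir = 'libriheavy_tts_all'  # assumed output directory from the script above
split = 'train'

# The shape file stores (num_samples, seq_len) for the memmap
shape = tuple(np.load(os.path.join(output_dir, f'{split}_input_ids_shape.npy')))
data = np.memmap(
    os.path.join(output_dir, f'{split}_input_ids.memmap'),
    dtype='int32', mode='r', shape=shape
)

print(f"{split}: {data.shape[0]} sequences of length {data.shape[1]}")
print("First tokens of the first sequence:", data[0, :10])
```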