zmgong commited on
Commit
5cee67f
·
1 Parent(s): d78adf3

Add split 1M data.

Browse files
data/BIOSCAN_1M/split_data/splitted_files/BioScan_data_in_splits.hdf5.part001 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:55cf45958e9bc139850b34702a66b73c4453e5f92f3aca0d5855b9bddb6c1590
3
+ size 48318382080
data/BIOSCAN_1M/split_data/splitted_files/BioScan_data_in_splits.hdf5.part002 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5216faff1790ab10251684ed1211e027345097c8e820eb38562cf3773edab026
3
+ size 31381187161
merge_bioscan_1m.py ADDED
File without changes
merge_bioscan_5m.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ import os
3
+ import glob
4
+
5
def merge_files(parts_dir, output_file, base_filename):
    """Reassemble a file from its ``.partNNN`` chunks.

    Looks for files named ``<base_filename>.part*`` in *parts_dir* and
    concatenates them, in sorted order, into *output_file*.

    Parameters
    ----------
    parts_dir : str
        Directory containing the split part files.
    output_file : str
        Path of the merged file to create (overwritten if it exists).
    base_filename : str
        Name of the original file the parts were derived from; parts are
        expected to be named ``<base_filename>.partNNN``.
    """
    pattern = os.path.join(parts_dir, f"{base_filename}.part*")
    # Part numbers are zero-padded (part001, part002, ...), so a plain
    # lexicographic sort yields the correct numeric order.
    parts = sorted(glob.glob(pattern))
    if not parts:
        print("No files found")
        return

    print(f"found {len(parts)} files, merge to {output_file} ...")
    with open(output_file, 'wb') as outfile:
        for part in parts:
            # Message in English for consistency with the rest of the log
            # output (was previously in Chinese).
            print(f"Merging file: {part} ...")
            with open(part, 'rb') as infile:
                # Stream in 1 MiB buffers to keep memory bounded; the
                # parts are tens of gigabytes each.
                while True:
                    data = infile.read(1024 * 1024)
                    if not data:
                        break
                    outfile.write(data)
    print(f"Merged: {output_file}")
23
+
24
if __name__ == '__main__':
    # Default locations for the chunked BIOSCAN-1M archive.
    base_filename = "BioScan_data_in_splits.hdf5"
    parts_dir = "data/BIOSCAN_1M/split_data/splitted_files"
    output_file = "data/BIOSCAN_1M/split_data/BioScan_data_in_splits_merged.hdf5"
    merge_files(
        parts_dir=parts_dir,
        output_file=output_file,
        base_filename=base_filename,
    )
split_bioscan_1m.py ADDED
File without changes
split_bioscan_5m.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
def split_file(input_file, output_dir, chunk_size):
    """Split *input_file* into chunks of at most *chunk_size* bytes.

    Parts are written to *output_dir* (created if missing) and named
    ``<basename>.part001``, ``<basename>.part002``, ... so that a plain
    lexicographic sort restores the original order when merging.

    Parameters
    ----------
    input_file : str
        Path of the file to split.
    output_dir : str
        Directory that will receive the part files.
    chunk_size : int
        Maximum size of each part, in bytes.
    """
    # exist_ok=True avoids the TOCTOU race of a separate exists() check
    # and also tolerates a pre-existing directory.
    os.makedirs(output_dir, exist_ok=True)

    file_size = os.path.getsize(input_file)
    base_name = os.path.basename(input_file)
    # Ceiling division: the final part holds the remainder.
    num_chunks = (file_size + chunk_size - 1) // chunk_size
    print(f"Total size: {file_size} bytes, split to {num_chunks} files")

    with open(input_file, 'rb') as f:
        for i in range(num_chunks):
            part_filename = os.path.join(output_dir, f"{base_name}.part{i + 1:03d}")
            with open(part_filename, 'wb') as part_file:
                bytes_written = 0
                # Copy up to chunk_size bytes in 1 MiB buffers to keep
                # memory bounded on multi-GiB inputs.
                while bytes_written < chunk_size:
                    data = f.read(min(1024 * 1024, chunk_size - bytes_written))
                    if not data:
                        break
                    part_file.write(data)
                    bytes_written += len(data)
                print(f"Write to: {part_filename} Size: {bytes_written} bytes")
24
+
25
+
26
if __name__ == '__main__':
    # Split the merged BIOSCAN-1M archive into 45 GiB parts.
    chunk_size = 45 * (1024 ** 3)
    input_file = "data/BIOSCAN_1M/split_data/BioScan_data_in_splits.hdf5"
    output_dir = "data/BIOSCAN_1M/split_data/splitted_files"
    split_file(
        input_file=input_file,
        output_dir=output_dir,
        chunk_size=chunk_size,
    )