import os
import glob
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from tqdm import tqdm
import cv2 as cv
import argparse


def images_to_parquet_sharded(image_dir, output_path, output_prefix="data_shard", shard_size_mb=100):
    """
    Convert a folder of images into multiple Parquet shard files of roughly
    shard_size_mb each. Images are re-encoded as PNG before being stored.

    Args:
        image_dir (str): Path to the dataset folder. If images are grouped in
                         subfolders, the subfolder name is used as the label.
        output_path (str): Path to the output folder; created if it does not exist.
        output_prefix (str): Prefix for the output Parquet files (e.g., "data_shard").
        shard_size_mb (int): Approximate shard size in MB.
    """
    shard_size_bytes = shard_size_mb * 1024 * 1024
    shard_index = 0
    records = []
    all_images_count = 0
    total_written = 0  # bytes accumulated in the current shard

    # Make sure the output folder exists before any shard is written.
    os.makedirs(output_path, exist_ok=True)

    image_paths = glob.glob(os.path.join(image_dir, "**", "*.*"), recursive=True)

    for path in tqdm(image_paths, desc="Packing images"):
        ext = os.path.splitext(path)[1].lower()
        if ext not in [".jpg", ".jpeg", ".png", ".bmp", ".gif", ".webp"]:
            continue

        # The immediate parent folder name is used as the label.
        label = os.path.basename(os.path.dirname(path))

        # Decode the image and re-encode it as PNG so every stored image uses
        # a single lossless format. imread returns None for files it cannot
        # decode (corrupt files, or formats such as GIF that OpenCV skips).
        img = cv.imread(path)
        if img is None:
            continue
        ok, buf = cv.imencode(".png", img)
        if not ok:
            continue
        img_bytes = buf.tobytes()

        records.append({"image": img_bytes, "label": label, "filename": os.path.basename(path)})
        total_written += len(img_bytes)

        # When the current shard reaches the size limit, flush it to disk.
        if total_written >= shard_size_bytes:
            df = pd.DataFrame(records)
            table = pa.Table.from_pandas(df)
            out_path = os.path.join(output_path, f"{output_prefix}-{shard_index:05d}.parquet")
            pq.write_table(table, out_path)
            all_images_count += len(records)
            # Reset accumulators for the next shard.
            shard_index += 1
            records = []
            total_written = 0

    # Write any leftover records as a final, smaller shard.
    if records:
        df = pd.DataFrame(records)
        table = pa.Table.from_pandas(df)
        out_path = os.path.join(output_path, f"{output_prefix}-{shard_index:05d}.parquet")
        pq.write_table(table, out_path)
        all_images_count += len(records)

    print(f"✅ Wrote {all_images_count} images to {output_path}")



if __name__ == "__main__":
    # Create an ArgumentParser object
    parser = argparse.ArgumentParser(
        description="This script takes an images folder and convert them into parquet files."
    )

    # Add arguments
    parser.add_argument(
        "--output",
        "-o",
        type=str,
        required=True,
        help="Output path for the resulting dataset.",
    )
    parser.add_argument(
        "--output_prefix",
        "-op",
        type=str,
        required=False,
        default="data_shard",
        help="Output prefix for the resulting dataset.",
    )
    parser.add_argument(
        "--input",
        "-i",
        type=str,
        required=True,
        help="Input path for the dataset to convert.",
    )
    parser.add_argument(
        "--parquet_shard_size_mb",
        "-ps",
        type=int,
        default=100,
        required=False,
        help="Size of the parquet shard in MB.",
    )

    args = parser.parse_args()

    images_to_parquet_sharded(
        args.input,
        args.output,
        output_prefix=args.output_prefix,
        shard_size_mb=args.parquet_shard_size_mb,
    )
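
# Example invocation (the script filename and paths here are illustrative):
#
#   python images_to_parquet.py -i ./raw_images -o ./out -op data_shard -ps 100
#
# Assuming the Hugging Face `datasets` library is installed, the resulting
# shards can be loaded back with:
#
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files="out/data_shard-*.parquet")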