Update README.md
README.md CHANGED
@@ -113,49 +113,6 @@ Images are stored in byte format, you can decode with `Image.open(BytesIO(img["b
 * **is_preset** - whether the image comes from the "random image button"
 * **dataset_preset** - which dataset the preset image is from. This can be one of [NewYorker](https://huggingface.co/datasets/jmhessel/newyorker_caption_contest), [WikiArt](https://huggingface.co/datasets/huggan/wikiart), [TextVQA](https://huggingface.co/datasets/facebook/textvqa), [ChartQA](https://huggingface.co/datasets/lmms-lab/ChartQA), [DocQA](https://huggingface.co/datasets/lmms-lab/DocVQA), or [realworldqa](https://x.ai/blog/grok-1.5v)
 
-
-## Download Locally
-
-To download the dataset into a local directory, the code below will download the images into the VisionArena-Chat folder
-```
-from datasets import load_dataset
-from PIL import Image
-from io import BytesIO
-import json
-import os
-from tqdm import tqdm
-from multiprocessing import Pool, cpu_count
-
-def download_dataset(num_workers=None):
-    base_dir, images_dir = "VisionArena-Chat", os.path.join("VisionArena-Chat", "images")
-    os.makedirs(images_dir, exist_ok=True)
-    ds = load_dataset("lmarena-ai/VisionArena-Chat", split="train")
-    samples = list(ds)
-    num_workers = num_workers or min(cpu_count(), 8)
-    print(f"Processing samples using {num_workers} workers...")
-
-    def process_sample(idx_sample):
-        idx, sample = idx_sample
-        processed_images = []
-        for img_idx, img in enumerate(sample.get("images", [])):
-            img_filename = f"image_{idx}_{img_idx}.png"
-            img_path = os.path.join(images_dir, img_filename)
-            if not os.path.exists(img_path):
-                Image.open(BytesIO(img["bytes"])).save(img_path)
-            processed_images.append(os.path.join("images", img_filename))
-        sample["images"] = processed_images
-        return sample
-
-    with Pool(num_workers) as pool:
-        processed_data = list(tqdm(pool.imap(process_sample, enumerate(samples)), total=len(samples), desc="Processing samples"))
-    with open(os.path.join(base_dir, "data.json"), 'w', encoding='utf-8') as f:
-        json.dump(processed_data, f, ensure_ascii=False, indent=2)
-    print(f"Dataset downloaded and processed successfully!\nImages saved in: {images_dir}\nData saved in: {os.path.join(base_dir, 'data.json')}")
-
-if __name__ == "__main__":
-    download_dataset()
-```
-
 ## Bias, Risks, and Limitations
 
 This dataset contains a large number of STEM-related questions, OCR tasks, and general problems such as captioning. It contains fewer questions from specialized domains outside of STEM.
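
As a quick illustration of how these fields fit together, the sketch below assumes the schema documented above (raw image bytes under `images[i]["bytes"]`, plus the `is_preset` and `dataset_preset` columns); it decodes one image and tallies which preset datasets the button-sourced images came from.

```
from collections import Counter
from io import BytesIO

from datasets import load_dataset
from PIL import Image

# Load the training split; per the card, images are stored as raw bytes.
ds = load_dataset("lmarena-ai/VisionArena-Chat", split="train")

# Decode the first image of the first conversation.
first = ds[0]
img = Image.open(BytesIO(first["images"][0]["bytes"]))
print(img.size, img.mode)

# Keep only conversations that used the "random image button", then count
# which preset dataset each of those images was drawn from.
preset_rows = ds.filter(lambda row: bool(row["is_preset"]))
print(Counter(preset_rows["dataset_preset"]).most_common())
```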
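
One caveat on the removed download snippet: it defines `process_sample` inside `download_dataset` and then hands it to `multiprocessing.Pool`, but Python's pickler cannot serialize functions defined inside another function, so `pool.imap` cannot ship the worker to the pool processes. A minimal working variant under the same assumptions (the `lmarena-ai/VisionArena-Chat` train split, images as raw bytes, output under a `VisionArena-Chat/` folder) moves the worker to module level and binds the output directory with `functools.partial`:

```
import json
import os
from functools import partial
from io import BytesIO
from multiprocessing import Pool, cpu_count

from datasets import load_dataset
from PIL import Image
from tqdm import tqdm


def process_sample(idx_sample, images_dir):
    """Save one conversation's images to disk and swap in their relative paths."""
    idx, sample = idx_sample
    processed_images = []
    for img_idx, img in enumerate(sample.get("images", [])):
        img_filename = f"image_{idx}_{img_idx}.png"
        img_path = os.path.join(images_dir, img_filename)
        if not os.path.exists(img_path):
            Image.open(BytesIO(img["bytes"])).save(img_path)
        processed_images.append(os.path.join("images", img_filename))
    sample["images"] = processed_images
    return sample


def download_dataset(num_workers=None):
    base_dir = "VisionArena-Chat"
    images_dir = os.path.join(base_dir, "images")
    os.makedirs(images_dir, exist_ok=True)

    ds = load_dataset("lmarena-ai/VisionArena-Chat", split="train")
    samples = list(ds)
    num_workers = num_workers or min(cpu_count(), 8)

    # partial() binds images_dir, so only a picklable module-level function
    # plus a string ever cross the process boundary.
    worker = partial(process_sample, images_dir=images_dir)
    with Pool(num_workers) as pool:
        processed = list(tqdm(pool.imap(worker, enumerate(samples)),
                              total=len(samples), desc="Processing samples"))

    with open(os.path.join(base_dir, "data.json"), "w", encoding="utf-8") as f:
        json.dump(processed, f, ensure_ascii=False, indent=2)
    print(f"Images saved in {images_dir}; metadata saved in {os.path.join(base_dir, 'data.json')}")


if __name__ == "__main__":
    download_dataset()
```

Note that `list(ds)` materializes every sample, image bytes included, in memory; for the full split it may be gentler to iterate the split lazily and write images as they stream in.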