Dataset Viewer
Auto-converted to Parquet

Columns:
- images: list of images (list length 1)
- data_source: string (2 distinct values)
- reward_model: dict
- extra_info: dict
- prompt: list of chat messages (list length 2: one system and one user message)
- ability: string (3 distinct values)
- env_name: string (1 distinct value)

Rows are listed below with fields in column order: data_source, reward_model, extra_info, prompt, ability, env_name. The system message is identical in every row; it is shown in full only in the first row and elided afterwards.
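A minimal sketch of reading the converted rows with the `datasets` library; the repo id below is a placeholder, not the dataset's actual Hub path:

```python
from datasets import load_dataset

# Placeholder repo id: substitute the dataset's real Hub path.
ds = load_dataset("ORG/sealvqa_vaw", split="train")

row = ds[0]
print(row["data_source"])                   # "sealvqa_vaw"
print(row["reward_model"]["ground_truth"])  # reference answer string
print(row["extra_info"]["question"])        # underlying VQA question
print(row["ability"], row["env_name"])      # e.g. "sealvqa_vaw_attribute" "visual_toolbox_v2"
```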
sealvqa_vaw
{ "ground_truth": "Yes, the color of the flowers is yellow." }
{ "answer": "Yes, the color of the flowers is yellow.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2364899.jpg", "index": "sealvqa_vaw_0", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the flowers yellow?" }, { "from": "gpt", "value": "Yes, the color of the flowers is yellow." } ], "question": "Is the color of the flowers yellow?", "question_id": "sealvqa_vaw_0", "search": 1, "target_instances": [ { "bbox": [ 141.66666666666666, 335, 62.666666666666686, 33.5 ], "instance_id": "2364899006", "name": "flowers" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the color of the flowers yellow?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
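Each `extra_info.target_instances` entry stores its box as absolute `[x, y, width, height]` (see `bbox_format_description`), while the `image_zoom_in_tool` declared in the prompt takes `[x1, y1, x2, y2]` corners. A converter along these lines is presumably needed when feeding target boxes to the tool; this helper is an illustration, not part of the dataset:

```python
def xywh_to_xyxy(bbox):
    """Convert an absolute [x, y, width, height] box to [x1, y1, x2, y2] corners."""
    x, y, w, h = bbox
    return [x, y, x + w, y + h]

# Target box of the first row ("flowers" in gqa/images/2364899.jpg):
print(xywh_to_xyxy([141.66666666666666, 335, 62.666666666666686, 33.5]))
# roughly [141.67, 335, 204.33, 368.5]
```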
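On the environment side, a reply containing a `<tool_call>` block has to be parsed and answered with an actual crop. The sketch below (regex tag parsing, Pillow crop) is an assumption about that loop, not the released `visual_toolbox_v2` implementation:

```python
import json
import re

from PIL import Image

TOOL_CALL_RE = re.compile(r"<tool_call>\s*(\{.*?\})\s*</tool_call>", re.DOTALL)

def image_zoom_in_tool(image: Image.Image, bbox_2d, label=None):
    """Crop the [x1, y1, x2, y2] region; `label` is informational only."""
    x1, y1, x2, y2 = bbox_2d
    return image.crop((int(x1), int(y1), int(x2), int(y2)))

def service_tool_call(model_output: str, image: Image.Image):
    """Run the first <tool_call> in a model reply; return None if there is none."""
    match = TOOL_CALL_RE.search(model_output)
    if match is None:
        return None  # the model answered directly without using the tool
    call = json.loads(match.group(1))
    if call["name"] != "image_zoom_in_tool":
        raise ValueError(f"unknown tool: {call['name']}")
    return image_zoom_in_tool(image, **call["arguments"])
```

Note that the tool schema in the prompt lists `bbox` under `required` while the only declared property is `bbox_2d`; the sketch follows `bbox_2d`, since that is what the prompt's own example call uses.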
sealvqa_vaw
{ "ground_truth": "The color of the pants is white." }
{ "answer": "The color of the pants is white.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2409126.jpg", "index": "sealvqa_vaw_1", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the pants?" }, { "from": "gpt", "value": "The color of the pants is white." } ], "question": "What is the color of the pants?", "question_id": "sealvqa_vaw_1", "search": 1, "target_instances": [ { "bbox": [ 421.6666666666667, 115, 17.666666666666686, 32.66666666666666 ], "instance_id": "2409126013", "name": "pants" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the pants?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The material of the wheels is wooden." }
{ "answer": "The material of the wheels is wooden.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2383494.jpg", "index": "sealvqa_vaw_2", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the material of the wheels?" }, { "from": "gpt", "value": "The material of the wheels is wooden." } ], "question": "What is the material of the wheels?", "question_id": "sealvqa_vaw_2", "search": 1, "target_instances": [ { "bbox": [ 332.5, 115, 20.166666666666686, 93.5 ], "instance_id": "2383494000", "name": "wheels" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the material of the wheels?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "No, the pose of the zebras is not running." }
{ "answer": "No, the pose of the zebras is not running.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/150488.jpg", "index": "sealvqa_vaw_3", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the pose of the zebras running?" }, { "from": "gpt", "value": "No, the pose of the zebras is not running." } ], "question": "Is the pose of the zebras running?", "question_id": "sealvqa_vaw_3", "search": 1, "target_instances": [ { "bbox": [ 276.48, 320.85333333333335, 99.98666666666662, 77.79999999999995 ], "instance_id": "150488004", "name": "zebras" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the pose of the zebras running?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_general
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the feathers is gray." }
{ "answer": "The color of the feathers is gray.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2391278.jpg", "index": "sealvqa_vaw_4", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the feathers?" }, { "from": "gpt", "value": "The color of the feathers is gray." } ], "question": "What is the color of the feathers?", "question_id": "sealvqa_vaw_4", "search": 1, "target_instances": [ { "bbox": [ 180.83333333333334, 264.1666666666667, 64.33333333333331, 22.66666666666663 ], "instance_id": "2391278003", "name": "feathers" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the feathers?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the basket is red." }
{ "answer": "The color of the basket is red.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2375917.jpg", "index": "sealvqa_vaw_5", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the basket?" }, { "from": "gpt", "value": "The color of the basket is red." } ], "question": "What is the color of the basket?", "question_id": "sealvqa_vaw_5", "search": 1, "target_instances": [ { "bbox": [ 417.5, 97.5, 73.5, 67.66666666666666 ], "instance_id": "2375917004", "name": "basket" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the basket?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the kite is red." }
{ "answer": "The color of the kite is red.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2415879.jpg", "index": "sealvqa_vaw_6", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the kite?" }, { "from": "gpt", "value": "The color of the kite is red." } ], "question": "What is the color of the kite?", "question_id": "sealvqa_vaw_6", "search": 1, "target_instances": [ { "bbox": [ 268.125, 215, 74.125, 107.875 ], "instance_id": "2415879003", "name": "kite" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the kite?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the cabinets is white." }
{ "answer": "The color of the cabinets is white.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2320937.jpg", "index": "sealvqa_vaw_7", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the cabinets?" }, { "from": "gpt", "value": "The color of the cabinets is white." } ], "question": "What is the color of the cabinets?", "question_id": "sealvqa_vaw_7", "search": 1, "target_instances": [ { "bbox": [ 144.16666666666666, 217.5, 52.666666666666686, 58.5 ], "instance_id": "2320937006", "name": "cabinets" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the cabinets?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "No, the opaqeness of the cup is not transparent." }
{ "answer": "No, the opaqeness of the cup is not transparent.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2411637.jpg", "index": "sealvqa_vaw_8", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the opaqeness of the cup transparent?" }, { "from": "gpt", "value": "No, the opaqeness of the cup is not transparent." } ], "question": "Is the opaqeness of the cup transparent?", "question_id": "sealvqa_vaw_8", "search": 1, "target_instances": [ { "bbox": [ 179.16666666666666, 100, 22.666666666666686, 30.166666666666686 ], "instance_id": "2411637001", "name": "cup" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the opaqeness of the cup transparent?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_general
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the shirt is red." }
{ "answer": "The color of the shirt is red.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2370229.jpg", "index": "sealvqa_vaw_9", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the shirt?" }, { "from": "gpt", "value": "The color of the shirt is red." } ], "question": "What is the color of the shirt?", "question_id": "sealvqa_vaw_9", "search": 1, "target_instances": [ { "bbox": [ 160, 137, 49, 42 ], "instance_id": "2370229005", "name": "shirt" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the shirt?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "No, the color of the design is not black." }
{ "answer": "No, the color of the design is not black.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2381719.jpg", "index": "sealvqa_vaw_10", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the design black?" }, { "from": "gpt", "value": "No, the color of the design is not black." } ], "question": "Is the color of the design black?", "question_id": "sealvqa_vaw_10", "search": 1, "target_instances": [ { "bbox": [ 341.6666666666667, 174.16666666666666, 76, 46.00000000000003 ], "instance_id": "2381719002", "name": "design" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the color of the design black?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "No, the color of the stars is not orange." }
{ "answer": "No, the color of the stars is not orange.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2389715.jpg", "index": "sealvqa_vaw_11", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the stars orange?" }, { "from": "gpt", "value": "No, the color of the stars is not orange." } ], "question": "Is the color of the stars orange?", "question_id": "sealvqa_vaw_11", "search": 1, "target_instances": [ { "bbox": [ 44.25, 186.375, 53.5, 20.125 ], "instance_id": "2389715002", "name": "stars" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the color of the stars orange?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "Yes, the color of the blouse is green." }
{ "answer": "Yes, the color of the blouse is green.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2372007.jpg", "index": "sealvqa_vaw_12", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the blouse green?" }, { "from": "gpt", "value": "Yes, the color of the blouse is green." } ], "question": "Is the color of the blouse green?", "question_id": "sealvqa_vaw_12", "search": 1, "target_instances": [ { "bbox": [ 162.5, 299.1666666666667, 36.83333333333334, 81.83333333333331 ], "instance_id": "2372007003", "name": "blouse" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the color of the blouse green?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the building is blue." }
{ "answer": "The color of the building is blue.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2383501.jpg", "index": "sealvqa_vaw_13", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the building?" }, { "from": "gpt", "value": "The color of the building is blue." } ], "question": "What is the color of the building?", "question_id": "sealvqa_vaw_13", "search": 1, "target_instances": [ { "bbox": [ 403.3333333333333, 30.833333333333336, 76, 122.66666666666666 ], "instance_id": "2383501004", "name": "building" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the building?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "Yes, the color of the grass is green." }
{ "answer": "Yes, the color of the grass is green.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2324713.jpg", "index": "sealvqa_vaw_14", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the grass green?" }, { "from": "gpt", "value": "Yes, the color of the grass is green." } ], "question": "Is the color of the grass green?", "question_id": "sealvqa_vaw_14", "search": 1, "target_instances": [ { "bbox": [ 400.8333333333333, 241.66666666666666, 96.00000000000006, 57.66666666666666 ], "instance_id": "2324713004", "name": "grass" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the color of the grass green?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The face expression of the woman is smiling." }
{ "answer": "The face expression of the woman is smiling.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2408270.jpg", "index": "sealvqa_vaw_15", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the face expression of the woman?" }, { "from": "gpt", "value": "The face expression of the woman is smiling." } ], "question": "What is the face expression of the woman?", "question_id": "sealvqa_vaw_15", "search": 1, "target_instances": [ { "bbox": [ 242, 168, 51, 86 ], "instance_id": "2408270002", "name": "woman" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the face expression of the woman?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the line is yellow." }
{ "answer": "The color of the line is yellow.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/4508.jpg", "index": "sealvqa_vaw_16", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the line?" }, { "from": "gpt", "value": "The color of the line is yellow." } ], "question": "What is the color of the line?", "question_id": "sealvqa_vaw_16", "search": 1, "target_instances": [ { "bbox": [ 249, 213.5, 50, 73.5 ], "instance_id": "4508001", "name": "line" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the line?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the word is yellow." }
{ "answer": "The color of the word is yellow.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2406739.jpg", "index": "sealvqa_vaw_17", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the word?" }, { "from": "gpt", "value": "The color of the word is yellow." } ], "question": "What is the color of the word?", "question_id": "sealvqa_vaw_17", "search": 1, "target_instances": [ { "bbox": [ 127.5, 281.06666666666666, 39.53333333333333, 45.19999999999999 ], "instance_id": "2406739001", "name": "word" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the word?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the uniform is blue." }
{ "answer": "The color of the uniform is blue.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2319880.jpg", "index": "sealvqa_vaw_18", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the uniform?" }, { "from": "gpt", "value": "The color of the uniform is blue." } ], "question": "What is the color of the uniform?", "question_id": "sealvqa_vaw_18", "search": 1, "target_instances": [ { "bbox": [ 308, 104, 23, 44 ], "instance_id": "2319880004", "name": "uniform" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the uniform?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the shoes is red." }
{ "answer": "The color of the shoes is red.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2406860.jpg", "index": "sealvqa_vaw_19", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the shoes?" }, { "from": "gpt", "value": "The color of the shoes is red." } ], "question": "What is the color of the shoes?", "question_id": "sealvqa_vaw_19", "search": 1, "target_instances": [ { "bbox": [ 28.333333333333332, 200.83333333333334, 171, 41 ], "instance_id": "2406860003", "name": "shoes" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the shoes?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the lighthouse is red." }
{ "answer": "The color of the lighthouse is red.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2395045.jpg", "index": "sealvqa_vaw_20", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the lighthouse?" }, { "from": "gpt", "value": "The color of the lighthouse is red." } ], "question": "What is the color of the lighthouse?", "question_id": "sealvqa_vaw_20", "search": 1, "target_instances": [ { "bbox": [ 233.33333333333334, 47.5, 38.49999999999997, 76 ], "instance_id": "2395045002", "name": "lighthouse" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the lighthouse?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The material of the spoon is stainless steel." }
{ "answer": "The material of the spoon is stainless steel.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2372786.jpg", "index": "sealvqa_vaw_21", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the material of the spoon?" }, { "from": "gpt", "value": "The material of the spoon is stainless steel." } ], "question": "What is the material of the spoon?", "question_id": "sealvqa_vaw_21", "search": 1, "target_instances": [ { "bbox": [ 102, 352, 97, 67 ], "instance_id": "2372786015", "name": "spoon" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the material of the spoon?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "Yes, the color of the sign is blue." }
{ "answer": "Yes, the color of the sign is blue.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2393730.jpg", "index": "sealvqa_vaw_22", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the sign blue?" }, { "from": "gpt", "value": "Yes, the color of the sign is blue." } ], "question": "Is the color of the sign blue?", "question_id": "sealvqa_vaw_22", "search": 1, "target_instances": [ { "bbox": [ 4, 106, 47, 68 ], "instance_id": "2393730003", "name": "sign" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the color of the sign blue?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
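Note that the `bbox` values in `extra_info.target_instances` are absolute pixels in `[x, y, width, height]` order (`bbox_format: absolute_xywh`), whereas `image_zoom_in_tool` takes `bbox_2d` as corner coordinates `[x1, y1, x2, y2]`. A minimal Python sketch of the conversion, worked on the sign box from the row above; the helper name is hypothetical and not part of the dataset:

```python
def xywh_to_xyxy(box):
    """Convert an absolute [x, y, width, height] box to [x1, y1, x2, y2]."""
    x, y, w, h = box
    return [x, y, x + w, y + h]

# Target instance from the row above: the blue sign.
sign_xywh = [4, 106, 47, 68]
bbox_2d = xywh_to_xyxy(sign_xywh)  # -> [4, 106, 51, 174]

# Payload in the shape the system prompt expects inside <tool_call> tags.
tool_call = {
    "name": "image_zoom_in_tool",
    "arguments": {"bbox_2d": bbox_2d, "label": "the blue sign"},
}
```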
sealvqa_vaw
{ "ground_truth": "The material of the table is wooden." }
{ "answer": "The material of the table is wooden.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2319270.jpg", "index": "sealvqa_vaw_23", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the material of the table?" }, { "from": "gpt", "value": "The material of the table is wooden." } ], "question": "What is the material of the table?", "question_id": "sealvqa_vaw_23", "search": 1, "target_instances": [ { "bbox": [ 1.67, 371.2966666666666, 75.59333333333333, 73.36666666666673 ], "instance_id": "2319270010", "name": "table" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the material of the table?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "Yes, the color of the fur is black." }
{ "answer": "Yes, the color of the fur is black.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2404579.jpg", "index": "sealvqa_vaw_24", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the fur black?" }, { "from": "gpt", "value": "Yes, the color of the fur is black." } ], "question": "Is the color of the fur black?", "question_id": "sealvqa_vaw_24", "search": 1, "target_instances": [ { "bbox": [ 82.5, 77.5, 72.66666666666669, 80.16666666666669 ], "instance_id": "2404579000", "name": "fur" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the color of the fur black?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the plane is white." }
{ "answer": "The color of the plane is white.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2398589.jpg", "index": "sealvqa_vaw_25", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the plane?" }, { "from": "gpt", "value": "The color of the plane is white." } ], "question": "What is the color of the plane?", "question_id": "sealvqa_vaw_25", "search": 1, "target_instances": [ { "bbox": [ 223.33333333333331, 162.5, 51.83333333333337, 26.833333333333314 ], "instance_id": "2398589000", "name": "plane" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the plane?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "No, the pose of the propeller is not turning." }
{ "answer": "No, the pose of the propeller is not turning.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2375066.jpg", "index": "sealvqa_vaw_26", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the pose of the propeller turning?" }, { "from": "gpt", "value": "No, the pose of the propeller is not turning." } ], "question": "Is the pose of the propeller turning?", "question_id": "sealvqa_vaw_26", "search": 1, "target_instances": [ { "bbox": [ 394.1666666666667, 126.66666666666667, 55.16666666666663, 51.83333333333333 ], "instance_id": "2375066006", "name": "propeller" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the pose of the propeller turning?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_general
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "No, the color of the painting is not orange." }
{ "answer": "No, the color of the painting is not orange.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2399355.jpg", "index": "sealvqa_vaw_27", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the painting orange?" }, { "from": "gpt", "value": "No, the color of the painting is not orange." } ], "question": "Is the color of the painting orange?", "question_id": "sealvqa_vaw_27", "search": 1, "target_instances": [ { "bbox": [ 113, 16, 56, 59 ], "instance_id": "2399355000", "name": "painting" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the color of the painting orange?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "No, the color of the frame is not cream colored." }
{ "answer": "No, the color of the frame is not cream colored.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2318829.jpg", "index": "sealvqa_vaw_28", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the frame cream colored?" }, { "from": "gpt", "value": "No, the color of the frame is not cream colored." } ], "question": "Is the color of the frame cream colored?", "question_id": "sealvqa_vaw_28", "search": 1, "target_instances": [ { "bbox": [ 39.16666666666667, 143.33333333333334, 66.83333333333333, 89.33333333333331 ], "instance_id": "2318829004", "name": "frame" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the color of the frame cream colored?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "Yes, the color of the window is white." }
{ "answer": "Yes, the color of the window is white.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/3128.jpg", "index": "sealvqa_vaw_29", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the window white?" }, { "from": "gpt", "value": "Yes, the color of the window is white." } ], "question": "Is the color of the window white?", "question_id": "sealvqa_vaw_29", "search": 1, "target_instances": [ { "bbox": [ 296, 419, 47, 141 ], "instance_id": "3128001", "name": "window" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the color of the window white?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The material of the bat is metal." }
{ "answer": "The material of the bat is metal.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2369622.jpg", "index": "sealvqa_vaw_30", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the material of the bat?" }, { "from": "gpt", "value": "The material of the bat is metal." } ], "question": "What is the material of the bat?", "question_id": "sealvqa_vaw_30", "search": 1, "target_instances": [ { "bbox": [ 373.33333333333337, 155.83333333333331, 33.49999999999994, 98.50000000000003 ], "instance_id": "2369622005", "name": "bat" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the material of the bat?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the van is red." }
{ "answer": "The color of the van is red.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2415548.jpg", "index": "sealvqa_vaw_31", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the van?" }, { "from": "gpt", "value": "The color of the van is red." } ], "question": "What is the color of the van?", "question_id": "sealvqa_vaw_31", "search": 1, "target_instances": [ { "bbox": [ 140, 62.5, 96, 96.83333333333331 ], "instance_id": "2415548001", "name": "van" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the van?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The material of the pillow is fluffy." }
{ "answer": "The material of the pillow is fluffy.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2396597.jpg", "index": "sealvqa_vaw_32", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the material of the pillow?" }, { "from": "gpt", "value": "The material of the pillow is fluffy." } ], "question": "What is the material of the pillow?", "question_id": "sealvqa_vaw_32", "search": 1, "target_instances": [ { "bbox": [ 100, 146.66666666666666, 105.16666666666666, 67.66666666666669 ], "instance_id": "2396597001", "name": "pillow" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the material of the pillow?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
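Each prompt asks for a strict three-tag reply. Only the format below is prescribed by the system prompt; the reasoning text and the rounded crop box are invented for illustration, using the pillow row above:

```
<think>The pillow region is small; zoom in to inspect its texture.</think>
<tool_call>
{"name": "image_zoom_in_tool", "arguments": {"bbox_2d": [100, 147, 205, 214], "label": "the pillow"}}
</tool_call>
<answer>The material of the pillow is fluffy.</answer>
```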
sealvqa_vaw
{ "ground_truth": "Yes, the color of the text is white." }
{ "answer": "Yes, the color of the text is white.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2397023.jpg", "index": "sealvqa_vaw_33", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the text white?" }, { "from": "gpt", "value": "Yes, the color of the text is white." } ], "question": "Is the color of the text white?", "question_id": "sealvqa_vaw_33", "search": 1, "target_instances": [ { "bbox": [ 179.20000000000002, 190.93333333333334, 135.4, 50.06666666666666 ], "instance_id": "2397023001", "name": "text" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the color of the text white?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the speaker is gray." }
{ "answer": "The color of the speaker is gray.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2391687.jpg", "index": "sealvqa_vaw_34", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the speaker?" }, { "from": "gpt", "value": "The color of the speaker is gray." } ], "question": "What is the color of the speaker?", "question_id": "sealvqa_vaw_34", "search": 1, "target_instances": [ { "bbox": [ 66, 57, 67, 14 ], "instance_id": "2391687005", "name": "speaker" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the speaker?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the train is silver." }
{ "answer": "The color of the train is silver.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2381611.jpg", "index": "sealvqa_vaw_35", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the train?" }, { "from": "gpt", "value": "The color of the train is silver." } ], "question": "What is the color of the train?", "question_id": "sealvqa_vaw_35", "search": 1, "target_instances": [ { "bbox": [ 56.666666666666664, 85, 113.5, 61 ], "instance_id": "2381611004", "name": "train" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the train?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "No, the color of the roof is not brown." }
{ "answer": "No, the color of the roof is not brown.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2326987.jpg", "index": "sealvqa_vaw_36", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the roof brown?" }, { "from": "gpt", "value": "No, the color of the roof is not brown." } ], "question": "Is the color of the roof brown?", "question_id": "sealvqa_vaw_36", "search": 1, "target_instances": [ { "bbox": [ 256.41, 237.54000000000002, 29.859999999999957, 15.430000000000007 ], "instance_id": "2326987003", "name": "roof" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the color of the roof brown?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the box is white." }
{ "answer": "The color of the box is white.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2355374.jpg", "index": "sealvqa_vaw_37", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the box?" }, { "from": "gpt", "value": "The color of the box is white." } ], "question": "What is the color of the box?", "question_id": "sealvqa_vaw_37", "search": 1, "target_instances": [ { "bbox": [ 345, 429, 30, 69 ], "instance_id": "2355374008", "name": "box" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the box?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "No, the material of the doll is not fluffy." }
{ "answer": "No, the material of the doll is not fluffy.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2321166.jpg", "index": "sealvqa_vaw_38", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the material of the doll fluffy?" }, { "from": "gpt", "value": "No, the material of the doll is not fluffy." } ], "question": "Is the material of the doll fluffy?", "question_id": "sealvqa_vaw_38", "search": 1, "target_instances": [ { "bbox": [ 405, 203.33333333333334, 93.5, 106.00000000000003 ], "instance_id": "2321166001", "name": "doll" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the material of the doll fluffy?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the cloud is dark gray." }
{ "answer": "The color of the cloud is dark gray.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2390143.jpg", "index": "sealvqa_vaw_39", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the cloud?" }, { "from": "gpt", "value": "The color of the cloud is dark gray." } ], "question": "What is the color of the cloud?", "question_id": "sealvqa_vaw_39", "search": 1, "target_instances": [ { "bbox": [ 380, 152, 111, 85 ], "instance_id": "2390143006", "name": "cloud" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the cloud?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the shorts is black." }
{ "answer": "The color of the shorts is black.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2379103.jpg", "index": "sealvqa_vaw_40", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the shorts?" }, { "from": "gpt", "value": "The color of the shorts is black." } ], "question": "What is the color of the shorts?", "question_id": "sealvqa_vaw_40", "search": 1, "target_instances": [ { "bbox": [ 130, 149.16666666666666, 81, 66.00000000000003 ], "instance_id": "2379103003", "name": "shorts" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the shorts?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the pants is gray." }
{ "answer": "The color of the pants is gray.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2397139.jpg", "index": "sealvqa_vaw_41", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the pants?" }, { "from": "gpt", "value": "The color of the pants is gray." } ], "question": "What is the color of the pants?", "question_id": "sealvqa_vaw_41", "search": 1, "target_instances": [ { "bbox": [ 187, 184, 51, 70 ], "instance_id": "2397139006", "name": "pants" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the pants?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the towel is white." }
{ "answer": "The color of the towel is white.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2414657.jpg", "index": "sealvqa_vaw_42", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the towel?" }, { "from": "gpt", "value": "The color of the towel is white." } ], "question": "What is the color of the towel?", "question_id": "sealvqa_vaw_42", "search": 1, "target_instances": [ { "bbox": [ 138.195, 196.47, 91.465, 87.025 ], "instance_id": "2414657004", "name": "towel" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the towel?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the boat is black." }
{ "answer": "The color of the boat is black.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2399497.jpg", "index": "sealvqa_vaw_43", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the boat?" }, { "from": "gpt", "value": "The color of the boat is black." } ], "question": "What is the color of the boat?", "question_id": "sealvqa_vaw_43", "search": 1, "target_instances": [ { "bbox": [ 182, 198, 109, 25 ], "instance_id": "2399497004", "name": "boat" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the boat?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "No, the color of the branches is not light brown." }
{ "answer": "No, the color of the branches is not light brown.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2377890.jpg", "index": "sealvqa_vaw_44", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the branches light brown?" }, { "from": "gpt", "value": "No, the color of the branches is not light brown." } ], "question": "Is the color of the branches light brown?", "question_id": "sealvqa_vaw_44", "search": 1, "target_instances": [ { "bbox": [ 66.66666666666667, 4.166666666666667, 151.83333333333331, 56 ], "instance_id": "2377890001", "name": "branches" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the color of the branches light brown?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the shoes is black." }
{ "answer": "The color of the shoes is black.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2371600.jpg", "index": "sealvqa_vaw_45", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the shoes?" }, { "from": "gpt", "value": "The color of the shoes is black." } ], "question": "What is the color of the shoes?", "question_id": "sealvqa_vaw_45", "search": 1, "target_instances": [ { "bbox": [ 382.5, 282.5, 61, 36 ], "instance_id": "2371600003", "name": "shoes" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the shoes?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the wing is white." }
{ "answer": "The color of the wing is white.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2383438.jpg", "index": "sealvqa_vaw_46", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the wing?" }, { "from": "gpt", "value": "The color of the wing is white." } ], "question": "What is the color of the wing?", "question_id": "sealvqa_vaw_46", "search": 1, "target_instances": [ { "bbox": [ 12, 257, 276, 17 ], "instance_id": "2383438005", "name": "wing" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the wing?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the jacket is blue." }
{ "answer": "The color of the jacket is blue.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2376230.jpg", "index": "sealvqa_vaw_47", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the jacket?" }, { "from": "gpt", "value": "The color of the jacket is blue." } ], "question": "What is the color of the jacket?", "question_id": "sealvqa_vaw_47", "search": 1, "target_instances": [ { "bbox": [ 284, 123, 54, 134 ], "instance_id": "2376230010", "name": "jacket" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the jacket?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the pants is black." }
{ "answer": "The color of the pants is black.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2414709.jpg", "index": "sealvqa_vaw_48", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the pants?" }, { "from": "gpt", "value": "The color of the pants is black." } ], "question": "What is the color of the pants?", "question_id": "sealvqa_vaw_48", "search": 1, "target_instances": [ { "bbox": [ 304.16666666666663, 239.16666666666666, 44.33333333333337, 98.50000000000003 ], "instance_id": "2414709002", "name": "pants" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the pants?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
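Each user turn imposes the same strict output contract: a <think> block, then an optional <tool_call>, then an <answer>. The official checker is not part of this dump, so the regex below is only an assumed way of validating a model turn against that contract:

```python
import re

# A turn must open with <think>; it may then either call the tool
# (and stop, awaiting the tool result) or give a final <answer>.
TURN_RE = re.compile(
    r"^<think>(?P<think>.*?)</think>\s*"
    r"(?:<tool_call>(?P<tool_call>.*?)</tool_call>\s*)?"
    r"(?:<answer>(?P<answer>.*?)</answer>)?\s*$",
    re.DOTALL,
)

def parse_turn(text):
    m = TURN_RE.match(text.strip())
    if not m or (m["tool_call"] is None and m["answer"] is None):
        return None  # malformed: neither a tool call nor an answer
    return m.groupdict()
```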
sealvqa_vaw
{ "ground_truth": "The color of the scissors is orange." }
{ "answer": "The color of the scissors is orange.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2360507.jpg", "index": "sealvqa_vaw_49", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the scissors?" }, { "from": "gpt", "value": "The color of the scissors is orange." } ], "question": "What is the color of the scissors?", "question_id": "sealvqa_vaw_49", "search": 1, "target_instances": [ { "bbox": [ 154.375, 191.25, 74.74999999999997, 39.125 ], "instance_id": "2360507005", "name": "scissors" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the scissors?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the jacket is black." }
{ "answer": "The color of the jacket is black.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/744.jpg", "index": "sealvqa_vaw_50", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the jacket?" }, { "from": "gpt", "value": "The color of the jacket is black." } ], "question": "What is the color of the jacket?", "question_id": "sealvqa_vaw_50", "search": 1, "target_instances": [ { "bbox": [ 599, 354, 78, 112 ], "instance_id": "744002", "name": "jacket" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the jacket?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the lettering is black." }
{ "answer": "The color of the lettering is black.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2339091.jpg", "index": "sealvqa_vaw_51", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the lettering?" }, { "from": "gpt", "value": "The color of the lettering is black." } ], "question": "What is the color of the lettering?", "question_id": "sealvqa_vaw_51", "search": 1, "target_instances": [ { "bbox": [ 202.5, 81.66666666666667, 170.16666666666663, 21 ], "instance_id": "2339091012", "name": "lettering" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the lettering?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "Yes, the color of the line is white." }
{ "answer": "Yes, the color of the line is white.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2385831.jpg", "index": "sealvqa_vaw_52", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the line white?" }, { "from": "gpt", "value": "Yes, the color of the line is white." } ], "question": "Is the color of the line white?", "question_id": "sealvqa_vaw_52", "search": 1, "target_instances": [ { "bbox": [ 255.855, 320.235, 77.035, 11.544999999999959 ], "instance_id": "2385831006", "name": "line" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the color of the line white?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the pole is gray." }
{ "answer": "The color of the pole is gray.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2394494.jpg", "index": "sealvqa_vaw_53", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the pole?" }, { "from": "gpt", "value": "The color of the pole is gray." } ], "question": "What is the color of the pole?", "question_id": "sealvqa_vaw_53", "search": 1, "target_instances": [ { "bbox": [ 167.66, 148.84666666666666, 20.366666666666674, 112.77333333333334 ], "instance_id": "2394494001", "name": "pole" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the pole?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
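The implementation behind image_zoom_in_tool is likewise not included; going only by its description (crop the image to a bounding box), a minimal stand-in might look like the following, assuming Pillow and corner-format boxes. The image path is taken from the "pole" row above:

```python
from PIL import Image

def image_zoom_in(image_path, bbox_2d, label=None):
    # Crop the [x1, y1, x2, y2] region; 'label' is only metadata
    # in the schema, so it is accepted and ignored here.
    x1, y1, x2, y2 = bbox_2d
    return Image.open(image_path).crop((int(x1), int(y1), int(x2), int(y2)))

# The "pole" box [167.66, 148.85, w=20.37, h=112.77] in corner form:
crop = image_zoom_in("gqa/images/2394494.jpg", [168, 149, 188, 262], "pole")
crop.save("pole_zoom.png")
```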
sealvqa_vaw
{ "ground_truth": "The color of the jacket is yellow." }
{ "answer": "The color of the jacket is yellow.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2369892.jpg", "index": "sealvqa_vaw_54", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the jacket?" }, { "from": "gpt", "value": "The color of the jacket is yellow." } ], "question": "What is the color of the jacket?", "question_id": "sealvqa_vaw_54", "search": 1, "target_instances": [ { "bbox": [ 170, 132.5, 62.66666666666666, 52.666666666666686 ], "instance_id": "2369892001", "name": "jacket" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the jacket?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "No, the pose of the deer is not walking." }
{ "answer": "No, the pose of the deer is not walking.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2417170.jpg", "index": "sealvqa_vaw_55", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the pose of the deer walking?" }, { "from": "gpt", "value": "No, the pose of the deer is not walking." } ], "question": "Is the pose of the deer walking?", "question_id": "sealvqa_vaw_55", "search": 1, "target_instances": [ { "bbox": [ 253, 20, 56, 57 ], "instance_id": "2417170005", "name": "deer" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the pose of the deer walking?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_general
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the statue is golden." }
{ "answer": "The color of the statue is golden.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2377841.jpg", "index": "sealvqa_vaw_56", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the statue?" }, { "from": "gpt", "value": "The color of the statue is golden." } ], "question": "What is the color of the statue?", "question_id": "sealvqa_vaw_56", "search": 1, "target_instances": [ { "bbox": [ 254.19, 204.795, 38.74000000000001, 45.95500000000001 ], "instance_id": "2377841001", "name": "statue" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the statue?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "Yes, the material of the track is metal." }
{ "answer": "Yes, the material of the track is metal.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2396282.jpg", "index": "sealvqa_vaw_57", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the material of the track metal?" }, { "from": "gpt", "value": "Yes, the material of the track is metal." } ], "question": "Is the material of the track metal?", "question_id": "sealvqa_vaw_57", "search": 1, "target_instances": [ { "bbox": [ 355, 267.5, 68.5, 66 ], "instance_id": "2396282002", "name": "track" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the material of the track metal?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the sign is white." }
{ "answer": "The color of the sign is white.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/3396.jpg", "index": "sealvqa_vaw_58", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the sign?" }, { "from": "gpt", "value": "The color of the sign is white." } ], "question": "What is the color of the sign?", "question_id": "sealvqa_vaw_58", "search": 1, "target_instances": [ { "bbox": [ 681.3333333333334, 218.66666666666666, 59.66666666666663, 52.99999999999997 ], "instance_id": "3396002", "name": "sign" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the sign?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the platter is black." }
{ "answer": "The color of the platter is black.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/1524.jpg", "index": "sealvqa_vaw_59", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the platter?" }, { "from": "gpt", "value": "The color of the platter is black." } ], "question": "What is the color of the platter?", "question_id": "sealvqa_vaw_59", "search": 1, "target_instances": [ { "bbox": [ 329, 41, 127, 72 ], "instance_id": "1524001", "name": "platter" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the platter?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
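reward_model carries a single ground_truth string per row; how the visual_toolbox_v2 environment actually scores a rollout is not shown anywhere in this dump. A deliberately naive placeholder, normalized exact or substring match, could look like this (an assumption, not the real metric):

```python
def naive_reward(answer: str, ground_truth: str) -> float:
    # Placeholder scoring only: the real visual_toolbox_v2 reward
    # function is not included in this dump.
    a, g = answer.strip().lower(), ground_truth.strip().lower()
    return 1.0 if (a == g or g in a) else 0.0

print(naive_reward("The color of the platter is black.",
                   "The color of the platter is black."))  # 1.0
```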
sealvqa_vaw
{ "ground_truth": "The color of the sky is blue." }
{ "answer": "The color of the sky is blue.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2412093.jpg", "index": "sealvqa_vaw_60", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the sky?" }, { "from": "gpt", "value": "The color of the sky is blue." } ], "question": "What is the color of the sky?", "question_id": "sealvqa_vaw_60", "search": 1, "target_instances": [ { "bbox": [ 250.83333333333334, 64.16666666666666, 65.16666666666666, 110.16666666666669 ], "instance_id": "2412093000", "name": "sky" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the sky?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "Yes, the color of the stripe is black." }
{ "answer": "Yes, the color of the stripe is black.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2317654.jpg", "index": "sealvqa_vaw_61", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the stripe black?" }, { "from": "gpt", "value": "Yes, the color of the stripe is black." } ], "question": "Is the color of the stripe black?", "question_id": "sealvqa_vaw_61", "search": 1, "target_instances": [ { "bbox": [ 76.80000000000001, 218.24, 63.079999999999984, 57.31999999999999 ], "instance_id": "2317654001", "name": "stripe" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the color of the stripe black?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the logo is green." }
{ "answer": "The color of the logo is green.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2379090.jpg", "index": "sealvqa_vaw_62", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the logo?" }, { "from": "gpt", "value": "The color of the logo is green." } ], "question": "What is the color of the logo?", "question_id": "sealvqa_vaw_62", "search": 1, "target_instances": [ { "bbox": [ 82.5, 130, 92.66666666666666, 86.83333333333331 ], "instance_id": "2379090000", "name": "logo" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the logo?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "Yes, the material of the skyscraper is glass." }
{ "answer": "Yes, the material of the skyscraper is glass.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2388436.jpg", "index": "sealvqa_vaw_63", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the material of the skyscraper glass?" }, { "from": "gpt", "value": "Yes, the material of the skyscraper is glass." } ], "question": "Is the material of the skyscraper glass?", "question_id": "sealvqa_vaw_63", "search": 1, "target_instances": [ { "bbox": [ 237, 0, 68, 69 ], "instance_id": "2388436004", "name": "skyscraper" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the material of the skyscraper glass?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "Yes, the color of the skirt is blue." }
{ "answer": "Yes, the color of the skirt is blue.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2386450.jpg", "index": "sealvqa_vaw_64", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the color of the skirt blue?" }, { "from": "gpt", "value": "Yes, the color of the skirt is blue." } ], "question": "Is the color of the skirt blue?", "question_id": "sealvqa_vaw_64", "search": 1, "target_instances": [ { "bbox": [ 56, 210, 101, 77 ], "instance_id": "2386450002", "name": "skirt" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the color of the skirt blue?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the gloves is black." }
{ "answer": "The color of the gloves is black.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2366589.jpg", "index": "sealvqa_vaw_65", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the gloves?" }, { "from": "gpt", "value": "The color of the gloves is black." } ], "question": "What is the color of the gloves?", "question_id": "sealvqa_vaw_65", "search": 1, "target_instances": [ { "bbox": [ 205.83333333333334, 228.33333333333334, 31.833333333333314, 40.99999999999997 ], "instance_id": "2366589004", "name": "gloves" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the gloves?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "Yes, the material of the trash can is stainless steel." }
{ "answer": "Yes, the material of the trash can is stainless steel.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2413414.jpg", "index": "sealvqa_vaw_66", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the material of the trash can stainless steel?" }, { "from": "gpt", "value": "Yes, the material of the trash can is stainless steel." } ], "question": "Is the material of the trash can stainless steel?", "question_id": "sealvqa_vaw_66", "search": 1, "target_instances": [ { "bbox": [ 321, 154, 41, 77 ], "instance_id": "2413414004", "name": "trash can" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the material of the trash can stainless steel?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "The color of the cloud is white." }
{ "answer": "The color of the cloud is white.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2371946.jpg", "index": "sealvqa_vaw_67", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the cloud?" }, { "from": "gpt", "value": "The color of the cloud is white." } ], "question": "What is the color of the cloud?", "question_id": "sealvqa_vaw_67", "search": 1, "target_instances": [ { "bbox": [ 291.375, 124.95, 23.05000000000001, 87.10000000000001 ], "instance_id": "2371946003", "name": "cloud" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the cloud?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_attribute
visual_toolbox_v2
sealvqa_vaw
{ "ground_truth": "No, the state of the jacket is not open." }
{ "answer": "No, the state of the jacket is not open.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2398772.jpg", "index": "sealvqa_vaw_68", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the state of the jacket open?" }, { "from": "gpt", "value": "No, the state of the jacket is not open." } ], "question": "Is the state of the jacket open?", "question_id": "sealvqa_vaw_68", "search": 1, "target_instances": [ { "bbox": [ 221, 133, 63, 62 ], "instance_id": "2398772002", "name": "jacket" } ] }
[ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the state of the jacket open?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
sealvqa_vaw_general
visual_toolbox_v2
data_source: sealvqa_vaw
reward_model: { "ground_truth": "The color of the mask is blue." }
extra_info: { "answer": "The color of the mask is blue.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2404310.jpg", "index": "sealvqa_vaw_69", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the mask?" }, { "from": "gpt", "value": "The color of the mask is blue." } ], "question": "What is the color of the mask?", "question_id": "sealvqa_vaw_69", "search": 1, "target_instances": [ { "bbox": [ 136.38333333333333, 194.83333333333334, 46.09, 36.62666666666664 ], "instance_id": "2404310001", "name": "mask" } ] }
prompt: [ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the mask?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
ability: sealvqa_vaw_attribute
env_name: visual_toolbox_v2
data_source: sealvqa_vaw
reward_model: { "ground_truth": "The color of the lines is yellow." }
extra_info: { "answer": "The color of the lines is yellow.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2362978.jpg", "index": "sealvqa_vaw_70", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the lines?" }, { "from": "gpt", "value": "The color of the lines is yellow." } ], "question": "What is the color of the lines?", "question_id": "sealvqa_vaw_70", "search": 1, "target_instances": [ { "bbox": [ 310, 312.5, 71, 21.833333333333314 ], "instance_id": "2362978002", "name": "lines" } ] }
prompt: [ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the lines?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
ability: sealvqa_vaw_attribute
env_name: visual_toolbox_v2
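Every user turn in these prompts demands the strict layout <think>...</think> <tool_call>...</tool_call> <answer>...</answer>. A small sketch of how a consumer might pull the JSON out of the <tool_call> tags is shown below; the regex-based approach is an assumption, not part of the dataset. Note also that the quoted schema lists "bbox" under required even though the only declared property is bbox_2d, so a tolerant consumer should key on bbox_2d, as the worked example call in the system prompt does:

```python
import json
import re

# Capture the JSON object wrapped in <tool_call>...</tool_call> tags.
TOOL_CALL_RE = re.compile(r"<tool_call>\s*(\{.*?\})\s*</tool_call>", re.DOTALL)

def parse_tool_calls(response: str):
    """Return every tool-call payload found in a model response."""
    return [json.loads(m) for m in TOOL_CALL_RE.findall(response)]

reply = (
    "<think>The mask region is small, so zoom in first.</think> "
    '<tool_call> {"name": "image_zoom_in_tool", '
    '"arguments": {"bbox_2d": [136, 194, 183, 232], "label": "mask"}} </tool_call>'
)
for call in parse_tool_calls(reply):
    print(call["name"], call["arguments"]["bbox_2d"])
# -> image_zoom_in_tool [136, 194, 183, 232]
```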
data_source: sealvqa_vaw
reward_model: { "ground_truth": "No, the state of the rod is not empty." }
extra_info: { "answer": "No, the state of the rod is not empty.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2320709.jpg", "index": "sealvqa_vaw_71", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nIs the state of the rod empty?" }, { "from": "gpt", "value": "No, the state of the rod is not empty." } ], "question": "Is the state of the rod empty?", "question_id": "sealvqa_vaw_71", "search": 1, "target_instances": [ { "bbox": [ 5, 3.3333333333333335, 211, 46 ], "instance_id": "2320709005", "name": "rod" } ] }
prompt: [ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nIs the state of the rod empty?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
ability: sealvqa_vaw_general
env_name: visual_toolbox_v2
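Each row carries a reward_model.ground_truth string such as the one above, but the preview does not show the scoring rule itself. The snippet below is only a hedged guess at a minimal checker: it extracts the <answer> span and compares it against ground_truth, and both the normalization and the containment test are assumptions rather than the dataset's actual reward function:

```python
import re

def extract_answer(response: str) -> str:
    """Pull the text between <answer> and </answer>; empty string if absent."""
    m = re.search(r"<answer>(.*?)</answer>", response, re.DOTALL)
    return m.group(1).strip() if m else ""

def naive_reward(response: str, ground_truth: str) -> float:
    """Assumed scorer: 1.0 when the normalized answers overlap, else 0.0."""
    pred = extract_answer(response).lower().rstrip(".")
    gold = ground_truth.lower().rstrip(".")
    return 1.0 if pred and (pred in gold or gold in pred) else 0.0

print(naive_reward(
    "<think>...</think> <answer>No, the state of the rod is not empty.</answer>",
    "No, the state of the rod is not empty."))  # -> 1.0
```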
data_source: sealvqa_vaw
reward_model: { "ground_truth": "The color of the deck is red." }
extra_info: { "answer": "The color of the deck is red.", "bbox_format": "absolute_xywh", "bbox_format_description": "Bounding boxes use absolute pixel coordinates in [x, y, width, height] format", "conversation_turns": 2, "data_split": "train", "dataset_type": "vaw_attribute", "has_targets": true, "image_path": "gqa/images/2405871.jpg", "index": "sealvqa_vaw_72", "original_conversations": [ { "from": "human", "value": "<image>\nAdditional visual information to focus on: <object>.\nWhat is the color of the deck?" }, { "from": "gpt", "value": "The color of the deck is red." } ], "question": "What is the color of the deck?", "question_id": "sealvqa_vaw_72", "search": 1, "target_instances": [ { "bbox": [ 322.5, 238.33333333333334, 59.33333333333337, 44.33333333333334 ], "instance_id": "2405871001", "name": "deck" } ] }
prompt: [ { "content": "You are a helpful assistant.\n\n# Tools\nYou may call one or more functions to assist with the user query.\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{\"type\":\"function\",\"function\":{\"name\":\"image_zoom_in_tool\",\"description\":\"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label.\",\"parameters\":{\"type\":\"object\",\"properties\":{\"bbox_2d\":{\"type\":\"array\",\"items\":{\"type\":\"number\"},\"minItems\":4,\"maxItems\":4,\"description\":\"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner.\"},\"label\":{\"type\":\"string\",\"description\":\"The name or label of the object in the specified bounding box (optional).\"}},\"required\":[\"bbox\"]}}}\n</tools>\n\n# How to call a tool\nReturn a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n\n**Example**: \n<tool_call> \n{\"name\": \"image_zoom_in_tool\", \"arguments\": {\"bbox_2d\": [10, 20, 100, 200], \"label\": \"the apple on the desk\"}} \n</tool_call>", "role": "system" }, { "content": "<image>\nWhat is the color of the deck?\nThink first, call **image_zoom_in_tool** if needed, then answer. Format strictly as: <think>...</think> <tool_call>...</tool_call> (if tools needed) <answer>...</answer> ", "role": "user" } ]
ability: sealvqa_vaw_attribute
env_name: visual_toolbox_v2