diff --git "a/spatial_temporal_action_localization/ava.json" "b/spatial_temporal_action_localization/ava.json"
new file mode 100644
--- /dev/null
+++ "b/spatial_temporal_action_localization/ava.json"
@@ -0,0 +1 @@
+[{"video": "-5KQ66BBWC4.mkv", "clip": [879, 942], "conversations": [{"from": "human", "timestamps": 909.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.118, 0.01, 0.889, 0.984]?"}, {"from": "gpt", "value": ["At Now, [0.118, 0.01, 0.889, 0.984]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 905.0s to 908.0s, [0.392, 0.033, 0.556, 0.618]->[0.408, 0.008, 0.586, 0.639]->[0.42, 0.115, 0.616, 0.883]->[0.38, 0.086, 0.693, 0.991]: walk, carry/hold (an object)."]}, {"from": "human", "timestamps": 942.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.103, 0.22, 0.726, 0.97]: martial art, fight/hit (a person); [0.661, 0.461, 0.997, 0.996]: fight/hit (a person), bend/bow (at the waist)."}]}, {"video": "-5KQ66BBWC4.mkv", "clip": [999, 1035], "conversations": [{"from": "human", "timestamps": 1029.0, "value": "What actions is the person in bbox [0.395, 0.09, 0.961, 0.971] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.395, 0.09, 0.961, 0.971]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1028.0s, [0.424, 0.081, 0.952, 0.977]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1035.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.014, 0.057, 0.614, 0.975]: listen to (a person), stand, watch (a person); [0.442, 0.099, 0.943, 0.953]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "-5KQ66BBWC4.mkv", "clip": [1172, 1227], "conversations": [{"from": "human", "timestamps": 1202.0, "value": "Describe the actions of the person at bbox [0.017, 0.111, 0.528, 0.995] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.017, 0.111, 0.528, 0.995]: bend/bow (at the waist), carry/hold (an object).", "At 1201.0s, [0.024, 0.235, 0.764, 0.99]: bend/bow (at the waist), throw.", "From 1199.0s to 1200.0s, [0.027, 0.25, 0.543, 0.994]->[0.055, 0.3, 0.868, 0.972]: bend/bow (at the waist), carry/hold (an object).", "At 1198.0s, [0.067, 0.163, 0.551, 1.0]: bend/bow (at the waist), carry/hold (an object), lift/pick up."]}, {"from": "human", "timestamps": 1227.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.058, 0.685, 0.967]: shovel, bend/bow (at the waist)."}]}, {"video": "-5KQ66BBWC4.mkv", "clip": [1350, 1414], "conversations": [{"from": "human", "timestamps": 1380.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.118, 0.412, 0.375, 0.925]: listen to (a person), sit; [0.445, 0.166, 0.685, 0.524]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1414.0, "value": "What actions did the person at bbox [0.008, 0.172, 0.303, 0.763] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.008, 0.172, 0.303, 0.763]: sit, listen to (a 
person), watch (a person).", "From 1412.0s to 1413.0s, [0.0, 0.174, 0.298, 0.766]->[0.013, 0.171, 0.301, 0.772]: sit, listen to (a person), watch (a person).", "From 1410.0s to 1411.0s, [0.004, 0.171, 0.29, 0.769]->[0.001, 0.172, 0.3, 0.773]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1408.0s to 1409.0s, [0.0, 0.165, 0.303, 0.776]->[0.0, 0.172, 0.297, 0.776]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "-5KQ66BBWC4.mkv", "clip": [1450, 1483], "conversations": [{"from": "human", "timestamps": 1480.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.241, 0.005, 0.709, 0.977]: martial art."}, {"from": "human", "timestamps": 1483.0, "value": "What actions is the person in bbox [0.527, 0.162, 0.984, 0.988] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.527, 0.162, 0.984, 0.988]: martial art.", "From 1475.0s to 1482.0s, [0.168, 0.194, 0.576, 0.998]->[0.202, 0.133, 0.503, 0.981]->[0.188, 0.055, 0.494, 0.977]->[0.31, 0.004, 0.521, 0.982]->[0.389, 0.094, 0.604, 0.869]->[0.241, 0.005, 0.709, 0.977]->[0.065, 0.101, 0.766, 1.0]->[0.538, 0.105, 0.991, 0.986]: martial art."]}]}, {"video": "-5KQ66BBWC4.mkv", "clip": [1546, 1596], "conversations": [{"from": "human", "timestamps": 1576.0, "value": "Describe the actions of the person at bbox [0.128, 0.036, 0.976, 0.969] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.128, 0.036, 0.976, 0.969]: stand, throw.", "From 1574.0s to 1575.0s, [0.134, 0.014, 0.995, 1.0]->[0.139, 0.014, 0.993, 0.974]: stand, throw.", "At 1573.0s, [0.076, 0.017, 0.766, 1.0]: stand, carry/hold (an object), throw."]}, {"from": "human", "timestamps": 1596.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.324, 0.082, 0.544, 0.957]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.681, 0.109, 0.993, 0.991]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "-5KQ66BBWC4.mkv", "clip": [1639, 1673], "conversations": [{"from": "human", "timestamps": 1669.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.013, 0.0, 0.695, 1.0]: listen to (a person), stand, watch (a person); [0.501, 0.05, 0.992, 0.992]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1673.0, "value": "Describe the actions of the person at bbox [0.009, 0.0, 0.646, 0.984] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.009, 0.0, 0.646, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1671.0s to 1672.0s, [0.009, 0.011, 0.663, 0.983]->[0.011, 0.016, 0.659, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "-5KQ66BBWC4.mkv", "clip": [1588, 1726], "conversations": [{"from": "human", "timestamps": 1618.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.109, 0.046, 0.709, 0.993]: watch (a person), martial art, fight/hit (a person); [0.641, 0.168, 0.997, 0.99]: carry/hold (an object), stand, fight/hit (a person), watch (a person)."}, {"from": "human", "timestamps": 1726.0, "value": "Can you tell me the 
current and past actions of the person located in bbox [0.024, 0.0, 0.98, 1.0]?"}, {"from": "gpt", "value": ["In the current frame, [0.024, 0.0, 0.98, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1720.0s to 1725.0s, [0.42, 0.113, 0.628, 0.767]->[0.415, 0.119, 0.616, 0.78]->[0.512, 0.136, 0.668, 0.816]->[0.514, 0.163, 0.66, 0.799]->[0.509, 0.14, 0.65, 0.817]->[0.492, 0.137, 0.648, 0.797]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1718.0s to 1719.0s, [0.422, 0.302, 0.614, 0.736]->[0.407, 0.429, 0.592, 0.868]: bend/bow (at the waist).", "At 1717.0s, [0.391, 0.112, 0.526, 0.745]: walk.", "At 1716.0s, [0.375, 0.126, 0.502, 0.717]: stand, carry/hold (an object).", "From 1714.0s to 1715.0s, [0.379, 0.116, 0.539, 0.717]->[0.384, 0.135, 0.549, 0.726]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1713.0s, [0.379, 0.206, 0.548, 0.729]: bend/bow (at the waist), carry/hold (an object).", "From 1711.0s to 1712.0s, [0.37, 0.13, 0.526, 0.691]->[0.406, 0.129, 0.555, 0.72]: stand, carry/hold (an object).", "From 1708.0s to 1710.0s, [0.064, 0.1, 0.267, 0.643]->[0.071, 0.108, 0.247, 0.632]->[0.16, 0.117, 0.302, 0.777]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1707.0s, [0.059, 0.108, 0.214, 0.638]: stand, chop, talk to (e.g., self, a person, a group).", "At 1706.0s, [0.049, 0.105, 0.254, 0.847]: stand, hit (an object)."]}]}, {"video": "-5KQ66BBWC4.mkv", "clip": [1658, 1729], "conversations": [{"from": "human", "timestamps": 1688.0, "value": "What actions is the person in bbox [0.275, 0.0, 0.996, 0.976] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.275, 0.0, 0.996, 0.976]: stand.", "From 1685.0s to 1687.0s, [0.165, 0.0, 0.996, 0.977]->[0.17, 0.0, 0.998, 0.977]->[0.206, 0.009, 0.998, 0.98]: stand.", "At 1684.0s, [0.099, 0.0, 1.0, 0.979]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1729.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.004, 0.0, 0.951, 0.996]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "-5KQ66BBWC4.mkv", "clip": [1735, 1782], "conversations": [{"from": "human", "timestamps": 1765.0, "value": "Describe the actions of the person at bbox [0.0, 0.06, 0.781, 0.987] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.06, 0.781, 0.987]: stand.", "At 1764.0s, [0.003, 0.073, 0.749, 0.998]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1782.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.002, 0.086, 0.192, 0.982]: carry/hold (an object), stand; [0.438, 0.347, 0.623, 0.994]: carry/hold (an object), walk."}]}, {"video": "-FaXLcSFjUI.mp4", "clip": [1031, 1134], "conversations": [{"from": "human", "timestamps": 1061.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.318, 0.334, 0.427, 0.504]: carry/hold (an object), walk; [0.421, 0.235, 0.565, 0.444]: carry/hold (an object), run/jog."}, {"from": "human", "timestamps": 1134.0, "value": "What actions did the person at bbox [0.046, 0.002, 0.421, 0.996] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.046, 0.002, 0.421, 0.996]: 
stand, listen to (a person), watch (a person).", "At 1133.0s, [0.044, 0.004, 0.401, 0.996]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "-FaXLcSFjUI.mp4", "clip": [1132, 1163], "conversations": [{"from": "human", "timestamps": 1162.0, "value": "For the person at bbox [0.215, 0.272, 0.534, 0.991], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.215, 0.272, 0.534, 0.991]: stand, listen to (a person).", "At 1161.0s, [0.177, 0.252, 0.51, 0.987]: stand, listen to (a person).", "At 1160.0s, [0.216, 0.266, 0.497, 0.987]: stand, listen to (a person), watch (a person).", "At 1159.0s, [0.31, 0.258, 0.532, 0.99]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1163.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.196, 0.261, 0.551, 1.0]: listen to (a person), stand; [0.489, 0.134, 0.949, 0.989]: carry/hold (an object), stand."}]}, {"video": "-FaXLcSFjUI.mp4", "clip": [1486, 1523], "conversations": [{"from": "human", "timestamps": 1516.0, "value": "Describe the actions of the person at bbox [0.345, 0.27, 0.748, 0.783] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.345, 0.27, 0.748, 0.783]: carry/hold (an object), crawl, climb (e.g., a mountain).", "From 1514.0s to 1515.0s, [0.385, 0.194, 0.708, 0.656]->[0.365, 0.215, 0.785, 0.787]: carry/hold (an object), crawl, climb (e.g., a mountain).", "At 1513.0s, [0.338, 0.102, 0.59, 0.491]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1523.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.1, 0.172, 0.767, 0.998]: carry/hold (an object), crawl, climb (e.g., a mountain)."}]}, {"video": "-FaXLcSFjUI.mp4", "clip": [1528, 1599], "conversations": [{"from": "human", "timestamps": 1558.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.527, 0.411, 0.594, 0.673]: carry/hold (an object), walk; [0.642, 0.387, 0.729, 0.67]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1599.0, "value": "For the person at bbox [0.592, 0.098, 0.906, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.592, 0.098, 0.906, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1598.0s, [0.576, 0.065, 0.904, 0.99]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "-FaXLcSFjUI.mp4", "clip": [1564, 1603], "conversations": [{"from": "human", "timestamps": 1594.0, "value": "For the person at bbox [0.09, 0.054, 0.959, 0.99], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.09, 0.054, 0.959, 0.99]: sit, stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1593.0s, [0.073, 0.066, 0.952, 0.994]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1591.0s to 1592.0s, [0.058, 0.081, 0.953, 0.996]->[0.07, 0.081, 0.961, 0.989]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1590.0s, [0.214, 0.064, 0.949, 0.998]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1603.0, 
"value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.217, 0.13, 0.515, 1.0]: carry/hold (an object), stand, listen to (a person); [0.558, 0.068, 0.939, 0.986]: carry/hold (an object), talk to (e.g., self, a person, a group), walk."}]}, {"video": "-FaXLcSFjUI.mp4", "clip": [1608, 1731], "conversations": [{"from": "human", "timestamps": 1638.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.275, 0.143, 0.567, 0.998]: listen to (a person), stand, hand shake; [0.515, 0.101, 0.877, 0.992]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1731.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.213, 0.063, 0.755, 1.0]?"}, {"from": "gpt", "value": ["In the current frame, [0.213, 0.063, 0.755, 1.0]: walk.", "From 1729.0s to 1730.0s, [0.03, 0.04, 0.722, 1.0]->[0.038, 0.094, 0.446, 0.998]: walk."]}]}, {"video": "-FaXLcSFjUI.mp4", "clip": [1722, 1790], "conversations": [{"from": "human", "timestamps": 1752.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.064, 0.467, 0.525, 0.809]: lie/sleep; [0.41, 0.096, 0.697, 0.685]: watch (a person), sit; [0.52, 0.437, 0.934, 0.969]: watch (a person), sit."}, {"from": "human", "timestamps": 1790.0, "value": "What actions is the person in bbox [0.434, 0.121, 0.822, 0.954] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.434, 0.121, 0.822, 0.954]: sit, watch (a person).", "At 1789.0s, [0.321, 0.122, 0.786, 0.966]: sit, watch (a person).", "At 1788.0s, [0.458, 0.111, 0.826, 0.956]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "-IELREHX_js.mp4", "clip": [875, 950], "conversations": [{"from": "human", "timestamps": 905.0, "value": "Describe the actions of the person at bbox [0.087, 0.112, 0.328, 0.941] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.087, 0.112, 0.328, 0.941]: stand, talk to (e.g., self, a person, a group).", "From 903.0s to 904.0s, [0.06, 0.042, 0.468, 0.956]->[0.036, 0.137, 0.367, 0.978]: walk.", "At 902.0s, [0.053, 0.06, 0.655, 0.973]: walk, watch (a person)."]}, {"from": "human", "timestamps": 950.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.042, 0.001, 0.639, 1.0]: watch (a person), sit; [0.618, 0.029, 0.975, 0.98]: sit."}]}, {"video": "-IELREHX_js.mp4", "clip": [927, 968], "conversations": [{"from": "human", "timestamps": 957.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.026, 0.0, 0.376, 1.0]: listen to (a person), stand."}, {"from": "human", "timestamps": 968.0, "value": "What actions did the person at bbox [0.025, 0.0, 0.342, 0.98] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.025, 0.0, 0.342, 0.98]: stand, watch (a person).", "At 967.0s, [0.025, 0.019, 0.316, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 966.0s, [0.025, 0.001, 0.467, 1.0]: stand, watch (a person)."]}]}, {"video": "-IELREHX_js.mp4", "clip": [940, 1001], "conversations": [{"from": "human", "timestamps": 970.0, "value": "In the current frame, list each 
person's location and actions."}, {"from": "gpt", "value": "At now, [0.028, 0.348, 0.258, 0.978]: stand, talk to (e.g., self, a person, a group); [0.199, 0.01, 0.684, 0.987]: watch (a person); [0.23, 0.345, 0.378, 0.687]: carry/hold (an object), walk; [0.673, 0.655, 0.888, 0.992]: sit."}, {"from": "human", "timestamps": 1001.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.025, 0.004, 0.716, 0.984]?"}, {"from": "gpt", "value": ["At Now, [0.025, 0.004, 0.716, 0.984]: sit, stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1000.0s, [0.025, 0.0, 0.707, 0.994]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 999.0s, [0.044, 0.011, 0.662, 1.0]: sit, stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 998.0s, [0.025, 0.027, 0.646, 1.0]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 997.0s, [0.025, 0.011, 0.687, 1.0]: sit, carry/hold (an object), smoke, talk to (e.g., self, a person, a group).", "At 996.0s, [0.029, 0.0, 0.687, 1.0]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "-IELREHX_js.mp4", "clip": [1034, 1108], "conversations": [{"from": "human", "timestamps": 1064.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.053, 0.01, 0.729, 1.0]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1108.0, "value": "What actions did the person at bbox [0.271, 0.061, 0.92, 0.998] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.271, 0.061, 0.92, 0.998]: walk.", "At 1107.0s, [0.25, 0.025, 0.779, 0.999]: stand."]}]}, {"video": "-IELREHX_js.mp4", "clip": [1134, 1178], "conversations": [{"from": "human", "timestamps": 1164.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.043, 0.009, 0.693, 1.0]: get up."}, {"from": "human", "timestamps": 1178.0, "value": "What actions did the person at bbox [0.058, 0.026, 0.799, 0.935] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.058, 0.026, 0.799, 0.935]: lie/sleep.", "From 1175.0s to 1177.0s, [0.034, 0.02, 0.764, 0.967]->[0.048, 0.02, 0.743, 0.973]->[0.043, 0.039, 0.795, 0.922]: lie/sleep.", "At 1174.0s, [0.055, 0.028, 0.975, 0.949]: listen to (a person), lie/sleep."]}]}, {"video": "-IELREHX_js.mp4", "clip": [1186, 1235], "conversations": [{"from": "human", "timestamps": 1216.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.48, 0.007, 0.961, 1.0]: carry/hold (an object), stand, dress/put on clothing."}, {"from": "human", "timestamps": 1235.0, "value": "What actions is the person in bbox [0.091, 0.02, 0.52, 0.997] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.091, 0.02, 0.52, 0.997]: sit, read.", "At 1234.0s, [0.092, 0.017, 0.523, 0.982]: sit, carry/hold (an object).", "At 1233.0s, [0.081, 0.064, 0.577, 0.997]: sit."]}]}, {"video": "-IELREHX_js.mp4", "clip": [1230, 1315], "conversations": [{"from": "human", "timestamps": 1260.0, "value": "Describe the actions of the person at bbox [0.527, 0.208, 0.839, 0.953] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.527, 0.208, 0.839, 0.953]: bend/bow 
(at the waist), carry/hold (an object).", "At 1259.0s, [0.557, 0.201, 0.837, 0.956]: bend/bow (at the waist), touch (an object), talk to (e.g., self, a person, a group).", "At 1258.0s, [0.518, 0.207, 0.846, 0.939]: bend/bow (at the waist), carry/hold (an object), push (an object), talk to (e.g., self, a person, a group).", "At 1257.0s, [0.502, 0.203, 0.848, 0.965]: bend/bow (at the waist), carry/hold (an object), put down, talk to (e.g., self, a person, a group).", "At 1256.0s, [0.518, 0.199, 0.846, 0.987]: bend/bow (at the waist), carry/hold (an object).", "At 1255.0s, [0.528, 0.021, 0.925, 0.966]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1251.0s to 1254.0s, [0.539, 0.024, 0.851, 0.92]->[0.534, 0.021, 0.854, 0.931]->[0.525, 0.003, 0.871, 0.973]->[0.538, 0.034, 0.892, 0.984]: sit, carry/hold (an object).", "At 1250.0s, [0.576, 0.07, 0.855, 0.894]: sit, carry/hold (an object), read.", "At 1249.0s, [0.518, 0.071, 0.861, 0.955]: sit, carry/hold (an object)."]}, {"from": "human", "timestamps": 1315.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.05, 0.062, 0.548, 0.978]: lie/sleep, hug (a person), kiss (a person); [0.356, 0.044, 0.946, 1.0]: lie/sleep, kiss (a person)."}]}, {"video": "-IELREHX_js.mp4", "clip": [1383, 1435], "conversations": [{"from": "human", "timestamps": 1413.0, "value": "What actions is the person in bbox [0.579, 0.134, 0.789, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.579, 0.134, 0.789, 1.0]: stand, carry/hold (an object), close (e.g., a door, a box).", "At 1412.0s, [0.582, 0.155, 0.79, 0.981]: stand, open (e.g., a window, a car door)."]}, {"from": "human", "timestamps": 1435.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.544, 0.0, 0.962, 1.0]: stand."}]}, {"video": "-IELREHX_js.mp4", "clip": [1450, 1488], "conversations": [{"from": "human", "timestamps": 1480.0, "value": "What actions did the person at bbox [0.123, 0.088, 0.471, 0.985] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.123, 0.088, 0.471, 0.985]: stand, walk, talk to (e.g., self, a person, a group), watch (a person).", "From 1478.0s to 1479.0s, [0.275, 0.037, 0.946, 0.857]->[0.031, 0.167, 0.564, 0.992]: walk, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1488.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.074, 0.012, 0.774, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.7, 0.153, 0.974, 0.995]: listen to (a person), stand, watch (a person)."}]}, {"video": "-IELREHX_js.mp4", "clip": [1476, 1520], "conversations": [{"from": "human", "timestamps": 1506.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.025, 0.032, 0.594, 0.994]?"}, {"from": "gpt", "value": ["At Now, [0.025, 0.032, 0.594, 0.994]: stand, watch (a person).", "At 1505.0s, [0.025, 0.017, 0.588, 1.0]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1520.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.034, 0.027, 0.606, 1.0]: listen to (a person), stand; [0.505, 0.0, 0.975, 0.975]: watch (a person), stand, talk to (e.g., self, a 
person, a group)."}]}, {"video": "-IELREHX_js.mp4", "clip": [1487, 1586], "conversations": [{"from": "human", "timestamps": 1517.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.025, 0.0, 0.567, 0.988]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1586.0, "value": "What actions did the person at bbox [0.025, 0.069, 0.318, 0.974] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.025, 0.069, 0.318, 0.974]: stand.", "At 1585.0s, [0.054, 0.061, 0.431, 0.997]: stand.", "At 1583.0s, [0.036, 0.034, 0.438, 0.968]: sit, touch (an object)."]}]}, {"video": "-IELREHX_js.mp4", "clip": [1539, 1588], "conversations": [{"from": "human", "timestamps": 1569.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.313, 0.026, 0.975, 0.983]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1588.0, "value": "Describe the actions of the person at bbox [0.025, 0.088, 0.32, 0.983] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.025, 0.088, 0.32, 0.983]: stand.", "From 1585.0s to 1587.0s, [0.054, 0.061, 0.431, 0.997]->[0.025, 0.069, 0.318, 0.974]->[0.037, 0.061, 0.444, 0.964]: stand.", "At 1583.0s, [0.036, 0.034, 0.438, 0.968]: sit, touch (an object)."]}]}, {"video": "-IELREHX_js.mp4", "clip": [1719, 1786], "conversations": [{"from": "human", "timestamps": 1749.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.034, 0.0, 0.811, 0.94]?"}, {"from": "gpt", "value": ["In the current frame, [0.034, 0.0, 0.811, 0.94]: carry/hold (an object), listen to (a person), lie/sleep, watch (a person).", "At 1748.0s, [0.025, 0.0, 0.662, 0.956]: carry/hold (an object), listen to (a person), lie/sleep, watch (a person)."]}, {"from": "human", "timestamps": 1786.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.394, 0.137, 0.645, 0.951]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "-OyDO1g74vc.mp4", "clip": [891, 947], "conversations": [{"from": "human", "timestamps": 921.0, "value": "For the person at bbox [0.0, 0.015, 0.819, 0.994], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.015, 0.819, 0.994]: stand, talk to (e.g., self, a person, a group).", "From 919.0s to 920.0s, [0.012, 0.002, 0.787, 0.97]->[0.008, 0.009, 0.597, 0.989]: stand.", "From 917.0s to 918.0s, [0.0, 0.004, 0.798, 0.965]->[0.0, 0.0, 0.797, 0.975]: sit.", "From 915.0s to 916.0s, [0.016, 0.007, 0.678, 0.988]->[0.0, 0.005, 0.76, 0.993]: stand."]}, {"from": "human", "timestamps": 947.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.193, 0.098, 0.806, 0.945]: carry/hold (an object), stand, touch (an object)."}]}, {"video": "-OyDO1g74vc.mp4", "clip": [897, 1003], "conversations": [{"from": "human", "timestamps": 927.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.078, 0.026, 0.919, 0.968]: walk."}, {"from": "human", "timestamps": 1003.0, "value": "What actions is the person in bbox [0.157, 0.045, 1.0, 0.992] doing in the current frame, and what 
were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.157, 0.045, 1.0, 0.992]: stand, answer phone.", "From 990.0s to 1002.0s, [0.126, 0.043, 1.0, 0.981]->[0.133, 0.05, 1.0, 0.982]->[0.07, 0.053, 0.994, 0.975]->[0.029, 0.028, 1.0, 0.982]->[0.036, 0.036, 1.0, 0.979]->[0.06, 0.023, 1.0, 0.976]->[0.083, 0.038, 1.0, 0.975]->[0.091, 0.051, 0.991, 0.983]->[0.093, 0.039, 1.0, 0.982]->[0.008, 0.051, 0.972, 0.984]->[0.0, 0.063, 0.94, 0.973]->[0.051, 0.041, 1.0, 0.986]->[0.125, 0.047, 1.0, 0.983]: stand, answer phone."]}]}, {"video": "-OyDO1g74vc.mp4", "clip": [1018, 1077], "conversations": [{"from": "human", "timestamps": 1048.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.126, 0.009, 1.0, 0.988]: stand."}, {"from": "human", "timestamps": 1077.0, "value": "Describe the actions of the person at bbox [0.076, 0.0, 1.0, 0.983] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.076, 0.0, 1.0, 0.983]: stand, talk to (e.g., self, a person, a group).", "From 1073.0s to 1076.0s, [0.087, 0.0, 1.0, 0.991]->[0.117, 0.0, 1.0, 0.988]->[0.062, 0.0, 1.0, 0.985]->[0.114, 0.0, 0.988, 0.986]: stand, talk to (e.g., self, a person, a group).", "From 1071.0s to 1072.0s, [0.029, 0.01, 0.962, 0.985]->[0.135, 0.0, 0.983, 0.995]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1069.0s to 1070.0s, [0.043, 0.021, 0.978, 0.988]->[0.004, 0.0, 0.863, 0.989]: stand, talk to (e.g., self, a person, a group).", "At 1068.0s, [0.011, 0.02, 0.895, 0.988]: sit, talk to (e.g., self, a person, a group).", "At 1067.0s, [0.0, 0.003, 1.0, 0.981]: stand, talk to (e.g., self, a person, a group).", "From 1064.0s to 1066.0s, [0.03, 0.0, 1.0, 0.997]->[0.0, 0.006, 1.0, 0.991]->[0.0, 0.011, 1.0, 0.968]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1062.0s to 1063.0s, [0.065, 0.0, 1.0, 0.998]->[0.091, 0.0, 1.0, 0.993]: stand, talk to (e.g., self, a person, a group).", "At 1061.0s, [0.099, 0.0, 0.947, 0.986]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "-OyDO1g74vc.mp4", "clip": [1112, 1152], "conversations": [{"from": "human", "timestamps": 1142.0, "value": "For the person at bbox [0.467, 0.451, 0.704, 0.884], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.467, 0.451, 0.704, 0.884]: walk, carry/hold (an object).", "At 1141.0s, [0.456, 0.561, 0.645, 0.878]: walk, carry/hold (an object)."]}, {"from": "human", "timestamps": 1152.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.403, 0.127, 0.672, 0.539]: carry/hold (an object), stand."}]}, {"video": "-OyDO1g74vc.mp4", "clip": [1203, 1235], "conversations": [{"from": "human", "timestamps": 1233.0, "value": "What actions is the person in bbox [0.133, 0.12, 0.463, 0.789] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.133, 0.12, 0.463, 0.789]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1231.0s to 1232.0s, [0.157, 0.113, 0.474, 0.807]->[0.153, 0.107, 0.464, 0.808]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1229.0s to 1230.0s, [0.148, 0.131, 0.463, 0.795]->[0.159, 0.127, 0.478, 0.797]: stand, touch (an 
object), talk to (e.g., self, a person, a group).", "At 1228.0s, [0.005, 0.128, 0.298, 0.787]: walk, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1235.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.129, 0.118, 0.459, 0.804]: watch (a person), stand, touch (an object), talk to (e.g., self, a person, a group); [0.367, 0.109, 0.743, 0.979]: listen to (a person), stand, touch (an object), watch (a person)."}]}, {"video": "-OyDO1g74vc.mp4", "clip": [1326, 1361], "conversations": [{"from": "human", "timestamps": 1356.0, "value": "Describe the actions of the person at bbox [0.486, 0.081, 0.886, 0.97] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.486, 0.081, 0.886, 0.97]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1351.0s to 1355.0s, [0.49, 0.079, 0.903, 0.972]->[0.485, 0.058, 0.905, 0.976]->[0.477, 0.055, 0.896, 0.977]->[0.487, 0.09, 0.899, 0.964]->[0.496, 0.091, 0.9, 0.958]: stand, read, listen to (a person).", "From 1349.0s to 1350.0s, [0.485, 0.045, 0.942, 0.979]->[0.44, 0.048, 0.887, 0.964]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1341.0s to 1348.0s, [0.49, 0.006, 0.952, 0.975]->[0.461, 0.023, 0.95, 0.973]->[0.541, 0.042, 0.948, 0.989]->[0.531, 0.046, 0.948, 0.965]->[0.542, 0.049, 0.955, 0.995]->[0.524, 0.032, 0.954, 0.983]->[0.519, 0.03, 0.938, 0.968]->[0.538, 0.051, 0.944, 0.956]: stand, read, listen to (a person).", "At 1340.0s, [0.587, 0.0, 0.997, 0.995]: stand, read."]}, {"from": "human", "timestamps": 1361.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.007, 0.0, 0.593, 1.0]: watch (a person), listen to (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.461, 0.083, 0.877, 0.979]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "-OyDO1g74vc.mp4", "clip": [1348, 1389], "conversations": [{"from": "human", "timestamps": 1378.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.012, 0.056, 0.488, 0.988]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.385, 0.124, 0.855, 0.979]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1389.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.028, 0.084, 0.518, 0.985]?"}, {"from": "gpt", "value": ["In the current frame, [0.028, 0.084, 0.518, 0.985]: stand, carry/hold (an object), read, listen to (a person).", "At 1388.0s, [0.005, 0.036, 0.452, 0.977]: stand, carry/hold (an object), read, listen to (a person).", "From 1386.0s to 1387.0s, [0.016, 0.042, 0.592, 0.978]->[0.016, 0.04, 0.523, 0.991]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1384.0s to 1385.0s, [0.021, 0.045, 0.599, 0.97]->[0.016, 0.035, 0.572, 0.964]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1383.0s, [0.028, 0.066, 0.559, 0.96]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1382.0s, [0.022, 0.061, 0.599, 0.972]: stand, read, talk to (e.g., self, a person, a group).", 
"At 1381.0s, [0.04, 0.069, 0.605, 1.0]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 1379.0s to 1380.0s, [0.015, 0.045, 0.492, 0.983]->[0.016, 0.052, 0.561, 0.971]: stand, carry/hold (an object), listen to (a person).", "At 1378.0s, [0.012, 0.056, 0.488, 0.988]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1377.0s, [0.028, 0.024, 0.489, 0.958]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1375.0s to 1376.0s, [0.02, 0.06, 0.585, 0.967]->[0.016, 0.074, 0.463, 0.976]: stand, carry/hold (an object), listen to (a person).", "At 1374.0s, [0.029, 0.041, 0.519, 0.969]: stand, read, listen to (a person).", "From 1371.0s to 1373.0s, [0.017, 0.034, 0.582, 0.976]->[0.015, 0.013, 0.53, 0.978]->[0.014, 0.036, 0.547, 0.979]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1368.0s to 1370.0s, [0.011, 0.024, 0.602, 0.979]->[0.02, 0.034, 0.539, 0.973]->[0.023, 0.034, 0.584, 0.969]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1367.0s, [0.022, 0.033, 0.625, 0.986]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1366.0s, [0.008, 0.049, 0.612, 0.997]: stand, carry/hold (an object), listen to (a person).", "From 1362.0s to 1365.0s, [0.0, 0.008, 0.598, 0.99]->[0.016, 0.0, 0.579, 0.991]->[0.013, 0.024, 0.611, 0.976]->[0.006, 0.023, 0.599, 0.985]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1361.0s, [0.007, 0.0, 0.593, 1.0]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1358.0s to 1360.0s, [0.019, 0.009, 0.592, 0.971]->[0.013, 0.01, 0.611, 0.974]->[0.037, 0.011, 0.601, 0.984]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1357.0s, [0.012, 0.0, 0.597, 0.986]: stand, carry/hold (an object), write, talk to (e.g., self, a person, a group), watch (a person).", "At 1356.0s, [0.017, 0.0, 0.638, 0.98]: stand, read, write, talk to (e.g., self, a person, a group).", "At 1355.0s, [0.029, 0.0, 0.574, 0.961]: stand, carry/hold (an object), write, talk to (e.g., self, a person, a group).", "At 1354.0s, [0.018, 0.0, 0.583, 0.981]: stand, read, write, talk to (e.g., self, a person, a group).", "At 1353.0s, [0.017, 0.0, 0.606, 0.974]: stand, read, talk to (e.g., self, a person, a group).", "At 1352.0s, [0.012, 0.003, 0.643, 0.986]: stand, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "At 1351.0s, [0.023, 0.0, 0.603, 0.981]: stand, carry/hold (an object), read, write.", "At 1350.0s, [0.005, 0.009, 0.57, 0.998]: stand, carry/hold (an object), write, listen to (a person).", "At 1349.0s, [0.002, 0.008, 0.593, 1.0]: stand, write, listen to (a person).", "From 1342.0s to 1348.0s, [0.009, 0.005, 0.63, 0.995]->[0.003, 0.014, 0.61, 0.995]->[0.0, 0.002, 0.599, 1.0]->[0.009, 0.001, 0.601, 0.992]->[0.008, 0.002, 0.609, 0.993]->[0.005, 0.004, 0.615, 1.0]->[0.0, 0.003, 0.578, 0.995]: stand, write, talk to (e.g., self, a person, a group).", "From 1340.0s to 1341.0s, [0.114, 0.005, 0.716, 1.0]->[0.009, 0.006, 0.574, 1.0]: stand, carry/hold (an object)."]}]}, {"video": "-OyDO1g74vc.mp4", "clip": [1349, 1491], "conversations": [{"from": "human", "timestamps": 1379.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, 
[0.015, 0.045, 0.492, 0.983]: carry/hold (an object), stand, listen to (a person); [0.371, 0.115, 0.883, 0.966]: read, stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1491.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.106, 0.088, 0.797, 0.994]?"}, {"from": "gpt", "value": ["At Now, [0.106, 0.088, 0.797, 0.994]: stand, watch (a person).", "From 1488.0s to 1490.0s, [0.152, 0.068, 0.692, 0.99]->[0.203, 0.074, 0.747, 1.0]->[0.161, 0.085, 0.708, 0.986]: stand."]}]}, {"video": "-OyDO1g74vc.mp4", "clip": [1540, 1595], "conversations": [{"from": "human", "timestamps": 1570.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.465, 0.043, 0.965, 0.99]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1595.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.002, 0.094, 0.459, 0.977]?"}, {"from": "gpt", "value": ["In the current frame, [0.002, 0.094, 0.459, 0.977]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1577.0s to 1594.0s, [0.027, 0.124, 0.383, 0.98]->[0.07, 0.118, 0.375, 0.941]->[0.063, 0.116, 0.406, 0.975]->[0.043, 0.101, 0.453, 0.979]->[0.046, 0.096, 0.441, 0.978]->[0.052, 0.097, 0.438, 0.973]->[0.035, 0.1, 0.439, 0.98]->[0.054, 0.085, 0.399, 0.971]->[0.036, 0.103, 0.443, 0.98]->[0.033, 0.096, 0.429, 0.97]->[0.017, 0.078, 0.368, 0.981]->[0.023, 0.085, 0.428, 0.988]->[0.007, 0.084, 0.381, 0.986]->[0.016, 0.08, 0.425, 0.982]->[0.015, 0.098, 0.396, 0.986]->[0.013, 0.101, 0.428, 0.981]->[0.008, 0.096, 0.372, 0.997]->[0.01, 0.103, 0.437, 0.981]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "-OyDO1g74vc.mp4", "clip": [1595, 1643], "conversations": [{"from": "human", "timestamps": 1625.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.616, 0.213, 0.916, 0.998]?"}, {"from": "gpt", "value": ["At Now, [0.616, 0.213, 0.916, 0.998]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1623.0s to 1624.0s, [0.557, 0.155, 0.888, 0.939]->[0.547, 0.203, 0.894, 0.986]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1622.0s, [0.653, 0.091, 1.0, 0.994]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1620.0s to 1621.0s, [0.611, 0.037, 0.999, 0.989]->[0.696, 0.07, 1.0, 0.996]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1643.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.009, 0.018, 0.303, 0.975]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.487, 0.016, 1.0, 1.0]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "-OyDO1g74vc.mp4", "clip": [1616, 1679], "conversations": [{"from": "human", "timestamps": 1646.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.0, 0.3, 1.0]: watch (a person), stand; [0.289, 0.288, 0.825, 0.984]: carry/hold (an object), open (e.g., a window, a car door), walk."}, {"from": "human", "timestamps": 1679.0, "value": "For the person at bbox [0.017, 0.067, 0.722, 0.996], 
what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.017, 0.067, 0.722, 0.996]: stand, listen to (a person), watch (a person).", "At 1678.0s, [0.0, 0.072, 0.454, 0.996]: stand, listen to (a person), watch (a person)."]}]}, {"video": "-OyDO1g74vc.mp4", "clip": [1687, 1727], "conversations": [{"from": "human", "timestamps": 1717.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.02, 0.106, 0.681, 0.99]: listen to (a person), stand, watch (a person); [0.555, 0.0, 0.995, 0.98]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1727.0, "value": "What actions is the person in bbox [0.068, 0.039, 0.554, 0.993] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.068, 0.039, 0.554, 0.993]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1726.0s, [0.099, 0.163, 0.98, 0.978]: bend/bow (at the waist), carry/hold (an object).", "From 1724.0s to 1725.0s, [0.089, 0.042, 0.691, 0.98]->[0.036, 0.122, 0.751, 0.972]: stand, close (e.g., a door, a box).", "At 1723.0s, [0.113, 0.155, 0.701, 0.991]: bend/bow (at the waist), close (e.g., a door, a box).", "At 1722.0s, [0.021, 0.01, 0.717, 0.973]: walk.", "At 1721.0s, [0.531, 0.017, 1.0, 0.982]: walk, watch (a person)."]}]}, {"video": "-OyDO1g74vc.mp4", "clip": [1730, 1766], "conversations": [{"from": "human", "timestamps": 1760.0, "value": "What actions did the person at bbox [0.514, 0.292, 0.938, 0.976] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.514, 0.292, 0.938, 0.976]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1757.0s to 1759.0s, [0.494, 0.264, 0.907, 0.97]->[0.504, 0.281, 0.92, 0.974]->[0.525, 0.291, 0.938, 0.975]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1755.0s to 1756.0s, [0.362, 0.15, 0.82, 0.968]->[0.428, 0.214, 0.902, 0.965]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1754.0s, [0.334, 0.271, 0.811, 0.97]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1753.0s, [0.244, 0.193, 1.0, 0.978]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1750.0s to 1752.0s, [0.541, 0.097, 0.97, 0.977]->[0.559, 0.146, 0.978, 1.0]->[0.404, 0.137, 0.848, 0.975]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1749.0s, [0.428, 0.216, 0.998, 0.984]: walk, carry/hold (an object).", "At 1748.0s, [0.669, 0.277, 0.998, 0.983]: stand, carry/hold (an object), take (an object) from (a person), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1766.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.291, 0.282, 0.921, 0.979]: carry/hold (an object), walk."}]}, {"video": "-XpUuIgyUHE.mp4", "clip": [875, 908], "conversations": [{"from": "human", "timestamps": 905.0, "value": "What actions is the person in bbox [0.156, 0.049, 0.935, 0.958] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.156, 0.049, 0.935, 0.958]: stand, carry/hold (an object), talk to (e.g., self, a person, a 
group), watch (a person).", "At 904.0s, [0.259, 0.037, 0.946, 0.948]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 903.0s, [0.286, 0.037, 0.924, 0.942]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 902.0s, [0.228, 0.031, 0.944, 0.957]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 908.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.103, 0.043, 0.947, 0.963]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "-XpUuIgyUHE.mp4", "clip": [1039, 1135], "conversations": [{"from": "human", "timestamps": 1069.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.019, 0.033, 0.324, 0.815]: listen to (a person), stand, watch (a person); [0.078, 0.096, 0.907, 0.93]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1135.0, "value": "What actions did the person at bbox [0.042, 0.019, 0.991, 0.962] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.042, 0.019, 0.991, 0.962]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1134.0s, [0.017, 0.021, 1.0, 0.985]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1132.0s to 1133.0s, [0.065, 0.058, 0.999, 0.958]->[0.035, 0.032, 0.996, 0.967]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1130.0s to 1131.0s, [0.131, 0.059, 0.958, 0.946]->[0.108, 0.051, 0.996, 0.966]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1129.0s, [0.598, 0.063, 0.978, 0.948]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1128.0s, [0.509, 0.046, 0.994, 0.977]: stand, listen to (a person), watch (a person)."]}]}, {"video": "-XpUuIgyUHE.mp4", "clip": [1136, 1171], "conversations": [{"from": "human", "timestamps": 1166.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.051, 0.173, 0.323, 0.855]: listen to (a person), stand; [0.089, 0.053, 0.969, 0.953]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.769, 0.056, 0.986, 0.881]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1171.0, "value": "What actions is the person in bbox [0.735, 0.046, 0.988, 0.911] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.735, 0.046, 0.988, 0.911]: stand, listen to (a person), watch (a person).", "From 1169.0s to 1170.0s, [0.725, 0.038, 0.979, 0.878]->[0.653, 0.042, 0.986, 0.937]: stand, listen to (a person), watch (a person)."]}]}, {"video": "-XpUuIgyUHE.mp4", "clip": [1140, 1213], "conversations": [{"from": "human", "timestamps": 1170.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.039, 0.165, 0.271, 0.897]: listen to (a person), stand, watch (a person); [0.085, 0.037, 0.892, 0.955]: stand, talk to (e.g., self, a person, a group); [0.653, 0.042, 0.986, 0.937]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1213.0, "value": "What actions is the person in bbox [0.46, 0.23, 0.64, 0.684] doing in the 
current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.46, 0.23, 0.64, 0.684]: walk, carry/hold (an object), climb (e.g., a mountain).", "From 1211.0s to 1212.0s, [0.511, 0.215, 0.71, 0.676]->[0.519, 0.222, 0.713, 0.676]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1210.0s, [0.547, 0.179, 0.733, 0.645]: walk, carry/hold (an object), climb (e.g., a mountain), talk to (e.g., self, a person, a group)."]}]}, {"video": "-XpUuIgyUHE.mp4", "clip": [1195, 1255], "conversations": [{"from": "human", "timestamps": 1225.0, "value": "What actions is the person in bbox [0.03, 0.062, 0.745, 0.956] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.03, 0.062, 0.745, 0.956]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1223.0s to 1224.0s, [0.014, 0.097, 0.692, 0.961]->[0.035, 0.067, 0.726, 0.952]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1255.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.053, 0.099, 0.957, 0.953]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "-XpUuIgyUHE.mp4", "clip": [1399, 1438], "conversations": [{"from": "human", "timestamps": 1429.0, "value": "For the person at bbox [0.131, 0.055, 0.959, 0.952], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.131, 0.055, 0.959, 0.952]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1428.0s, [0.07, 0.046, 0.966, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1438.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.345, 0.089, 0.84, 0.915]: carry/hold (an object), lie/sleep; [0.453, 0.145, 0.543, 0.224]: watch (a person), stand."}]}, {"video": "-XpUuIgyUHE.mp4", "clip": [1378, 1457], "conversations": [{"from": "human", "timestamps": 1408.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.08, 0.319, 0.536, 0.94]: carry/hold (an object), sit, watch (a person), listen to (a person); [0.319, 0.297, 0.709, 0.726]: sit, drink, listen to (a person), carry/hold (an object), talk to (e.g., self, a person, a group); [0.535, 0.062, 0.644, 0.334]: carry/hold (an object), stand, watch (a person); [0.622, 0.258, 0.923, 0.763]: carry/hold (an object), sit, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1457.0, "value": "What actions did the person at bbox [0.016, 0.328, 0.336, 0.949] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.016, 0.328, 0.336, 0.949]: stand, listen to (a person), watch (a person).", "At 1456.0s, [0.019, 0.302, 0.392, 0.949]: stand."]}]}, {"video": "-XpUuIgyUHE.mp4", "clip": [1515, 1549], "conversations": [{"from": "human", "timestamps": 1545.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.235, 0.2, 0.605, 0.863]: carry/hold (an object), watch (a person), talk to (e.g., self, a person, a group), walk; [0.549, 0.135, 0.776, 0.944]: carry/hold (an object), listen to (a person), watch (a person), walk."}, {"from": 
"human", "timestamps": 1549.0, "value": "What actions did the person at bbox [0.122, 0.171, 0.572, 0.945] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.122, 0.171, 0.572, 0.945]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1548.0s, [0.046, 0.204, 0.493, 0.954]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1547.0s, [0.12, 0.138, 0.51, 0.801]: walk, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "-XpUuIgyUHE.mp4", "clip": [1504, 1601], "conversations": [{"from": "human", "timestamps": 1534.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.043, 0.029, 0.622, 0.955]: carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person), bend/bow (at the waist)."}, {"from": "human", "timestamps": 1601.0, "value": "What actions is the person in bbox [0.575, 0.044, 0.936, 0.934] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.575, 0.044, 0.936, 0.934]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1599.0s to 1600.0s, [0.521, 0.048, 0.963, 0.935]->[0.514, 0.055, 0.947, 0.931]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1598.0s, [0.516, 0.086, 0.914, 0.953]: stand, carry/hold (an object), watch (a person).", "At 1597.0s, [0.495, 0.072, 0.926, 0.916]: carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1596.0s, [0.384, 0.091, 0.969, 0.94]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1595.0s, [0.371, 0.069, 0.909, 0.956]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1594.0s, [0.061, 0.051, 0.829, 0.942]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1593.0s, [0.09, 0.035, 0.791, 0.906]: stand, carry/hold (an object), watch (a person)."]}]}, {"video": "-XpUuIgyUHE.mp4", "clip": [1597, 1630], "conversations": [{"from": "human", "timestamps": 1627.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.235, 0.04, 0.941, 0.948]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1630.0, "value": "What actions did the person at bbox [0.037, 0.098, 0.565, 0.925] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.037, 0.098, 0.565, 0.925]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1628.0s to 1629.0s, [0.045, 0.119, 0.535, 0.939]->[0.038, 0.097, 0.542, 0.939]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "-ZFgsrolSxo.mkv", "clip": [946, 981], "conversations": [{"from": "human", "timestamps": 976.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.603, 0.081, 0.866, 0.973]?"}, {"from": "gpt", "value": ["At Now, [0.603, 0.081, 0.866, 0.973]: stand, hand shake, talk to (e.g., self, a person, a group), watch (a person).", "At 975.0s, [0.5, 0.129, 0.83, 0.981]: stand, hand shake, talk to (e.g., self, a person, a group), watch (a 
person).", "At 974.0s, [0.539, 0.049, 0.824, 0.971]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 967.0s to 973.0s, [0.629, 0.13, 0.831, 0.987]->[0.629, 0.132, 0.835, 0.979]->[0.651, 0.154, 0.841, 0.98]->[0.622, 0.154, 0.842, 0.992]->[0.637, 0.162, 0.844, 0.991]->[0.655, 0.143, 0.864, 0.979]->[0.676, 0.092, 0.87, 0.969]: stand, listen to (a person), watch (a person).", "At 966.0s, [0.598, 0.131, 0.836, 0.985]: stand, listen to (a person)."]}, {"from": "human", "timestamps": 981.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.476, 0.1, 0.768, 0.997]: watch (a person), stand."}]}, {"video": "-ZFgsrolSxo.mkv", "clip": [1007, 1046], "conversations": [{"from": "human", "timestamps": 1037.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.212, 0.1, 0.905, 0.977]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1046.0, "value": "What actions is the person in bbox [0.125, 0.074, 0.447, 0.993] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.125, 0.074, 0.447, 0.993]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1043.0s to 1045.0s, [0.122, 0.094, 0.441, 0.971]->[0.143, 0.089, 0.472, 0.98]->[0.124, 0.085, 0.43, 0.97]: stand, listen to (a person), watch (a person).", "At 1042.0s, [0.129, 0.058, 0.347, 1.0]: stand, listen to (a person)."]}]}, {"video": "-ZFgsrolSxo.mkv", "clip": [1070, 1122], "conversations": [{"from": "human", "timestamps": 1100.0, "value": "Describe the actions of the person at bbox [0.36, 0.045, 0.886, 0.914] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.36, 0.045, 0.886, 0.914]: sit, cook.", "From 1098.0s to 1099.0s, [0.272, 0.124, 0.87, 0.992]->[0.337, 0.023, 0.911, 0.898]: sit, cook.", "At 1097.0s, [0.349, 0.028, 0.878, 1.0]: sit, cook, stir.", "From 1095.0s to 1096.0s, [0.422, 0.0, 0.867, 0.988]->[0.402, 0.0, 0.897, 0.968]: sit, cook.", "At 1094.0s, [0.231, 0.067, 0.6, 0.968]: sit, carry/hold (an object)."]}, {"from": "human", "timestamps": 1122.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.194, 0.065, 0.338, 0.925]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.314, 0.176, 0.448, 0.782]: shovel, stand; [0.589, 0.233, 0.869, 0.985]: shovel, bend/bow (at the waist)."}]}, {"video": "-ZFgsrolSxo.mkv", "clip": [1130, 1169], "conversations": [{"from": "human", "timestamps": 1160.0, "value": "What actions did the person at bbox [0.196, 0.061, 0.441, 0.986] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.196, 0.061, 0.441, 0.986]: stand, watch (a person).", "From 1158.0s to 1159.0s, [0.136, 0.143, 0.48, 0.97]->[0.134, 0.141, 0.486, 0.982]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person).", "From 1155.0s to 1157.0s, [0.134, 0.011, 0.389, 0.998]->[0.129, 0.073, 0.449, 0.973]->[0.175, 0.055, 0.389, 0.983]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1169.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.167, 0.023, 0.48, 0.977]: listen to (a person), stand, watch (a person); [0.47, 0.424, 0.794, 1.0]: watch (a person), stand, talk to (e.g., 
self, a person, a group)."}]}, {"video": "-ZFgsrolSxo.mkv", "clip": [1169, 1248], "conversations": [{"from": "human", "timestamps": 1199.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.198, 0.029, 0.589, 0.663]: carry/hold (an object), bend/bow (at the waist)."}, {"from": "human", "timestamps": 1248.0, "value": "Describe the actions of the person at bbox [0.55, 0.334, 0.679, 0.802] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.55, 0.334, 0.679, 0.802]: walk, carry/hold (an object).", "At 1247.0s, [0.584, 0.336, 0.686, 0.772]: walk, carry/hold (an object)."]}]}, {"video": "-ZFgsrolSxo.mkv", "clip": [1230, 1289], "conversations": [{"from": "human", "timestamps": 1260.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.181, 0.028, 0.604, 0.969]: bend/bow (at the waist)."}, {"from": "human", "timestamps": 1289.0, "value": "What actions did the person at bbox [0.438, 0.277, 0.74, 0.975] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.438, 0.277, 0.74, 0.975]: sit, hand shake, listen to (a person), talk to (e.g., self, a person, a group).", "At 1288.0s, [0.44, 0.16, 0.78, 0.941]: sit, hand shake, listen to (a person), talk to (e.g., self, a person, a group).", "At 1287.0s, [0.456, 0.212, 0.744, 0.938]: sit, talk to (e.g., self, a person, a group).", "At 1286.0s, [0.389, 0.316, 0.718, 0.978]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1284.0s to 1285.0s, [0.167, 0.179, 0.75, 0.97]->[0.264, 0.331, 0.668, 0.97]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "-ZFgsrolSxo.mkv", "clip": [1269, 1356], "conversations": [{"from": "human", "timestamps": 1299.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.184, 0.026, 0.572, 0.938]: touch (an object), crouch/kneel, talk to (e.g., self, a person, a group); [0.486, 0.178, 0.773, 0.965]: listen to (a person), sit."}, {"from": "human", "timestamps": 1356.0, "value": "Describe the actions of the person at bbox [0.429, 0.149, 0.655, 0.886] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.429, 0.149, 0.655, 0.886]: sit, carry/hold (an object).", "At 1355.0s, [0.383, 0.133, 0.604, 0.836]: sit.", "At 1354.0s, [0.318, 0.098, 0.595, 0.836]: sit, eat.", "From 1352.0s to 1353.0s, [0.33, 0.102, 0.65, 0.78]->[0.316, 0.12, 0.614, 0.747]: sit.", "At 1351.0s, [0.339, 0.095, 0.628, 0.852]: sit, touch (an object).", "At 1350.0s, [0.384, 0.094, 0.628, 0.876]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1349.0s, [0.348, 0.098, 0.631, 0.828]: sit, touch (an object).", "From 1347.0s to 1348.0s, [0.362, 0.151, 0.654, 0.839]->[0.355, 0.112, 0.66, 0.835]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1345.0s to 1346.0s, [0.358, 0.115, 0.619, 0.829]->[0.414, 0.128, 0.647, 0.827]: sit, talk to (e.g., self, a person, a group).", "From 1342.0s to 1344.0s, [0.338, 0.096, 0.608, 0.78]->[0.339, 0.121, 0.632, 0.814]->[0.345, 0.127, 0.663, 0.795]: sit.", "At 1341.0s, [0.356, 0.15, 0.638, 0.864]: sit, talk to (e.g., self, a person, a group).", "At 1340.0s, [0.455, 0.267, 0.674, 0.928]: sit, touch (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "-ZFgsrolSxo.mkv", "clip": [1362, 1413], "conversations": [{"from": "human", "timestamps": 
1392.0, "value": "For the person at bbox [0.469, 0.426, 0.775, 0.987], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.469, 0.426, 0.775, 0.987]: sit, eat, talk to (e.g., self, a person, a group).", "At 1391.0s, [0.537, 0.352, 0.826, 0.967]: sit, eat.", "At 1390.0s, [0.545, 0.192, 0.842, 0.915]: sit.", "At 1389.0s, [0.527, 0.161, 0.833, 0.921]: sit, carry/hold (an object).", "From 1387.0s to 1388.0s, [0.516, 0.191, 0.832, 0.916]->[0.502, 0.174, 0.833, 0.931]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1386.0s, [0.513, 0.188, 0.825, 0.947]: sit, talk to (e.g., self, a person, a group).", "At 1385.0s, [0.543, 0.192, 0.825, 0.961]: sit, eat, talk to (e.g., self, a person, a group).", "From 1382.0s to 1384.0s, [0.537, 0.184, 0.835, 0.93]->[0.522, 0.183, 0.839, 0.939]->[0.54, 0.188, 0.829, 0.945]: sit, eat.", "At 1381.0s, [0.527, 0.183, 0.84, 0.945]: sit, touch (an object).", "At 1380.0s, [0.522, 0.193, 0.839, 0.893]: sit.", "From 1378.0s to 1379.0s, [0.559, 0.164, 0.847, 0.906]->[0.543, 0.193, 0.839, 0.931]: sit, eat.", "At 1377.0s, [0.562, 0.17, 0.848, 0.881]: sit, eat, watch (a person).", "At 1376.0s, [0.554, 0.196, 0.843, 0.934]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1375.0s, [0.489, 0.195, 0.821, 0.932]: sit, eat, talk to (e.g., self, a person, a group).", "At 1374.0s, [0.496, 0.119, 0.794, 0.806]: sit, eat, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1373.0s, [0.514, 0.077, 0.81, 0.829]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1372.0s, [0.534, 0.109, 0.826, 0.83]: sit, carry/hold (an object), eat, talk to (e.g., self, a person, a group).", "From 1368.0s to 1371.0s, [0.498, 0.22, 0.789, 0.924]->[0.498, 0.246, 0.771, 0.933]->[0.561, 0.177, 0.824, 0.837]->[0.57, 0.175, 0.861, 0.882]: sit, eat, talk to (e.g., self, a person, a group).", "At 1367.0s, [0.48, 0.219, 0.789, 0.942]: sit, eat, touch (an object), talk to (e.g., self, a person, a group).", "At 1366.0s, [0.469, 0.223, 0.77, 0.932]: sit, eat, touch (an object).", "From 1364.0s to 1365.0s, [0.472, 0.248, 0.756, 0.941]->[0.465, 0.244, 0.76, 0.941]: sit, eat, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1363.0s, [0.461, 0.21, 0.743, 0.873]: sit, eat, talk to (e.g., self, a person, a group), watch (a person).", "At 1362.0s, [0.452, 0.208, 0.736, 0.903]: sit, carry/hold (an object), eat.", "From 1359.0s to 1361.0s, [0.461, 0.162, 0.672, 0.866]->[0.465, 0.167, 0.654, 0.87]->[0.435, 0.227, 0.742, 0.98]: sit, eat.", "At 1358.0s, [0.455, 0.15, 0.679, 0.892]: sit.", "At 1357.0s, [0.439, 0.184, 0.655, 0.929]: sit, touch (an object).", "At 1356.0s, [0.429, 0.149, 0.655, 0.886]: sit, carry/hold (an object).", "At 1355.0s, [0.383, 0.133, 0.604, 0.836]: sit.", "At 1354.0s, [0.318, 0.098, 0.595, 0.836]: sit, eat.", "From 1352.0s to 1353.0s, [0.33, 0.102, 0.65, 0.78]->[0.316, 0.12, 0.614, 0.747]: sit.", "At 1351.0s, [0.339, 0.095, 0.628, 0.852]: sit, touch (an object).", "At 1350.0s, [0.384, 0.094, 0.628, 0.876]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1349.0s, [0.348, 0.098, 0.631, 0.828]: sit, touch (an object).", "From 1347.0s to 1348.0s, [0.362, 0.151, 0.654, 0.839]->[0.355, 0.112, 0.66, 0.835]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1345.0s to 1346.0s, [0.358, 0.115, 0.619, 0.829]->[0.414, 0.128, 0.647, 0.827]: sit, talk to 
(e.g., self, a person, a group).", "From 1342.0s to 1344.0s, [0.338, 0.096, 0.608, 0.78]->[0.339, 0.121, 0.632, 0.814]->[0.345, 0.127, 0.663, 0.795]: sit.", "At 1341.0s, [0.356, 0.15, 0.638, 0.864]: sit, talk to (e.g., self, a person, a group).", "At 1340.0s, [0.455, 0.267, 0.674, 0.928]: sit, touch (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1413.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.31, 0.329, 0.869, 0.99]: carry/hold (an object), bend/bow (at the waist)."}]}, {"video": "-ZFgsrolSxo.mkv", "clip": [1381, 1455], "conversations": [{"from": "human", "timestamps": 1411.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.387, 0.026, 0.718, 1.0]: eat, bend/bow (at the waist)."}, {"from": "human", "timestamps": 1455.0, "value": "What actions is the person in bbox [0.102, 0.127, 0.792, 0.977] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.102, 0.127, 0.792, 0.977]: sit, stand, eat.", "At 1454.0s, [0.213, 0.137, 0.748, 0.983]: sit, eat.", "At 1453.0s, [0.156, 0.07, 0.824, 1.0]: stand, eat, talk to (e.g., self, a person, a group).", "At 1452.0s, [0.164, 0.121, 0.732, 0.995]: stand, eat.", "At 1451.0s, [0.13, 0.154, 0.748, 0.959]: sit, eat, talk to (e.g., self, a person, a group).", "From 1448.0s to 1450.0s, [0.121, 0.141, 0.778, 0.969]->[0.124, 0.17, 0.83, 0.981]->[0.121, 0.132, 0.681, 0.973]: sit, eat.", "From 1443.0s to 1447.0s, [0.242, 0.12, 0.651, 0.883]->[0.237, 0.111, 0.654, 0.856]->[0.222, 0.181, 0.634, 0.946]->[0.244, 0.204, 0.647, 0.972]->[0.271, 0.225, 0.719, 0.973]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1442.0s, [0.248, 0.104, 0.647, 0.861]: sit, lift/pick up, talk to (e.g., self, a person, a group).", "From 1432.0s to 1441.0s, [0.248, 0.059, 0.661, 0.858]->[0.254, 0.042, 0.659, 0.856]->[0.21, 0.087, 0.675, 0.863]->[0.2, 0.073, 0.688, 0.884]->[0.208, 0.124, 0.71, 0.9]->[0.189, 0.123, 0.678, 0.919]->[0.194, 0.083, 0.694, 0.886]->[0.214, 0.11, 0.629, 0.886]->[0.214, 0.107, 0.631, 0.901]->[0.226, 0.121, 0.647, 0.918]: sit, talk to (e.g., self, a person, a group).", "At 1431.0s, [0.26, 0.067, 0.676, 0.859]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1429.0s to 1430.0s, [0.269, 0.095, 0.713, 0.883]->[0.263, 0.071, 0.679, 0.864]: sit, talk to (e.g., self, a person, a group).", "From 1426.0s to 1428.0s, [0.274, 0.081, 0.76, 0.908]->[0.248, 0.06, 0.734, 0.895]->[0.278, 0.074, 0.702, 0.886]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1425.0s, [0.142, 0.074, 0.777, 0.894]: sit, talk to (e.g., self, a person, a group).", "At 1424.0s, [0.22, 0.126, 0.832, 0.915]: sit, put down, talk to (e.g., self, a person, a group).", "From 1422.0s to 1423.0s, [0.239, 0.132, 0.814, 0.922]->[0.219, 0.145, 0.826, 0.941]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1420.0s to 1421.0s, [0.139, 0.153, 0.832, 0.894]->[0.175, 0.127, 0.851, 0.919]: sit, carry/hold (an object)."]}]}, {"video": "-ZFgsrolSxo.mkv", "clip": [1727, 1777], "conversations": [{"from": "human", "timestamps": 1757.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.222, 0.035, 0.576, 0.847]: cut."}, {"from": "human", "timestamps": 1777.0,
"value": "What actions is the person in bbox [0.169, 0.523, 0.409, 0.978] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.169, 0.523, 0.409, 0.978]: sit, carry/hold (an object), cut.", "At 1776.0s, [0.26, 0.545, 0.47, 0.983]: cut, crouch/kneel.", "At 1775.0s, [0.236, 0.499, 0.478, 0.978]: sit, carry/hold (an object).", "At 1774.0s, [0.243, 0.541, 0.458, 0.974]: sit.", "From 1770.0s to 1773.0s, [0.215, 0.497, 0.464, 0.977]->[0.214, 0.537, 0.475, 0.978]->[0.242, 0.487, 0.471, 0.985]->[0.217, 0.525, 0.466, 0.969]: cut, crouch/kneel.", "From 1768.0s to 1769.0s, [0.253, 0.531, 0.507, 0.981]->[0.254, 0.509, 0.476, 0.984]: sit, cut.", "At 1767.0s, [0.246, 0.516, 0.468, 0.991]: crouch/kneel.", "At 1766.0s, [0.249, 0.499, 0.458, 0.982]: sit, crouch/kneel.", "At 1765.0s, [0.247, 0.521, 0.463, 0.968]: sit, cut, crouch/kneel.", "At 1764.0s, [0.243, 0.489, 0.468, 0.987]: sit, carry/hold (an object), cut.", "From 1762.0s to 1763.0s, [0.236, 0.467, 0.459, 0.983]->[0.237, 0.47, 0.457, 0.993]: sit, cut.", "At 1761.0s, [0.235, 0.471, 0.45, 0.986]: sit, carry/hold (an object), cut.", "At 1760.0s, [0.237, 0.474, 0.451, 0.992]: carry/hold (an object), chop, cut, crouch/kneel.", "At 1759.0s, [0.25, 0.017, 0.599, 0.839]: carry/hold (an object), cut, crouch/kneel.", "At 1758.0s, [0.249, 0.04, 0.592, 0.844]: sit, carry/hold (an object), cut.", "At 1757.0s, [0.222, 0.035, 0.576, 0.847]: cut."]}]}, {"video": "053oq2xB3oU.mkv", "clip": [876, 909], "conversations": [{"from": "human", "timestamps": 906.0, "value": "What actions is the person in bbox [0.38, 0.2, 0.841, 0.881] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.38, 0.2, 0.841, 0.881]: sit.", "At 905.0s, [0.39, 0.183, 0.856, 0.872]: sit, listen to (a person).", "At 904.0s, [0.061, 0.168, 0.774, 0.877]: sit."]}, {"from": "human", "timestamps": 909.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.265, 0.172, 0.753, 0.872]: sit; [0.808, 0.207, 1.0, 0.889]: sit."}]}, {"video": "053oq2xB3oU.mkv", "clip": [928, 1005], "conversations": [{"from": "human", "timestamps": 958.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.263, 0.188, 0.792, 0.853]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1005.0, "value": "Describe the actions of the person at bbox [0.827, 0.311, 0.986, 0.824] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.827, 0.311, 0.986, 0.824]: sit, watch (a person).", "At 1004.0s, [0.851, 0.348, 0.988, 0.836]: sit, watch (a person)."]}]}, {"video": "053oq2xB3oU.mkv", "clip": [940, 1018], "conversations": [{"from": "human", "timestamps": 970.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.118, 0.176, 0.54, 0.879]?"}, {"from": "gpt", "value": ["At Now, [0.118, 0.176, 0.54, 0.879]: walk, carry/hold (an object).", "At 969.0s, [0.78, 0.189, 0.998, 0.789]: sit, watch (a person)."]}, {"from": "human", "timestamps": 1018.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.012, 0.248, 0.211, 0.741]: watch (a person), sit; [0.149, 0.249, 0.274, 0.707]: listen to (a person), sit, watch (a person); [0.23, 0.248, 
0.458, 0.768]: watch (a person), sit; [0.42, 0.237, 0.533, 0.764]: watch (a person), sit; [0.494, 0.263, 0.644, 0.784]: watch (a person), sit; [0.594, 0.203, 0.832, 0.824]: carry/hold (an object), sit, watch (a person); [0.854, 0.237, 0.996, 0.637]: watch (a person), sit."}]}, {"video": "053oq2xB3oU.mkv", "clip": [993, 1055], "conversations": [{"from": "human", "timestamps": 1023.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.015, 0.605, 0.192, 0.862]?"}, {"from": "gpt", "value": ["In the current frame, [0.015, 0.605, 0.192, 0.862]: sit, watch (a person).", "At 1022.0s, [0.019, 0.594, 0.157, 0.863]: sit.", "At 1021.0s, [0.018, 0.594, 0.159, 0.862]: sit, watch (a person).", "At 1020.0s, [0.016, 0.577, 0.153, 0.857]: sit."]}, {"from": "human", "timestamps": 1055.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.003, 0.227, 0.35, 0.849]: watch (a person), talk to (e.g., self, a person, a group), bend/bow (at the waist); [0.257, 0.159, 0.469, 0.524]: listen to (a person), talk to (e.g., self, a person, a group), watch (a person), bend/bow (at the waist); [0.434, 0.245, 0.612, 0.724]: listen to (a person), sit, touch (an object), watch (a person); [0.518, 0.368, 0.741, 0.862]: listen to (a person), sit, watch (a person); [0.659, 0.319, 0.977, 0.864]: listen to (a person), sit, watch (a person); [0.834, 0.254, 0.984, 0.633]: listen to (a person), sit, watch (a person)."}]}, {"video": "053oq2xB3oU.mkv", "clip": [1044, 1104], "conversations": [{"from": "human", "timestamps": 1074.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.291, 0.16, 0.885]: sit; [0.156, 0.236, 0.54, 0.865]: sit; [0.48, 0.343, 0.647, 0.665]: watch (a person), sit; [0.536, 0.156, 0.987, 0.88]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1104.0, "value": "For the person at bbox [0.429, 0.203, 0.941, 0.784], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.429, 0.203, 0.941, 0.784]: stand, carry/hold (an object), touch (an object).", "At 1103.0s, [0.224, 0.177, 0.653, 0.815]: stand, carry/hold (an object), touch (an object).", "From 1100.0s to 1102.0s, [0.248, 0.301, 0.604, 0.829]->[0.255, 0.286, 0.604, 0.831]->[0.256, 0.242, 0.608, 0.838]: stand, carry/hold (an object)."]}]}, {"video": "053oq2xB3oU.mkv", "clip": [1086, 1118], "conversations": [{"from": "human", "timestamps": 1116.0, "value": "For the person at bbox [0.174, 0.165, 0.763, 0.854], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.174, 0.165, 0.763, 0.854]: sit, carry/hold (an object), watch (a person).", "At 1115.0s, [0.22, 0.176, 0.906, 0.878]: sit, carry/hold (an object)."]}, {"from": "human", "timestamps": 1118.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.19, 0.167, 0.829, 0.87]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.662, 0.328, 0.83, 0.667]: watch (a person), sit; [0.883, 0.351, 1.0, 0.866]: watch (a person), sit."}]}, {"video": "053oq2xB3oU.mkv", "clip": [1119, 1167], "conversations": [{"from": "human", "timestamps": 1149.0, "value": "In the current frame, list each person's location and 
actions."}, {"from": "gpt", "value": "In the current frame, [0.427, 0.321, 0.774, 0.736]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group); [0.746, 0.298, 0.996, 0.825]: listen to (a person), sit, touch (an object)."}, {"from": "human", "timestamps": 1167.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.314, 0.256, 0.615, 0.787]?"}, {"from": "gpt", "value": ["At Now, [0.314, 0.256, 0.615, 0.787]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1166.0s, [0.357, 0.258, 0.659, 0.818]: stand, listen to (a person).", "At 1165.0s, [0.346, 0.26, 0.658, 0.813]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1164.0s, [0.352, 0.264, 0.659, 0.826]: stand, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1163.0s, [0.353, 0.256, 0.66, 0.814]: touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1162.0s, [0.35, 0.249, 0.656, 0.801]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1161.0s, [0.347, 0.244, 0.659, 0.776]: stand, talk to (e.g., self, a person, a group).", "At 1160.0s, [0.362, 0.273, 0.694, 0.79]: stand, grab (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1159.0s, [0.35, 0.256, 0.616, 0.789]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1158.0s, [0.39, 0.257, 0.68, 0.798]: stand, touch (an object), listen to (a person).", "At 1157.0s, [0.392, 0.262, 0.698, 0.78]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "053oq2xB3oU.mkv", "clip": [1118, 1202], "conversations": [{"from": "human", "timestamps": 1148.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.041, 0.362, 0.357, 0.822]: listen to (a person), sit, touch (an object), watch (a person); [0.461, 0.331, 0.766, 0.752]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.745, 0.306, 0.987, 0.847]: listen to (a person), sit, touch (an object), watch (a person)."}, {"from": "human", "timestamps": 1202.0, "value": "Describe the actions of the person at bbox [0.335, 0.248, 0.623, 0.774] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.335, 0.248, 0.623, 0.774]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1201.0s, [0.326, 0.247, 0.602, 0.775]: stand, carry/hold (an object), touch (an object), watch (a person).", "At 1200.0s, [0.341, 0.254, 0.657, 0.797]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1199.0s, [0.336, 0.245, 0.642, 0.791]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1198.0s, [0.342, 0.243, 0.646, 0.787]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1197.0s, [0.344, 0.241, 0.642, 0.809]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1195.0s to 1196.0s, [0.343, 0.25, 0.628, 0.778]->[0.342, 0.243, 0.647, 0.795]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1194.0s, [0.348, 0.254, 0.626, 0.789]: stand, touch (an object), talk to (e.g., self, a person, a group).", "From 1191.0s to 1193.0s, [0.348, 0.252, 0.665, 0.78]->[0.342, 0.255, 0.635, 0.782]->[0.341, 0.249, 0.652, 0.788]: stand, 
talk to (e.g., self, a person, a group), watch (a person).", "At 1190.0s, [0.344, 0.245, 0.662, 0.782]: stand, talk to (e.g., self, a person, a group).", "From 1188.0s to 1189.0s, [0.332, 0.25, 0.621, 0.764]->[0.328, 0.247, 0.672, 0.803]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1187.0s, [0.257, 0.255, 0.572, 0.802]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1186.0s, [0.286, 0.245, 0.579, 0.802]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1185.0s, [0.26, 0.244, 0.586, 0.775]: stand, carry/hold (an object).", "At 1184.0s, [0.289, 0.245, 0.588, 0.779]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1183.0s, [0.351, 0.239, 0.629, 0.775]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 1180.0s to 1182.0s, [0.351, 0.264, 0.625, 0.773]->[0.346, 0.265, 0.627, 0.786]->[0.339, 0.263, 0.619, 0.786]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1177.0s to 1179.0s, [0.361, 0.263, 0.649, 0.777]->[0.35, 0.267, 0.626, 0.769]->[0.325, 0.268, 0.63, 0.77]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1175.0s to 1176.0s, [0.36, 0.261, 0.631, 0.787]->[0.393, 0.259, 0.663, 0.818]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1174.0s, [0.355, 0.244, 0.654, 0.79]: stand, carry/hold (an object).", "From 1172.0s to 1173.0s, [0.251, 0.248, 0.536, 0.802]->[0.329, 0.25, 0.622, 0.781]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1171.0s, [0.32, 0.253, 0.595, 0.775]: stand, carry/hold (an object), lift/pick up.", "From 1167.0s to 1170.0s, [0.314, 0.256, 0.615, 0.787]->[0.341, 0.269, 0.66, 0.794]->[0.344, 0.271, 0.612, 0.793]->[0.349, 0.261, 0.672, 0.77]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1166.0s, [0.357, 0.258, 0.659, 0.818]: stand, listen to (a person).", "At 1165.0s, [0.346, 0.26, 0.658, 0.813]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1164.0s, [0.352, 0.264, 0.659, 0.826]: stand, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1163.0s, [0.353, 0.256, 0.66, 0.814]: touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1162.0s, [0.35, 0.249, 0.656, 0.801]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1161.0s, [0.347, 0.244, 0.659, 0.776]: stand, talk to (e.g., self, a person, a group).", "At 1160.0s, [0.362, 0.273, 0.694, 0.79]: stand, grab (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1159.0s, [0.35, 0.256, 0.616, 0.789]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1158.0s, [0.39, 0.257, 0.68, 0.798]: stand, touch (an object), listen to (a person).", "At 1157.0s, [0.392, 0.262, 0.698, 0.78]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "053oq2xB3oU.mkv", "clip": [1189, 1223], "conversations": [{"from": "human", "timestamps": 1219.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.024, 0.167, 0.702, 0.861]: walk."}, {"from": "human", "timestamps": 1223.0, "value": "Can you tell me the current and past actions of the person located in 
bbox [0.11, 0.124, 0.774, 0.869]?"}, {"from": "gpt", "value": ["At Now, [0.11, 0.124, 0.774, 0.869]: walk.", "From 1220.0s to 1222.0s, [0.12, 0.196, 0.731, 0.871]->[0.103, 0.193, 0.689, 0.877]->[0.121, 0.221, 0.691, 0.874]: walk, talk to (e.g., self, a person, a group).", "At 1219.0s, [0.024, 0.167, 0.702, 0.861]: walk.", "At 1218.0s, [0.0, 0.131, 0.801, 0.845]: walk, talk to (e.g., self, a person, a group).", "From 1212.0s to 1217.0s, [0.176, 0.126, 0.83, 0.859]->[0.154, 0.123, 0.786, 0.874]->[0.081, 0.141, 0.829, 0.872]->[0.079, 0.159, 0.929, 0.873]->[0.045, 0.129, 0.827, 0.859]->[0.296, 0.177, 0.794, 0.887]: walk."]}]}, {"video": "053oq2xB3oU.mkv", "clip": [1287, 1319], "conversations": [{"from": "human", "timestamps": 1317.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.008, 0.143, 0.425, 0.878]: watch (a person), stand; [0.208, 0.144, 0.976, 0.876]: watch (a person), stand."}, {"from": "human", "timestamps": 1319.0, "value": "What actions is the person in bbox [0.205, 0.139, 1.0, 0.864] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.205, 0.139, 1.0, 0.864]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1318.0s, [0.712, 0.147, 0.992, 0.838]: stand, watch (a person)."]}]}, {"video": "053oq2xB3oU.mkv", "clip": [1280, 1375], "conversations": [{"from": "human", "timestamps": 1310.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.005, 0.143, 0.23, 0.891]: listen to (a person), stand, watch (a person); [0.289, 0.183, 0.661, 0.87]: watch (a person), stand, talk to (e.g., self, a person, a group), push (another person); [0.877, 0.186, 1.0, 0.854]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1375.0, "value": "For the person at bbox [0.012, 0.153, 0.594, 0.863], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.012, 0.153, 0.594, 0.863]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person).", "At 1374.0s, [0.0, 0.138, 0.564, 0.867]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "053oq2xB3oU.mkv", "clip": [1292, 1377], "conversations": [{"from": "human", "timestamps": 1322.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.004, 0.135, 0.412, 0.869]: listen to (a person), stand, watch (a person); [0.148, 0.131, 1.0, 0.87]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1377.0, "value": "Describe the actions of the person at bbox [0.011, 0.113, 0.378, 0.904] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.011, 0.113, 0.378, 0.904]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person).", "From 1374.0s to 1376.0s, [0.0, 0.138, 0.564, 0.867]->[0.012, 0.153, 0.594, 0.863]->[0.003, 0.106, 0.415, 0.876]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "053oq2xB3oU.mkv", "clip": [1392, 1454], "conversations": [{"from": "human", "timestamps": 1422.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.203, 0.231, 0.464, 0.854]: listen to (a person),
stand, watch (a person); [0.506, 0.191, 0.907, 0.863]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1454.0, "value": "What actions did the person at bbox [0.177, 0.244, 0.372, 0.806] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.177, 0.244, 0.372, 0.806]: sit, listen to (a person), watch (a person).", "At 1453.0s, [0.161, 0.251, 0.358, 0.804]: sit, listen to (a person), watch (a person).", "At 1452.0s, [0.185, 0.258, 0.374, 0.802]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1451.0s, [0.132, 0.29, 0.361, 0.795]: sit, listen to (a person), watch (a person).", "At 1450.0s, [0.185, 0.246, 0.512, 0.842]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1449.0s, [0.236, 0.202, 0.565, 0.863]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1448.0s, [0.395, 0.217, 0.699, 0.838]: sit, watch (a person)."]}]}, {"video": "053oq2xB3oU.mkv", "clip": [1415, 1481], "conversations": [{"from": "human", "timestamps": 1445.0, "value": "What actions did the person at bbox [0.562, 0.182, 0.89, 0.863] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.562, 0.182, 0.89, 0.863]: stand, watch (a person).", "From 1443.0s to 1444.0s, [0.556, 0.183, 0.907, 0.854]->[0.564, 0.175, 0.896, 0.859]: stand, watch (a person).", "At 1442.0s, [0.552, 0.181, 0.904, 0.859]: stand, listen to (a person), watch (a person).", "At 1441.0s, [0.556, 0.176, 0.901, 0.859]: stand, watch (a person).", "From 1439.0s to 1440.0s, [0.559, 0.175, 0.896, 0.865]->[0.568, 0.177, 0.901, 0.863]: stand, listen to (a person), watch (a person).", "From 1437.0s to 1438.0s, [0.573, 0.185, 0.908, 0.867]->[0.56, 0.182, 0.904, 0.865]: stand, watch (a person).", "From 1434.0s to 1436.0s, [0.561, 0.171, 0.906, 0.871]->[0.586, 0.185, 0.908, 0.867]->[0.565, 0.181, 0.909, 0.856]: stand, listen to (a person), watch (a person).", "At 1433.0s, [0.565, 0.176, 0.89, 0.859]: stand, watch (a person).", "From 1431.0s to 1432.0s, [0.554, 0.176, 0.881, 0.86]->[0.537, 0.177, 0.892, 0.859]: stand, listen to (a person), watch (a person).", "At 1430.0s, [0.547, 0.175, 0.884, 0.866]: stand, watch (a person).", "From 1426.0s to 1429.0s, [0.478, 0.165, 0.813, 0.869]->[0.497, 0.199, 0.852, 0.862]->[0.546, 0.171, 0.873, 0.861]->[0.533, 0.172, 0.863, 0.859]: stand, listen to (a person), watch (a person).", "At 1425.0s, [0.511, 0.164, 0.811, 0.852]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1423.0s to 1424.0s, [0.684, 0.181, 0.993, 0.859]->[0.584, 0.187, 0.98, 0.877]: stand, watch (a person).", "At 1422.0s, [0.506, 0.191, 0.907, 0.863]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1421.0s, [0.497, 0.156, 0.794, 0.851]: stand, watch (a person).", "At 1420.0s, [0.665, 0.168, 0.973, 0.875]: walk, listen to (a person), watch (a person).", "From 1417.0s to 1419.0s, [0.482, 0.189, 0.984, 0.897]->[0.518, 0.187, 0.994, 0.909]->[0.523, 0.18, 0.992, 0.931]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person).", "At 1416.0s, [0.504, 0.194, 0.982, 0.894]: bend/bow (at the waist), grab (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1415.0s, [0.518, 0.202, 0.999, 0.894]: bend/bow (at the waist), pull (an object), talk to (e.g., self, a person, a group).", "At 1414.0s, [0.536, 0.167, 0.989, 0.905]: bend/bow (at the waist), talk to (e.g., self, a person, a group), 
watch (a person).", "At 1413.0s, [0.592, 0.131, 0.997, 0.861]: stand, watch (a person).", "At 1412.0s, [0.553, 0.138, 0.962, 0.862]: stand."]}, {"from": "human", "timestamps": 1481.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.226, 0.156, 0.872]: carry/hold (an object), walk; [0.086, 0.249, 0.279, 0.865]: walk; [0.407, 0.328, 0.54, 0.703]: walk; [0.471, 0.395, 0.676, 0.863]: carry/hold (an object), dress/put on clothing, walk."}]}, {"video": "053oq2xB3oU.mkv", "clip": [1504, 1615], "conversations": [{"from": "human", "timestamps": 1534.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.246, 0.325, 0.53, 0.879]: stand, talk to (e.g., self, a person, a group); [0.597, 0.248, 0.836, 0.862]: carry/hold (an object), stand, listen to (a person)."}, {"from": "human", "timestamps": 1615.0, "value": "Describe the actions of the person at bbox [0.154, 0.195, 0.553, 0.664] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.154, 0.195, 0.553, 0.664]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1612.0s to 1614.0s, [0.127, 0.193, 0.568, 0.665]->[0.151, 0.188, 0.547, 0.668]->[0.127, 0.195, 0.532, 0.676]: sit, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "053oq2xB3oU.mkv", "clip": [1555, 1614], "conversations": [{"from": "human", "timestamps": 1585.0, "value": "What actions did the person at bbox [0.407, 0.214, 0.989, 0.879] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.407, 0.214, 0.989, 0.879]: get up, listen to (a person), watch (a person).", "From 1583.0s to 1584.0s, [0.422, 0.208, 0.982, 0.873]->[0.459, 0.218, 0.983, 0.866]: sit, listen to (a person), watch (a person).", "From 1578.0s to 1582.0s, [0.464, 0.215, 0.983, 0.865]->[0.459, 0.209, 0.973, 0.873]->[0.446, 0.209, 0.981, 0.883]->[0.447, 0.219, 0.968, 0.856]->[0.452, 0.209, 0.976, 0.863]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1614.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.127, 0.195, 0.532, 0.676]: carry/hold (an object), sit, watch (a person), listen to (a person); [0.51, 0.192, 0.963, 0.857]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "053oq2xB3oU.mkv", "clip": [1627, 1665], "conversations": [{"from": "human", "timestamps": 1657.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.132, 0.199, 0.636, 0.873]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1665.0, "value": "What actions is the person in bbox [0.588, 0.182, 0.949, 0.922] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.588, 0.182, 0.949, 0.922]: stand, carry/hold (an object).", "From 1663.0s to 1664.0s, [0.131, 0.246, 0.498, 0.789]->[0.106, 0.191, 0.492, 0.829]: stand.", "From 1661.0s to 1662.0s, [0.149, 0.245, 0.54, 0.841]->[0.134, 0.224, 0.516, 0.829]: stand, carry/hold (an object).", "From 1657.0s to 1660.0s, [0.132, 0.199, 0.636, 0.873]->[0.12, 0.182, 0.695, 0.867]->[0.128, 0.153, 0.63, 0.869]->[0.065, 0.18, 0.767, 0.872]: stand, carry/hold (an object), talk to 
(e.g., self, a person, a group).", "From 1652.0s to 1656.0s, [0.144, 0.167, 0.723, 0.875]->[0.131, 0.169, 0.686, 0.871]->[0.17, 0.168, 0.705, 0.872]->[0.094, 0.15, 0.769, 0.87]->[0.118, 0.214, 0.765, 0.858]: stand, carry/hold (an object)."]}]}, {"video": "053oq2xB3oU.mkv", "clip": [1597, 1737], "conversations": [{"from": "human", "timestamps": 1627.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.107, 0.201, 0.528, 0.817]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.497, 0.189, 0.828, 0.811]: listen to (a person), sit."}, {"from": "human", "timestamps": 1737.0, "value": "What actions did the person at bbox [0.379, 0.252, 0.653, 0.822] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.379, 0.252, 0.653, 0.822]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1736.0s, [0.284, 0.252, 0.591, 0.871]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1735.0s, [0.064, 0.21, 0.477, 0.847]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1730.0s to 1734.0s, [0.101, 0.224, 0.518, 0.863]->[0.105, 0.232, 0.504, 0.866]->[0.098, 0.236, 0.527, 0.863]->[0.121, 0.235, 0.537, 0.848]->[0.112, 0.237, 0.529, 0.841]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1728.0s to 1729.0s, [0.098, 0.231, 0.523, 0.869]->[0.096, 0.229, 0.51, 0.867]: sit, carry/hold (an object), watch (a person).", "From 1726.0s to 1727.0s, [0.109, 0.23, 0.523, 0.869]->[0.102, 0.238, 0.52, 0.867]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1723.0s to 1725.0s, [0.101, 0.236, 0.516, 0.848]->[0.112, 0.234, 0.517, 0.854]->[0.11, 0.239, 0.506, 0.855]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1718.0s to 1722.0s, [0.103, 0.227, 0.515, 0.857]->[0.101, 0.242, 0.544, 0.861]->[0.112, 0.234, 0.539, 0.859]->[0.109, 0.234, 0.536, 0.857]->[0.104, 0.234, 0.493, 0.852]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1707.0s to 1717.0s, [0.111, 0.236, 0.552, 0.854]->[0.107, 0.246, 0.602, 0.854]->[0.099, 0.238, 0.58, 0.878]->[0.11, 0.239, 0.568, 0.864]->[0.111, 0.24, 0.601, 0.864]->[0.115, 0.233, 0.531, 0.864]->[0.111, 0.238, 0.547, 0.862]->[0.098, 0.247, 0.578, 0.878]->[0.108, 0.229, 0.527, 0.862]->[0.113, 0.236, 0.526, 0.861]->[0.109, 0.236, 0.522, 0.868]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1706.0s, [0.107, 0.24, 0.579, 0.85]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1704.0s to 1705.0s, [0.108, 0.24, 0.598, 0.847]->[0.112, 0.236, 0.527, 0.852]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1703.0s, [0.101, 0.228, 0.493, 0.857]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1702.0s, [0.098, 0.259, 0.483, 0.86]: sit, carry/hold (an object), listen to (a person).", "At 1701.0s, [0.104, 0.275, 0.473, 0.858]: sit, carry/hold (an object), drink, listen to (a person).", "At 1700.0s, [0.097, 0.275, 0.493, 0.847]: sit, carry/hold (an object), listen to (a person).", "At 1699.0s, [0.106, 0.227, 0.619, 0.847]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1698.0s, 
[0.117, 0.248, 0.585, 0.855]: sit, carry/hold (an object), eat, listen to (a person).", "At 1697.0s, [0.109, 0.247, 0.531, 0.867]: sit, carry/hold (an object), smoke.", "At 1696.0s, [0.119, 0.272, 0.591, 0.838]: sit, smoke.", "At 1695.0s, [0.108, 0.271, 0.562, 0.836]: sit, carry/hold (an object), drink, listen to (a person).", "At 1694.0s, [0.104, 0.241, 0.488, 0.851]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1693.0s, [0.117, 0.227, 0.523, 0.864]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1692.0s, [0.109, 0.229, 0.542, 0.858]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1687.0s to 1691.0s, [0.118, 0.229, 0.538, 0.856]->[0.106, 0.214, 0.552, 0.866]->[0.095, 0.219, 0.577, 0.874]->[0.106, 0.231, 0.546, 0.879]->[0.102, 0.233, 0.545, 0.863]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1686.0s, [0.117, 0.225, 0.531, 0.852]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1685.0s, [0.087, 0.277, 0.471, 0.871]: sit, carry/hold (an object), watch (a person).", "At 1684.0s, [0.093, 0.289, 0.532, 0.854]: sit, carry/hold (an object).", "At 1683.0s, [0.278, 0.191, 0.596, 0.835]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1682.0s, [0.126, 0.134, 0.415, 0.849]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1681.0s, [0.256, 0.155, 0.577, 0.855]: walk, carry/hold (an object), watch (a person)."]}]}, {"video": "053oq2xB3oU.mkv", "clip": [1663, 1733], "conversations": [{"from": "human", "timestamps": 1693.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.117, 0.227, 0.523, 0.864]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group); [0.466, 0.228, 0.904, 0.782]: carry/hold (an object), sit, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1733.0, "value": "Describe the actions of the person at bbox [0.466, 0.292, 0.85, 0.728] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.466, 0.292, 0.85, 0.728]: sit, carry/hold (an object), listen to (a person).", "From 1728.0s to 1732.0s, [0.451, 0.229, 0.833, 0.754]->[0.465, 0.234, 0.839, 0.78]->[0.461, 0.269, 0.832, 0.731]->[0.461, 0.263, 0.845, 0.728]->[0.449, 0.264, 0.845, 0.742]: sit, carry/hold (an object), listen to (a person).", "From 1726.0s to 1727.0s, [0.457, 0.225, 0.831, 0.739]->[0.454, 0.224, 0.832, 0.748]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1723.0s to 1725.0s, [0.461, 0.232, 0.833, 0.766]->[0.457, 0.237, 0.834, 0.761]->[0.448, 0.22, 0.825, 0.767]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1707.0s to 1722.0s, [0.441, 0.243, 0.833, 0.765]->[0.442, 0.246, 0.837, 0.778]->[0.447, 0.24, 0.829, 0.761]->[0.464, 0.237, 0.832, 0.762]->[0.464, 0.235, 0.829, 0.778]->[0.456, 0.236, 0.831, 0.781]->[0.462, 0.236, 0.827, 0.761]->[0.46, 0.242, 0.834, 0.776]->[0.453, 0.237, 0.832, 0.761]->[0.456, 0.236, 0.829, 0.78]->[0.462, 0.232, 0.829, 0.766]->[0.46, 0.236, 0.832, 0.759]->[0.46, 0.237, 0.827, 0.778]->[0.452, 0.234, 0.833, 0.762]->[0.46, 0.239, 0.827, 0.774]->[0.456, 0.229, 0.834, 0.757]: sit, 
carry/hold (an object), listen to (a person), watch (a person).", "At 1706.0s, [0.445, 0.241, 0.833, 0.741]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1705.0s, [0.438, 0.239, 0.835, 0.771]: sit, carry/hold (an object), watch (a person).", "At 1704.0s, [0.451, 0.238, 0.843, 0.772]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1703.0s, [0.47, 0.221, 0.856, 0.746]: sit, carry/hold (an object), eat, listen to (a person), watch (a person).", "From 1700.0s to 1702.0s, [0.472, 0.195, 0.838, 0.764]->[0.493, 0.198, 0.842, 0.775]->[0.487, 0.188, 0.832, 0.744]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1699.0s, [0.44, 0.249, 0.837, 0.747]: sit, carry/hold (an object), eat, listen to (a person), watch (a person).", "From 1696.0s to 1698.0s, [0.45, 0.247, 0.872, 0.783]->[0.438, 0.231, 0.773, 0.741]->[0.447, 0.245, 0.84, 0.746]: sit, carry/hold (an object), eat, talk to (e.g., self, a person, a group), watch (a person).", "At 1695.0s, [0.442, 0.266, 0.849, 0.743]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1694.0s, [0.451, 0.243, 0.844, 0.764]: sit, carry/hold (an object), listen to (a person).", "At 1693.0s, [0.466, 0.228, 0.904, 0.782]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1691.0s to 1692.0s, [0.469, 0.228, 0.867, 0.769]->[0.46, 0.231, 0.858, 0.805]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1690.0s, [0.47, 0.278, 0.905, 0.743]: sit, carry/hold (an object), listen to (a person).", "At 1689.0s, [0.49, 0.281, 0.87, 0.721]: sit, carry/hold (an object), eat, listen to (a person).", "From 1687.0s to 1688.0s, [0.47, 0.217, 0.846, 0.733]->[0.47, 0.25, 0.874, 0.765]: sit, carry/hold (an object), listen to (a person).", "At 1686.0s, [0.478, 0.225, 0.848, 0.732]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1685.0s, [0.47, 0.227, 0.793, 0.764]: sit, pull (an object), take (an object) from (a person), watch (a person).", "At 1684.0s, [0.513, 0.256, 0.841, 0.797]: sit, watch (a person).", "At 1683.0s, [0.483, 0.398, 0.817, 0.834]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1681.0s to 1682.0s, [0.469, 0.398, 0.772, 0.855]->[0.514, 0.452, 0.841, 0.862]: sit, carry/hold (an object), listen to (a person).", "At 1680.0s, [0.425, 0.356, 0.741, 0.852]: sit, carry/hold (an object), eat, listen to (a person).", "At 1679.0s, [0.421, 0.335, 0.809, 0.842]: sit, carry/hold (an object).", "From 1673.0s to 1678.0s, [0.297, 0.213, 0.653, 0.838]->[0.3, 0.239, 0.649, 0.854]->[0.361, 0.216, 0.644, 0.767]->[0.373, 0.2, 0.647, 0.737]->[0.308, 0.196, 0.6, 0.664]->[0.351, 0.267, 0.611, 0.829]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1672.0s, [0.318, 0.227, 0.646, 0.834]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1671.0s, [0.303, 0.213, 0.65, 0.819]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1670.0s, [0.323, 0.243, 0.611, 0.821]: stand, carry/hold (an object), eat, talk to (e.g., self, a person, a group).", "At 1669.0s, [0.36, 0.23, 0.605, 0.744]: stand, carry/hold (an object).", "From 1667.0s to 1668.0s, [0.363, 0.279, 0.554, 0.841]->[0.413, 0.252, 0.618, 0.852]: walk, carry/hold (an object).", "At 1666.0s, [0.334, 0.266, 0.494, 
0.826]: walk, carry/hold (an object), watch (a person).", "At 1665.0s, [0.385, 0.28, 0.516, 0.807]: walk, carry/hold (an object)."]}]}, {"video": "053oq2xB3oU.mkv", "clip": [1718, 1789], "conversations": [{"from": "human", "timestamps": 1748.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.226, 0.174, 0.58, 0.751]: walk."}, {"from": "human", "timestamps": 1789.0, "value": "For the person at bbox [0.188, 0.176, 0.53, 0.763], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.188, 0.176, 0.53, 0.763]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1788.0s, [0.184, 0.211, 0.546, 0.76]: sit, carry/hold (an object), listen to (a person).", "From 1784.0s to 1787.0s, [0.191, 0.204, 0.541, 0.766]->[0.206, 0.226, 0.532, 0.739]->[0.21, 0.289, 0.554, 0.769]->[0.204, 0.203, 0.565, 0.754]: sit, listen to (a person), watch (a person).", "At 1783.0s, [0.199, 0.236, 0.556, 0.743]: sit, listen to (a person).", "At 1782.0s, [0.204, 0.253, 0.527, 0.702]: sit, listen to (a person), watch (a person).", "At 1781.0s, [0.209, 0.212, 0.548, 0.743]: sit, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1780.0s, [0.244, 0.193, 0.555, 0.727]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1777.0s to 1779.0s, [0.275, 0.201, 0.601, 0.703]->[0.253, 0.187, 0.588, 0.728]->[0.247, 0.181, 0.578, 0.725]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1776.0s, [0.286, 0.228, 0.606, 0.699]: sit, talk to (e.g., self, a person, a group).", "At 1775.0s, [0.322, 0.213, 0.592, 0.682]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1774.0s, [0.334, 0.226, 0.632, 0.672]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1773.0s, [0.365, 0.236, 0.658, 0.674]: sit, talk to (e.g., self, a person, a group).", "At 1772.0s, [0.363, 0.233, 0.676, 0.668]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1771.0s, [0.412, 0.244, 0.677, 0.662]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1770.0s, [0.417, 0.226, 0.71, 0.695]: sit, carry/hold (an object), smoke, talk to (e.g., self, a person, a group).", "At 1769.0s, [0.435, 0.238, 0.705, 0.65]: sit, carry/hold (an object), smoke, listen to (a person), watch (a person).", "At 1768.0s, [0.469, 0.247, 0.724, 0.626]: sit, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "0f39OWEqJ24.mp4", "clip": [904, 940], "conversations": [{"from": "human", "timestamps": 934.0, "value": "For the person at bbox [0.021, 0.131, 0.676, 0.997], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.021, 0.131, 0.676, 0.997]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 933.0s, [0.032, 0.14, 0.657, 0.986]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 931.0s to 932.0s, [0.008, 0.139, 0.694, 0.994]->[0.021, 0.145, 0.683, 0.99]: stand, carry/hold (an object).", "At 930.0s, [0.035, 0.133, 0.649, 0.987]: stand, carry/hold (an object), watch (a person)."]}, {"from": "human", "timestamps": 940.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At 
now, [0.008, 0.001, 0.973, 1.0]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "0f39OWEqJ24.mp4", "clip": [879, 985], "conversations": [{"from": "human", "timestamps": 909.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.032, 0.182, 0.647, 0.99]: carry/hold (an object), stand, play musical instrument."}, {"from": "human", "timestamps": 985.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.773, 0.449, 0.972, 0.73]?"}, {"from": "gpt", "value": ["In the current frame, [0.773, 0.449, 0.972, 0.73]: sit, listen to (a person).", "At 984.0s, [0.773, 0.451, 0.993, 0.768]: sit, touch (an object), watch (a person)."]}]}, {"video": "0f39OWEqJ24.mp4", "clip": [967, 1018], "conversations": [{"from": "human", "timestamps": 997.0, "value": "What actions did the person at bbox [0.553, 0.116, 0.93, 0.972] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.553, 0.116, 0.93, 0.972]: stand, carry/hold (an object), watch (a person).", "At 996.0s, [0.562, 0.123, 0.878, 0.98]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 995.0s, [0.572, 0.126, 0.886, 0.991]: stand, carry/hold (an object), watch (a person).", "At 994.0s, [0.588, 0.102, 0.908, 0.985]: stand, drink, watch (a person).", "From 992.0s to 993.0s, [0.569, 0.15, 0.818, 0.956]->[0.594, 0.156, 0.84, 0.973]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 991.0s, [0.549, 0.161, 0.791, 0.964]: stand, watch (a person).", "From 989.0s to 990.0s, [0.559, 0.202, 0.748, 0.919]->[0.512, 0.174, 0.823, 0.952]: stand, listen to (a person), watch (a person).", "From 987.0s to 988.0s, [0.477, 0.23, 0.674, 0.898]->[0.531, 0.212, 0.731, 0.929]: stand, carry/hold (an object), watch (a person).", "At 986.0s, [0.475, 0.249, 0.646, 0.676]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1018.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.0, 0.219, 0.498, 0.98]: carry/hold (an object), stand, listen to (a person); [0.437, 0.21, 0.961, 0.995]: stand."}]}, {"video": "0f39OWEqJ24.mp4", "clip": [1003, 1037], "conversations": [{"from": "human", "timestamps": 1033.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.263, 0.0, 1.0, 0.995]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1037.0, "value": "For the person at bbox [0.11, 0.008, 1.0, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.11, 0.008, 1.0, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1033.0s to 1036.0s, [0.263, 0.0, 1.0, 0.995]->[0.311, 0.0, 1.0, 0.988]->[0.107, 0.0, 1.0, 1.0]->[0.084, 0.0, 1.0, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1032.0s, [0.213, 0.0, 1.0, 0.996]: stand, listen to (a person)."]}]}, {"video": "0f39OWEqJ24.mp4", "clip": [1161, 1234], "conversations": [{"from": "human", "timestamps": 1191.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.141, 0.176, 0.728, 0.913]: carry/hold (an object), sit."}, {"from": "human", "timestamps": 1234.0, "value": "Describe the actions of the person at bbox [0.0, 0.047, 
0.905, 0.859] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.047, 0.905, 0.859]: lie/sleep.", "At 1233.0s, [0.012, 0.004, 0.928, 0.861]: lie/sleep."]}]}, {"video": "0f39OWEqJ24.mp4", "clip": [1213, 1246], "conversations": [{"from": "human", "timestamps": 1243.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.254, 0.013, 0.956, 0.996]: watch (a person), sit."}, {"from": "human", "timestamps": 1246.0, "value": "For the person at bbox [0.1, 0.0, 0.914, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.1, 0.0, 0.914, 1.0]: sit, stand, smoke.", "At 1245.0s, [0.06, 0.004, 0.758, 0.987]: stand, carry/hold (an object).", "At 1244.0s, [0.064, 0.0, 0.788, 0.994]: stand."]}]}, {"video": "0f39OWEqJ24.mp4", "clip": [1294, 1347], "conversations": [{"from": "human", "timestamps": 1324.0, "value": "For the person at bbox [0.055, 0.207, 0.807, 0.963], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.055, 0.207, 0.807, 0.963]: sit, carry/hold (an object), watch (a person).", "At 1323.0s, [0.061, 0.168, 0.753, 0.979]: sit, carry/hold (an object)."]}, {"from": "human", "timestamps": 1347.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.147, 0.104, 0.994, 0.993]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "0f39OWEqJ24.mp4", "clip": [1370, 1478], "conversations": [{"from": "human", "timestamps": 1400.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.059, 0.061, 0.998, 1.0]: stand, drink."}, {"from": "human", "timestamps": 1478.0, "value": "What actions did the person at bbox [0.115, 0.0, 0.972, 0.968] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.115, 0.0, 0.972, 0.968]: sit, stand, talk to (e.g., self, a person, a group).", "From 1475.0s to 1477.0s, [0.12, 0.0, 1.0, 0.992]->[0.069, 0.0, 1.0, 0.978]->[0.086, 0.0, 0.981, 0.99]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "0f39OWEqJ24.mp4", "clip": [1429, 1481], "conversations": [{"from": "human", "timestamps": 1459.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.018, 0.128, 0.766, 0.92]: carry/hold (an object), sit; [0.508, 0.07, 1.0, 0.961]: carry/hold (an object), sit."}, {"from": "human", "timestamps": 1481.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.112, 0.0, 1.0, 0.974]?"}, {"from": "gpt", "value": ["In the current frame, [0.112, 0.0, 1.0, 0.974]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1479.0s to 1480.0s, [0.086, 0.0, 0.987, 0.988]->[0.113, 0.0, 0.996, 0.987]: sit, talk to (e.g., self, a person, a group).", "At 1478.0s, [0.115, 0.0, 0.972, 0.968]: sit, stand, talk to (e.g., self, a person, a group).", "From 1475.0s to 1477.0s, [0.12, 0.0, 1.0, 0.992]->[0.069, 0.0, 1.0, 0.978]->[0.086, 0.0, 0.981, 0.99]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "0f39OWEqJ24.mp4", "clip": [1548, 1591], "conversations": [{"from": "human", "timestamps": 1578.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, 
[0.001, 0.111, 0.789, 0.996]: walk."}, {"from": "human", "timestamps": 1591.0, "value": "Describe the actions of the person at bbox [0.08, 0.043, 0.588, 0.986] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.08, 0.043, 0.588, 0.986]: stand, kiss (a person).", "From 1589.0s to 1590.0s, [0.104, 0.036, 0.601, 0.988]->[0.093, 0.046, 0.622, 1.0]: stand, kiss (a person)."]}]}, {"video": "0f39OWEqJ24.mp4", "clip": [1614, 1698], "conversations": [{"from": "human", "timestamps": 1644.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.195, 0.084, 0.906, 0.983]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1698.0, "value": "What actions is the person in bbox [0.153, 0.059, 0.968, 0.994] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.153, 0.059, 0.968, 0.994]: stand, talk to (e.g., self, a person, a group).", "At 1697.0s, [0.136, 0.077, 0.985, 0.988]: stand, talk to (e.g., self, a person, a group).", "At 1696.0s, [0.575, 0.159, 0.899, 0.704]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1693.0s to 1695.0s, [0.629, 0.167, 0.934, 0.702]->[0.589, 0.177, 0.89, 0.69]->[0.581, 0.176, 0.894, 0.707]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1692.0s, [0.586, 0.224, 0.84, 0.699]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1686.0s to 1691.0s, [0.626, 0.213, 0.88, 0.689]->[0.616, 0.213, 0.849, 0.682]->[0.649, 0.24, 0.841, 0.692]->[0.629, 0.223, 0.822, 0.71]->[0.592, 0.224, 0.813, 0.71]->[0.631, 0.211, 0.833, 0.702]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1685.0s, [0.638, 0.228, 0.908, 0.704]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1684.0s, [0.635, 0.233, 0.912, 0.686]: stand, watch (a person).", "At 1683.0s, [0.489, 0.206, 0.766, 0.722]: walk, carry/hold (an object), watch (a person).", "At 1682.0s, [0.222, 0.118, 0.577, 0.75]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1681.0s, [0.191, 0.082, 0.615, 0.851]: stand, carry/hold (an object), lift/pick up, listen to (a person), watch (a person).", "From 1678.0s to 1680.0s, [0.271, 0.037, 0.607, 0.705]->[0.123, 0.064, 0.614, 0.745]->[0.205, 0.083, 0.605, 0.837]: stand, carry/hold (an object).", "At 1677.0s, [0.025, 0.005, 0.613, 0.618]: stand."]}]}, {"video": "0f39OWEqJ24.mp4", "clip": [1673, 1722], "conversations": [{"from": "human", "timestamps": 1703.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.188, 0.121, 0.611, 0.755]: listen to (a person), stand, touch (an object); [0.614, 0.165, 0.938, 0.703]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1722.0, "value": "Describe the actions of the person at bbox [0.432, 0.248, 0.682, 0.695] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.432, 0.248, 0.682, 0.695]: walk, listen to (a person), watch (a person).", "At 1721.0s, [0.44, 0.338, 0.568, 0.709]: walk, watch (a person)."]}]}, {"video": "0f39OWEqJ24.mp4", "clip": [1726, 1773], "conversations": [{"from": "human", "timestamps": 1756.0, "value": "In the current frame, list each
person's location and actions."}, {"from": "gpt", "value": "At now, [0.18, 0.135, 0.972, 0.983]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1773.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.293, 0.032, 0.839, 0.989]?"}, {"from": "gpt", "value": ["In the current frame, [0.293, 0.032, 0.839, 0.989]: stand, talk to (e.g., self, a person, a group).", "From 1771.0s to 1772.0s, [0.214, 0.044, 0.779, 0.998]->[0.262, 0.048, 0.813, 0.999]: stand, talk to (e.g., self, a person, a group).", "At 1770.0s, [0.238, 0.052, 0.796, 0.997]: stand, watch (a person).", "At 1769.0s, [0.243, 0.039, 0.734, 0.992]: stand."]}]}, {"video": "0wBYFahr3uI.mp4", "clip": [873, 911], "conversations": [{"from": "human", "timestamps": 903.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.452, 0.399, 0.581, 0.737]?"}, {"from": "gpt", "value": ["In the current frame, [0.452, 0.399, 0.581, 0.737]: stand, carry/hold (an object).", "At 902.0s, [0.496, 0.385, 0.59, 0.737]: stand, carry/hold (an object), watch (a person)."]}, {"from": "human", "timestamps": 911.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.219, 0.123, 0.712, 0.879]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.7, 0.39, 0.997, 0.894]: bend/bow (at the waist), walk."}]}, {"video": "0wBYFahr3uI.mp4", "clip": [874, 976], "conversations": [{"from": "human", "timestamps": 904.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.233, 0.228, 0.648, 0.889]: carry/hold (an object), stand; [0.528, 0.509, 0.816, 0.874]: bend/bow (at the waist)."}, {"from": "human", "timestamps": 976.0, "value": "What actions did the person at bbox [0.759, 0.297, 0.895, 0.699] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.759, 0.297, 0.895, 0.699]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 975.0s, [0.759, 0.321, 0.908, 0.713]: bend/bow (at the waist), listen to (a person), watch (a person)."]}]}, {"video": "0wBYFahr3uI.mp4", "clip": [1040, 1075], "conversations": [{"from": "human", "timestamps": 1070.0, "value": "For the person at bbox [0.055, 0.168, 0.882, 0.909], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.055, 0.168, 0.882, 0.909]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1068.0s to 1069.0s, [0.041, 0.141, 0.878, 0.892]->[0.05, 0.171, 0.89, 0.905]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1067.0s, [0.237, 0.16, 0.867, 0.892]: stand, play with kids, watch (a person).", "At 1066.0s, [0.259, 0.168, 0.84, 0.854]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1075.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.287, 0.15, 0.934, 0.886]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "0wBYFahr3uI.mp4", "clip": [995, 1110], "conversations": [{"from": "human", "timestamps": 1025.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.03, 0.132, 0.702, 0.888]: listen to (a person), stand, watch (a person)."}, 
{"from": "human", "timestamps": 1110.0, "value": "What actions did the person at bbox [0.006, 0.175, 0.805, 0.874] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.006, 0.175, 0.805, 0.874]: sit, talk to (e.g., self, a person, a group).", "At 1109.0s, [0.009, 0.163, 0.804, 0.88]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "0wBYFahr3uI.mp4", "clip": [1077, 1137], "conversations": [{"from": "human", "timestamps": 1107.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.331, 0.175, 0.951, 0.858]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1137.0, "value": "What actions did the person at bbox [0.037, 0.21, 0.793, 0.88] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.037, 0.21, 0.793, 0.88]: sit, listen to (a person).", "From 1135.0s to 1136.0s, [0.019, 0.163, 0.823, 0.882]->[0.03, 0.185, 0.819, 0.877]: sit, listen to (a person)."]}]}, {"video": "0wBYFahr3uI.mp4", "clip": [1146, 1180], "conversations": [{"from": "human", "timestamps": 1176.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.337, 0.154, 0.941, 0.879]?"}, {"from": "gpt", "value": ["At Now, [0.337, 0.154, 0.941, 0.879]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1174.0s to 1175.0s, [0.324, 0.132, 0.948, 0.854]->[0.285, 0.132, 0.949, 0.891]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1180.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.314, 0.136, 0.944, 0.888]: carry/hold (an object), stand, listen to (a person)."}]}, {"video": "0wBYFahr3uI.mp4", "clip": [1122, 1245], "conversations": [{"from": "human", "timestamps": 1152.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.038, 0.157, 0.81, 0.888]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1245.0, "value": "For the person at bbox [0.013, 0.199, 0.943, 0.873], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.013, 0.199, 0.943, 0.873]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1244.0s, [0.037, 0.202, 0.923, 0.869]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1240.0s to 1243.0s, [0.114, 0.204, 0.92, 0.908]->[0.0, 0.187, 0.884, 0.913]->[0.044, 0.206, 0.94, 0.904]->[0.013, 0.206, 0.912, 0.892]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1239.0s, [0.05, 0.193, 0.903, 0.875]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "0wBYFahr3uI.mp4", "clip": [1194, 1244], "conversations": [{"from": "human", "timestamps": 1224.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.007, 0.178, 0.736, 0.874]?"}, {"from": "gpt", "value": ["In the current frame, [0.007, 0.178, 0.736, 0.874]: sit, watch (a person).", "At 1223.0s, [0.003, 0.17, 0.794, 0.894]: sit, watch (a person)."]}, {"from": 
"human", "timestamps": 1244.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.037, 0.202, 0.923, 0.869]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "0wBYFahr3uI.mp4", "clip": [1245, 1329], "conversations": [{"from": "human", "timestamps": 1275.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.027, 0.189, 0.928, 0.915]: listen to (a person), sit."}, {"from": "human", "timestamps": 1329.0, "value": "What actions did the person at bbox [0.1, 0.164, 0.693, 0.921] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.1, 0.164, 0.693, 0.921]: stand, talk to (e.g., self, a person, a group).", "At 1328.0s, [0.058, 0.107, 0.894, 0.884]: stand, talk to (e.g., self, a person, a group).", "At 1327.0s, [0.093, 0.155, 0.672, 0.911]: sit, talk to (e.g., self, a person, a group).", "From 1325.0s to 1326.0s, [0.118, 0.168, 0.755, 0.907]->[0.104, 0.177, 0.703, 0.899]: stand, talk to (e.g., self, a person, a group).", "At 1324.0s, [0.125, 0.172, 0.578, 0.88]: sit, talk to (e.g., self, a person, a group).", "From 1322.0s to 1323.0s, [0.072, 0.176, 0.535, 0.886]->[0.084, 0.163, 0.74, 0.902]: sit, stand, listen to (a person)."]}]}, {"video": "0wBYFahr3uI.mp4", "clip": [1397, 1494], "conversations": [{"from": "human", "timestamps": 1427.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.064, 0.114, 0.943, 0.892]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1494.0, "value": "For the person at bbox [0.255, 0.1, 1.0, 0.884], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.255, 0.1, 1.0, 0.884]: walk.", "From 1490.0s to 1493.0s, [0.197, 0.29, 0.727, 0.876]->[0.229, 0.232, 0.773, 0.856]->[0.075, 0.148, 0.68, 0.868]->[0.015, 0.119, 0.719, 0.861]: walk."]}]}, {"video": "0wBYFahr3uI.mp4", "clip": [1496, 1573], "conversations": [{"from": "human", "timestamps": 1526.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.516, 0.414, 0.757, 0.872]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1573.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.459, 0.175, 0.758, 0.858]?"}, {"from": "gpt", "value": ["At Now, [0.459, 0.175, 0.758, 0.858]: bend/bow (at the waist).", "From 1568.0s to 1572.0s, [0.39, 0.172, 0.656, 0.823]->[0.404, 0.335, 0.65, 0.859]->[0.368, 0.349, 0.609, 0.873]->[0.395, 0.235, 0.649, 0.877]->[0.432, 0.357, 0.729, 0.864]: bend/bow (at the waist).", "From 1563.0s to 1567.0s, [0.85, 0.175, 1.0, 0.819]->[0.591, 0.19, 0.765, 0.845]->[0.396, 0.202, 0.603, 0.833]->[0.376, 0.187, 0.605, 0.866]->[0.424, 0.191, 0.651, 0.869]: walk."]}]}, {"video": "0wBYFahr3uI.mp4", "clip": [1621, 1662], "conversations": [{"from": "human", "timestamps": 1651.0, "value": "What actions did the person at bbox [0.447, 0.253, 0.949, 0.841] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.447, 0.253, 0.949, 0.841]: stand.", "At 1650.0s, [0.297, 0.208, 0.96, 0.847]: stand, talk to (e.g., self, a person, a group).", "At 1649.0s, [0.281, 0.206, 0.959, 0.866]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1662.0, "value": "For the 
current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.423, 0.431, 0.533, 0.643]: carry/hold (an object), crouch/kneel."}]}, {"video": "0wBYFahr3uI.mp4", "clip": [1617, 1681], "conversations": [{"from": "human", "timestamps": 1647.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.091, 0.209, 0.588, 0.876]: watch (a person), stand."}, {"from": "human", "timestamps": 1681.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.321, 0.211, 0.791, 0.879]?"}, {"from": "gpt", "value": ["At Now, [0.321, 0.211, 0.791, 0.879]: crouch/kneel, touch (an object).", "At 1680.0s, [0.287, 0.292, 0.745, 0.886]: crouch/kneel."]}]}, {"video": "0wBYFahr3uI.mp4", "clip": [1663, 1713], "conversations": [{"from": "human", "timestamps": 1693.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.49, 0.29, 0.686, 0.819]?"}, {"from": "gpt", "value": ["In the current frame, [0.49, 0.29, 0.686, 0.819]: carry/hold (an object), crouch/kneel, touch (an object).", "From 1688.0s to 1692.0s, [0.336, 0.274, 0.696, 0.811]->[0.337, 0.283, 0.691, 0.811]->[0.364, 0.279, 0.691, 0.807]->[0.337, 0.285, 0.689, 0.813]->[0.483, 0.281, 0.684, 0.822]: carry/hold (an object), crouch/kneel.", "At 1687.0s, [0.306, 0.304, 0.686, 0.808]: carry/hold (an object), crouch/kneel, touch (an object).", "From 1685.0s to 1686.0s, [0.487, 0.295, 0.687, 0.825]->[0.294, 0.31, 0.687, 0.794]: carry/hold (an object), crouch/kneel.", "At 1684.0s, [0.514, 0.335, 0.687, 0.82]: sit, carry/hold (an object), crouch/kneel.", "From 1682.0s to 1683.0s, [0.31, 0.263, 0.801, 0.868]->[0.496, 0.322, 0.688, 0.813]: carry/hold (an object), crouch/kneel.", "At 1681.0s, [0.321, 0.211, 0.791, 0.879]: crouch/kneel, touch (an object).", "At 1680.0s, [0.287, 0.292, 0.745, 0.886]: crouch/kneel."]}, {"from": "human", "timestamps": 1713.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.252, 0.563, 0.515, 0.88]: carry/hold (an object), touch (an object), crouch/kneel; [0.581, 0.177, 0.799, 0.817]: watch (a person), stand."}]}, {"video": "0wBYFahr3uI.mp4", "clip": [1762, 1796], "conversations": [{"from": "human", "timestamps": 1792.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.649, 0.353, 0.737, 0.68]?"}, {"from": "gpt", "value": ["In the current frame, [0.649, 0.353, 0.737, 0.68]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1791.0s, [0.648, 0.352, 0.739, 0.683]: stand, carry/hold (an object), watch (a person).", "From 1789.0s to 1790.0s, [0.65, 0.354, 0.746, 0.675]->[0.646, 0.354, 0.74, 0.683]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1787.0s to 1788.0s, [0.645, 0.352, 0.733, 0.681]->[0.649, 0.352, 0.741, 0.685]: stand, carry/hold (an object), watch (a person).", "At 1786.0s, [0.645, 0.356, 0.741, 0.684]: stand, carry/hold (an object), listen to (a person).", "At 1785.0s, [0.649, 0.358, 0.746, 0.686]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1796.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.069, 0.145, 0.977, 0.903]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "1ReZIMmD_8E.mp4", 
"clip": [885, 947], "conversations": [{"from": "human", "timestamps": 915.0, "value": "Describe the actions of the person at bbox [0.297, 0.14, 0.57, 0.991] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.297, 0.14, 0.57, 0.991]: stand.", "At 914.0s, [0.292, 0.145, 0.567, 0.999]: stand, touch (an object).", "At 913.0s, [0.314, 0.283, 0.606, 0.996]: bend/bow (at the waist), carry/hold (an object).", "At 912.0s, [0.313, 0.225, 0.604, 0.99]: bend/bow (at the waist), put down.", "At 911.0s, [0.519, 0.245, 0.673, 0.92]: stand, carry/hold (an object), open (e.g., a window, a car door), listen to (a person).", "From 908.0s to 910.0s, [0.003, 0.091, 0.355, 0.872]->[0.386, 0.199, 0.669, 1.0]->[0.475, 0.218, 0.641, 0.97]: walk, carry/hold (an object).", "At 907.0s, [0.008, 0.206, 0.482, 0.984]: bend/bow (at the waist), carry/hold (an object).", "At 906.0s, [0.226, 0.106, 0.615, 0.989]: stand, listen to (a person).", "From 904.0s to 905.0s, [0.235, 0.091, 0.629, 0.988]->[0.242, 0.094, 0.599, 0.986]: stand, watch (a person).", "At 903.0s, [0.219, 0.074, 0.56, 0.998]: bend/bow (at the waist), watch (a person)."]}, {"from": "human", "timestamps": 947.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.564, 0.435, 0.749, 0.898]: carry/hold (an object), walk."}]}, {"video": "1ReZIMmD_8E.mp4", "clip": [1017, 1050], "conversations": [{"from": "human", "timestamps": 1047.0, "value": "For the person at bbox [0.518, 0.055, 0.969, 0.974], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.518, 0.055, 0.969, 0.974]: stand, carry/hold (an object), give/serve (an object) to (a person), talk to (e.g., self, a person, a group).", "At 1046.0s, [0.659, 0.083, 0.99, 0.967]: stand, carry/hold (an object), give/serve (an object) to (a person), talk to (e.g., self, a person, a group).", "At 1045.0s, [0.655, 0.09, 0.987, 0.968]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1044.0s, [0.625, 0.104, 0.982, 0.886]: stand, carry/hold (an object), drink, talk to (e.g., self, a person, a group).", "At 1043.0s, [0.584, 0.129, 0.978, 0.849]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1042.0s, [0.594, 0.145, 0.985, 0.906]: stand, carry/hold (an object), listen to (a person).", "At 1041.0s, [0.574, 0.104, 0.981, 0.898]: stand, carry/hold (an object), touch (an object), give/serve (an object) to (a person), listen to (a person), talk to (e.g., self, a person, a group).", "At 1040.0s, [0.585, 0.199, 0.984, 0.812]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1039.0s, [0.625, 0.199, 0.99, 0.738]: bend/bow (at the waist), carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1038.0s, [0.843, 0.09, 0.995, 0.678]: stand."]}, {"from": "human", "timestamps": 1050.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.012, 0.237, 0.265, 0.983]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.21, 0.442, 0.438, 1.0]: carry/hold (an object), bend/bow (at the waist), listen to (a person); [0.668, 0.042, 0.96, 0.97]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "1ReZIMmD_8E.mp4", 
"clip": [994, 1094], "conversations": [{"from": "human", "timestamps": 1024.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.184, 0.127, 0.589, 0.7]: carry/hold (an object), sit, dress/put on clothing."}, {"from": "human", "timestamps": 1094.0, "value": "What actions did the person at bbox [0.151, 0.353, 0.393, 0.74] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.151, 0.353, 0.393, 0.74]: walk, carry/hold (an object).", "At 1093.0s, [0.013, 0.363, 0.288, 0.729]: stand, carry/hold (an object), touch (an object)."]}]}, {"video": "1ReZIMmD_8E.mp4", "clip": [1077, 1134], "conversations": [{"from": "human", "timestamps": 1107.0, "value": "What actions is the person in bbox [0.614, 0.267, 0.825, 0.758] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.614, 0.267, 0.825, 0.758]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 1105.0s to 1106.0s, [0.645, 0.268, 0.841, 0.913]->[0.571, 0.258, 0.784, 0.959]: run/jog, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1104.0s, [0.657, 0.279, 0.807, 0.875]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 1102.0s to 1103.0s, [0.632, 0.28, 0.788, 0.895]->[0.645, 0.281, 0.794, 0.894]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1101.0s, [0.676, 0.263, 0.81, 0.909]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1100.0s, [0.699, 0.232, 0.87, 0.938]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1134.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.006, 0.339, 0.253, 0.687]: carry/hold (an object), listen to (a person), walk; [0.289, 0.308, 0.442, 0.532]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.507, 0.254, 0.822, 0.991]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.737, 0.34, 0.936, 0.937]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "1ReZIMmD_8E.mp4", "clip": [1134, 1197], "conversations": [{"from": "human", "timestamps": 1164.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.013, 0.19, 0.226, 0.981]: carry/hold (an object), listen to (a person), watch (a person), walk; [0.267, 0.109, 0.533, 0.995]: carry/hold (an object), watch (a person), walk; [0.545, 0.207, 0.79, 0.911]: carry/hold (an object), watch (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1197.0, "value": "What actions did the person at bbox [0.304, 0.0, 0.833, 0.927] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.304, 0.0, 0.833, 0.927]: stand, drink, talk to (e.g., self, a person, a group).", "At 1196.0s, [0.271, 0.033, 0.655, 0.918]: stand, drink, talk to (e.g., self, a person, a group), watch (a person).", "At 1195.0s, [0.355, 0.352, 0.688, 0.918]: sit, drink, watch (a person).", "At 1194.0s, [0.26, 0.318, 0.681, 0.924]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "From 1191.0s to 1193.0s, [0.324, 0.372, 0.68, 
0.956]->[0.238, 0.373, 0.701, 0.908]->[0.242, 0.369, 0.637, 0.915]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1190.0s, [0.326, 0.349, 0.693, 0.922]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1189.0s, [0.316, 0.371, 0.68, 0.945]: sit, carry/hold (an object), touch (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1188.0s, [0.311, 0.373, 0.669, 0.947]: sit, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "1ReZIMmD_8E.mp4", "clip": [1159, 1237], "conversations": [{"from": "human", "timestamps": 1189.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.04, 0.439, 0.378, 0.91]: listen to (a person), sit, touch (an object), watch (a person); [0.316, 0.371, 0.68, 0.945]: sit, touch (an object), listen to (a person), carry/hold (an object), talk to (e.g., self, a person, a group); [0.509, 0.511, 0.981, 0.996]: sit, touch (an object), listen to (a person), watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1237.0, "value": "What actions is the person in bbox [0.199, 0.358, 0.647, 1.0] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.199, 0.358, 0.647, 1.0]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1235.0s to 1236.0s, [0.376, 0.299, 0.862, 1.0]->[0.278, 0.323, 0.773, 0.991]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1232.0s to 1234.0s, [0.441, 0.276, 0.899, 0.999]->[0.377, 0.29, 0.871, 0.997]->[0.358, 0.302, 0.851, 0.997]: sit, talk to (e.g., self, a person, a group).", "At 1231.0s, [0.296, 0.225, 0.974, 0.986]: sit, touch (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "1ReZIMmD_8E.mp4", "clip": [1171, 1244], "conversations": [{"from": "human", "timestamps": 1201.0, "value": "For the person at bbox [0.359, 0.139, 0.927, 0.987], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.359, 0.139, 0.927, 0.987]: sit, touch (an object), listen to (a person).", "At 1200.0s, [0.442, 0.138, 0.904, 0.977]: sit, listen to (a person), watch (a person).", "At 1199.0s, [0.429, 0.198, 0.92, 0.973]: sit, lift (a person), listen to (a person), watch (a person).", "At 1198.0s, [0.476, 0.221, 0.925, 0.994]: sit, lift (a person), watch (a person)."]}, {"from": "human", "timestamps": 1244.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.121, 0.181, 0.734, 0.985]: stand, touch (an object)."}]}, {"video": "1ReZIMmD_8E.mp4", "clip": [1284, 1316], "conversations": [{"from": "human", "timestamps": 1314.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.194, 0.219, 0.385, 0.987]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1316.0, "value": "For the person at bbox [0.193, 0.128, 0.565, 0.988], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.193, 0.128, 0.565, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1315.0s, [0.203, 0.158, 0.502, 0.981]: walk, carry/hold (an object).", "From 
1312.0s to 1314.0s, [0.195, 0.188, 0.381, 0.986]->[0.192, 0.188, 0.365, 0.97]->[0.194, 0.219, 0.385, 0.987]: stand, carry/hold (an object)."]}]}, {"video": "1ReZIMmD_8E.mp4", "clip": [1278, 1356], "conversations": [{"from": "human", "timestamps": 1308.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.098, 0.199, 0.409, 0.991]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.478, 0.148, 0.812, 0.986]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1356.0, "value": "What actions is the person in bbox [0.1, 0.168, 0.416, 0.994] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.1, 0.168, 0.416, 0.994]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1355.0s, [0.109, 0.187, 0.429, 0.985]: stand, carry/hold (an object).", "At 1354.0s, [0.098, 0.184, 0.562, 0.992]: stand, carry/hold (an object), give/serve (an object) to (a person).", "From 1352.0s to 1353.0s, [0.033, 0.175, 0.356, 0.989]->[0.101, 0.167, 0.374, 0.993]: stand, carry/hold (an object)."]}]}, {"video": "1ReZIMmD_8E.mp4", "clip": [1314, 1350], "conversations": [{"from": "human", "timestamps": 1344.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.004, 0.197, 0.325, 0.78]: carry/hold (an object), stand, listen to (a person); [0.358, 0.599, 0.725, 0.991]: carry/hold (an object), stand, touch (an object), listen to (a person); [0.65, 0.197, 0.984, 0.98]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1350.0, "value": "For the person at bbox [0.455, 0.236, 0.671, 0.726], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.455, 0.236, 0.671, 0.726]: stand, carry/hold (an object), write, talk to (e.g., self, a person, a group).", "At 1348.0s, [0.371, 0.145, 0.874, 0.968]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1346.0s to 1347.0s, [0.331, 0.122, 0.779, 0.986]->[0.4, 0.1, 0.796, 0.999]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "1ReZIMmD_8E.mp4", "clip": [1349, 1401], "conversations": [{"from": "human", "timestamps": 1379.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.129, 0.116, 0.353, 0.977]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.269, 0.088, 0.675, 0.994]: listen to (a person), watch (a person), walk; [0.64, 0.231, 0.988, 1.0]: watch (a person), grab (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1401.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.556, 0.306, 0.94, 0.983]?"}, {"from": "gpt", "value": ["In the current frame, [0.556, 0.306, 0.94, 0.983]: sit, listen to (a person), watch (a person).", "From 1397.0s to 1400.0s, [0.556, 0.303, 0.932, 0.983]->[0.548, 0.315, 0.935, 0.984]->[0.558, 0.302, 0.94, 0.988]->[0.555, 0.307, 0.946, 0.995]: sit, listen to (a person), watch (a person).", "From 1395.0s to 1396.0s, [0.577, 0.284, 0.943, 0.988]->[0.496, 0.3, 0.934, 0.973]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": 
"1ReZIMmD_8E.mp4", "clip": [1368, 1487], "conversations": [{"from": "human", "timestamps": 1398.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.055, 0.263, 0.458, 0.963]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.548, 0.315, 0.935, 0.984]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1487.0, "value": "Describe the actions of the person at bbox [0.491, 0.219, 0.752, 0.925] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.491, 0.219, 0.752, 0.925]: walk.", "At 1486.0s, [0.58, 0.285, 0.791, 0.855]: walk, talk to (e.g., self, a person, a group)."]}]}, {"video": "1ReZIMmD_8E.mp4", "clip": [1433, 1493], "conversations": [{"from": "human", "timestamps": 1463.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.272, 0.211, 0.474, 0.981]: drink, listen to (a person), watch (a person), walk, carry/hold (an object); [0.388, 0.182, 0.748, 0.992]: carry/hold (an object), watch (a person), walk."}, {"from": "human", "timestamps": 1493.0, "value": "For the person at bbox [0.578, 0.151, 0.998, 0.972], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.578, 0.151, 0.998, 0.972]: stand, talk to (e.g., self, a person, a group).", "At 1492.0s, [0.586, 0.149, 0.981, 0.965]: stand, talk to (e.g., self, a person, a group).", "At 1491.0s, [0.64, 0.147, 0.986, 0.966]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1490.0s, [0.803, 0.207, 0.986, 0.966]: walk, watch (a person)."]}]}, {"video": "1ReZIMmD_8E.mp4", "clip": [1522, 1556], "conversations": [{"from": "human", "timestamps": 1552.0, "value": "For the person at bbox [0.358, 0.162, 0.63, 0.98], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.358, 0.162, 0.63, 0.98]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1551.0s, [0.331, 0.162, 0.566, 0.991]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1550.0s, [0.314, 0.17, 0.556, 0.985]: stand, carry/hold (an object), give/serve (an object) to (a person), talk to (e.g., self, a person, a group).", "At 1549.0s, [0.314, 0.251, 0.644, 0.986]: stand, carry/hold (an object).", "At 1548.0s, [0.271, 0.189, 0.567, 0.982]: walk, carry/hold (an object).", "At 1547.0s, [0.227, 0.125, 0.548, 0.997]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1546.0s, [0.248, 0.1, 0.525, 0.98]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1545.0s, [0.255, 0.096, 0.546, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1544.0s, [0.253, 0.099, 0.545, 1.0]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1538.0s to 1543.0s, [0.212, 0.105, 0.535, 0.983]->[0.222, 0.12, 0.536, 1.0]->[0.219, 0.111, 0.526, 0.985]->[0.235, 0.111, 0.532, 0.983]->[0.255, 0.1, 0.549, 1.0]->[0.247, 0.11, 0.544, 0.982]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1537.0s, [0.178, 0.114, 0.531, 0.999]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", 
"At 1536.0s, [0.184, 0.12, 0.532, 0.996]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1534.0s to 1535.0s, [0.074, 0.146, 0.454, 0.991]->[0.175, 0.129, 0.519, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1533.0s, [0.0, 0.048, 0.217, 1.0]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1556.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.476, 0.125, 0.707, 0.982]: carry/hold (an object), talk to (e.g., self, a person, a group), walk."}]}, {"video": "1ReZIMmD_8E.mp4", "clip": [1602, 1709], "conversations": [{"from": "human", "timestamps": 1632.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.022, 0.279, 0.541, 0.929]: listen to (a person), sit; [0.568, 0.278, 0.921, 0.902]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1709.0, "value": "What actions is the person in bbox [0.197, 0.075, 0.474, 0.919] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.197, 0.075, 0.474, 0.919]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1706.0s to 1708.0s, [0.099, 0.053, 0.456, 0.878]->[0.198, 0.083, 0.476, 0.879]->[0.193, 0.071, 0.47, 0.925]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1705.0s, [0.15, 0.069, 0.53, 0.917]: stand, listen to (a person), watch (a person).", "From 1700.0s to 1704.0s, [0.156, 0.082, 0.485, 0.785]->[0.11, 0.134, 0.464, 0.916]->[0.112, 0.117, 0.453, 0.98]->[0.1, 0.13, 0.468, 0.931]->[0.173, 0.092, 0.494, 0.845]: stand, listen to (a person).", "At 1699.0s, [0.11, 0.148, 0.456, 0.97]: stand, touch (an object), listen to (a person).", "At 1698.0s, [0.223, 0.129, 0.55, 0.77]: stand, touch (an object).", "At 1697.0s, [0.854, 0.121, 0.996, 1.0]: walk, talk to (e.g., self, a person, a group)."]}]}, {"video": "1ReZIMmD_8E.mp4", "clip": [1662, 1695], "conversations": [{"from": "human", "timestamps": 1692.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.124, 0.279, 0.421, 0.848]: listen to (a person), sit, touch (an object), watch (a person); [0.478, 0.298, 0.939, 0.96]: sit, touch (an object), drink, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1695.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.457, 0.261, 0.97, 0.969]?"}, {"from": "gpt", "value": ["At Now, [0.457, 0.261, 0.97, 0.969]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1694.0s, [0.478, 0.264, 0.97, 0.97]: sit, drink, touch (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1693.0s, [0.468, 0.252, 0.944, 0.969]: sit, touch (an object), listen to (a person), watch (a person).", "At 1692.0s, [0.478, 0.298, 0.939, 0.96]: sit, drink, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1690.0s to 1691.0s, [0.472, 0.283, 0.945, 0.957]->[0.494, 0.294, 0.943, 0.961]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1689.0s, [0.56, 0.266, 0.97, 0.983]: sit, drink, talk to (e.g., self, a person, a group).", "At 1688.0s, [0.574, 0.267, 0.979, 0.979]: sit, 
drink, touch (an object), watch (a person).", "At 1687.0s, [0.542, 0.3, 0.97, 0.974]: sit, drink, touch (an object), listen to (a person).", "At 1686.0s, [0.52, 0.264, 0.976, 0.985]: sit, carry/hold (an object), drink, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1685.0s, [0.562, 0.28, 0.968, 0.983]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1684.0s, [0.527, 0.26, 0.975, 0.977]: sit, touch (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 1680.0s to 1683.0s, [0.503, 0.264, 0.968, 0.98]->[0.489, 0.271, 0.967, 0.978]->[0.52, 0.267, 0.974, 0.982]->[0.501, 0.267, 0.964, 0.979]: sit, touch (an object), listen to (a person)."]}]}, {"video": "1ReZIMmD_8E.mp4", "clip": [1735, 1769], "conversations": [{"from": "human", "timestamps": 1765.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.118, 0.169, 0.343, 0.785]: carry/hold (an object), stand, touch (an object), talk to (e.g., self, a person, a group); [0.346, 0.271, 0.468, 0.656]: listen to (a person), smoke, walk; [0.505, 0.408, 0.688, 0.931]: listen to (a person), sit, touch (an object); [0.709, 0.427, 0.952, 0.744]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1769.0, "value": "What actions is the person in bbox [0.454, 0.047, 0.844, 1.0] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.454, 0.047, 0.844, 1.0]: stand, carry/hold (an object).", "From 1767.0s to 1768.0s, [0.338, 0.148, 0.66, 0.955]->[0.423, 0.1, 0.774, 0.989]: stand, carry/hold (an object), touch (an object).", "At 1766.0s, [0.196, 0.16, 0.456, 0.891]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1764.0s to 1765.0s, [0.18, 0.19, 0.387, 0.689]->[0.118, 0.169, 0.343, 0.785]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1763.0s, [0.206, 0.187, 0.401, 0.734]: stand, carry/hold (an object), touch (an object), listen to (a person).", "At 1762.0s, [0.139, 0.195, 0.34, 0.925]: carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "26V9UzqSguo.mp4", "clip": [873, 950], "conversations": [{"from": "human", "timestamps": 903.0, "value": "What actions is the person in bbox [0.335, 0.408, 0.729, 0.812] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.335, 0.408, 0.729, 0.812]: sit.", "At 902.0s, [0.373, 0.412, 0.757, 0.819]: sit."]}, {"from": "human", "timestamps": 950.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.408, 0.351, 0.452, 0.563]: carry/hold (an object), walk."}]}, {"video": "26V9UzqSguo.mp4", "clip": [932, 1014], "conversations": [{"from": "human", "timestamps": 962.0, "value": "What actions did the person at bbox [0.093, 0.293, 0.343, 0.965] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.093, 0.293, 0.343, 0.965]: walk, listen to (a person).", "At 961.0s, [0.062, 0.298, 0.342, 0.981]: walk, listen to (a person).", "From 958.0s to 960.0s, [0.037, 0.315, 0.221, 0.99]->[0.065, 0.297, 0.234, 0.98]->[0.031, 0.306, 0.26, 0.978]: walk, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1014.0, "value": "For the current frame, show each individual's position and 
actions."}, {"from": "gpt", "value": "At now, [0.028, 0.212, 0.556, 0.989]: stand, talk to (e.g., self, a person, a group); [0.382, 0.224, 0.928, 0.99]: listen to (a person), stand, watch (a person)."}]}, {"video": "26V9UzqSguo.mp4", "clip": [1117, 1176], "conversations": [{"from": "human", "timestamps": 1147.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.006, 0.264, 0.303, 0.975]: listen to (a person), watch (a person), walk; [0.442, 0.209, 0.99, 0.982]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1176.0, "value": "What actions did the person at bbox [0.779, 0.154, 0.998, 0.661] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.779, 0.154, 0.998, 0.661]: stand, read.", "At 1175.0s, [0.467, 0.167, 0.656, 0.468]: stand, carry/hold (an object), read.", "At 1174.0s, [0.306, 0.182, 0.512, 0.826]: stand, read."]}]}, {"video": "26V9UzqSguo.mp4", "clip": [1123, 1229], "conversations": [{"from": "human", "timestamps": 1153.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.001, 0.137, 0.388, 0.983]: listen to (a person), stand, talk to (e.g., self, a person, a group); [0.477, 0.173, 0.853, 0.99]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1229.0, "value": "Describe the actions of the person at bbox [0.079, 0.269, 0.362, 0.981] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.079, 0.269, 0.362, 0.981]: walk, watch (a person).", "At 1228.0s, [0.233, 0.26, 0.449, 0.953]: walk, watch (a person).", "From 1223.0s to 1227.0s, [0.542, 0.467, 0.621, 0.796]->[0.443, 0.431, 0.526, 0.795]->[0.419, 0.368, 0.544, 0.799]->[0.391, 0.354, 0.56, 0.774]->[0.307, 0.335, 0.538, 0.928]: walk.", "At 1222.0s, [0.611, 0.474, 0.689, 0.787]: stand.", "At 1221.0s, [0.61, 0.476, 0.692, 0.788]: stand, put down.", "From 1218.0s to 1220.0s, [0.609, 0.476, 0.694, 0.791]->[0.609, 0.475, 0.693, 0.791]->[0.607, 0.471, 0.684, 0.791]: stand, answer phone."]}]}, {"video": "26V9UzqSguo.mp4", "clip": [1253, 1309], "conversations": [{"from": "human", "timestamps": 1283.0, "value": "What actions is the person in bbox [0.067, 0.035, 0.254, 0.482] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.067, 0.035, 0.254, 0.482]: walk.", "At 1282.0s, [0.112, 0.131, 0.441, 0.683]: walk.", "At 1281.0s, [0.128, 0.372, 0.346, 0.838]: walk, touch (an object)."]}, {"from": "human", "timestamps": 1309.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.276, 0.367, 0.355, 0.558]: stand."}]}, {"video": "26V9UzqSguo.mp4", "clip": [1613, 1723], "conversations": [{"from": "human", "timestamps": 1643.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.638, 0.41, 0.852, 0.798]: run/jog; [0.856, 0.467, 0.997, 0.662]: touch (an object)."}, {"from": "human", "timestamps": 1723.0, "value": "What actions did the person at bbox [0.331, 0.513, 0.396, 0.785] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.331, 0.513, 0.396, 0.785]: stand, watch (a person).", "From 1719.0s to 1722.0s, [0.031, 0.1, 0.734, 0.974]->[0.043, 0.07, 0.746, 0.972]->[0.041, 0.109, 0.748, 0.976]->[0.33, 
0.522, 0.384, 0.781]: stand."}]}, {"video": "26V9UzqSguo.mp4", "clip": [1745, 1782], "conversations": [{"from": "human", "timestamps": 1775.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.304, 0.06, 0.885, 0.962]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1782.0, "value": "What actions did the person at bbox [0.519, 0.253, 0.725, 0.652] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.519, 0.253, 0.725, 0.652]: sit, listen to (a person).", "At 1781.0s, [0.504, 0.249, 0.725, 0.71]: sit, listen to (a person).", "From 1774.0s to 1780.0s, [0.294, 0.07, 0.899, 0.975]->[0.304, 0.06, 0.885, 0.962]->[0.291, 0.05, 0.905, 0.977]->[0.245, 0.112, 0.873, 0.972]->[0.26, 0.091, 0.892, 0.977]->[0.282, 0.065, 0.91, 0.972]->[0.27, 0.099, 0.907, 1.0]: sit, talk to (e.g., self, a person, a group).", "At 1773.0s, [0.317, 0.067, 0.888, 0.986]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1771.0s to 1772.0s, [0.534, 0.323, 0.754, 0.811]->[0.531, 0.201, 0.729, 0.726]: sit, talk to (e.g., self, a person, a group).", "At 1770.0s, [0.618, 0.213, 0.805, 0.743]: stand, carry/hold (an object)."]}]}, {"video": "2DUITARAsWQ.mp4", "clip": [923, 960], "conversations": [{"from": "human", "timestamps": 953.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.32, 0.033, 0.832, 0.976]: stand."}, {"from": "human", "timestamps": 960.0, "value": "What actions is the person in bbox [0.023, 0.098, 0.692, 0.981] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.023, 0.098, 0.692, 0.981]: stand.", "At 959.0s, [0.012, 0.093, 0.734, 0.993]: stand, carry/hold (an object).", "At 958.0s, [0.014, 0.072, 0.721, 0.992]: stand, carry/hold (an object), watch (a person)."]}]}, {"video": "2DUITARAsWQ.mp4", "clip": [960, 991], "conversations": [{"from": "human", "timestamps": 990.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.005, 0.008, 0.615, 0.974]?"}, {"from": "gpt", "value": ["At Now, [0.005, 0.008, 0.615, 0.974]: stand, carry/hold (an object), take (an object) from (a person).", "At 989.0s, [0.006, 0.013, 0.717, 0.987]: stand, carry/hold (an object).", "At 988.0s, [0.028, 0.043, 0.771, 0.976]: stand.", "From 981.0s to 987.0s, [0.007, 0.042, 0.366, 0.987]->[0.01, 0.0, 0.39, 1.0]->[0.036, 0.006, 0.807, 0.975]->[0.033, 0.003, 0.722, 0.975]->[0.05, 0.02, 0.869, 0.972]->[0.032, 0.014, 0.726, 0.971]->[0.02, 0.019, 0.724, 0.964]: stand, watch (a person).", "From 979.0s to 980.0s, [0.142, 0.131, 0.624, 0.978]->[0.238, 0.066, 0.657, 1.0]: walk, watch (a person).", "At 978.0s, [0.109, 0.117, 0.523, 1.0]: stand, listen to (a person), watch (a person).", "From 975.0s to 977.0s, [0.119, 0.11, 0.525, 0.979]->[0.119, 0.12, 0.533, 0.98]->[0.116, 0.12, 0.523, 0.984]: stand, watch (a person)."]}, {"from": "human", "timestamps": 991.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.002, 0.015, 0.547, 0.992]: carry/hold (an object), stand; [0.501, 0.108, 0.946, 1.0]: stand."}]}, {"video": "2DUITARAsWQ.mp4", "clip": [1068, 1101], "conversations": [{"from": "human", "timestamps": 1098.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.219, 0.244, 
0.545, 0.859]: watch (a person), sit."}, {"from": "human", "timestamps": 1101.0, "value": "For the person at bbox [0.209, 0.241, 0.537, 0.851], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.209, 0.241, 0.537, 0.851]: sit.", "At 1100.0s, [0.22, 0.251, 0.538, 0.858]: sit, watch (a person).", "From 1097.0s to 1098.0s, [0.218, 0.246, 0.541, 0.862]->[0.219, 0.244, 0.545, 0.859]: sit, watch (a person)."]}]}, {"video": "2DUITARAsWQ.mp4", "clip": [1125, 1194], "conversations": [{"from": "human", "timestamps": 1155.0, "value": "For the person at bbox [0.0, 0.0, 0.982, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.0, 0.982, 1.0]: bend/bow (at the waist), talk to (e.g., self, a person, a group).", "From 1152.0s to 1154.0s, [0.0, 0.0, 0.991, 1.0]->[0.0, 0.0, 1.0, 0.991]->[0.0, 0.0, 1.0, 1.0]: bend/bow (at the waist), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1194.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.117, 0.0, 0.992, 0.979]: stand."}]}, {"video": "2DUITARAsWQ.mp4", "clip": [1163, 1259], "conversations": [{"from": "human", "timestamps": 1193.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.36, 0.082, 0.879, 0.981]: stand."}, {"from": "human", "timestamps": 1259.0, "value": "What actions is the person in bbox [0.0, 0.025, 0.436, 0.953] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.025, 0.436, 0.953]: stand, listen to (a person), watch (a person).", "From 1244.0s to 1258.0s, [0.0, 0.012, 0.361, 0.97]->[0.036, 0.0, 0.46, 0.974]->[0.001, 0.017, 0.437, 1.0]->[0.0, 0.019, 0.479, 0.987]->[0.107, 0.02, 0.833, 0.971]->[0.097, 0.023, 0.811, 0.978]->[0.0, 0.006, 0.357, 0.967]->[0.0, 0.01, 0.364, 0.962]->[0.0, 0.017, 0.428, 0.965]->[0.003, 0.002, 0.36, 0.996]->[0.113, 0.014, 0.865, 0.976]->[0.107, 0.003, 0.868, 0.981]->[0.094, 0.0, 0.838, 0.978]->[0.0, 0.019, 0.406, 0.973]->[0.0, 0.031, 0.406, 0.953]: stand, listen to (a person).", "At 1243.0s, [0.002, 0.018, 0.481, 0.97]: stand, watch (a person).", "At 1242.0s, [0.0, 0.023, 0.364, 0.981]: stand."]}]}, {"video": "2DUITARAsWQ.mp4", "clip": [1258, 1316], "conversations": [{"from": "human", "timestamps": 1288.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.275, 0.02, 0.972, 1.0]?"}, {"from": "gpt", "value": ["In the current frame, [0.275, 0.02, 0.972, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1281.0s to 1287.0s, [0.3, 0.026, 1.0, 0.973]->[0.296, 0.023, 0.992, 0.973]->[0.286, 0.029, 0.967, 0.97]->[0.271, 0.031, 0.961, 0.973]->[0.282, 0.029, 0.966, 0.968]->[0.304, 0.022, 0.987, 0.978]->[0.311, 0.023, 0.992, 0.983]: stand, talk to (e.g., self, a person, a group).", "At 1280.0s, [0.23, 0.044, 0.964, 0.974]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1279.0s, [0.223, 0.034, 0.966, 0.969]: stand, watch (a person).", "From 1276.0s to 1278.0s, [0.215, 0.023, 0.971, 0.978]->[0.177, 0.036, 0.978, 0.969]->[0.241, 0.035, 0.966, 0.964]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1275.0s, [0.221, 0.035, 0.964, 0.968]: stand, talk to (e.g., self, a person, a group).", "From 1265.0s to 1274.0s, [0.247, 
0.026, 0.995, 0.982]->[0.268, 0.033, 0.996, 0.974]->[0.238, 0.019, 1.0, 0.962]->[0.253, 0.027, 0.974, 0.975]->[0.26, 0.024, 1.0, 0.975]->[0.257, 0.034, 0.97, 0.97]->[0.182, 0.027, 0.963, 0.966]->[0.236, 0.024, 0.965, 0.97]->[0.248, 0.026, 0.967, 0.968]->[0.216, 0.025, 0.967, 0.971]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1264.0s, [0.192, 0.026, 0.983, 0.973]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1316.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.002, 0.018, 0.93, 1.0]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "2DUITARAsWQ.mp4", "clip": [1245, 1368], "conversations": [{"from": "human", "timestamps": 1275.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.024, 0.412, 0.969]: listen to (a person), stand; [0.221, 0.035, 0.964, 0.968]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1368.0, "value": "What actions did the person at bbox [0.336, 0.183, 0.641, 0.98] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.336, 0.183, 0.641, 0.98]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1366.0s to 1367.0s, [0.304, 0.166, 0.637, 0.997]->[0.311, 0.178, 0.637, 0.973]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1365.0s, [0.298, 0.159, 0.636, 0.986]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1361.0s to 1364.0s, [0.305, 0.159, 0.637, 0.998]->[0.295, 0.165, 0.631, 0.991]->[0.319, 0.16, 0.632, 0.986]->[0.322, 0.167, 0.632, 0.985]: stand, carry/hold (an object), listen to (a person).", "From 1359.0s to 1360.0s, [0.358, 0.164, 0.645, 0.986]->[0.299, 0.163, 0.641, 0.988]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1358.0s, [0.327, 0.161, 0.648, 0.978]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1356.0s to 1357.0s, [0.346, 0.161, 0.646, 0.978]->[0.297, 0.151, 0.649, 0.989]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1354.0s to 1355.0s, [0.34, 0.166, 0.65, 0.986]->[0.373, 0.149, 0.634, 0.981]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1353.0s, [0.321, 0.173, 0.651, 0.971]: stand, carry/hold (an object), kick (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1351.0s to 1352.0s, [0.33, 0.155, 0.597, 0.979]->[0.347, 0.156, 0.643, 0.981]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1349.0s to 1350.0s, [0.284, 0.142, 0.612, 0.977]->[0.335, 0.132, 0.657, 0.987]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1344.0s to 1348.0s, [0.555, 0.555, 0.647, 0.784]->[0.26, 0.138, 0.58, 0.994]->[0.324, 0.12, 0.58, 0.972]->[0.306, 0.126, 0.582, 0.979]->[0.323, 0.113, 0.575, 0.974]: stand, carry/hold (an object), listen to (a person).", "At 1343.0s, [0.547, 0.593, 0.67, 0.934]: stand, carry/hold (an object), watch (a person).", "At 1342.0s, [0.567, 0.578, 0.74, 0.919]: bend/bow (at the waist), carry/hold (an object), grab (a person)."]}]}, {"video": 
"2DUITARAsWQ.mp4", "clip": [1319, 1356], "conversations": [{"from": "human", "timestamps": 1349.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.027, 0.617, 0.357, 0.992]: listen to (a person), sit, grab (a person); [0.284, 0.142, 0.612, 0.977]: watch (a person), listen to (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1356.0, "value": "For the person at bbox [0.085, 0.594, 0.373, 0.998], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.085, 0.594, 0.373, 0.998]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1354.0s to 1355.0s, [0.096, 0.62, 0.406, 0.991]->[0.066, 0.665, 0.424, 0.991]: sit, grab (a person), listen to (a person).", "At 1353.0s, [0.046, 0.59, 0.382, 0.991]: sit, crouch/kneel, pull (an object), grab (a person), listen to (a person), talk to (e.g., self, a person, a group).", "At 1352.0s, [0.075, 0.579, 0.412, 0.992]: sit, grab (a person), listen to (a person), watch (a person).", "From 1349.0s to 1351.0s, [0.027, 0.617, 0.357, 0.992]->[0.099, 0.649, 0.404, 0.989]->[0.054, 0.637, 0.342, 0.989]: sit, grab (a person), listen to (a person).", "From 1346.0s to 1348.0s, [0.025, 0.558, 0.348, 0.996]->[0.032, 0.589, 0.372, 0.996]->[0.021, 0.555, 0.348, 0.994]: sit, grab (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1345.0s, [0.024, 0.574, 0.339, 0.999]: crouch/kneel, grab (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1344.0s, [0.508, 0.735, 0.649, 0.933]: sit, grab (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "2DUITARAsWQ.mp4", "clip": [1388, 1421], "conversations": [{"from": "human", "timestamps": 1418.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.011, 0.003, 0.471, 0.978]: watch (a person), stand; [0.369, 0.26, 0.964, 1.0]: watch (a person), stand; [0.826, 0.016, 0.999, 0.748]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1421.0, "value": "What actions did the person at bbox [0.396, 0.173, 0.937, 0.979] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.396, 0.173, 0.937, 0.979]: stand, answer phone, watch (a person).", "At 1420.0s, [0.374, 0.128, 0.967, 0.989]: stand, answer phone, listen to (a person).", "At 1419.0s, [0.385, 0.088, 0.969, 0.976]: stand.", "At 1418.0s, [0.369, 0.26, 0.964, 1.0]: stand, watch (a person).", "From 1413.0s to 1417.0s, [0.311, 0.079, 0.935, 0.994]->[0.336, 0.118, 0.947, 0.984]->[0.368, 0.103, 0.975, 0.978]->[0.013, 0.014, 0.357, 0.991]->[0.305, 0.146, 0.934, 0.981]: stand, listen to (a person), watch (a person).", "At 1412.0s, [0.2, 0.017, 0.869, 0.984]: stand, watch (a person)."]}]}, {"video": "2DUITARAsWQ.mp4", "clip": [1373, 1460], "conversations": [{"from": "human", "timestamps": 1403.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.255, 0.322, 0.353, 0.711]: watch (a person), stand; [0.652, 0.402, 0.787, 0.877]: watch (a person), walk."}, {"from": "human", "timestamps": 1460.0, "value": "For the person at bbox [0.0, 0.005, 0.932, 0.986], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.005, 0.932, 0.986]: sit.", "At 
1459.0s, [0.0, 0.006, 0.929, 0.989]: sit, stand.", "At 1458.0s, [0.024, 0.003, 0.919, 0.986]: sit, stand, answer phone, carry/hold (an object).", "From 1456.0s to 1457.0s, [0.046, 0.0, 0.911, 0.974]->[0.035, 0.007, 0.904, 0.977]: stand, answer phone."]}]}, {"video": "2DUITARAsWQ.mp4", "clip": [1410, 1470], "conversations": [{"from": "human", "timestamps": 1440.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.004, 0.017, 0.277, 0.984]: watch (a person), stand; [0.204, 0.056, 0.994, 0.984]: stand, answer phone."}, {"from": "human", "timestamps": 1470.0, "value": "Describe the actions of the person at bbox [0.014, 0.057, 0.559, 0.981] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.014, 0.057, 0.559, 0.981]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1469.0s, [0.024, 0.035, 0.551, 0.98]: stand.", "From 1466.0s to 1468.0s, [0.01, 0.027, 0.585, 0.983]->[0.016, 0.02, 0.567, 0.987]->[0.0, 0.034, 0.569, 0.976]: stand, watch (a person).", "At 1465.0s, [0.012, 0.014, 0.563, 0.985]: stand, listen to (a person), watch (a person)."]}]}, {"video": "2DUITARAsWQ.mp4", "clip": [1480, 1590], "conversations": [{"from": "human", "timestamps": 1510.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.008, 0.008, 1.0, 0.978]: stand."}, {"from": "human", "timestamps": 1590.0, "value": "For the person at bbox [0.185, 0.112, 0.621, 0.99], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.185, 0.112, 0.621, 0.99]: stand, talk to (e.g., self, a person, a group).", "From 1588.0s to 1589.0s, [0.194, 0.119, 0.623, 0.991]->[0.18, 0.121, 0.615, 0.987]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "2DUITARAsWQ.mp4", "clip": [1609, 1695], "conversations": [{"from": "human", "timestamps": 1639.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.086, 0.245, 0.325, 0.874]: carry/hold (an object), sit, extract; [0.604, 0.246, 0.833, 0.863]: carry/hold (an object), sit."}, {"from": "human", "timestamps": 1695.0, "value": "Describe the actions of the person at bbox [0.058, 0.024, 0.869, 0.963] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.058, 0.024, 0.869, 0.963]: sit, carry/hold (an object).", "At 1694.0s, [0.046, 0.03, 0.862, 0.984]: sit, carry/hold (an object)."]}]}, {"video": "2E_e8JlvTlg.mkv", "clip": [879, 916], "conversations": [{"from": "human", "timestamps": 909.0, "value": "What actions is the person in bbox [0.446, 0.001, 1.0, 1.0] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.446, 0.001, 1.0, 1.0]: stand, watch (a person).", "At 908.0s, [0.255, 0.012, 1.0, 0.98]: stand, talk to (e.g., self, a person, a group).", "From 905.0s to 907.0s, [0.008, 0.005, 0.99, 0.969]->[0.014, 0.009, 0.976, 0.963]->[0.063, 0.014, 0.776, 0.995]: stand.", "At 904.0s, [0.016, 0.005, 0.969, 0.979]: stand, talk to (e.g., self, a person, a group).", "At 903.0s, [0.001, 0.0, 0.981, 0.98]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 902.0s, [0.0, 0.094, 0.166, 0.857]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 
916.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.012, 0.349, 0.134, 0.562]: carry/hold (an object), sit, watch (a person); [0.121, 0.398, 0.242, 0.618]: listen to (a person), sit, watch (a person); [0.457, 0.168, 0.631, 0.542]: touch (an object), listen to (a person), watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "2E_e8JlvTlg.mkv", "clip": [918, 973], "conversations": [{"from": "human", "timestamps": 948.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.21, 0.006, 1.0, 0.978]: watch (a person), stand."}, {"from": "human", "timestamps": 973.0, "value": "Describe the actions of the person at bbox [0.115, 0.092, 0.958, 0.98] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.115, 0.092, 0.958, 0.98]: talk to (e.g., self, a person, a group), lie/sleep.", "From 971.0s to 972.0s, [0.139, 0.09, 0.965, 0.984]->[0.136, 0.087, 0.962, 0.99]: talk to (e.g., self, a person, a group), lie/sleep.", "From 966.0s to 970.0s, [0.075, 0.107, 0.909, 0.991]->[0.01, 0.112, 0.944, 0.984]->[0.03, 0.084, 0.951, 0.968]->[0.086, 0.104, 0.898, 0.981]->[0.126, 0.09, 0.949, 0.989]: talk to (e.g., self, a person, a group), lie/sleep, watch (a person).", "At 965.0s, [0.12, 0.096, 0.954, 0.98]: talk to (e.g., self, a person, a group), lie/sleep.", "From 961.0s to 964.0s, [0.102, 0.104, 0.955, 0.994]->[0.091, 0.12, 0.948, 0.973]->[0.0, 0.114, 0.947, 0.963]->[0.123, 0.125, 0.899, 0.977]: talk to (e.g., self, a person, a group), lie/sleep, watch (a person)."]}]}, {"video": "2E_e8JlvTlg.mkv", "clip": [938, 981], "conversations": [{"from": "human", "timestamps": 968.0, "value": "For the person at bbox [0.03, 0.084, 0.951, 0.968], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.03, 0.084, 0.951, 0.968]: talk to (e.g., self, a person, a group), lie/sleep, watch (a person).", "From 966.0s to 967.0s, [0.075, 0.107, 0.909, 0.991]->[0.01, 0.112, 0.944, 0.984]: talk to (e.g., self, a person, a group), lie/sleep, watch (a person).", "At 965.0s, [0.12, 0.096, 0.954, 0.98]: talk to (e.g., self, a person, a group), lie/sleep.", "From 961.0s to 964.0s, [0.102, 0.104, 0.955, 0.994]->[0.091, 0.12, 0.948, 0.973]->[0.0, 0.114, 0.947, 0.963]->[0.123, 0.125, 0.899, 0.977]: talk to (e.g., self, a person, a group), lie/sleep, watch (a person)."]}, {"from": "human", "timestamps": 981.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.244, 0.013, 1.0, 0.977]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "2E_e8JlvTlg.mkv", "clip": [1025, 1068], "conversations": [{"from": "human", "timestamps": 1055.0, "value": "Describe the actions of the person at bbox [0.756, 0.24, 0.997, 0.919] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.756, 0.24, 0.997, 0.919]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1050.0s to 1054.0s, [0.768, 0.253, 0.998, 0.824]->[0.761, 0.237, 0.998, 0.865]->[0.754, 0.241, 1.0, 0.881]->[0.756, 0.238, 0.998, 0.915]->[0.754, 0.236, 0.997, 0.9]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1068.0, "value": "What 
actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.021, 0.007, 0.946, 0.98]: watch (a person), sit."}]}, {"video": "2E_e8JlvTlg.mkv", "clip": [1039, 1125], "conversations": [{"from": "human", "timestamps": 1069.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.006, 0.981, 0.983]: sit."}, {"from": "human", "timestamps": 1125.0, "value": "What actions is the person in bbox [0.073, 0.033, 0.704, 0.982] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.073, 0.033, 0.704, 0.982]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1123.0s to 1124.0s, [0.17, 0.292, 0.425, 0.979]->[0.271, 0.285, 0.534, 0.983]: stand, watch (a person)."]}]}, {"video": "2E_e8JlvTlg.mkv", "clip": [1062, 1093], "conversations": [{"from": "human", "timestamps": 1092.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.26, 0.005, 1.0, 0.991]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1093.0, "value": "For the person at bbox [0.171, 0.085, 0.983, 0.997], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.171, 0.085, 0.983, 0.997]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1092.0s, [0.26, 0.005, 1.0, 0.991]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1091.0s, [0.102, 0.0, 0.988, 0.997]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "2E_e8JlvTlg.mkv", "clip": [1123, 1187], "conversations": [{"from": "human", "timestamps": 1153.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.085, 0.027, 0.886, 0.973]: stand."}, {"from": "human", "timestamps": 1187.0, "value": "What actions is the person in bbox [0.463, 0.442, 0.577, 0.873] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.463, 0.442, 0.577, 0.873]: stand.", "From 1185.0s to 1186.0s, [0.441, 0.453, 0.544, 0.854]->[0.463, 0.451, 0.555, 0.87]: walk.", "From 1183.0s to 1184.0s, [0.163, 0.482, 0.232, 0.799]->[0.166, 0.481, 0.234, 0.814]: stand.", "From 1179.0s to 1182.0s, [0.025, 0.381, 0.134, 0.801]->[0.04, 0.417, 0.129, 0.8]->[0.065, 0.454, 0.163, 0.812]->[0.13, 0.484, 0.213, 0.805]: walk."]}]}, {"video": "2E_e8JlvTlg.mkv", "clip": [1140, 1217], "conversations": [{"from": "human", "timestamps": 1170.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.154, 0.048, 0.891, 0.981]: watch (a person), stand."}, {"from": "human", "timestamps": 1217.0, "value": "For the person at bbox [0.084, 0.0, 0.996, 0.987], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.084, 0.0, 0.996, 0.987]: stand, talk to (e.g., self, a person, a group).", "From 1215.0s to 1216.0s, [0.122, 0.0, 1.0, 0.987]->[0.103, 0.011, 0.947, 0.97]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "2E_e8JlvTlg.mkv", "clip": [1263, 1308], 
"conversations": [{"from": "human", "timestamps": 1293.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.801, 0.088, 0.995, 0.959]?"}, {"from": "gpt", "value": ["In the current frame, [0.801, 0.088, 0.995, 0.959]: stand, carry/hold (an object).", "At 1292.0s, [0.668, 0.119, 0.875, 0.903]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1308.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.309, 0.048, 0.642, 0.811]: carry/hold (an object), stand, watch (a person); [0.535, 0.583, 0.996, 0.994]: watch (a person), lie/sleep; [0.611, 0.36, 0.735, 0.598]: bend/bow (at the waist); [0.736, 0.315, 0.843, 0.587]: carry/hold (an object), stand."}]}, {"video": "2E_e8JlvTlg.mkv", "clip": [1268, 1337], "conversations": [{"from": "human", "timestamps": 1298.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.142, 0.035, 0.825, 0.974]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1337.0, "value": "What actions did the person at bbox [0.046, 0.12, 1.0, 0.945] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.046, 0.12, 1.0, 0.945]: carry/hold (an object), lie/sleep.", "At 1336.0s, [0.029, 0.105, 1.0, 0.973]: carry/hold (an object), lie/sleep, watch (a person).", "At 1334.0s, [0.508, 0.517, 0.938, 0.992]: listen to (a person), lie/sleep, watch (a person).", "From 1331.0s to 1332.0s, [0.534, 0.502, 0.987, 0.977]->[0.551, 0.511, 0.971, 0.982]: listen to (a person), lie/sleep, watch (a person).", "From 1329.0s to 1330.0s, [0.042, 0.195, 0.95, 0.96]->[0.019, 0.152, 1.0, 0.956]: talk to (e.g., self, a person, a group), lie/sleep, watch (a person).", "From 1327.0s to 1328.0s, [0.02, 0.067, 1.0, 0.953]->[0.005, 0.108, 0.993, 0.938]: listen to (a person), talk to (e.g., self, a person, a group), lie/sleep, watch (a person)."]}]}, {"video": "2E_e8JlvTlg.mkv", "clip": [1361, 1397], "conversations": [{"from": "human", "timestamps": 1391.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.055, 0.911, 1.0]: listen to (a person), lie/sleep, watch (a person)."}, {"from": "human", "timestamps": 1397.0, "value": "What actions is the person in bbox [0.024, 0.004, 0.955, 0.988] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.024, 0.004, 0.955, 0.988]: sit, listen to (a person), watch (a person).", "From 1394.0s to 1396.0s, [0.014, 0.046, 0.925, 0.977]->[0.0, 0.037, 0.919, 0.988]->[0.0, 0.028, 0.887, 0.99]: talk to (e.g., self, a person, a group), lie/sleep, watch (a person).", "From 1392.0s to 1393.0s, [0.0, 0.069, 0.916, 0.973]->[0.0, 0.038, 0.88, 0.984]: lie/sleep, watch (a person).", "At 1391.0s, [0.0, 0.055, 0.911, 1.0]: listen to (a person), lie/sleep, watch (a person)."]}]}, {"video": "2E_e8JlvTlg.mkv", "clip": [1420, 1458], "conversations": [{"from": "human", "timestamps": 1450.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.519, 0.486, 0.739, 0.912]: lie/sleep."}, {"from": "human", "timestamps": 1458.0, "value": "What actions did the person at bbox [0.229, 0.415, 0.943, 0.983] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.229, 0.415, 
0.943, 0.983]: listen to (a person), lie/sleep.", "At 1457.0s, [0.237, 0.389, 0.89, 0.998]: listen to (a person), lie/sleep."]}]}, {"video": "2E_e8JlvTlg.mkv", "clip": [1530, 1575], "conversations": [{"from": "human", "timestamps": 1560.0, "value": "For the person at bbox [0.392, 0.409, 0.511, 0.701], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.392, 0.409, 0.511, 0.701]: sit, drive (e.g., a car, a truck).", "From 1557.0s to 1559.0s, [0.089, 0.07, 0.515, 0.557]->[0.59, 0.167, 0.732, 0.385]->[0.381, 0.12, 0.557, 0.324]: sit, drive (e.g., a car, a truck).", "At 1556.0s, [0.111, 0.081, 0.54, 0.546]: sit, drive (e.g., a car, a truck), watch (a person)."]}, {"from": "human", "timestamps": 1575.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.021, 0.254, 0.2, 0.478]: watch (a person), sit, drive (e.g., a car, a truck); [0.56, 0.214, 0.868, 0.789]: sit, touch (an object), drive (e.g., a car, a truck)."}]}, {"video": "2E_e8JlvTlg.mkv", "clip": [1606, 1721], "conversations": [{"from": "human", "timestamps": 1636.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.171, 0.118, 0.755, 0.958]: sit, drive (e.g., a car, a truck)."}, {"from": "human", "timestamps": 1721.0, "value": "Describe the actions of the person at bbox [0.326, 0.362, 0.5, 0.991] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.326, 0.362, 0.5, 0.991]: walk.", "At 1720.0s, [0.345, 0.327, 0.51, 0.993]: walk.", "At 1718.0s, [0.34, 0.125, 0.894, 0.977]: stand, push (an object).", "From 1716.0s to 1717.0s, [0.293, 0.206, 0.78, 0.974]->[0.346, 0.257, 0.788, 1.0]: stand, touch (an object).", "At 1715.0s, [0.235, 0.113, 0.759, 0.973]: stand, hit (an object), touch (an object).", "At 1714.0s, [0.108, 0.156, 0.728, 0.982]: stand, touch (an object).", "From 1705.0s to 1713.0s, [0.643, 0.16, 0.853, 0.854]->[0.622, 0.151, 0.861, 0.897]->[0.62, 0.179, 0.884, 0.958]->[0.429, 0.457, 0.53, 0.911]->[0.463, 0.423, 0.591, 0.974]->[0.404, 0.337, 0.579, 0.979]->[0.444, 0.322, 0.635, 0.99]->[0.544, 0.353, 0.714, 0.97]->[0.444, 0.35, 0.604, 0.93]: walk."]}]}, {"video": "2E_e8JlvTlg.mkv", "clip": [1655, 1717], "conversations": [{"from": "human", "timestamps": 1685.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.46, 0.217, 0.911, 0.82]: carry/hold (an object), sit, touch (an object)."}, {"from": "human", "timestamps": 1717.0, "value": "For the person at bbox [0.346, 0.257, 0.788, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.346, 0.257, 0.788, 1.0]: stand, touch (an object).", "At 1716.0s, [0.293, 0.206, 0.78, 0.974]: stand, touch (an object).", "At 1715.0s, [0.235, 0.113, 0.759, 0.973]: stand, hit (an object), touch (an object).", "At 1714.0s, [0.108, 0.156, 0.728, 0.982]: stand, touch (an object).", "From 1705.0s to 1713.0s, [0.643, 0.16, 0.853, 0.854]->[0.622, 0.151, 0.861, 0.897]->[0.62, 0.179, 0.884, 0.958]->[0.429, 0.457, 0.53, 0.911]->[0.463, 0.423, 0.591, 0.974]->[0.404, 0.337, 0.579, 0.979]->[0.444, 0.322, 0.635, 0.99]->[0.544, 0.353, 0.714, 0.97]->[0.444, 0.35, 0.604, 0.93]: walk."]}]}, {"video": "2FIHxnZKg6A.webm", "clip": [908, 941], "conversations": [{"from": "human", "timestamps": 938.0, "value": "In the 
current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.026, 0.185, 0.645, 0.859]: listen to (a person), stand; [0.426, 0.228, 0.947, 0.852]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 941.0, "value": "For the person at bbox [0.032, 0.175, 0.639, 0.87], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.032, 0.175, 0.639, 0.87]: sit.", "From 936.0s to 940.0s, [0.042, 0.193, 0.742, 0.868]->[0.021, 0.178, 0.582, 0.849]->[0.026, 0.185, 0.645, 0.859]->[0.018, 0.174, 0.602, 0.853]->[0.024, 0.191, 0.658, 0.87]: stand, listen to (a person)."]}]}, {"video": "2FIHxnZKg6A.webm", "clip": [1004, 1069], "conversations": [{"from": "human", "timestamps": 1034.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.348, 0.173, 0.904, 0.88]?"}, {"from": "gpt", "value": ["At Now, [0.348, 0.173, 0.904, 0.88]: stand, watch (a person).", "From 1029.0s to 1033.0s, [0.512, 0.183, 0.99, 0.891]->[0.356, 0.169, 0.918, 0.879]->[0.327, 0.171, 0.88, 0.885]->[0.343, 0.173, 0.905, 0.87]->[0.35, 0.173, 0.893, 0.871]: stand.", "At 1028.0s, [0.47, 0.157, 0.938, 0.874]: stand, watch (a person).", "At 1027.0s, [0.47, 0.164, 0.941, 0.873]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1024.0s to 1026.0s, [0.568, 0.18, 0.999, 0.882]->[0.56, 0.164, 0.992, 0.879]->[0.527, 0.167, 0.982, 0.882]: stand, listen to (a person), watch (a person).", "From 1021.0s to 1023.0s, [0.552, 0.164, 0.993, 0.876]->[0.574, 0.158, 0.992, 0.861]->[0.576, 0.164, 0.995, 0.881]: stand, listen to (a person).", "From 1019.0s to 1020.0s, [0.593, 0.151, 0.986, 0.903]->[0.546, 0.15, 0.977, 0.877]: stand, listen to (a person), watch (a person).", "At 1018.0s, [0.532, 0.179, 0.783, 0.848]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "From 1016.0s to 1017.0s, [0.584, 0.198, 0.811, 0.852]->[0.556, 0.195, 0.809, 0.841]: walk, listen to (a person).", "At 1015.0s, [0.527, 0.216, 0.757, 0.852]: walk, listen to (a person), watch (a person).", "From 1010.0s to 1014.0s, [0.621, 0.322, 0.762, 0.855]->[0.633, 0.272, 0.776, 0.851]->[0.591, 0.251, 0.775, 0.853]->[0.592, 0.245, 0.778, 0.844]->[0.591, 0.221, 0.801, 0.844]: walk, listen to (a person).", "At 1009.0s, [0.629, 0.289, 0.755, 0.833]: stand, close (e.g., a door, a box), listen to (a person)."]}, {"from": "human", "timestamps": 1069.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.078, 0.214, 0.693, 0.877]: lie/sleep."}]}, {"video": "2FIHxnZKg6A.webm", "clip": [1031, 1094], "conversations": [{"from": "human", "timestamps": 1061.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.114, 0.253, 0.861, 0.884]: watch (a person), lie/sleep, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1094.0, "value": "Describe the actions of the person at bbox [0.005, 0.293, 0.333, 0.87] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.005, 0.293, 0.333, 0.87]: stand, watch (a person).", "At 1093.0s, [0.085, 0.155, 0.62, 0.864]: stand, watch (a person)."]}]}, {"video": "2FIHxnZKg6A.webm", "clip": [1067, 1123], "conversations": [{"from": "human", "timestamps": 1097.0, "value": "In the current frame, list each person's location and 
actions."}, {"from": "gpt", "value": "At now, [0.012, 0.157, 0.521, 0.864]: sing to (e.g., self, a person, a group), dance; [0.583, 0.216, 0.989, 0.845]: watch (a person), stand."}, {"from": "human", "timestamps": 1123.0, "value": "What actions is the person in bbox [0.224, 0.242, 0.603, 0.857] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.224, 0.242, 0.603, 0.857]: stand, touch (an object), watch (a person).", "From 1121.0s to 1122.0s, [0.227, 0.221, 0.647, 0.845]->[0.222, 0.222, 0.606, 0.864]: stand, touch (an object).", "From 1119.0s to 1120.0s, [0.223, 0.282, 0.368, 0.85]->[0.213, 0.256, 0.514, 0.854]: walk, touch (an object), watch (a person)."]}]}, {"video": "2FIHxnZKg6A.webm", "clip": [1115, 1150], "conversations": [{"from": "human", "timestamps": 1145.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.086, 0.148, 0.66, 0.857]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1150.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.108, 0.138, 0.662, 0.871]?"}, {"from": "gpt", "value": ["In the current frame, [0.108, 0.138, 0.662, 0.871]: sit, listen to (a person), watch (a person).", "At 1149.0s, [0.104, 0.145, 0.667, 0.87]: sit, watch (a person).", "At 1148.0s, [0.116, 0.15, 0.666, 0.859]: stand.", "From 1145.0s to 1146.0s, [0.086, 0.148, 0.66, 0.857]->[0.085, 0.147, 0.653, 0.86]: sit, listen to (a person), watch (a person)."]}]}, {"video": "2FIHxnZKg6A.webm", "clip": [1256, 1348], "conversations": [{"from": "human", "timestamps": 1286.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.309, 0.208, 0.9, 0.881]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1348.0, "value": "For the person at bbox [0.057, 0.231, 0.482, 0.86], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.057, 0.231, 0.482, 0.86]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1346.0s to 1347.0s, [0.614, 0.463, 0.792, 0.847]->[0.609, 0.457, 0.793, 0.847]: stand, listen to (a person), watch (a person).", "At 1345.0s, [0.613, 0.462, 0.795, 0.845]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1341.0s to 1344.0s, [0.603, 0.465, 0.8, 0.848]->[0.605, 0.457, 0.798, 0.851]->[0.608, 0.461, 0.8, 0.853]->[0.603, 0.469, 0.799, 0.849]: stand, listen to (a person), watch (a person).", "From 1339.0s to 1340.0s, [0.604, 0.464, 0.799, 0.848]->[0.606, 0.461, 0.799, 0.849]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "2FIHxnZKg6A.webm", "clip": [1328, 1364], "conversations": [{"from": "human", "timestamps": 1358.0, "value": "For the person at bbox [0.641, 0.554, 0.869, 0.858], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.641, 0.554, 0.869, 0.858]: sit, listen to (a person), watch (a person).", "At 1357.0s, [0.666, 0.68, 0.825, 0.857]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1364.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.103, 0.131, 0.791, 0.872]: watch (a 
person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "2FIHxnZKg6A.webm", "clip": [1367, 1409], "conversations": [{"from": "human", "timestamps": 1397.0, "value": "What actions did the person at bbox [0.309, 0.326, 0.692, 0.856] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.309, 0.326, 0.692, 0.856]: stand, text on/look at a cellphone.", "At 1396.0s, [0.302, 0.35, 0.728, 0.857]: stand, answer phone."]}, {"from": "human", "timestamps": 1409.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.175, 0.145, 0.789, 0.866]: listen to (a person), sit, watch (a person)."}]}, {"video": "2FIHxnZKg6A.webm", "clip": [1351, 1453], "conversations": [{"from": "human", "timestamps": 1381.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.225, 0.307, 0.39, 0.707]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.661, 0.325, 0.893, 0.67]: watch (a person), sit, touch (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1453.0, "value": "For the person at bbox [0.108, 0.168, 0.651, 0.852], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.108, 0.168, 0.651, 0.852]: stand, watch (a person).", "At 1452.0s, [0.093, 0.159, 0.643, 0.854]: stand, watch (a person)."]}]}, {"video": "2FIHxnZKg6A.webm", "clip": [1427, 1484], "conversations": [{"from": "human", "timestamps": 1457.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.262, 0.187, 0.983, 0.873]: stand, smoke, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1484.0, "value": "What actions is the person in bbox [0.144, 0.243, 0.531, 0.838] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.144, 0.243, 0.531, 0.838]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1481.0s to 1483.0s, [0.576, 0.206, 0.828, 0.849]->[0.121, 0.258, 0.536, 0.843]->[0.121, 0.253, 0.519, 0.843]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "2FIHxnZKg6A.webm", "clip": [1487, 1543], "conversations": [{"from": "human", "timestamps": 1517.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.012, 0.368, 0.153, 0.82]: carry/hold (an object), stand, read; [0.056, 0.175, 0.453, 0.864]: carry/hold (an object), stand; [0.459, 0.175, 0.763, 0.842]: carry/hold (an object), stand, watch (a person); [0.697, 0.392, 0.845, 0.847]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1543.0, "value": "What actions is the person in bbox [0.025, 0.144, 0.528, 0.879] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.025, 0.144, 0.528, 0.879]: stand.", "At 1542.0s, [0.018, 0.138, 0.608, 0.87]: stand, watch (a person)."]}]}, {"video": "2FIHxnZKg6A.webm", "clip": [1524, 1588], "conversations": [{"from": "human", "timestamps": 1554.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.36, 0.239, 0.419, 0.446]: carry/hold (an object), walk; [0.402, 0.176, 0.565, 0.689]: get 
up."}, {"from": "human", "timestamps": 1588.0, "value": "Describe the actions of the person at bbox [0.423, 0.53, 0.46, 0.647] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.423, 0.53, 0.46, 0.647]: walk, carry/hold (an object).", "At 1587.0s, [0.488, 0.536, 0.535, 0.649]: walk, carry/hold (an object)."]}]}, {"video": "2FIHxnZKg6A.webm", "clip": [1533, 1602], "conversations": [{"from": "human", "timestamps": 1563.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.412, 0.189, 0.919, 0.854]: carry/hold (an object), watch (a person), walk."}, {"from": "human", "timestamps": 1602.0, "value": "Describe the actions of the person at bbox [0.253, 0.196, 0.7, 0.85] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.253, 0.196, 0.7, 0.85]: stand, talk to (e.g., self, a person, a group).", "From 1600.0s to 1601.0s, [0.211, 0.199, 0.675, 0.859]->[0.205, 0.2, 0.639, 0.855]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "2FIHxnZKg6A.webm", "clip": [1602, 1677], "conversations": [{"from": "human", "timestamps": 1632.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.405, 0.202, 0.855, 0.859]: carry/hold (an object), stand, watch (a person)."}, {"from": "human", "timestamps": 1677.0, "value": "What actions is the person in bbox [0.181, 0.199, 0.602, 0.852] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.181, 0.199, 0.602, 0.852]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1673.0s to 1676.0s, [0.256, 0.188, 0.707, 0.856]->[0.247, 0.205, 0.689, 0.849]->[0.263, 0.203, 0.705, 0.85]->[0.284, 0.201, 0.714, 0.846]: stand, talk to (e.g., self, a person, a group).", "At 1672.0s, [0.218, 0.205, 0.673, 0.859]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "2FIHxnZKg6A.webm", "clip": [1685, 1718], "conversations": [{"from": "human", "timestamps": 1715.0, "value": "What actions is the person in bbox [0.116, 0.171, 0.771, 0.869] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.116, 0.171, 0.771, 0.869]: stand.", "From 1713.0s to 1714.0s, [0.138, 0.181, 0.866, 0.865]->[0.166, 0.166, 0.874, 0.852]: stand.", "At 1712.0s, [0.106, 0.162, 0.78, 0.862]: stand, talk to (e.g., self, a person, a group).", "From 1710.0s to 1711.0s, [0.108, 0.156, 0.816, 0.864]->[0.09, 0.156, 0.77, 0.848]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1718.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.02, 0.223, 0.282, 0.858]: carry/hold (an object), walk; [0.284, 0.281, 0.322, 0.43]: carry/hold (an object), stand."}]}, {"video": "2FIHxnZKg6A.webm", "clip": [1750, 1789], "conversations": [{"from": "human", "timestamps": 1780.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.032, 0.205, 0.626, 0.853]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.523, 0.172, 0.991, 0.859]: listen to (a person), stand."}, {"from": "human", "timestamps": 1789.0, "value": "What actions did the 
person at bbox [0.317, 0.18, 0.922, 0.851] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.317, 0.18, 0.922, 0.851]: stand, talk to (e.g., self, a person, a group).", "At 1788.0s, [0.282, 0.187, 0.924, 0.857]: stand, talk to (e.g., self, a person, a group).", "At 1787.0s, [0.311, 0.185, 0.952, 0.842]: stand, listen to (a person).", "From 1781.0s to 1786.0s, [0.522, 0.182, 0.998, 0.852]->[0.552, 0.174, 0.989, 0.847]->[0.53, 0.165, 1.0, 0.858]->[0.496, 0.166, 0.997, 0.87]->[0.295, 0.186, 0.935, 0.855]->[0.304, 0.189, 0.939, 0.847]: stand, listen to (a person), watch (a person).", "From 1776.0s to 1780.0s, [0.644, 0.183, 0.991, 0.863]->[0.655, 0.186, 0.993, 0.857]->[0.652, 0.17, 0.995, 0.868]->[0.537, 0.132, 0.991, 0.865]->[0.523, 0.172, 0.991, 0.859]: stand, listen to (a person).", "From 1774.0s to 1775.0s, [0.639, 0.184, 0.993, 0.886]->[0.635, 0.192, 0.992, 0.854]: stand, listen to (a person), watch (a person).", "From 1772.0s to 1773.0s, [0.244, 0.147, 0.925, 0.851]->[0.216, 0.16, 0.881, 0.848]: stand, talk to (e.g., self, a person, a group).", "At 1771.0s, [0.283, 0.152, 0.984, 0.873]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1770.0s, [0.428, 0.19, 0.989, 0.849]: walk, talk to (e.g., self, a person, a group).", "At 1769.0s, [0.314, 0.298, 0.482, 0.848]: walk.", "At 1768.0s, [0.317, 0.282, 0.467, 0.841]: walk, talk to (e.g., self, a person, a group).", "From 1759.0s to 1767.0s, [0.234, 0.191, 0.346, 0.565]->[0.232, 0.208, 0.35, 0.634]->[0.244, 0.23, 0.368, 0.669]->[0.245, 0.253, 0.376, 0.703]->[0.268, 0.266, 0.402, 0.731]->[0.268, 0.277, 0.403, 0.759]->[0.293, 0.289, 0.438, 0.795]->[0.274, 0.285, 0.42, 0.816]->[0.287, 0.287, 0.436, 0.847]: walk, listen to (a person).", "From 1757.0s to 1758.0s, [0.206, 0.171, 0.308, 0.46]->[0.22, 0.185, 0.323, 0.526]: walk."]}]}, {"video": "2KpThOF_QmE.mkv", "clip": [874, 950], "conversations": [{"from": "human", "timestamps": 904.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.78, 0.044, 0.919, 0.995]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 950.0, "value": "What actions is the person in bbox [0.278, 0.155, 0.393, 0.744] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.278, 0.155, 0.393, 0.744]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 949.0s, [0.292, 0.162, 0.394, 0.752]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 948.0s, [0.294, 0.149, 0.402, 0.737]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 947.0s, [0.307, 0.152, 0.413, 0.739]: stand, carry/hold (an object), listen to (a person).", "At 946.0s, [0.319, 0.15, 0.443, 0.74]: stand, carry/hold (an object), listen to (a person), take (an object) from (a person).", "At 945.0s, [0.316, 0.147, 0.461, 0.743]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 944.0s, [0.305, 0.154, 0.417, 0.733]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 942.0s to 943.0s, [0.293, 0.145, 0.401, 0.725]->[0.293, 0.141, 0.402, 0.731]: stand, listen to (a person)."]}]}, {"video": "2KpThOF_QmE.mkv", "clip": [1019, 1079], "conversations": [{"from": "human", "timestamps": 1049.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", 
"value": "At now, [0.141, 0.013, 0.569, 0.928]: sit, smoke."}, {"from": "human", "timestamps": 1079.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.451, 0.005, 0.994, 0.985]?"}, {"from": "gpt", "value": ["In the current frame, [0.451, 0.005, 0.994, 0.985]: sit, watch (a person).", "At 1078.0s, [0.541, 0.0, 0.994, 0.982]: sit, watch (a person).", "From 1076.0s to 1077.0s, [0.524, 0.007, 1.0, 0.986]->[0.527, 0.013, 1.0, 0.983]: sit."]}]}, {"video": "2KpThOF_QmE.mkv", "clip": [1141, 1205], "conversations": [{"from": "human", "timestamps": 1171.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.122, 0.032, 0.611, 0.997]: carry/hold (an object), sit, talk to (e.g., self, a person, a group); [0.383, 0.317, 0.633, 0.864]: carry/hold (an object), sit, watch (a person), listen to (a person); [0.537, 0.21, 0.761, 0.864]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1205.0, "value": "What actions is the person in bbox [0.549, 0.218, 0.733, 0.801] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.549, 0.218, 0.733, 0.801]: sit, smoke, listen to (a person).", "From 1203.0s to 1204.0s, [0.54, 0.209, 0.736, 0.809]->[0.545, 0.213, 0.735, 0.801]: sit, smoke, listen to (a person).", "At 1202.0s, [0.533, 0.208, 0.734, 0.851]: sit, carry/hold (an object), smoke, listen to (a person).", "At 1201.0s, [0.541, 0.219, 0.736, 0.844]: sit, smoke, listen to (a person).", "At 1200.0s, [0.527, 0.199, 0.736, 0.663]: sit, carry/hold (an object), smoke, listen to (a person).", "At 1199.0s, [0.566, 0.229, 0.765, 0.689]: sit, listen to (a person), watch (a person).", "At 1198.0s, [0.631, 0.046, 0.781, 0.677]: bend/bow (at the waist), carry/hold (an object), listen to (a person), watch (a person).", "At 1197.0s, [0.648, 0.008, 0.78, 0.679]: stand, carry/hold (an object), smoke, listen to (a person), talk to (e.g., self, a person, a group).", "From 1194.0s to 1196.0s, [0.562, 0.022, 0.793, 0.932]->[0.66, 0.023, 0.93, 0.989]->[0.644, 0.014, 0.846, 0.895]: walk, carry/hold (an object), listen to (a person).", "At 1193.0s, [0.467, 0.024, 0.723, 0.838]: stand, carry/hold (an object), smoke, take (an object) from (a person), watch (a person).", "From 1191.0s to 1192.0s, [0.431, 0.014, 0.718, 0.918]->[0.45, 0.016, 0.717, 0.627]: stand, carry/hold (an object), smoke, watch (a person).", "At 1190.0s, [0.461, 0.018, 0.719, 0.642]: stand, carry/hold (an object), watch (a person).", "At 1189.0s, [0.488, 0.007, 0.71, 0.896]: stand, carry/hold (an object), smoke, watch (a person).", "At 1188.0s, [0.419, 0.013, 0.74, 0.931]: stand, smoke, watch (a person).", "At 1187.0s, [0.427, 0.015, 0.733, 0.923]: stand, carry/hold (an object), watch (a person).", "At 1186.0s, [0.445, 0.017, 0.718, 0.627]: stand, carry/hold (an object).", "At 1185.0s, [0.453, 0.041, 0.714, 0.895]: stand, carry/hold (an object), smoke.", "At 1184.0s, [0.449, 0.051, 0.745, 0.935]: stand, carry/hold (an object).", "From 1182.0s to 1183.0s, [0.515, 0.038, 0.809, 0.906]->[0.439, 0.044, 0.755, 0.942]: stand, carry/hold (an object), smoke, give/serve (an object) to (a person).", "At 1181.0s, [0.663, 0.061, 0.888, 0.995]: walk, carry/hold (an object), smoke.", "At 1180.0s, [0.68, 0.014, 0.863, 0.983]: walk, carry/hold (an object).", "At 1179.0s, [0.579, 0.204, 0.789, 0.808]: carry/hold (an object), get up.", "At 1178.0s, [0.58, 0.199, 
0.781, 0.781]: sit, carry/hold (an object).", "At 1177.0s, [0.571, 0.256, 0.779, 0.852]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1176.0s, [0.574, 0.25, 0.769, 0.791]: sit, carry/hold (an object), smoke, watch (a person).", "At 1175.0s, [0.575, 0.233, 0.757, 0.733]: sit, carry/hold (an object), smoke, listen to (a person), watch (a person).", "At 1174.0s, [0.581, 0.235, 0.766, 0.773]: sit, smoke, listen to (a person).", "At 1173.0s, [0.545, 0.237, 0.759, 0.871]: sit, carry/hold (an object), listen to (a person).", "At 1172.0s, [0.586, 0.222, 0.76, 0.845]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1171.0s, [0.537, 0.21, 0.761, 0.864]: sit, listen to (a person), watch (a person).", "At 1170.0s, [0.546, 0.217, 0.758, 0.853]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1169.0s, [0.549, 0.208, 0.752, 0.883]: sit, listen to (a person), watch (a person).", "At 1168.0s, [0.571, 0.185, 0.758, 0.826]: sit, carry/hold (an object), listen to (a person).", "At 1167.0s, [0.54, 0.198, 0.762, 0.806]: sit, touch (an object), listen to (a person).", "At 1166.0s, [0.575, 0.141, 0.811, 0.83]: bend/bow (at the waist), touch (an object), listen to (a person).", "At 1165.0s, [0.522, 0.024, 0.807, 0.758]: bend/bow (at the waist), carry/hold (an object), touch (an object).", "At 1164.0s, [0.511, 0.006, 0.795, 0.67]: bend/bow (at the waist), carry/hold (an object)."]}]}, {"video": "2KpThOF_QmE.mkv", "clip": [1192, 1230], "conversations": [{"from": "human", "timestamps": 1222.0, "value": "Describe the actions of the person at bbox [0.377, 0.299, 0.648, 0.829] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.377, 0.299, 0.648, 0.829]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1221.0s, [0.372, 0.304, 0.643, 0.827]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1220.0s, [0.374, 0.305, 0.635, 0.812]: sit, carry/hold (an object), listen to (a person).", "From 1218.0s to 1219.0s, [0.373, 0.3, 0.65, 0.812]->[0.37, 0.297, 0.651, 0.817]: sit, smoke, listen to (a person), talk to (e.g., self, a person, a group).", "From 1216.0s to 1217.0s, [0.378, 0.314, 0.621, 0.817]->[0.371, 0.302, 0.662, 0.819]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1215.0s, [0.374, 0.314, 0.618, 0.817]: sit, carry/hold (an object).", "From 1213.0s to 1214.0s, [0.376, 0.317, 0.621, 0.812]->[0.374, 0.314, 0.618, 0.807]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1212.0s, [0.384, 0.306, 0.652, 0.813]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1211.0s, [0.378, 0.317, 0.618, 0.807]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 1209.0s to 1210.0s, [0.382, 0.302, 0.588, 0.806]->[0.379, 0.302, 0.653, 0.813]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1208.0s, [0.374, 0.309, 0.51, 0.763]: sit, listen to (a person), watch (a person).", "From 1206.0s to 1207.0s, [0.372, 0.317, 0.502, 0.778]->[0.356, 0.314, 0.507, 0.829]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1199.0s to 1204.0s, [0.37, 0.326, 0.656, 0.835]->[0.372, 0.29, 0.672, 0.806]->[0.374, 0.324, 0.618, 0.819]->[0.368, 0.312, 0.615, 0.807]->[0.369, 0.317, 0.617, 0.8]->[0.367, 0.304, 0.664, 0.827]: sit, carry/hold 
(an object), listen to (a person).", "At 1198.0s, [0.366, 0.339, 0.637, 0.837]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1197.0s, [0.384, 0.316, 0.654, 0.839]: sit, listen to (a person), watch (a person).", "At 1196.0s, [0.389, 0.322, 0.628, 0.834]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1193.0s to 1195.0s, [0.381, 0.348, 0.574, 0.77]->[0.401, 0.341, 0.609, 0.811]->[0.393, 0.333, 0.625, 0.823]: sit, carry/hold (an object), listen to (a person)."]}, {"from": "human", "timestamps": 1230.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.127, 0.022, 0.643, 0.982]: sit, talk to (e.g., self, a person, a group); [0.371, 0.307, 0.628, 0.824]: listen to (a person), sit, eat; [0.542, 0.221, 0.736, 0.861]: listen to (a person), sit, smoke."}]}, {"video": "2KpThOF_QmE.mkv", "clip": [1251, 1286], "conversations": [{"from": "human", "timestamps": 1281.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.562, 0.228, 0.763, 0.803]?"}, {"from": "gpt", "value": ["At Now, [0.562, 0.228, 0.763, 0.803]: sit, smoke, talk to (e.g., self, a person, a group), watch (a person).", "From 1279.0s to 1280.0s, [0.56, 0.224, 0.756, 0.732]->[0.561, 0.228, 0.767, 0.756]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1278.0s, [0.559, 0.217, 0.762, 0.662]: sit, smoke, talk to (e.g., self, a person, a group).", "At 1277.0s, [0.56, 0.219, 0.757, 0.656]: sit, smoke, watch (a person).", "At 1276.0s, [0.56, 0.221, 0.761, 0.664]: sit, watch (a person).", "At 1275.0s, [0.562, 0.22, 0.76, 0.679]: sit, listen to (a person).", "At 1274.0s, [0.558, 0.221, 0.762, 0.682]: sit, smoke, listen to (a person), watch (a person).", "At 1273.0s, [0.56, 0.219, 0.76, 0.669]: sit, carry/hold (an object), smoke, listen to (a person), watch (a person).", "At 1272.0s, [0.561, 0.222, 0.762, 0.667]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1271.0s, [0.561, 0.221, 0.762, 0.668]: sit, listen to (a person), watch (a person).", "At 1270.0s, [0.558, 0.221, 0.764, 0.663]: sit, carry/hold (an object), watch (a person).", "At 1269.0s, [0.559, 0.222, 0.762, 0.661]: sit, smoke, watch (a person).", "From 1262.0s to 1268.0s, [0.56, 0.214, 0.75, 0.686]->[0.553, 0.217, 0.748, 0.667]->[0.557, 0.216, 0.751, 0.686]->[0.559, 0.218, 0.748, 0.665]->[0.555, 0.215, 0.756, 0.649]->[0.556, 0.219, 0.761, 0.669]->[0.558, 0.222, 0.762, 0.665]: sit, smoke, listen to (a person).", "At 1261.0s, [0.557, 0.205, 0.74, 0.634]: sit, smoke.", "At 1260.0s, [0.556, 0.204, 0.739, 0.635]: sit, smoke, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1258.0s to 1259.0s, [0.556, 0.214, 0.738, 0.679]->[0.558, 0.206, 0.736, 0.642]: sit, smoke, listen to (a person), watch (a person).", "At 1257.0s, [0.557, 0.196, 0.737, 0.69]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1256.0s, [0.539, 0.156, 0.744, 0.695]: sit, smoke, talk to (e.g., self, a person, a group), watch (a person).", "At 1255.0s, [0.558, 0.196, 0.775, 0.656]: sit, smoke, talk to (e.g., self, a person, a group).", "At 1254.0s, [0.556, 0.217, 0.767, 0.79]: sit, smoke, listen to (a person), talk to (e.g., self, a person, a group).", "At 1253.0s, [0.562, 0.22, 0.77, 0.694]: sit, smoke, listen to (a person).", "From 1251.0s to 1252.0s, [0.552, 0.219, 0.762, 0.754]->[0.555, 0.22, 0.764, 0.751]: sit, carry/hold (an object), listen to 
(a person), watch (a person).", "At 1250.0s, [0.558, 0.22, 0.764, 0.858]: sit, carry/hold (an object), listen to (a person).", "At 1249.0s, [0.557, 0.223, 0.768, 0.879]: sit, smoke, listen to (a person).", "At 1248.0s, [0.563, 0.216, 0.761, 0.872]: sit, listen to (a person).", "At 1247.0s, [0.557, 0.217, 0.761, 0.857]: sit, carry/hold (an object), listen to (a person).", "At 1246.0s, [0.555, 0.218, 0.76, 0.842]: sit, carry/hold (an object).", "At 1245.0s, [0.555, 0.213, 0.757, 0.858]: sit, carry/hold (an object), smoke.", "At 1244.0s, [0.557, 0.213, 0.761, 0.853]: sit, smoke, listen to (a person).", "At 1243.0s, [0.553, 0.224, 0.756, 0.837]: sit, smoke.", "At 1242.0s, [0.552, 0.215, 0.75, 0.862]: sit, smoke, watch (a person)."]}, {"from": "human", "timestamps": 1286.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.119, 0.026, 0.59, 0.97]: sit, talk to (e.g., self, a person, a group); [0.385, 0.335, 0.64, 0.78]: listen to (a person), sit, watch (a person); [0.558, 0.219, 0.76, 0.757]: carry/hold (an object), sit, watch (a person), listen to (a person)."}]}, {"video": "2KpThOF_QmE.mkv", "clip": [1296, 1374], "conversations": [{"from": "human", "timestamps": 1326.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.276, 0.743, 0.967]: listen to (a person), bend/bow (at the waist)."}, {"from": "human", "timestamps": 1374.0, "value": "What actions is the person in bbox [0.457, 0.286, 0.922, 0.985] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.457, 0.286, 0.922, 0.985]: stand, eat.", "At 1373.0s, [0.465, 0.278, 0.94, 0.982]: stand, eat, talk to (e.g., self, a person, a group).", "At 1372.0s, [0.539, 0.256, 0.952, 1.0]: stand, eat, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "2KpThOF_QmE.mkv", "clip": [1359, 1400], "conversations": [{"from": "human", "timestamps": 1389.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.268, 0.0, 0.602, 0.96]: listen to (a person), stand, watch (a person); [0.504, 0.26, 0.96, 0.991]: stand, eat, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1400.0, "value": "What actions did the person at bbox [0.588, 0.328, 0.935, 0.984] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.588, 0.328, 0.935, 0.984]: stand, eat, listen to (a person), watch (a person).", "From 1397.0s to 1399.0s, [0.568, 0.305, 0.943, 0.99]->[0.569, 0.309, 0.952, 0.993]->[0.588, 0.311, 0.927, 0.983]: stand, eat, listen to (a person), watch (a person)."]}]}, {"video": "2KpThOF_QmE.mkv", "clip": [1375, 1483], "conversations": [{"from": "human", "timestamps": 1405.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.241, 0.0, 0.596, 0.975]: stand, talk to (e.g., self, a person, a group); [0.543, 0.331, 0.936, 0.979]: listen to (a person), stand, eat."}, {"from": "human", "timestamps": 1483.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.444, 0.0, 0.984, 0.97]?"}, {"from": "gpt", "value": ["At Now, [0.444, 0.0, 0.984, 0.97]: stand, talk to (e.g., self, a person, a group).", "At 1482.0s, [0.416, 0.002, 0.992, 0.981]: stand, talk to 
(e.g., self, a person, a group).", "At 1481.0s, [0.39, 0.022, 0.986, 0.967]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "2KpThOF_QmE.mkv", "clip": [1427, 1470], "conversations": [{"from": "human", "timestamps": 1457.0, "value": "What actions did the person at bbox [0.016, 0.092, 0.237, 0.991] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.016, 0.092, 0.237, 0.991]: stand, listen to (a person), watch (a person).", "From 1451.0s to 1456.0s, [0.0, 0.039, 0.131, 0.981]->[0.013, 0.049, 0.23, 0.98]->[0.012, 0.061, 0.207, 0.989]->[0.019, 0.061, 0.233, 0.992]->[0.002, 0.068, 0.202, 0.962]->[0.0, 0.097, 0.224, 0.968]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1470.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.158, 0.237, 0.993]: listen to (a person), stand, watch (a person); [0.107, 0.277, 0.439, 0.994]: listen to (a person), stand, watch (a person); [0.482, 0.016, 0.738, 0.972]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.675, 0.098, 0.99, 0.997]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "2KpThOF_QmE.mkv", "clip": [1494, 1541], "conversations": [{"from": "human", "timestamps": 1524.0, "value": "What actions did the person at bbox [0.759, 0.1, 0.94, 0.966] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.759, 0.1, 0.94, 0.966]: walk, carry/hold (an object), watch (a person).", "At 1523.0s, [0.717, 0.098, 0.98, 0.978]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1522.0s, [0.862, 0.103, 1.0, 0.977]: stand.", "At 1521.0s, [0.823, 0.12, 1.0, 1.0]: stand, listen to (a person), watch (a person).", "At 1520.0s, [0.791, 0.134, 1.0, 0.981]: stand, listen to (a person).", "From 1515.0s to 1519.0s, [0.735, 0.165, 0.997, 0.983]->[0.732, 0.152, 0.998, 0.986]->[0.748, 0.161, 0.997, 0.984]->[0.767, 0.142, 1.0, 1.0]->[0.777, 0.13, 0.999, 0.984]: stand, listen to (a person), watch (a person).", "At 1514.0s, [0.734, 0.106, 1.0, 0.985]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1513.0s, [0.77, 0.115, 1.0, 0.979]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1507.0s to 1512.0s, [0.706, 0.139, 0.996, 0.993]->[0.749, 0.109, 0.997, 0.988]->[0.715, 0.119, 0.997, 0.993]->[0.718, 0.153, 0.997, 0.993]->[0.849, 0.156, 1.0, 0.986]->[0.774, 0.134, 1.0, 0.979]: stand, listen to (a person), watch (a person).", "At 1506.0s, [0.661, 0.059, 0.957, 0.986]: stand, listen to (a person), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1541.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.268, 0.191, 0.496, 0.497]: listen to (a person), stand, touch (an object), watch (a person); [0.672, 0.112, 0.835, 0.542]: watch (a person), stand, touch (an object), talk to (e.g., self, a person, a group)."}]}, {"video": "2KpThOF_QmE.mkv", "clip": [1521, 1607], "conversations": [{"from": "human", "timestamps": 1551.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.275, 0.186, 0.501, 0.517]: listen to (a person), stand, touch (an object), watch (a person); [0.673, 0.107, 0.836, 0.53]: watch (a person), stand, touch (an object), talk to (e.g., self, a person, a 
group)."}, {"from": "human", "timestamps": 1607.0, "value": "What actions is the person in bbox [0.0, 0.0, 0.415, 0.973] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.0, 0.415, 0.973]: stand, talk to (e.g., self, a person, a group).", "At 1606.0s, [0.025, 0.0, 0.492, 0.987]: stand, talk to (e.g., self, a person, a group).", "From 1602.0s to 1605.0s, [0.573, 0.095, 1.0, 0.962]->[0.015, 0.0, 0.407, 0.987]->[0.0, 0.002, 0.383, 0.993]->[0.003, 0.0, 0.445, 0.982]: sit, talk to (e.g., self, a person, a group).", "At 1601.0s, [0.606, 0.08, 1.0, 0.957]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1600.0s, [0.621, 0.03, 0.995, 1.0]: stand, listen to (a person).", "At 1599.0s, [0.314, 0.062, 1.0, 0.956]: sit, fight/hit (a person)."]}]}, {"video": "2KpThOF_QmE.mkv", "clip": [1531, 1594], "conversations": [{"from": "human", "timestamps": 1561.0, "value": "What actions is the person in bbox [0.324, 0.104, 0.607, 0.922] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.324, 0.104, 0.607, 0.922]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1554.0s to 1560.0s, [0.664, 0.102, 0.814, 0.537]->[0.329, 0.112, 0.608, 0.916]->[0.351, 0.111, 0.606, 0.91]->[0.343, 0.109, 0.599, 0.909]->[0.319, 0.115, 0.608, 0.906]->[0.331, 0.094, 0.617, 0.923]->[0.325, 0.108, 0.61, 0.913]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1553.0s, [0.66, 0.118, 0.818, 0.543]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1548.0s to 1552.0s, [0.671, 0.104, 0.833, 0.543]->[0.648, 0.126, 0.824, 0.542]->[0.689, 0.112, 0.846, 0.543]->[0.673, 0.107, 0.836, 0.53]->[0.667, 0.116, 0.823, 0.544]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1547.0s, [0.68, 0.102, 0.841, 0.533]: stand, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1539.0s to 1546.0s, [0.659, 0.123, 0.836, 0.534]->[0.667, 0.115, 0.834, 0.535]->[0.672, 0.112, 0.835, 0.542]->[0.675, 0.105, 0.836, 0.543]->[0.67, 0.11, 0.835, 0.543]->[0.669, 0.103, 0.839, 0.533]->[0.652, 0.119, 0.818, 0.544]->[0.649, 0.127, 0.836, 0.551]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1538.0s, [0.685, 0.108, 0.854, 0.546]: stand, touch (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1594.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.012, 0.405, 0.979]: listen to (a person), sit, smoke; [0.623, 0.024, 1.0, 0.973]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "2KpThOF_QmE.mkv", "clip": [1593, 1647], "conversations": [{"from": "human", "timestamps": 1623.0, "value": "What actions is the person in bbox [0.159, 0.117, 0.808, 0.973] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.159, 0.117, 0.808, 0.973]: stand.", "At 1622.0s, [0.15, 0.132, 0.876, 0.971]: stand, listen to (a person).", "From 1620.0s to 1621.0s, [0.126, 0.078, 0.844, 0.909]->[0.147, 0.147, 0.863, 0.967]: sit, listen to (a person)."]}, {"from": "human", "timestamps": 1647.0, "value": "What actions is each person performing, and 
where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.012, 0.006, 0.453, 0.982]: listen to (a person), sit, smoke; [0.672, 0.069, 1.0, 0.981]: sit."}]}, {"video": "2KpThOF_QmE.mkv", "clip": [1608, 1694], "conversations": [{"from": "human", "timestamps": 1638.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.004, 0.024, 0.658, 0.93]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.099, 0.0, 0.839, 0.995]: listen to (a person), sit, stand."}, {"from": "human", "timestamps": 1694.0, "value": "For the person at bbox [0.003, 0.007, 0.34, 0.995], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.003, 0.007, 0.34, 0.995]: sit, listen to (a person).", "At 1693.0s, [0.006, 0.0, 0.194, 0.995]: stand, listen to (a person), watch (a person)."]}]}, {"video": "2KpThOF_QmE.mkv", "clip": [1655, 1698], "conversations": [{"from": "human", "timestamps": 1685.0, "value": "What actions did the person at bbox [0.079, 0.03, 0.868, 0.961] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.079, 0.03, 0.868, 0.961]: sit, listen to (a person).", "From 1682.0s to 1684.0s, [0.081, 0.058, 0.81, 0.977]->[0.071, 0.043, 0.881, 0.972]->[0.071, 0.05, 0.877, 0.97]: sit, talk to (e.g., self, a person, a group).", "At 1681.0s, [0.112, 0.088, 0.839, 0.954]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1680.0s, [0.136, 0.032, 0.871, 0.973]: sit, stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1679.0s, [0.137, 0.052, 0.912, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1676.0s to 1678.0s, [0.254, 0.005, 0.897, 0.983]->[0.175, 0.035, 0.921, 0.939]->[0.179, 0.016, 0.905, 0.909]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1698.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.401, 0.652, 0.501, 0.958]: sit; [0.559, 0.66, 0.658, 0.94]: sit."}]}, {"video": "2bxKkUgcqpk.mp4", "clip": [902, 955], "conversations": [{"from": "human", "timestamps": 932.0, "value": "Describe the actions of the person at bbox [0.436, 0.337, 0.599, 0.797] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.436, 0.337, 0.599, 0.797]: sit, carry/hold (an object).", "At 931.0s, [0.469, 0.339, 0.639, 0.809]: sit, carry/hold (an object)."]}, {"from": "human", "timestamps": 955.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.267, 0.138, 0.635, 0.749]: carry/hold (an object), sit."}]}, {"video": "2bxKkUgcqpk.mp4", "clip": [940, 996], "conversations": [{"from": "human", "timestamps": 970.0, "value": "For the person at bbox [0.325, 0.166, 0.635, 0.947], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.325, 0.166, 0.635, 0.947]: walk, carry/hold (an object).", "At 969.0s, [0.299, 0.183, 0.653, 1.0]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 967.0s to 968.0s, [0.498, 0.156, 0.837, 1.0]->[0.375, 0.266, 0.862, 0.993]: walk, carry/hold (an object).", "At 966.0s, [0.497, 0.146, 0.753, 0.996]: stand, carry/hold (an object).", "At 965.0s, [0.315, 0.024, 0.768, 0.808]: carry/hold (an object), get up.", "At 964.0s, [0.256, 
0.13, 0.59, 0.791]: sit, carry/hold (an object)."]}, {"from": "human", "timestamps": 996.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.046, 0.071, 0.863, 1.0]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "2bxKkUgcqpk.mp4", "clip": [1049, 1119], "conversations": [{"from": "human", "timestamps": 1079.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.194, 0.034, 0.961, 0.98]: sit, stand."}, {"from": "human", "timestamps": 1119.0, "value": "Describe the actions of the person at bbox [0.106, 0.031, 0.835, 0.986] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.106, 0.031, 0.835, 0.986]: sit, carry/hold (an object), give/serve (an object) to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1118.0s, [0.122, 0.012, 1.0, 1.0]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1117.0s, [0.131, 0.071, 0.996, 1.0]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "2bxKkUgcqpk.mp4", "clip": [1083, 1128], "conversations": [{"from": "human", "timestamps": 1113.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.052, 0.063, 0.858, 0.977]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1128.0, "value": "For the person at bbox [0.024, 0.033, 0.852, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.024, 0.033, 0.852, 1.0]: sit, talk to (e.g., self, a person, a group).", "At 1127.0s, [0.053, 0.035, 0.856, 0.988]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "2bxKkUgcqpk.mp4", "clip": [1165, 1204], "conversations": [{"from": "human", "timestamps": 1195.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.341, 0.25, 0.63, 0.985]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group); [0.521, 0.302, 0.834, 0.919]: listen to (a person), crouch/kneel, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1204.0, "value": "What actions is the person in bbox [0.314, 0.212, 0.628, 0.98] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.314, 0.212, 0.628, 0.98]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1203.0s, [0.315, 0.214, 0.619, 0.976]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1199.0s to 1202.0s, [0.29, 0.209, 0.657, 0.948]->[0.279, 0.216, 0.472, 0.524]->[0.319, 0.219, 0.65, 0.967]->[0.318, 0.222, 0.638, 1.0]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1198.0s, [0.181, 0.205, 0.646, 0.994]: sit, carry/hold (an object), put down, listen to (a person), talk to (e.g., self, a person, a group).", "At 1197.0s, [0.192, 0.216, 0.642, 0.981]: sit, carry/hold (an object), listen to (a person).", "At 1196.0s, [0.33, 0.227, 0.648, 0.973]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1192.0s to 1195.0s, [0.338, 0.263, 0.647, 0.992]->[0.329, 0.243, 0.645, 0.997]->[0.333, 0.268, 0.639, 
0.959]->[0.341, 0.25, 0.63, 0.985]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "2bxKkUgcqpk.mp4", "clip": [1280, 1314], "conversations": [{"from": "human", "timestamps": 1310.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.226, 0.06, 0.935, 0.959]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1314.0, "value": "Describe the actions of the person at bbox [0.072, 0.08, 0.895, 0.973] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.072, 0.08, 0.895, 0.973]: sit, watch (a person).", "From 1312.0s to 1313.0s, [0.082, 0.068, 0.863, 0.982]->[0.067, 0.051, 0.89, 0.971]: sit.", "At 1311.0s, [0.101, 0.052, 0.835, 0.991]: sit, listen to (a person), watch (a person)."]}]}, {"video": "2bxKkUgcqpk.mp4", "clip": [1256, 1341], "conversations": [{"from": "human", "timestamps": 1286.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.074, 0.062, 0.959, 0.984]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1341.0, "value": "For the person at bbox [0.421, 0.203, 0.606, 0.676], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.421, 0.203, 0.606, 0.676]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1332.0s to 1340.0s, [0.422, 0.199, 0.604, 0.676]->[0.42, 0.201, 0.603, 0.673]->[0.429, 0.191, 0.61, 0.679]->[0.427, 0.196, 0.61, 0.68]->[0.428, 0.202, 0.614, 0.689]->[0.424, 0.205, 0.607, 0.682]->[0.419, 0.2, 0.603, 0.668]->[0.418, 0.2, 0.601, 0.66]->[0.42, 0.203, 0.614, 0.692]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1331.0s, [0.425, 0.2, 0.61, 0.677]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1330.0s, [0.428, 0.196, 0.605, 0.662]: sit, carry/hold (an object), watch (a person)."]}]}, {"video": "2bxKkUgcqpk.mp4", "clip": [1366, 1431], "conversations": [{"from": "human", "timestamps": 1396.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.053, 0.242, 0.339, 0.912]: listen to (a person), sit, watch (a person); [0.418, 0.216, 0.618, 0.712]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1431.0, "value": "What actions is the person in bbox [0.102, 0.166, 0.969, 0.996] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.102, 0.166, 0.969, 0.996]: lie/sleep.", "From 1429.0s to 1430.0s, [0.098, 0.157, 0.988, 0.982]->[0.117, 0.137, 0.998, 0.995]: lie/sleep."]}]}, {"video": "2bxKkUgcqpk.mp4", "clip": [1349, 1457], "conversations": [{"from": "human", "timestamps": 1379.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.212, 0.021, 0.882, 0.971]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1457.0, "value": "For the person at bbox [0.137, 0.023, 0.745, 0.975], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.137, 0.023, 0.745, 0.975]: 
stand, talk to (e.g., self, a person, a group).", "From 1455.0s to 1456.0s, [0.149, 0.027, 0.743, 0.99]->[0.103, 0.013, 0.747, 0.99]: stand, talk to (e.g., self, a person, a group).", "At 1453.0s, [0.189, 0.042, 0.796, 0.979]: stand."]}]}, {"video": "2bxKkUgcqpk.mp4", "clip": [1506, 1552], "conversations": [{"from": "human", "timestamps": 1536.0, "value": "Describe the actions of the person at bbox [0.076, 0.0, 0.604, 0.991] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.076, 0.0, 0.604, 0.991]: sit, listen to (a person).", "At 1535.0s, [0.047, 0.091, 0.576, 0.977]: sit."]}, {"from": "human", "timestamps": 1552.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.019, 0.013, 0.839, 0.976]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "2bxKkUgcqpk.mp4", "clip": [1511, 1590], "conversations": [{"from": "human", "timestamps": 1541.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.01, 0.0, 0.761, 0.962]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1590.0, "value": "What actions did the person at bbox [0.007, 0.0, 0.915, 0.993] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.007, 0.0, 0.915, 0.993]: stand, talk to (e.g., self, a person, a group).", "From 1588.0s to 1589.0s, [0.021, 0.0, 0.907, 0.996]->[0.002, 0.007, 0.905, 1.0]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "2bxKkUgcqpk.mp4", "clip": [1538, 1614], "conversations": [{"from": "human", "timestamps": 1568.0, "value": "What actions is the person in bbox [0.028, 0.01, 0.864, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.028, 0.01, 0.864, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1567.0s, [0.0, 0.035, 0.87, 0.977]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1614.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.0, 0.931, 0.976]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "2bxKkUgcqpk.mp4", "clip": [1616, 1652], "conversations": [{"from": "human", "timestamps": 1646.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.152, 0.39, 0.617, 0.99]: stand, talk to (e.g., self, a person, a group); [0.514, 0.162, 0.996, 0.987]: listen to (a person), lift (a person), watch (a person), bend/bow (at the waist)."}, {"from": "human", "timestamps": 1652.0, "value": "What actions did the person at bbox [0.243, 0.225, 0.517, 0.99] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.243, 0.225, 0.517, 0.99]: walk, watch (a person).", "At 1651.0s, [0.249, 0.105, 0.505, 0.972]: walk.", "At 1650.0s, [0.254, 0.225, 0.451, 0.989]: walk, grab (a person), listen to (a person), watch (a person).", "At 1649.0s, [0.296, 0.271, 0.57, 0.98]: stand, listen to (a person), watch (a person).", "At 1648.0s, [0.108, 0.41, 0.525, 0.988]: grab (a person), talk to (e.g., self, a person, a group).", "At 1647.0s, [0.142, 0.483, 0.598, 0.994]: sit, talk to (e.g., self, a person, a group).", "At 1646.0s, [0.152, 0.39, 0.617, 0.99]: stand, talk to (e.g., self, a person, a group).", "From 1644.0s to 1645.0s, 
[0.063, 0.167, 0.77, 1.0]->[0.113, 0.475, 0.696, 0.991]: stand, hug (a person), talk to (e.g., self, a person, a group)."]}]}, {"video": "2bxKkUgcqpk.mp4", "clip": [1618, 1700], "conversations": [{"from": "human", "timestamps": 1648.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.108, 0.41, 0.525, 0.988]: grab (a person), talk to (e.g., self, a person, a group); [0.321, 0.017, 0.622, 0.705]: listen to (a person), stand, grab (a person), watch (a person); [0.392, 0.066, 0.96, 0.986]: carry/hold (an object), grab (a person), bend/bow (at the waist), listen to (a person)."}, {"from": "human", "timestamps": 1700.0, "value": "Describe the actions of the person at bbox [0.093, 0.036, 0.971, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.093, 0.036, 0.971, 1.0]: sit, carry/hold (an object).", "At 1699.0s, [0.086, 0.036, 0.994, 0.99]: sit, carry/hold (an object).", "From 1695.0s to 1698.0s, [0.095, 0.053, 0.978, 0.99]->[0.099, 0.005, 0.999, 1.0]->[0.131, 0.028, 1.0, 0.983]->[0.107, 0.036, 0.989, 1.0]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1693.0s to 1694.0s, [0.175, 0.141, 0.922, 0.981]->[0.15, 0.118, 0.919, 0.995]: sit, carry/hold (an object).", "At 1692.0s, [0.111, 0.079, 0.923, 0.987]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), lie/sleep.", "From 1687.0s to 1691.0s, [0.152, 0.092, 0.94, 0.977]->[0.137, 0.071, 0.914, 0.978]->[0.122, 0.122, 0.934, 0.985]->[0.104, 0.083, 0.907, 0.976]->[0.157, 0.094, 0.907, 0.975]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1680.0s to 1686.0s, [0.1, 0.062, 0.891, 1.0]->[0.146, 0.07, 0.908, 0.997]->[0.125, 0.056, 0.92, 1.0]->[0.079, 0.059, 0.925, 0.988]->[0.144, 0.054, 0.924, 1.0]->[0.172, 0.077, 0.916, 0.988]->[0.105, 0.083, 0.905, 0.966]: sit, carry/hold (an object).", "From 1677.0s to 1678.0s, [0.384, 0.267, 0.689, 0.825]->[0.264, 0.121, 0.737, 0.988]: sit, carry/hold (an object).", "At 1676.0s, [0.382, 0.299, 0.698, 0.84]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1674.0s to 1675.0s, [0.377, 0.309, 0.693, 0.877]->[0.396, 0.307, 0.693, 0.868]: sit, carry/hold (an object).", "At 1673.0s, [0.392, 0.328, 0.692, 0.861]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1670.0s to 1671.0s, [0.403, 0.329, 0.692, 0.873]->[0.393, 0.342, 0.691, 0.875]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1666.0s to 1669.0s, [0.413, 0.29, 0.685, 0.841]->[0.415, 0.306, 0.695, 0.82]->[0.381, 0.332, 0.685, 0.875]->[0.408, 0.33, 0.686, 0.859]: sit, carry/hold (an object)."]}]}, {"video": "2bxKkUgcqpk.mp4", "clip": [1669, 1713], "conversations": [{"from": "human", "timestamps": 1699.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.086, 0.036, 0.994, 0.99]: carry/hold (an object), sit."}, {"from": "human", "timestamps": 1713.0, "value": "What actions did the person at bbox [0.31, 0.273, 0.454, 0.744] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.31, 0.273, 0.454, 0.744]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1712.0s, [0.526, 0.338, 0.638, 0.798]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1709.0s to 
1711.0s, [0.661, 0.414, 0.724, 0.652]->[0.628, 0.377, 0.714, 0.662]->[0.531, 0.364, 0.666, 0.728]: run/jog, carry/hold (an object), watch (a person).", "From 1707.0s to 1708.0s, [0.744, 0.408, 0.788, 0.57]->[0.694, 0.389, 0.749, 0.594]: run/jog, carry/hold (an object).", "At 1706.0s, [0.828, 0.366, 0.866, 0.513]: run/jog."]}]}, {"video": "2fwni_Kjf2M.mkv", "clip": [876, 917], "conversations": [{"from": "human", "timestamps": 906.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.299, 0.362, 0.548, 0.799]: stand, touch (an object)."}, {"from": "human", "timestamps": 917.0, "value": "For the person at bbox [0.555, 0.117, 0.802, 0.992], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.555, 0.117, 0.802, 0.992]: walk.", "At 916.0s, [0.553, 0.159, 0.726, 0.915]: walk, enter.", "At 915.0s, [0.666, 0.21, 0.739, 0.86]: walk."]}]}, {"video": "2fwni_Kjf2M.mkv", "clip": [912, 1004], "conversations": [{"from": "human", "timestamps": 942.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.056, 0.049, 0.719, 0.981]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1004.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.016, 0.018, 0.893, 0.998]?"}, {"from": "gpt", "value": ["At Now, [0.016, 0.018, 0.893, 0.998]: stand, listen to (a person), watch (a person).", "At 1003.0s, [0.031, 0.0, 0.836, 0.989]: stand, listen to (a person), watch (a person).", "At 1002.0s, [0.008, 0.011, 0.863, 0.972]: stand, watch (a person)."]}]}, {"video": "2fwni_Kjf2M.mkv", "clip": [949, 1019], "conversations": [{"from": "human", "timestamps": 979.0, "value": "For the person at bbox [0.034, 0.027, 0.882, 0.987], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.034, 0.027, 0.882, 0.987]: stand, talk to (e.g., self, a person, a group).", "At 978.0s, [0.015, 0.008, 0.902, 0.975]: stand, talk to (e.g., self, a person, a group).", "From 976.0s to 977.0s, [0.029, 0.003, 0.892, 0.993]->[0.034, 0.013, 0.859, 0.976]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1019.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.31, 0.0, 0.983, 0.977]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "2fwni_Kjf2M.mkv", "clip": [1005, 1068], "conversations": [{"from": "human", "timestamps": 1035.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.015, 0.001, 0.776, 0.988]: bend/bow (at the waist)."}, {"from": "human", "timestamps": 1068.0, "value": "For the person at bbox [0.05, 0.401, 0.731, 0.976], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.05, 0.401, 0.731, 0.976]: lie/sleep.", "At 1067.0s, [0.024, 0.363, 0.397, 0.907]: lie/sleep."]}]}, {"video": "2fwni_Kjf2M.mkv", "clip": [1057, 1129], "conversations": [{"from": "human", "timestamps": 1087.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.195, 0.022, 1.0, 0.995]: sit."}, {"from": "human", "timestamps": 1129.0, "value": "For the person at bbox [0.492, 0.267, 0.598, 0.896], 
what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.492, 0.267, 0.598, 0.896]: bend/bow (at the waist), listen to (a person).", "At 1128.0s, [0.466, 0.272, 0.584, 0.905]: stand, listen to (a person).", "At 1127.0s, [0.544, 0.256, 0.658, 0.913]: walk.", "At 1126.0s, [0.556, 0.266, 0.681, 0.932]: walk, listen to (a person).", "At 1125.0s, [0.55, 0.224, 0.691, 0.983]: walk."]}]}, {"video": "2fwni_Kjf2M.mkv", "clip": [1150, 1185], "conversations": [{"from": "human", "timestamps": 1180.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.243, 0.0, 0.926, 0.991]: stand, answer phone."}, {"from": "human", "timestamps": 1185.0, "value": "What actions did the person at bbox [0.322, 0.159, 0.607, 0.989] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.322, 0.159, 0.607, 0.989]: sit, answer phone, carry/hold (an object).", "At 1184.0s, [0.341, 0.133, 0.623, 0.987]: sit, answer phone, carry/hold (an object).", "From 1182.0s to 1183.0s, [0.406, 0.1, 0.671, 0.981]->[0.381, 0.093, 0.648, 0.99]: sit, answer phone."]}]}, {"video": "2fwni_Kjf2M.mkv", "clip": [1180, 1228], "conversations": [{"from": "human", "timestamps": 1210.0, "value": "Describe the actions of the person at bbox [0.0, 0.0, 0.739, 0.969] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.0, 0.0, 0.739, 0.969]: stand, answer phone.", "At 1209.0s, [0.0, 0.019, 0.74, 0.963]: stand, answer phone.", "At 1208.0s, [0.062, 0.01, 0.78, 1.0]: stand, answer phone, talk to (e.g., self, a person, a group).", "From 1206.0s to 1207.0s, [0.0, 0.0, 0.751, 0.994]->[0.001, 0.0, 0.77, 1.0]: stand, answer phone."]}, {"from": "human", "timestamps": 1228.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.013, 0.731, 0.971]: stand, answer phone."}]}, {"video": "2fwni_Kjf2M.mkv", "clip": [1249, 1304], "conversations": [{"from": "human", "timestamps": 1279.0, "value": "Describe the actions of the person at bbox [0.139, 0.157, 0.537, 0.979] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.139, 0.157, 0.537, 0.979]: sit, talk to (e.g., self, a person, a group).", "From 1276.0s to 1278.0s, [0.155, 0.154, 0.5, 0.987]->[0.147, 0.133, 0.511, 0.977]->[0.165, 0.16, 0.533, 0.973]: sit, talk to (e.g., self, a person, a group).", "At 1275.0s, [0.162, 0.172, 0.501, 0.982]: sit, crouch/kneel, talk to (e.g., self, a person, a group).", "At 1274.0s, [0.151, 0.183, 0.486, 0.992]: sit.", "At 1273.0s, [0.189, 0.187, 0.523, 0.989]: sit, talk to (e.g., self, a person, a group).", "At 1272.0s, [0.209, 0.178, 0.569, 0.993]: crouch/kneel, talk to (e.g., self, a person, a group).", "At 1271.0s, [0.195, 0.142, 0.594, 1.0]: sit, crouch/kneel, talk to (e.g., self, a person, a group).", "At 1270.0s, [0.186, 0.126, 0.591, 0.987]: crouch/kneel, talk to (e.g., self, a person, a group).", "From 1268.0s to 1269.0s, [0.204, 0.12, 0.625, 0.988]->[0.168, 0.139, 0.602, 0.988]: crouch/kneel, touch (an object), talk to (e.g., self, a person, a group).", "At 1267.0s, [0.213, 0.108, 0.601, 0.983]: sit, crouch/kneel, touch (an object), talk to (e.g., self, a person, a group).", "At 1266.0s, [0.223, 0.091, 0.642, 0.984]: crouch/kneel, touch (an object), talk to (e.g., self, a person, a group).", "At 1265.0s, [0.222, 0.086, 0.588, 0.985]: sit, touch (an object), talk to 
(e.g., self, a person, a group).", "At 1264.0s, [0.229, 0.07, 0.557, 0.99]: talk to (e.g., self, a person, a group).", "From 1262.0s to 1263.0s, [0.237, 0.075, 0.521, 1.0]->[0.234, 0.064, 0.525, 1.0]: crouch/kneel, talk to (e.g., self, a person, a group).", "At 1261.0s, [0.26, 0.184, 0.556, 0.995]: crouch/kneel.", "From 1259.0s to 1260.0s, [0.221, 0.089, 0.551, 0.981]->[0.255, 0.197, 0.563, 0.987]: bend/bow (at the waist).", "At 1258.0s, [0.172, 0.019, 0.474, 0.987]: stand.", "At 1257.0s, [0.203, 0.019, 0.768, 0.991]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "From 1255.0s to 1256.0s, [0.212, 0.13, 0.709, 0.999]->[0.224, 0.069, 0.735, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1254.0s, [0.216, 0.146, 0.645, 0.99]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1253.0s, [0.324, 0.237, 0.593, 1.0]: walk, carry/hold (an object)."]}, {"from": "human", "timestamps": 1304.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.099, 0.427, 0.247, 0.796]: sit, talk to (e.g., self, a person, a group); [0.388, 0.24, 0.577, 0.695]: carry/hold (an object), bend/bow (at the waist); [0.816, 0.203, 0.928, 0.567]: carry/hold (an object), stand."}]}, {"video": "2fwni_Kjf2M.mkv", "clip": [1352, 1429], "conversations": [{"from": "human", "timestamps": 1382.0, "value": "For the person at bbox [0.008, 0.0, 0.841, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.008, 0.0, 0.841, 1.0]: sit, talk to (e.g., self, a person, a group).", "From 1380.0s to 1381.0s, [0.082, 0.404, 0.307, 0.946]->[0.075, 0.397, 0.306, 0.86]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1429.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.381, 0.231, 0.687, 0.996]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "2fwni_Kjf2M.mkv", "clip": [1478, 1514], "conversations": [{"from": "human", "timestamps": 1508.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.009, 0.058, 0.7, 0.988]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1514.0, "value": "What actions did the person at bbox [0.673, 0.296, 0.986, 0.984] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.673, 0.296, 0.986, 0.984]: stand, listen to (a person), watch (a person).", "From 1511.0s to 1513.0s, [0.221, 0.058, 0.792, 0.972]->[0.256, 0.051, 0.879, 0.977]->[0.218, 0.032, 0.835, 0.981]: stand, listen to (a person), watch (a person)."]}]}, {"video": "2fwni_Kjf2M.mkv", "clip": [1565, 1602], "conversations": [{"from": "human", "timestamps": 1595.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.078, 0.071, 0.594, 0.963]: listen to (a person), sit, kiss (a person); [0.464, 0.257, 0.828, 0.983]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1602.0, "value": "Describe the actions of the person at bbox [0.036, 0.037, 0.542, 0.996] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.036, 0.037, 0.542, 0.996]: sit, hug (a person), talk to (e.g., 
self, a person, a group).", "From 1600.0s to 1601.0s, [0.03, 0.076, 0.489, 0.974]->[0.014, 0.044, 0.518, 0.985]: sit, talk to (e.g., self, a person, a group).", "At 1599.0s, [0.088, 0.046, 0.572, 0.966]: sit, grab (a person), talk to (e.g., self, a person, a group).", "At 1598.0s, [0.095, 0.025, 0.535, 0.97]: sit, hug (a person), listen to (a person).", "At 1597.0s, [0.089, 0.0, 0.584, 0.972]: sit, listen to (a person).", "At 1596.0s, [0.073, 0.033, 0.542, 0.962]: sit, listen to (a person), watch (a person).", "At 1595.0s, [0.078, 0.071, 0.594, 0.963]: sit, kiss (a person), listen to (a person).", "At 1594.0s, [0.152, 0.137, 0.608, 0.968]: sit, kiss (a person).", "At 1593.0s, [0.184, 0.181, 0.627, 0.995]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1592.0s, [0.155, 0.144, 0.612, 0.996]: sit, talk to (e.g., self, a person, a group).", "At 1591.0s, [0.163, 0.127, 0.586, 0.981]: sit, hug (a person), talk to (e.g., self, a person, a group).", "From 1589.0s to 1590.0s, [0.131, 0.022, 0.531, 0.971]->[0.173, 0.068, 0.642, 0.98]: sit, kiss (a person).", "At 1588.0s, [0.09, 0.059, 0.532, 0.974]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1587.0s, [0.088, 0.035, 0.542, 0.997]: sit, watch (a person).", "At 1586.0s, [0.132, 0.0, 0.573, 0.972]: sit."]}]}, {"video": "2fwni_Kjf2M.mkv", "clip": [1599, 1668], "conversations": [{"from": "human", "timestamps": 1629.0, "value": "For the person at bbox [0.081, 0.012, 0.807, 0.981], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.081, 0.012, 0.807, 0.981]: stand.", "At 1628.0s, [0.111, 0.016, 0.808, 0.992]: stand.", "At 1627.0s, [0.12, 0.017, 0.79, 0.984]: stand, carry/hold (an object).", "At 1626.0s, [0.037, 0.005, 0.808, 0.996]: sit."]}, {"from": "human", "timestamps": 1668.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.223, 0.046, 0.988, 0.977]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "2fwni_Kjf2M.mkv", "clip": [1598, 1689], "conversations": [{"from": "human", "timestamps": 1628.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.111, 0.016, 0.808, 0.992]: stand."}, {"from": "human", "timestamps": 1689.0, "value": "What actions is the person in bbox [0.269, 0.071, 0.875, 0.979] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.269, 0.071, 0.875, 0.979]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1688.0s, [0.134, 0.085, 0.887, 0.961]: sit, carry/hold (an object), eat, talk to (e.g., self, a person, a group).", "From 1685.0s to 1687.0s, [0.252, 0.0, 0.94, 0.978]->[0.271, 0.024, 0.916, 0.975]->[0.208, 0.071, 0.914, 0.981]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "2fwni_Kjf2M.mkv", "clip": [1653, 1717], "conversations": [{"from": "human", "timestamps": 1683.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.177, 0.086, 0.7, 0.993]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1717.0, "value": "What actions is the person in bbox [0.282, 0.086, 0.9, 0.985] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, 
[0.282, 0.086, 0.9, 0.985]: get up, talk to (e.g., self, a person, a group).", "From 1715.0s to 1716.0s, [0.21, 0.103, 0.851, 1.0]->[0.259, 0.134, 0.899, 0.989]: sit, talk to (e.g., self, a person, a group).", "At 1714.0s, [0.167, 0.089, 0.892, 0.98]: sit, eat, talk to (e.g., self, a person, a group).", "At 1713.0s, [0.196, 0.091, 0.886, 0.977]: sit, eat, talk to (e.g., self, a person, a group), watch (a person).", "At 1712.0s, [0.142, 0.074, 0.883, 0.98]: sit, eat, listen to (a person), watch (a person)."]}]}, {"video": "2fwni_Kjf2M.mkv", "clip": [1732, 1793], "conversations": [{"from": "human", "timestamps": 1762.0, "value": "For the person at bbox [0.135, 0.158, 0.488, 0.975], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.135, 0.158, 0.488, 0.975]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1761.0s, [0.059, 0.123, 0.389, 0.984]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1760.0s, [0.056, 0.158, 0.375, 1.0]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1759.0s, [0.074, 0.179, 0.293, 0.983]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1758.0s, [0.128, 0.202, 0.314, 0.998]: stand, carry/hold (an object), watch (a person).", "From 1756.0s to 1757.0s, [0.233, 0.214, 0.402, 0.977]->[0.205, 0.207, 0.37, 0.984]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1755.0s, [0.237, 0.218, 0.411, 0.979]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1754.0s, [0.243, 0.197, 0.412, 0.979]: stand, text on/look at a cellphone, talk to (e.g., self, a person, a group).", "At 1753.0s, [0.234, 0.171, 0.423, 0.981]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1750.0s to 1752.0s, [0.034, 0.237, 0.252, 0.971]->[0.118, 0.233, 0.322, 0.992]->[0.239, 0.194, 0.413, 0.995]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1749.0s, [0.06, 0.293, 0.228, 1.0]: walk, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1793.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.357, 0.262, 0.623, 0.99]: carry/hold (an object), watch (a person), bend/bow (at the waist); [0.628, 0.224, 0.724, 0.694]: carry/hold (an object), stand."}]}, {"video": "2qQs3Y9OJX0.mkv", "clip": [890, 1010], "conversations": [{"from": "human", "timestamps": 920.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.004, 0.287, 0.146, 0.929]: watch (a person), stand; [0.052, 0.247, 0.158, 0.504]: stand; [0.072, 0.201, 0.491, 1.0]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.333, 0.211, 0.473, 0.572]: stand; [0.444, 0.197, 0.531, 0.42]: listen to (a person), stand, watch (a person); [0.445, 0.068, 0.984, 0.994]: listen to (a person), stand, watch (a person); [0.482, 0.227, 0.595, 0.427]: watch (a person), stand."}, {"from": "human", "timestamps": 1010.0, "value": "What actions did the person at bbox [0.594, 0.269, 0.969, 0.982] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.594, 0.269, 0.969, 0.982]: stand, carry/hold (an object), text on/look at a cellphone.", "At 1009.0s, [0.591, 0.285, 0.992, 1.0]: stand, 
carry/hold (an object), listen to (a person), watch (a person).", "From 1007.0s to 1008.0s, [0.56, 0.285, 0.961, 0.988]->[0.562, 0.289, 0.96, 0.991]: stand, listen to (a person), watch (a person).", "At 1006.0s, [0.588, 0.287, 0.984, 0.997]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1005.0s, [0.587, 0.294, 0.97, 0.986]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1003.0s to 1004.0s, [0.676, 0.258, 0.817, 0.744]->[0.575, 0.293, 0.982, 0.991]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1002.0s, [0.689, 0.25, 0.823, 0.744]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1001.0s, [0.713, 0.247, 0.895, 0.749]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1000.0s, [0.741, 0.255, 0.85, 0.753]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 998.0s to 999.0s, [0.791, 0.251, 0.89, 0.722]->[0.728, 0.257, 0.864, 0.719]: walk, carry/hold (an object), listen to (a person).", "At 997.0s, [0.835, 0.25, 0.936, 0.717]: stand, carry/hold (an object), watch (a person)."]}]}, {"video": "2qQs3Y9OJX0.mkv", "clip": [1029, 1072], "conversations": [{"from": "human", "timestamps": 1059.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.314, 0.272, 0.484, 0.781]: carry/hold (an object), sit, watch (a person), play musical instrument; [0.609, 0.283, 0.918, 0.881]: listen (e.g., to music), sit, watch (a person), sing to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1072.0, "value": "For the person at bbox [0.623, 0.109, 0.858, 0.968], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.623, 0.109, 0.858, 0.968]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1071.0s, [0.585, 0.082, 0.918, 1.0]: walk, talk to (e.g., self, a person, a group).", "From 1069.0s to 1070.0s, [0.488, 0.082, 0.752, 0.966]->[0.606, 0.092, 0.809, 0.968]: walk.", "At 1068.0s, [0.592, 0.106, 0.853, 0.938]: walk, talk to (e.g., self, a person, a group).", "At 1067.0s, [0.602, 0.098, 0.844, 0.927]: stand.", "From 1064.0s to 1066.0s, [0.566, 0.115, 0.838, 0.929]->[0.518, 0.124, 0.896, 0.935]->[0.603, 0.1, 0.838, 0.947]: stand, talk to (e.g., self, a person, a group).", "At 1063.0s, [0.536, 0.143, 0.773, 0.939]: stand.", "At 1062.0s, [0.458, 0.155, 0.537, 0.814]: walk."]}]}, {"video": "2qQs3Y9OJX0.mkv", "clip": [1013, 1095], "conversations": [{"from": "human", "timestamps": 1043.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.195, 0.175, 0.672, 0.982]: carry/hold (an object), walk; [0.196, 0.266, 0.283, 0.733]: watch (a person), stand; [0.66, 0.264, 0.703, 0.537]: stand; [0.678, 0.254, 0.815, 0.711]: watch (a person), stand."}, {"from": "human", "timestamps": 1095.0, "value": "Describe the actions of the person at bbox [0.521, 0.126, 0.848, 0.99] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.521, 0.126, 0.848, 0.99]: walk, talk to (e.g., self, a person, a group).", "At 1094.0s, [0.552, 0.129, 0.874, 1.0]: walk, talk to (e.g., self, a person, a group).", "At 1093.0s, [0.556, 0.127, 0.9, 0.995]: walk.", "At 1092.0s, [0.479, 0.08, 0.821, 1.0]: walk, talk to (e.g., self, a person, a 
group).", "From 1090.0s to 1091.0s, [0.491, 0.103, 0.825, 0.966]->[0.446, 0.09, 0.842, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1088.0s to 1089.0s, [0.456, 0.089, 0.813, 0.974]->[0.459, 0.078, 0.806, 0.992]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1087.0s, [0.477, 0.054, 0.789, 0.997]: stand.", "At 1086.0s, [0.438, 0.116, 0.742, 0.98]: stand, carry/hold (an object).", "From 1084.0s to 1085.0s, [0.399, 0.135, 0.595, 0.975]->[0.419, 0.121, 0.695, 0.982]: stand."]}]}, {"video": "2qQs3Y9OJX0.mkv", "clip": [1063, 1121], "conversations": [{"from": "human", "timestamps": 1093.0, "value": "For the person at bbox [0.556, 0.127, 0.9, 0.995], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.556, 0.127, 0.9, 0.995]: walk.", "At 1092.0s, [0.479, 0.08, 0.821, 1.0]: walk, talk to (e.g., self, a person, a group).", "From 1090.0s to 1091.0s, [0.491, 0.103, 0.825, 0.966]->[0.446, 0.09, 0.842, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1088.0s to 1089.0s, [0.456, 0.089, 0.813, 0.974]->[0.459, 0.078, 0.806, 0.992]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1087.0s, [0.477, 0.054, 0.789, 0.997]: stand.", "At 1086.0s, [0.438, 0.116, 0.742, 0.98]: stand, carry/hold (an object).", "From 1084.0s to 1085.0s, [0.399, 0.135, 0.595, 0.975]->[0.419, 0.121, 0.695, 0.982]: stand."]}, {"from": "human", "timestamps": 1121.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.195, 0.085, 0.466, 0.996]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.525, 0.125, 0.825, 0.984]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "2qQs3Y9OJX0.mkv", "clip": [1283, 1328], "conversations": [{"from": "human", "timestamps": 1313.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.017, 0.092, 0.165, 0.496]: listen to (a person), stand, watch (a person); [0.127, 0.066, 0.212, 0.35]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.166, 0.112, 0.299, 0.394]: listen to (a person), stand, watch (a person); [0.241, 0.177, 0.441, 0.735]: carry/hold (an object), stand, listen to (a person); [0.396, 0.176, 0.525, 0.682]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.449, 0.226, 0.75, 0.749]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1328.0, "value": "For the person at bbox [0.0, 0.065, 0.825, 0.978], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.065, 0.825, 0.978]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1327.0s, [0.0, 0.057, 0.85, 0.971]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1326.0s, [0.027, 0.062, 0.828, 0.957]: stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "2qQs3Y9OJX0.mkv", "clip": [1390, 1441], "conversations": [{"from": "human", "timestamps": 1420.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.211, 0.106, 1.0, 0.977]: stand, talk to (e.g., self, a person, a group)."}, {"from": 
"human", "timestamps": 1441.0, "value": "Describe the actions of the person at bbox [0.672, 0.329, 0.774, 0.591] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.672, 0.329, 0.774, 0.591]: sit, carry/hold (an object), ride (e.g., a bike, a car, a horse).", "At 1440.0s, [0.637, 0.315, 0.727, 0.532]: sit, drive (e.g., a car, a truck), ride (e.g., a bike, a car, a horse)."]}]}, {"video": "2qQs3Y9OJX0.mkv", "clip": [1428, 1489], "conversations": [{"from": "human", "timestamps": 1458.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.082, 0.092, 0.903, 0.99]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1489.0, "value": "What actions did the person at bbox [0.071, 0.129, 0.415, 0.56] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.071, 0.129, 0.415, 0.56]: sit, listen to (a person), watch (a person).", "From 1486.0s to 1488.0s, [0.065, 0.127, 0.411, 0.565]->[0.067, 0.129, 0.414, 0.566]->[0.072, 0.131, 0.412, 0.563]: sit, listen to (a person), watch (a person).", "At 1485.0s, [0.065, 0.108, 0.397, 0.583]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1484.0s, [0.066, 0.143, 0.41, 0.558]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1482.0s to 1483.0s, [0.065, 0.149, 0.411, 0.561]->[0.065, 0.148, 0.409, 0.563]: sit, listen to (a person), watch (a person).", "At 1481.0s, [0.068, 0.145, 0.41, 0.557]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1480.0s, [0.065, 0.145, 0.413, 0.556]: sit, talk to (e.g., self, a person, a group).", "At 1479.0s, [0.069, 0.14, 0.399, 0.554]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1478.0s, [0.06, 0.143, 0.413, 0.564]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "2qQs3Y9OJX0.mkv", "clip": [1471, 1527], "conversations": [{"from": "human", "timestamps": 1501.0, "value": "Describe the actions of the person at bbox [0.065, 0.131, 0.413, 0.57] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.065, 0.131, 0.413, 0.57]: sit, listen to (a person), watch (a person).", "At 1500.0s, [0.064, 0.132, 0.408, 0.569]: sit, listen to (a person), watch (a person).", "At 1499.0s, [0.068, 0.135, 0.413, 0.566]: sit, touch (an object), listen to (a person), watch (a person).", "At 1498.0s, [0.071, 0.132, 0.409, 0.564]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "From 1496.0s to 1497.0s, [0.071, 0.132, 0.409, 0.567]->[0.07, 0.132, 0.411, 0.566]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1527.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.063, 0.127, 0.408, 0.565]: listen to (a person), sit, touch (an object); [0.509, 0.115, 0.902, 0.977]: sit."}]}, {"video": "2qQs3Y9OJX0.mkv", "clip": [1506, 1582], "conversations": [{"from": "human", "timestamps": 1536.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.17, 0.097, 0.431, 0.642]: carry/hold (an object), stand, bend/bow (at the waist); [0.47, 0.248, 0.859, 0.994]: listen to (a person), sit."}, {"from": "human", "timestamps": 1582.0, "value": "Can you tell me the current and past actions of the 
person located in bbox [0.01, 0.119, 0.662, 0.987]?"}, {"from": "gpt", "value": ["In the current frame, [0.01, 0.119, 0.662, 0.987]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1581.0s, [0.019, 0.126, 0.671, 0.982]: stand, listen to (a person), watch (a person).", "From 1576.0s to 1580.0s, [0.027, 0.159, 0.529, 1.0]->[0.025, 0.15, 0.577, 0.988]->[0.0, 0.15, 0.557, 0.974]->[0.0, 0.199, 0.624, 0.982]->[0.0, 0.159, 0.633, 0.982]: stand, talk to (e.g., self, a person, a group).", "From 1574.0s to 1575.0s, [0.029, 0.202, 0.591, 0.985]->[0.0, 0.132, 0.516, 0.992]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1573.0s, [0.039, 0.149, 0.604, 0.985]: stand, talk to (e.g., self, a person, a group).", "From 1571.0s to 1572.0s, [0.005, 0.12, 0.563, 0.991]->[0.045, 0.125, 0.604, 0.985]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1570.0s, [0.0, 0.129, 0.498, 0.987]: stand, talk to (e.g., self, a person, a group).", "At 1569.0s, [0.0, 0.111, 0.544, 0.981]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1565.0s to 1568.0s, [0.001, 0.186, 0.708, 1.0]->[0.004, 0.158, 0.67, 0.993]->[0.0, 0.14, 0.631, 0.99]->[0.021, 0.154, 0.609, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1562.0s to 1564.0s, [0.003, 0.154, 0.575, 0.986]->[0.022, 0.135, 0.541, 0.973]->[0.0, 0.13, 0.551, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1558.0s to 1561.0s, [0.008, 0.151, 0.577, 0.991]->[0.016, 0.127, 0.63, 0.994]->[0.025, 0.15, 0.667, 1.0]->[0.008, 0.172, 0.586, 0.993]: stand, talk to (e.g., self, a person, a group).", "From 1556.0s to 1557.0s, [0.0, 0.142, 0.5, 1.0]->[0.0, 0.148, 0.564, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1554.0s to 1555.0s, [0.003, 0.137, 0.514, 0.986]->[0.028, 0.168, 0.536, 0.992]: stand, talk to (e.g., self, a person, a group).", "At 1553.0s, [0.005, 0.136, 0.514, 0.986]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1552.0s, [0.009, 0.13, 0.523, 0.986]: stand, listen to (a person).", "At 1551.0s, [0.214, 0.125, 0.494, 0.833]: stand.", "From 1549.0s to 1550.0s, [0.208, 0.123, 0.483, 0.842]->[0.216, 0.123, 0.491, 0.831]: stand, watch (a person).", "At 1548.0s, [0.213, 0.129, 0.485, 0.843]: stand, listen to (a person), watch (a person).", "At 1547.0s, [0.207, 0.127, 0.48, 0.821]: stand, touch (an object), listen to (a person), watch (a person).", "From 1545.0s to 1546.0s, [0.16, 0.134, 0.433, 0.839]->[0.196, 0.128, 0.467, 0.833]: stand, listen to (a person), watch (a person).", "At 1544.0s, [0.13, 0.126, 0.387, 0.838]: stand, touch (an object), listen to (a person), watch (a person).", "From 1542.0s to 1543.0s, [0.004, 0.09, 0.253, 0.934]->[0.133, 0.135, 0.415, 0.861]: stand, listen to (a person), watch (a person)."]}]}, {"video": "2qQs3Y9OJX0.mkv", "clip": [1537, 1605], "conversations": [{"from": "human", "timestamps": 1567.0, "value": "What actions is the person in bbox [0.0, 0.14, 0.631, 0.99] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.14, 0.631, 0.99]: stand, talk to (e.g., self, a person, a group).", "From 1565.0s to 1566.0s, [0.001, 0.186, 0.708, 1.0]->[0.004, 0.158, 0.67, 0.993]: stand, talk to (e.g., self, a person, a group).", "From 1562.0s to 1564.0s, [0.003, 0.154, 0.575, 0.986]->[0.022, 0.135, 0.541, 0.973]->[0.0, 0.13, 0.551, 0.988]: stand, talk to (e.g., 
self, a person, a group), watch (a person).", "From 1558.0s to 1561.0s, [0.008, 0.151, 0.577, 0.991]->[0.016, 0.127, 0.63, 0.994]->[0.025, 0.15, 0.667, 1.0]->[0.008, 0.172, 0.586, 0.993]: stand, talk to (e.g., self, a person, a group).", "From 1556.0s to 1557.0s, [0.0, 0.142, 0.5, 1.0]->[0.0, 0.148, 0.564, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1554.0s to 1555.0s, [0.003, 0.137, 0.514, 0.986]->[0.028, 0.168, 0.536, 0.992]: stand, talk to (e.g., self, a person, a group).", "At 1553.0s, [0.005, 0.136, 0.514, 0.986]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1552.0s, [0.009, 0.13, 0.523, 0.986]: stand, listen to (a person).", "At 1551.0s, [0.214, 0.125, 0.494, 0.833]: stand.", "From 1549.0s to 1550.0s, [0.208, 0.123, 0.483, 0.842]->[0.216, 0.123, 0.491, 0.831]: stand, watch (a person).", "At 1548.0s, [0.213, 0.129, 0.485, 0.843]: stand, listen to (a person), watch (a person).", "At 1547.0s, [0.207, 0.127, 0.48, 0.821]: stand, touch (an object), listen to (a person), watch (a person).", "From 1545.0s to 1546.0s, [0.16, 0.134, 0.433, 0.839]->[0.196, 0.128, 0.467, 0.833]: stand, listen to (a person), watch (a person).", "At 1544.0s, [0.13, 0.126, 0.387, 0.838]: stand, touch (an object), listen to (a person), watch (a person).", "From 1542.0s to 1543.0s, [0.004, 0.09, 0.253, 0.934]->[0.133, 0.135, 0.415, 0.861]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1605.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.093, 0.096, 0.722, 0.89]: sit, touch (an object), talk to (e.g., self, a person, a group)."}]}, {"video": "2qQs3Y9OJX0.mkv", "clip": [1593, 1676], "conversations": [{"from": "human", "timestamps": 1623.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.322, 0.091, 0.902, 0.929]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1676.0, "value": "What actions did the person at bbox [0.351, 0.116, 0.886, 0.98] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.351, 0.116, 0.886, 0.98]: get up, talk to (e.g., self, a person, a group).", "At 1675.0s, [0.335, 0.108, 0.885, 0.954]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "2qQs3Y9OJX0.mkv", "clip": [1606, 1698], "conversations": [{"from": "human", "timestamps": 1636.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.29, 0.152, 0.668, 0.992]: carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person), bend/bow (at the waist); [0.581, 0.309, 0.946, 0.803]: listen to (a person), sit, touch (an object), watch (a person)."}, {"from": "human", "timestamps": 1698.0, "value": "What actions did the person at bbox [0.397, 0.07, 0.983, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.397, 0.07, 0.983, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1697.0s, [0.413, 0.075, 1.0, 0.981]: stand, talk to (e.g., self, a person, a group).", "From 1693.0s to 1696.0s, [0.415, 0.071, 0.99, 1.0]->[0.403, 0.07, 0.983, 0.989]->[0.407, 0.071, 0.988, 0.998]->[0.417, 0.061, 0.988, 0.986]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1686.0s to 1692.0s, [0.403, 0.071, 0.983, 0.995]->[0.429, 0.076, 0.988, 0.99]->[0.435, 0.074, 
0.975, 0.992]->[0.436, 0.071, 0.987, 0.991]->[0.435, 0.069, 0.997, 1.0]->[0.431, 0.071, 1.0, 0.997]->[0.434, 0.081, 0.996, 0.991]: stand, listen to (a person), watch (a person)."]}]}, {"video": "2qQs3Y9OJX0.mkv", "clip": [1719, 1750], "conversations": [{"from": "human", "timestamps": 1749.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.291, 0.066, 0.651, 0.985]: walk."}, {"from": "human", "timestamps": 1750.0, "value": "Describe the actions of the person at bbox [0.305, 0.1, 0.699, 0.986] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.305, 0.1, 0.699, 0.986]: walk.", "At 1749.0s, [0.291, 0.066, 0.651, 0.985]: walk.", "At 1748.0s, [0.265, 0.095, 0.606, 0.956]: get up."]}]}, {"video": "32HR3MnDZ8g.mp4", "clip": [879, 969], "conversations": [{"from": "human", "timestamps": 909.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.332, 0.076, 0.989, 0.995]: get up, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 969.0, "value": "What actions is the person in bbox [0.145, 0.201, 0.625, 0.992] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.145, 0.201, 0.625, 0.992]: stand, hug (a person), watch (a person).", "At 968.0s, [0.155, 0.204, 0.61, 1.0]: stand, grab (a person), watch (a person).", "At 967.0s, [0.167, 0.267, 0.853, 0.996]: stand, watch (a person).", "At 966.0s, [0.212, 0.118, 0.461, 0.966]: stand, hug (a person)."]}]}, {"video": "32HR3MnDZ8g.mp4", "clip": [998, 1052], "conversations": [{"from": "human", "timestamps": 1028.0, "value": "What actions is the person in bbox [0.0, 0.016, 0.404, 0.993] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.016, 0.404, 0.993]: stand, listen to (a person), watch (a person).", "From 1026.0s to 1027.0s, [0.087, 0.0, 1.0, 1.0]->[0.006, 0.028, 0.624, 0.982]: stand.", "At 1025.0s, [0.338, 0.008, 1.0, 1.0]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1052.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.0, 0.428, 0.978]: listen to (a person), stand, watch (a person); [0.56, 0.182, 1.0, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "32HR3MnDZ8g.mp4", "clip": [997, 1096], "conversations": [{"from": "human", "timestamps": 1027.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.006, 0.028, 0.624, 0.982]: stand; [0.731, 0.252, 1.0, 0.999]: stand, answer phone."}, {"from": "human", "timestamps": 1096.0, "value": "What actions is the person in bbox [0.148, 0.07, 0.883, 0.972] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.148, 0.07, 0.883, 0.972]: sit, answer phone.", "At 1095.0s, [0.139, 0.068, 0.915, 0.979]: sit, answer phone.", "From 1093.0s to 1094.0s, [0.152, 0.065, 0.905, 0.977]->[0.149, 0.071, 0.953, 0.976]: sit, answer phone, touch (an object).", "From 1089.0s to 1092.0s, [0.141, 0.084, 0.83, 0.97]->[0.123, 0.085, 0.824, 0.967]->[0.149, 0.087, 0.873, 0.987]->[0.145, 0.078, 0.827, 0.936]: sit, answer phone.", "At 1088.0s, [0.151, 0.085, 0.838, 0.974]: sit, answer phone, touch 
(an object).", "From 1085.0s to 1087.0s, [0.128, 0.081, 0.821, 0.974]->[0.131, 0.098, 0.812, 0.976]->[0.13, 0.086, 0.844, 0.974]: sit, answer phone."]}]}, {"video": "32HR3MnDZ8g.mp4", "clip": [1085, 1137], "conversations": [{"from": "human", "timestamps": 1115.0, "value": "What actions did the person at bbox [0.14, 0.134, 0.418, 0.97] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.14, 0.134, 0.418, 0.97]: stand, listen to (a person), watch (a person).", "From 1113.0s to 1114.0s, [0.15, 0.155, 0.427, 0.974]->[0.147, 0.148, 0.398, 0.965]: stand, listen to (a person), watch (a person).", "At 1112.0s, [0.154, 0.161, 0.362, 0.981]: stand, watch (a person).", "At 1109.0s, [0.038, 0.144, 0.367, 0.991]: stand, listen to (a person), watch (a person).", "At 1108.0s, [0.022, 0.153, 0.391, 0.993]: walk, listen to (a person).", "At 1107.0s, [0.286, 0.201, 0.599, 0.738]: walk, talk to (e.g., self, a person, a group).", "At 1106.0s, [0.577, 0.215, 0.761, 0.983]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 1104.0s to 1105.0s, [0.475, 0.204, 0.812, 0.992]->[0.532, 0.19, 0.746, 0.956]: walk, talk to (e.g., self, a person, a group).", "From 1101.0s to 1103.0s, [0.491, 0.117, 0.683, 0.836]->[0.51, 0.205, 0.8, 0.921]->[0.536, 0.248, 0.877, 0.987]: walk, touch (an object)."]}, {"from": "human", "timestamps": 1137.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.013, 0.075, 0.749, 0.968]: sit."}]}, {"video": "32HR3MnDZ8g.mp4", "clip": [1118, 1164], "conversations": [{"from": "human", "timestamps": 1148.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.108, 0.09, 1.0, 0.987]: carry/hold (an object), sit."}, {"from": "human", "timestamps": 1164.0, "value": "What actions is the person in bbox [0.159, 0.109, 0.806, 0.964] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.159, 0.109, 0.806, 0.964]: sit, talk to (e.g., self, a person, a group).", "From 1162.0s to 1163.0s, [0.164, 0.079, 0.717, 0.974]->[0.142, 0.092, 0.736, 0.976]: sit, talk to (e.g., self, a person, a group).", "From 1160.0s to 1161.0s, [0.164, 0.088, 0.72, 0.971]->[0.158, 0.076, 0.706, 0.954]: sit.", "From 1156.0s to 1159.0s, [0.156, 0.096, 0.714, 0.975]->[0.162, 0.078, 0.714, 0.978]->[0.163, 0.086, 0.718, 0.975]->[0.165, 0.085, 0.722, 0.962]: sit, talk to (e.g., self, a person, a group).", "At 1155.0s, [0.154, 0.079, 0.703, 0.981]: sit."]}]}, {"video": "32HR3MnDZ8g.mp4", "clip": [1139, 1223], "conversations": [{"from": "human", "timestamps": 1169.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.029, 0.064, 0.965, 1.0]: watch (a person), get up, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1223.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.072, 0.095, 0.704, 0.983]?"}, {"from": "gpt", "value": ["At Now, [0.072, 0.095, 0.704, 0.983]: stand, answer phone.", "From 1219.0s to 1222.0s, [0.123, 0.095, 0.734, 0.987]->[0.058, 0.073, 0.703, 0.989]->[0.056, 0.084, 0.69, 0.981]->[0.067, 0.086, 0.674, 0.982]: stand, answer phone.", "At 1218.0s, [0.127, 0.117, 0.75, 0.985]: stand, answer phone, talk to (e.g., self, a person, a group).", "At 1217.0s, [0.004, 0.135, 0.51, 0.977]: stand, answer phone."]}]}, 
{"video": "32HR3MnDZ8g.mp4", "clip": [1212, 1247], "conversations": [{"from": "human", "timestamps": 1242.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.113, 0.105, 0.742, 0.993]: stand, answer phone."}, {"from": "human", "timestamps": 1247.0, "value": "What actions did the person at bbox [0.175, 0.1, 0.775, 0.981] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.175, 0.1, 0.775, 0.981]: stand, answer phone.", "From 1237.0s to 1246.0s, [0.103, 0.084, 0.731, 0.985]->[0.098, 0.088, 0.72, 0.976]->[0.113, 0.091, 0.71, 0.986]->[0.121, 0.101, 0.736, 0.984]->[0.116, 0.099, 0.766, 0.976]->[0.113, 0.105, 0.742, 0.993]->[0.118, 0.091, 0.725, 0.989]->[0.121, 0.091, 0.724, 0.983]->[0.131, 0.109, 0.753, 0.989]->[0.116, 0.1, 0.734, 1.0]: stand, answer phone."]}]}, {"video": "32HR3MnDZ8g.mp4", "clip": [1336, 1367], "conversations": [{"from": "human", "timestamps": 1366.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.694, 0.128, 0.839, 0.602]?"}, {"from": "gpt", "value": ["In the current frame, [0.694, 0.128, 0.839, 0.602]: stand, close (e.g., a door, a box), talk to (e.g., self, a person, a group).", "From 1364.0s to 1365.0s, [0.727, 0.128, 0.923, 0.612]->[0.715, 0.132, 0.894, 0.603]: stand, close (e.g., a door, a box)."]}, {"from": "human", "timestamps": 1367.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.647, 0.138, 0.823, 0.776]: talk to (e.g., self, a person, a group), walk."}]}, {"video": "32HR3MnDZ8g.mp4", "clip": [1375, 1480], "conversations": [{"from": "human", "timestamps": 1405.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.0, 0.01, 0.492, 0.983]: ride (e.g., a bike, a car, a horse), sit, talk to (e.g., self, a person, a group), listen to (a person); [0.317, 0.15, 0.948, 1.0]: sit, drive (e.g., a car, a truck), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1480.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.055, 0.179, 0.484, 0.988]?"}, {"from": "gpt", "value": ["At Now, [0.055, 0.179, 0.484, 0.988]: stand, carry/hold (an object), lift/pick up.", "At 1479.0s, [0.273, 0.318, 0.653, 0.996]: carry/hold (an object), listen to (a person).", "At 1478.0s, [0.267, 0.249, 0.644, 0.998]: stand, carry/hold (an object), close (e.g., a door, a box), listen to (a person).", "At 1477.0s, [0.18, 0.189, 0.505, 0.985]: stand, close (e.g., a door, a box), listen to (a person).", "At 1476.0s, [0.118, 0.18, 0.542, 0.993]: walk, touch (an object), listen to (a person), watch (a person)."]}]}, {"video": "32HR3MnDZ8g.mp4", "clip": [1415, 1492], "conversations": [{"from": "human", "timestamps": 1445.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.12, 0.08, 0.648, 0.985]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1492.0, "value": "Describe the actions of the person at bbox [0.547, 0.099, 0.993, 0.985] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.547, 0.099, 0.993, 0.985]: stand, listen to (a person), watch (a person).", "At 1491.0s, [0.548, 0.097, 0.993, 0.986]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1490.0s, [0.56, 0.087, 0.993, 0.982]: 
stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1489.0s, [0.583, 0.042, 0.992, 0.981]: stand, listen to (a person), watch (a person)."]}]}, {"video": "3IOE-Q3UWdA.mp4", "clip": [886, 942], "conversations": [{"from": "human", "timestamps": 916.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.457, 0.217, 0.681, 0.735]: carry/hold (an object), sit, ride (e.g., a bike, a car, a horse), watch (a person)."}, {"from": "human", "timestamps": 942.0, "value": "For the person at bbox [0.215, 0.498, 0.329, 0.707], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.215, 0.498, 0.329, 0.707]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 940.0s to 941.0s, [0.139, 0.492, 0.233, 0.696]->[0.178, 0.496, 0.301, 0.704]: sit, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "3IOE-Q3UWdA.mp4", "clip": [894, 1018], "conversations": [{"from": "human", "timestamps": 924.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.04, 0.096, 0.336, 0.904]: carry/hold (an object), sit, ride (e.g., a bike, a car, a horse); [0.409, 0.089, 0.688, 0.781]: carry/hold (an object), sit, ride (e.g., a bike, a car, a horse)."}, {"from": "human", "timestamps": 1018.0, "value": "What actions is the person in bbox [0.701, 0.176, 0.986, 0.973] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.701, 0.176, 0.986, 0.973]: walk, watch (a person).", "At 1017.0s, [0.779, 0.202, 0.982, 0.937]: walk."]}]}, {"video": "3IOE-Q3UWdA.mp4", "clip": [944, 978], "conversations": [{"from": "human", "timestamps": 974.0, "value": "For the person at bbox [0.289, 0.484, 0.332, 0.627], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.289, 0.484, 0.332, 0.627]: sit, ride (e.g., a bike, a car, a horse).", "From 972.0s to 973.0s, [0.305, 0.497, 0.343, 0.625]->[0.305, 0.5, 0.344, 0.628]: sit, ride (e.g., a bike, a car, a horse)."]}, {"from": "human", "timestamps": 978.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.289, 0.339, 0.357, 0.607]: ride (e.g., a bike, a car, a horse), sit."}]}, {"video": "3IOE-Q3UWdA.mp4", "clip": [1038, 1081], "conversations": [{"from": "human", "timestamps": 1068.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.092, 0.088, 0.883, 0.958]: watch (a person), crouch/kneel, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1081.0, "value": "What actions is the person in bbox [0.044, 0.09, 0.821, 0.965] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.044, 0.09, 0.821, 0.965]: sit, stand, talk to (e.g., self, a person, a group).", "At 1080.0s, [0.05, 0.077, 0.808, 0.969]: sit, talk to (e.g., self, a person, a group).", "At 1079.0s, [0.042, 0.056, 0.809, 0.969]: crouch/kneel, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "3IOE-Q3UWdA.mp4", "clip": [1242, 1370], "conversations": [{"from": "human", "timestamps": 1272.0, "value": "For the current frame, 
show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.098, 0.0, 0.615, 0.971]: watch (a person), lie/sleep, talk to (e.g., self, a person, a group); [0.389, 0.435, 0.886, 0.845]: listen to (a person), lie/sleep, watch (a person)."}, {"from": "human", "timestamps": 1370.0, "value": "What actions did the person at bbox [0.002, 0.0, 0.784, 0.982] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.002, 0.0, 0.784, 0.982]: stand, talk to (e.g., self, a person, a group).", "From 1364.0s to 1369.0s, [0.017, 0.003, 0.799, 0.984]->[0.009, 0.0, 0.769, 0.992]->[0.011, 0.026, 0.714, 0.987]->[0.031, 0.001, 0.806, 0.975]->[0.038, 0.0, 0.792, 0.985]->[0.018, 0.0, 0.798, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1363.0s, [0.002, 0.001, 0.858, 0.996]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1361.0s to 1362.0s, [0.016, 0.012, 0.858, 0.989]->[0.018, 0.007, 0.831, 0.992]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1360.0s, [0.008, 0.034, 0.89, 0.99]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1356.0s to 1358.0s, [0.011, 0.027, 0.852, 0.973]->[0.014, 0.035, 0.82, 0.989]->[0.01, 0.013, 0.885, 0.987]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1352.0s to 1354.0s, [0.035, 0.017, 0.801, 0.97]->[0.028, 0.01, 0.776, 0.978]->[0.019, 0.022, 0.836, 0.987]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1351.0s, [0.01, 0.013, 0.785, 0.977]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1350.0s, [0.022, 0.016, 0.789, 0.976]: stand, carry/hold (an object), watch (a person)."]}]}, {"video": "3IOE-Q3UWdA.mp4", "clip": [1319, 1356], "conversations": [{"from": "human", "timestamps": 1349.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.166, 0.04, 0.971, 0.982]: carry/hold (an object), stand, watch (a person)."}, {"from": "human", "timestamps": 1356.0, "value": "What actions did the person at bbox [0.011, 0.027, 0.852, 0.973] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.011, 0.027, 0.852, 0.973]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1352.0s to 1354.0s, [0.035, 0.017, 0.801, 0.97]->[0.028, 0.01, 0.776, 0.978]->[0.019, 0.022, 0.836, 0.987]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1351.0s, [0.01, 0.013, 0.785, 0.977]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1350.0s, [0.022, 0.016, 0.789, 0.976]: stand, carry/hold (an object), watch (a person)."]}]}, {"video": "3IOE-Q3UWdA.mp4", "clip": [1558, 1592], "conversations": [{"from": "human", "timestamps": 1588.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.202, 0.264, 0.423, 0.972]: walk."}, {"from": "human", "timestamps": 1592.0, "value": "What actions did the person at bbox [0.156, 0.169, 0.899, 0.98] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.156, 0.169, 0.899, 0.98]: walk.", "At 1591.0s, [0.263, 0.214, 0.849, 0.983]: run/jog, talk to (e.g., self, a person, a group).", "At 1590.0s, [0.396, 0.233, 0.872, 0.978]: run/jog, watch (a person)."]}]}, 
{"video": "3IOE-Q3UWdA.mp4", "clip": [1604, 1659], "conversations": [{"from": "human", "timestamps": 1634.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.014, 0.0, 0.522, 0.947]: listen to (a person), crouch/kneel, watch (a person); [0.218, 0.135, 0.821, 0.996]: watch (a person), lie/sleep, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1659.0, "value": "For the person at bbox [0.14, 0.176, 0.807, 0.991], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.14, 0.176, 0.807, 0.991]: lie/sleep, watch (a person).", "At 1658.0s, [0.129, 0.163, 0.794, 0.985]: lie/sleep, watch (a person).", "At 1657.0s, [0.074, 0.161, 0.796, 0.991]: talk to (e.g., self, a person, a group), lie/sleep."]}]}, {"video": "3IOE-Q3UWdA.mp4", "clip": [1592, 1725], "conversations": [{"from": "human", "timestamps": 1622.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.033, 0.609, 0.267, 0.704]: lie/sleep; [0.125, 0.52, 0.613, 0.859]: listen to (a person), lie/sleep, watch (a person); [0.359, 0.375, 0.43, 0.59]: walk; [0.414, 0.385, 0.598, 0.68]: watch (a person), crouch/kneel, talk to (e.g., self, a person, a group); [0.448, 0.353, 0.505, 0.489]: watch (a person), stand; [0.624, 0.667, 0.9, 0.854]: lie/sleep."}, {"from": "human", "timestamps": 1725.0, "value": "What actions is the person in bbox [0.03, 0.032, 0.651, 0.955] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.03, 0.032, 0.651, 0.955]: bend/bow (at the waist), listen to (a person), lie/sleep.", "At 1724.0s, [0.028, 0.076, 0.703, 0.95]: listen to (a person), lie/sleep.", "At 1723.0s, [0.045, 0.11, 0.689, 0.956]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "3IOE-Q3UWdA.mp4", "clip": [1699, 1737], "conversations": [{"from": "human", "timestamps": 1729.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.038, 0.0, 0.761, 0.948]: listen to (a person), lie/sleep, watch (a person)."}, {"from": "human", "timestamps": 1737.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.057, 0.0, 0.743, 0.972]?"}, {"from": "gpt", "value": ["At Now, [0.057, 0.0, 0.743, 0.972]: bend/bow (at the waist), listen to (a person), watch (a person).", "At 1736.0s, [0.027, 0.007, 0.909, 0.971]: listen to (a person), lie/sleep, watch (a person)."]}]}, {"video": "3_VjIRdXVdM.mkv", "clip": [928, 975], "conversations": [{"from": "human", "timestamps": 958.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.094, 0.323, 0.312, 0.651]: read, stand, talk to (e.g., self, a person, a group), listen to (a person); [0.239, 0.357, 0.464, 0.87]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.373, 0.252, 0.548, 0.766]: read, stand, listen to (a person); [0.609, 0.21, 0.906, 0.859]: stand, sing to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 975.0, "value": "Describe the actions of the person at bbox [0.059, 0.335, 0.325, 0.741] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.059, 0.335, 0.325, 0.741]: stand, 
read, listen to (a person).", "At 974.0s, [0.055, 0.335, 0.32, 0.734]: stand, carry/hold (an object), read, listen to (a person).", "At 973.0s, [0.067, 0.334, 0.322, 0.729]: stand, carry/hold (an object), read.", "At 972.0s, [0.072, 0.34, 0.322, 0.765]: stand, carry/hold (an object), read, listen to (a person).", "From 970.0s to 971.0s, [0.077, 0.337, 0.322, 0.712]->[0.075, 0.342, 0.321, 0.734]: stand, read, listen to (a person).", "At 969.0s, [0.063, 0.333, 0.311, 0.712]: stand, carry/hold (an object), listen to (a person).", "From 965.0s to 968.0s, [0.082, 0.341, 0.325, 0.704]->[0.085, 0.337, 0.323, 0.703]->[0.076, 0.339, 0.312, 0.705]->[0.077, 0.333, 0.319, 0.702]: stand, read.", "From 963.0s to 964.0s, [0.098, 0.33, 0.334, 0.685]->[0.093, 0.333, 0.328, 0.679]: stand, read, listen to (a person).", "At 962.0s, [0.096, 0.327, 0.335, 0.682]: stand, read.", "At 961.0s, [0.108, 0.323, 0.335, 0.679]: stand, carry/hold (an object), read, sing to (e.g., self, a person, a group).", "At 959.0s, [0.101, 0.324, 0.322, 0.801]: stand, read, listen to (a person).", "At 958.0s, [0.094, 0.323, 0.312, 0.651]: stand, read, listen to (a person), talk to (e.g., self, a person, a group).", "At 957.0s, [0.089, 0.333, 0.309, 0.646]: stand, carry/hold (an object), listen to (a person), sing to (e.g., self, a person, a group), talk to (e.g., self, a person, a group)."]}]}, {"video": "3_VjIRdXVdM.mkv", "clip": [952, 987], "conversations": [{"from": "human", "timestamps": 982.0, "value": "Describe the actions of the person at bbox [0.06, 0.198, 0.533, 0.858] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.06, 0.198, 0.533, 0.858]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 981.0s, [0.076, 0.176, 0.57, 0.859]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 987.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.129, 0.171, 0.606, 0.867]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.535, 0.4, 0.768, 0.851]: listen to (a person), sit, watch (a person)."}]}, {"video": "3_VjIRdXVdM.mkv", "clip": [1020, 1073], "conversations": [{"from": "human", "timestamps": 1050.0, "value": "What actions is the person in bbox [0.047, 0.24, 0.465, 0.852] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.047, 0.24, 0.465, 0.852]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1049.0s, [0.049, 0.232, 0.503, 0.873]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1047.0s to 1048.0s, [0.0, 0.171, 0.441, 0.877]->[0.005, 0.211, 0.442, 0.867]: walk, listen to (a person), watch (a person).", "At 1046.0s, [0.012, 0.277, 0.392, 0.867]: walk, listen to (a person).", "At 1045.0s, [0.011, 0.296, 0.388, 0.868]: walk, talk to (e.g., self, a person, a group).", "At 1044.0s, [0.073, 0.255, 0.432, 0.869]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 1042.0s to 1043.0s, [0.049, 0.257, 0.419, 0.871]->[0.049, 0.281, 0.4, 0.862]: walk, listen to (a person), talk to (e.g., self, a person, a group).", "From 1040.0s to 1041.0s, [0.048, 0.26, 0.426, 0.868]->[0.041, 0.31, 0.418, 0.871]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1039.0s, [0.091, 0.299, 0.461, 0.87]: walk, 
talk to (e.g., self, a person, a group).", "From 1037.0s to 1038.0s, [0.16, 0.305, 0.376, 0.863]->[0.117, 0.27, 0.449, 0.855]: walk, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1073.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.076, 0.276, 0.392, 0.86]: listen to (a person), stand, watch (a person); [0.451, 0.263, 0.812, 0.875]: watch (a person), talk to (e.g., self, a person, a group), walk."}]}, {"video": "3_VjIRdXVdM.mkv", "clip": [1016, 1107], "conversations": [{"from": "human", "timestamps": 1046.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.012, 0.277, 0.392, 0.867]: listen to (a person), walk; [0.492, 0.247, 0.959, 0.855]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1107.0, "value": "What actions did the person at bbox [0.067, 0.124, 0.77, 0.871] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.067, 0.124, 0.77, 0.871]: stand, listen to (a person).", "At 1106.0s, [0.001, 0.131, 0.529, 0.857]: stand."]}]}, {"video": "3_VjIRdXVdM.mkv", "clip": [1057, 1127], "conversations": [{"from": "human", "timestamps": 1087.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.077, 0.128, 0.709, 0.864]?"}, {"from": "gpt", "value": ["In the current frame, [0.077, 0.128, 0.709, 0.864]: stand.", "At 1086.0s, [0.066, 0.128, 0.688, 0.861]: stand.", "At 1085.0s, [0.062, 0.156, 0.684, 0.89]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1127.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.029, 0.106, 0.958, 0.864]: stand."}]}, {"video": "3_VjIRdXVdM.mkv", "clip": [1152, 1216], "conversations": [{"from": "human", "timestamps": 1182.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.003, 0.211, 0.389, 0.885]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1216.0, "value": "What actions is the person in bbox [0.004, 0.217, 0.468, 0.872] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.004, 0.217, 0.468, 0.872]: sit, listen to (a person), watch (a person).", "At 1215.0s, [0.012, 0.184, 0.463, 0.862]: sit, listen to (a person), watch (a person)."]}]}, {"video": "3_VjIRdXVdM.mkv", "clip": [1179, 1223], "conversations": [{"from": "human", "timestamps": 1209.0, "value": "What actions did the person at bbox [0.003, 0.244, 0.483, 0.889] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.003, 0.244, 0.483, 0.889]: sit, listen to (a person), watch (a person).", "From 1207.0s to 1208.0s, [0.0, 0.257, 0.488, 0.867]->[0.0, 0.25, 0.488, 0.885]: sit, watch (a person).", "At 1206.0s, [0.004, 0.257, 0.475, 0.872]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1223.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.002, 0.194, 0.492, 0.863]: listen to (a person), sit; [0.348, 0.166, 0.934, 0.884]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "3_VjIRdXVdM.mkv", "clip": [1354, 1406], "conversations": [{"from": "human", 
"timestamps": 1384.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.187, 0.165, 0.581, 0.866]: stand."}, {"from": "human", "timestamps": 1406.0, "value": "What actions did the person at bbox [0.202, 0.2, 0.592, 0.86] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.202, 0.2, 0.592, 0.86]: stand.", "From 1404.0s to 1405.0s, [0.203, 0.197, 0.562, 0.855]->[0.2, 0.171, 0.558, 0.867]: stand."]}]}, {"video": "3_VjIRdXVdM.mkv", "clip": [1355, 1486], "conversations": [{"from": "human", "timestamps": 1385.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.224, 0.266, 0.411, 0.806]: stand."}, {"from": "human", "timestamps": 1486.0, "value": "For the person at bbox [0.285, 0.256, 0.454, 0.837], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.285, 0.256, 0.454, 0.837]: stand, open (e.g., a window, a car door), talk to (e.g., self, a person, a group), watch (a person).", "At 1485.0s, [0.178, 0.249, 0.563, 0.852]: walk, listen to (a person), talk to (e.g., self, a person, a group).", "At 1484.0s, [0.06, 0.208, 0.361, 0.848]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1483.0s, [0.116, 0.21, 0.556, 0.879]: stand, talk to (e.g., self, a person, a group).", "At 1482.0s, [0.083, 0.213, 0.438, 0.863]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1481.0s, [0.213, 0.243, 0.523, 0.862]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "3_VjIRdXVdM.mkv", "clip": [1432, 1490], "conversations": [{"from": "human", "timestamps": 1462.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.204, 0.218, 0.733, 0.863]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1490.0, "value": "What actions did the person at bbox [0.491, 0.277, 0.967, 0.869] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.491, 0.277, 0.967, 0.869]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1489.0s, [0.56, 0.259, 0.908, 0.869]: stand, carry/hold (an object).", "At 1488.0s, [0.49, 0.227, 0.905, 0.866]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "3_VjIRdXVdM.mkv", "clip": [1472, 1596], "conversations": [{"from": "human", "timestamps": 1502.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.347, 0.191, 0.834, 0.859]: watch (a person), walk; [0.772, 0.36, 1.0, 0.873]: watch (a person), walk."}, {"from": "human", "timestamps": 1596.0, "value": "What actions did the person at bbox [0.239, 0.122, 0.947, 0.874] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.239, 0.122, 0.947, 0.874]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1595.0s, [0.175, 0.13, 0.987, 0.888]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "3_VjIRdXVdM.mkv", "clip": [1550, 1594], "conversations": [{"from": "human", "timestamps": 1580.0, "value": "What actions is the person in bbox [0.494, 0.118, 0.992, 0.893] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.494, 0.118, 
0.992, 0.893]: sit, eat, listen to (a person).", "At 1579.0s, [0.593, 0.127, 0.998, 0.879]: sit, eat, listen to (a person).", "From 1577.0s to 1578.0s, [0.518, 0.108, 0.998, 0.878]->[0.567, 0.107, 0.997, 0.88]: sit, eat."]}, {"from": "human", "timestamps": 1594.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.002, 0.195, 0.48, 0.875]: listen to (a person), stand, watch (a person); [0.378, 0.275, 0.727, 0.86]: listen to (a person), sit, watch (a person)."}]}, {"video": "3_VjIRdXVdM.mkv", "clip": [1690, 1733], "conversations": [{"from": "human", "timestamps": 1720.0, "value": "Describe the actions of the person at bbox [0.471, 0.228, 0.754, 0.824] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.471, 0.228, 0.754, 0.824]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1718.0s to 1719.0s, [0.632, 0.213, 0.787, 0.826]->[0.501, 0.214, 0.759, 0.817]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1733.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.161, 0.184, 0.726, 0.801]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "3_VjIRdXVdM.mkv", "clip": [1735, 1786], "conversations": [{"from": "human", "timestamps": 1765.0, "value": "What actions is the person in bbox [0.233, 0.191, 0.93, 0.804] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.233, 0.191, 0.93, 0.804]: stand, talk to (e.g., self, a person, a group).", "From 1762.0s to 1764.0s, [0.148, 0.197, 0.928, 0.8]->[0.209, 0.205, 0.877, 0.814]->[0.231, 0.184, 0.929, 0.845]: stand, talk to (e.g., self, a person, a group).", "From 1760.0s to 1761.0s, [0.189, 0.246, 0.301, 0.544]->[0.187, 0.238, 0.299, 0.543]: sit, talk to (e.g., self, a person, a group).", "At 1759.0s, [0.187, 0.236, 0.299, 0.549]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1758.0s, [0.197, 0.24, 0.317, 0.52]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1757.0s, [0.133, 0.185, 0.72, 0.808]: sit, talk to (e.g., self, a person, a group).", "At 1756.0s, [0.095, 0.196, 0.694, 0.794]: sit, stand, talk to (e.g., self, a person, a group).", "At 1755.0s, [0.049, 0.148, 0.643, 0.837]: sit, eat, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1786.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.147, 0.196, 0.696, 0.809]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "4Y5qi1gD2Sw.mkv", "clip": [883, 957], "conversations": [{"from": "human", "timestamps": 913.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.174, 0.11, 0.608, 0.855]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.581, 0.104, 0.981, 0.891]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 957.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.246, 0.152, 0.763, 0.798]?"}, {"from": "gpt", "value": ["In the current frame, [0.246, 0.152, 0.763, 0.798]: bend/bow (at the waist), touch (an object).", "At 955.0s, [0.344, 0.139, 0.77, 0.852]: bend/bow (at the waist), touch (an object).", 
"From 953.0s to 954.0s, [0.387, 0.112, 0.747, 0.864]->[0.352, 0.171, 0.753, 0.88]: bend/bow (at the waist), carry/hold (an object), touch (an object).", "At 952.0s, [0.448, 0.091, 0.8, 0.834]: walk, carry/hold (an object)."]}]}, {"video": "4Y5qi1gD2Sw.mkv", "clip": [1052, 1086], "conversations": [{"from": "human", "timestamps": 1082.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.202, 0.246, 0.455, 0.92]: carry/hold (an object), stand, touch (an object), watch (a person)."}, {"from": "human", "timestamps": 1086.0, "value": "Describe the actions of the person at bbox [0.303, 0.161, 0.755, 0.914] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.303, 0.161, 0.755, 0.914]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1085.0s, [0.738, 0.1, 0.994, 0.893]: walk, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "4Y5qi1gD2Sw.mkv", "clip": [1143, 1187], "conversations": [{"from": "human", "timestamps": 1173.0, "value": "For the person at bbox [0.375, 0.088, 0.866, 0.902], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.375, 0.088, 0.866, 0.902]: stand, talk to (e.g., self, a person, a group).", "At 1172.0s, [0.36, 0.116, 0.842, 0.895]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1171.0s, [0.385, 0.126, 0.861, 0.896]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1187.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.306, 0.105, 0.785, 0.918]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "4Y5qi1gD2Sw.mkv", "clip": [1123, 1224], "conversations": [{"from": "human", "timestamps": 1153.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.093, 0.095, 0.754, 0.899]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1224.0, "value": "Describe the actions of the person at bbox [0.342, 0.095, 0.691, 0.905] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.342, 0.095, 0.691, 0.905]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1223.0s, [0.321, 0.088, 0.704, 0.909]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "4Y5qi1gD2Sw.mkv", "clip": [1240, 1281], "conversations": [{"from": "human", "timestamps": 1270.0, "value": "For the person at bbox [0.159, 0.095, 0.554, 0.91], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.159, 0.095, 0.554, 0.91]: stand, talk to (e.g., self, a person, a group).", "At 1269.0s, [0.222, 0.111, 0.504, 0.896]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1268.0s, [0.17, 0.12, 0.521, 0.92]: walk, listen to (a person), watch (a person).", "From 1266.0s to 1267.0s, [0.288, 0.114, 0.493, 0.907]->[0.207, 0.119, 0.516, 0.898]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1265.0s, [0.278, 0.104, 0.542, 0.913]: stand, carry/hold (an object).", "At 1264.0s, [0.358, 0.131, 0.653, 
0.906]: bend/bow (at the waist), close (e.g., a door, a box).", "At 1263.0s, [0.339, 0.133, 0.73, 0.883]: bend/bow (at the waist), touch (an object)."]}, {"from": "human", "timestamps": 1281.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.142, 0.081, 0.529, 0.898]: listen to (a person), stand; [0.352, 0.088, 0.838, 0.896]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "4Y5qi1gD2Sw.mkv", "clip": [1253, 1339], "conversations": [{"from": "human", "timestamps": 1283.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.14, 0.097, 0.527, 0.91]: listen to (a person), stand, watch (a person); [0.362, 0.11, 0.85, 0.896]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1339.0, "value": "What actions is the person in bbox [0.377, 0.096, 0.997, 0.908] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.377, 0.096, 0.997, 0.908]: sit, answer phone.", "At 1338.0s, [0.539, 0.094, 1.0, 0.924]: sit, answer phone, listen to (a person).", "From 1329.0s to 1337.0s, [0.425, 0.096, 0.824, 0.902]->[0.41, 0.106, 0.859, 0.903]->[0.44, 0.095, 0.867, 0.909]->[0.414, 0.097, 0.825, 0.901]->[0.431, 0.1, 0.983, 0.892]->[0.436, 0.102, 0.995, 0.903]->[0.362, 0.1, 0.996, 0.906]->[0.43, 0.101, 0.997, 0.905]->[0.439, 0.102, 0.987, 0.894]: sit, answer phone."]}]}, {"video": "4Y5qi1gD2Sw.mkv", "clip": [1310, 1366], "conversations": [{"from": "human", "timestamps": 1340.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.038, 0.113, 0.527, 0.919]?"}, {"from": "gpt", "value": ["At Now, [0.038, 0.113, 0.527, 0.919]: sit, listen to (a person), watch (a person).", "At 1339.0s, [0.033, 0.095, 0.528, 0.911]: sit, listen to (a person).", "From 1333.0s to 1338.0s, [0.0, 0.141, 0.533, 0.912]->[0.062, 0.107, 0.515, 0.919]->[0.059, 0.097, 0.523, 0.917]->[0.019, 0.115, 0.516, 0.911]->[0.026, 0.098, 0.512, 0.916]->[0.004, 0.154, 0.626, 0.913]: sit, listen to (a person), watch (a person).", "From 1329.0s to 1332.0s, [0.178, 0.105, 0.548, 0.921]->[0.187, 0.105, 0.551, 0.879]->[0.209, 0.128, 0.575, 0.893]->[0.19, 0.098, 0.566, 0.879]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1328.0s, [0.174, 0.093, 0.554, 0.877]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1321.0s to 1327.0s, [0.097, 0.105, 0.543, 0.911]->[0.239, 0.101, 0.533, 0.904]->[0.221, 0.114, 0.559, 0.901]->[0.201, 0.093, 0.576, 0.868]->[0.182, 0.094, 0.56, 0.874]->[0.184, 0.109, 0.553, 0.88]->[0.192, 0.099, 0.553, 0.875]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1320.0s, [0.162, 0.102, 0.547, 0.881]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1319.0s, [0.219, 0.092, 0.586, 0.899]: sit, carry/hold (an object), give/serve (an object) to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1318.0s, [0.236, 0.109, 0.563, 0.877]: sit, answer phone.", "At 1317.0s, [0.237, 0.083, 0.562, 0.89]: sit, text on/look at a cellphone, talk to (e.g., self, a person, a group).", "At 1316.0s, [0.228, 0.095, 0.596, 0.889]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1315.0s, [0.227, 0.239, 0.587, 0.913]: bend/bow (at the waist), 
carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 1311.0s to 1314.0s, [0.064, 0.108, 0.521, 0.892]->[0.072, 0.096, 0.552, 0.899]->[0.079, 0.098, 0.548, 0.908]->[0.018, 0.089, 0.562, 0.904]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1309.0s to 1310.0s, [0.09, 0.106, 0.519, 0.889]->[0.062, 0.108, 0.538, 0.891]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1307.0s to 1308.0s, [0.051, 0.089, 0.494, 0.898]->[0.158, 0.098, 0.501, 0.9]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1306.0s, [0.21, 0.11, 0.534, 0.889]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1305.0s, [0.15, 0.084, 0.509, 0.909]: sit, listen to (a person), watch (a person).", "At 1304.0s, [0.058, 0.101, 0.423, 0.93]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1302.0s to 1303.0s, [0.196, 0.363, 0.427, 0.892]->[0.148, 0.13, 0.5, 0.897]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1301.0s, [0.13, 0.087, 0.537, 0.915]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1366.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.042, 0.104, 0.799, 0.9]: lie/sleep, answer phone."}]}, {"video": "4Y5qi1gD2Sw.mkv", "clip": [1387, 1444], "conversations": [{"from": "human", "timestamps": 1417.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.141, 0.093, 0.602, 0.825]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1444.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.198, 0.284, 0.476, 0.907]?"}, {"from": "gpt", "value": ["At Now, [0.198, 0.284, 0.476, 0.907]: stand, smoke, talk to (e.g., self, a person, a group).", "At 1443.0s, [0.166, 0.286, 0.456, 0.907]: stand, smoke, listen to (a person), watch (a person).", "From 1441.0s to 1442.0s, [0.245, 0.09, 0.726, 0.886]->[0.219, 0.091, 0.734, 0.879]: stand, watch (a person)."]}]}, {"video": "4Y5qi1gD2Sw.mkv", "clip": [1412, 1495], "conversations": [{"from": "human", "timestamps": 1442.0, "value": "Describe the actions of the person at bbox [0.219, 0.091, 0.734, 0.879] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.219, 0.091, 0.734, 0.879]: stand, watch (a person).", "At 1441.0s, [0.245, 0.09, 0.726, 0.886]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1495.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.34, 0.101, 0.782, 0.921]: listen to (a person), sit, watch (a person)."}]}, {"video": "4Y5qi1gD2Sw.mkv", "clip": [1478, 1509], "conversations": [{"from": "human", "timestamps": 1508.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.171, 0.112, 0.598, 0.894]: watch (a person), stand."}, {"from": "human", "timestamps": 1509.0, "value": "Describe the actions of the person at bbox [0.168, 0.122, 0.594, 0.887] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.168, 0.122, 0.594, 
0.887]: stand, listen to (a person), watch (a person).", "At 1508.0s, [0.171, 0.112, 0.598, 0.894]: stand, watch (a person).", "At 1507.0s, [0.204, 0.118, 0.616, 0.892]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1506.0s, [0.208, 0.1, 0.61, 0.899]: stand, talk to (e.g., self, a person, a group).", "At 1505.0s, [0.232, 0.106, 0.628, 0.893]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1504.0s, [0.217, 0.121, 0.631, 0.884]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "4Y5qi1gD2Sw.mkv", "clip": [1517, 1598], "conversations": [{"from": "human", "timestamps": 1547.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.176, 0.584, 0.329, 0.905]: listen to (a person), sit, smoke, watch (a person); [0.861, 0.541, 0.988, 0.871]: sit."}, {"from": "human", "timestamps": 1598.0, "value": "What actions is the person in bbox [0.092, 0.075, 0.77, 0.904] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.092, 0.075, 0.77, 0.904]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1597.0s, [0.099, 0.095, 0.835, 0.884]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "4Y5qi1gD2Sw.mkv", "clip": [1675, 1709], "conversations": [{"from": "human", "timestamps": 1705.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.375, 0.091, 0.888, 0.914]: sit, smoke, answer phone."}, {"from": "human", "timestamps": 1709.0, "value": "What actions did the person at bbox [0.404, 0.098, 0.804, 0.896] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.404, 0.098, 0.804, 0.896]: sit, answer phone, smoke.", "From 1707.0s to 1708.0s, [0.38, 0.09, 0.849, 0.895]->[0.385, 0.106, 0.834, 0.891]: sit, answer phone, smoke.", "From 1703.0s to 1705.0s, [0.072, 0.095, 0.861, 0.91]->[0.187, 0.084, 0.843, 0.903]->[0.375, 0.091, 0.888, 0.914]: sit, answer phone, smoke.", "From 1700.0s to 1702.0s, [0.386, 0.246, 0.66, 0.927]->[0.469, 0.228, 0.66, 0.898]->[0.755, 0.222, 0.993, 0.904]: walk.", "From 1698.0s to 1699.0s, [0.376, 0.257, 0.569, 0.9]->[0.374, 0.256, 0.575, 0.897]: stand."]}]}, {"video": "4Y5qi1gD2Sw.mkv", "clip": [1736, 1776], "conversations": [{"from": "human", "timestamps": 1766.0, "value": "Describe the actions of the person at bbox [0.476, 0.106, 0.969, 0.891] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.476, 0.106, 0.969, 0.891]: sit, watch (a person).", "At 1765.0s, [0.453, 0.168, 0.936, 0.907]: sit, kiss (a person), talk to (e.g., self, a person, a group).", "At 1764.0s, [0.478, 0.105, 0.977, 0.917]: sit, talk to (e.g., self, a person, a group).", "At 1763.0s, [0.326, 0.099, 0.968, 0.886]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1761.0s to 1762.0s, [0.499, 0.104, 0.984, 0.922]->[0.505, 0.094, 0.974, 0.916]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1760.0s, [0.505, 0.113, 0.988, 0.889]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1755.0s to 1759.0s, [0.456, 0.133, 0.978, 0.931]->[0.399, 0.153, 0.977, 0.915]->[0.463, 0.113, 0.98, 0.907]->[0.427, 0.105, 0.979, 0.903]->[0.487, 0.109, 0.972, 0.905]: sit, 
talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1776.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.147, 0.109, 0.576, 0.898]: listen to (a person), sit, smoke, watch (a person); [0.413, 0.108, 0.975, 0.897]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "4ZpjKfu6Cl8.mkv", "clip": [927, 1008], "conversations": [{"from": "human", "timestamps": 957.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.224, 0.036, 0.825, 0.97]: carry/hold (an object), sit."}, {"from": "human", "timestamps": 1008.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.458, 0.342, 0.76, 1.0]?"}, {"from": "gpt", "value": ["At Now, [0.458, 0.342, 0.76, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1007.0s, [0.472, 0.32, 0.758, 0.985]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1006.0s, [0.461, 0.334, 0.752, 0.985]: sit, stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1004.0s to 1005.0s, [0.486, 0.332, 0.754, 0.964]->[0.457, 0.398, 0.802, 0.981]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "4ZpjKfu6Cl8.mkv", "clip": [992, 1050], "conversations": [{"from": "human", "timestamps": 1022.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.268, 0.251, 0.339, 0.775]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1050.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.069, 0.014, 0.606, 0.982]?"}, {"from": "gpt", "value": ["In the current frame, [0.069, 0.014, 0.606, 0.982]: stand, listen to (a person).", "At 1049.0s, [0.069, 0.0, 0.596, 0.97]: stand, listen to (a person), watch (a person)."]}]}, {"video": "4ZpjKfu6Cl8.mkv", "clip": [1071, 1136], "conversations": [{"from": "human", "timestamps": 1101.0, "value": "What actions did the person at bbox [0.197, 0.024, 0.759, 0.974] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.197, 0.024, 0.759, 0.974]: stand, listen to (a person), watch (a person).", "At 1100.0s, [0.145, 0.005, 0.726, 0.973]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1099.0s, [0.352, 0.079, 0.912, 0.993]: sit, stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1098.0s, [0.281, 0.04, 0.906, 1.0]: stand, drink.", "From 1096.0s to 1097.0s, [0.324, 0.276, 0.566, 0.986]->[0.28, 0.29, 0.54, 0.987]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1136.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.069, 0.032, 0.736, 0.972]: watch (a person), stand; [0.6, 0.035, 0.931, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "4ZpjKfu6Cl8.mkv", "clip": [1135, 1245], "conversations": [{"from": "human", "timestamps": 1165.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.106, 0.026, 0.699, 1.0]: listen (e.g., to music), dance, listen to (a person)."}, {"from": "human", "timestamps": 1245.0, "value": "Can you tell me the current 
and past actions of the person located in bbox [0.069, 0.0, 0.643, 0.997]?"}, {"from": "gpt", "value": ["In the current frame, [0.069, 0.0, 0.643, 0.997]: sit.", "At 1244.0s, [0.072, 0.0, 0.611, 0.997]: sit."]}]}, {"video": "4ZpjKfu6Cl8.mkv", "clip": [1196, 1240], "conversations": [{"from": "human", "timestamps": 1226.0, "value": "Describe the actions of the person at bbox [0.639, 0.469, 0.709, 0.699] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.639, 0.469, 0.709, 0.699]: sit, watch (a person).", "At 1225.0s, [0.657, 0.471, 0.726, 0.698]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1240.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.257, 0.191, 0.733, 0.984]: watch (a person), stand."}]}, {"video": "4ZpjKfu6Cl8.mkv", "clip": [1237, 1278], "conversations": [{"from": "human", "timestamps": 1267.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.481, 0.012, 0.921, 0.994]?"}, {"from": "gpt", "value": ["In the current frame, [0.481, 0.012, 0.921, 0.994]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1258.0s to 1266.0s, [0.484, 0.0, 0.931, 0.999]->[0.49, 0.0, 0.931, 0.997]->[0.497, 0.006, 0.931, 0.991]->[0.496, 0.002, 0.931, 0.988]->[0.487, 0.002, 0.931, 0.995]->[0.491, 0.005, 0.931, 0.993]->[0.469, 0.001, 0.931, 1.0]->[0.478, 0.01, 0.931, 0.997]->[0.481, 0.002, 0.927, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1257.0s, [0.497, 0.0, 0.925, 1.0]: sit, talk to (e.g., self, a person, a group).", "From 1253.0s to 1256.0s, [0.482, 0.0, 0.931, 1.0]->[0.484, 0.006, 0.93, 1.0]->[0.486, 0.004, 0.924, 1.0]->[0.48, 0.013, 0.931, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1278.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.085, 0.05, 0.46, 0.951]: bend/bow (at the waist); [0.285, 0.044, 0.676, 0.985]: carry/hold (an object), stand; [0.627, 0.011, 0.908, 0.951]: carry/hold (an object), stand."}]}, {"video": "4ZpjKfu6Cl8.mkv", "clip": [1300, 1340], "conversations": [{"from": "human", "timestamps": 1330.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.163, 0.019, 0.685, 0.984]?"}, {"from": "gpt", "value": ["In the current frame, [0.163, 0.019, 0.685, 0.984]: stand, carry/hold (an object), give/serve (an object) to (a person), watch (a person).", "At 1329.0s, [0.181, 0.034, 0.639, 1.0]: stand, carry/hold (an object), watch (a person).", "From 1327.0s to 1328.0s, [0.229, 0.013, 0.648, 0.987]->[0.168, 0.004, 0.622, 0.986]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1326.0s, [0.251, 0.021, 0.687, 0.986]: stand, carry/hold (an object), write, listen to (a person), talk to (e.g., self, a person, a group).", "At 1325.0s, [0.145, 0.016, 0.598, 0.989]: stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1340.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.499, 0.555, 0.7, 0.915]: carry/hold (an object), stand."}]}, {"video": "4ZpjKfu6Cl8.mkv", "clip": [1378, 1492], "conversations": [{"from": "human", "timestamps": 1408.0, "value": "In the current frame, list each person's 
location and actions."}, {"from": "gpt", "value": "At now, [0.227, 0.159, 0.66, 0.974]: carry/hold (an object), answer phone, walk."}, {"from": "human", "timestamps": 1492.0, "value": "What actions did the person at bbox [0.122, 0.066, 0.612, 0.985] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.122, 0.066, 0.612, 0.985]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1491.0s, [0.119, 0.064, 0.604, 0.983]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "4ZpjKfu6Cl8.mkv", "clip": [1453, 1497], "conversations": [{"from": "human", "timestamps": 1483.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.085, 0.0, 0.396, 0.965]: carry/hold (an object), listen to (a person), walk; [0.394, 0.137, 0.649, 0.984]: carry/hold (an object), talk to (e.g., self, a person, a group), walk; [0.614, 0.168, 0.845, 0.976]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1497.0, "value": "What actions is the person in bbox [0.447, 0.0, 0.931, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.447, 0.0, 0.931, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1496.0s, [0.415, 0.0, 0.926, 0.991]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "4ZpjKfu6Cl8.mkv", "clip": [1615, 1655], "conversations": [{"from": "human", "timestamps": 1645.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.095, 0.0, 0.554, 0.992]: listen to (a person), sit, watch (a person); [0.5, 0.011, 0.927, 1.0]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1655.0, "value": "For the person at bbox [0.529, 0.025, 0.922, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.529, 0.025, 0.922, 1.0]: sit, listen to (a person), watch (a person).", "At 1654.0s, [0.527, 0.148, 0.907, 1.0]: sit, listen to (a person), watch (a person).", "At 1653.0s, [0.543, 0.318, 0.894, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1652.0s, [0.564, 0.374, 0.908, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1651.0s, [0.57, 0.407, 0.861, 0.997]: stand, talk to (e.g., self, a person, a group).", "From 1649.0s to 1650.0s, [0.426, 0.254, 0.92, 0.992]->[0.547, 0.364, 0.907, 0.994]: sit, talk to (e.g., self, a person, a group).", "At 1648.0s, [0.524, 0.081, 0.927, 0.994]: stand, talk to (e.g., self, a person, a group).", "At 1647.0s, [0.518, 0.008, 0.929, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1646.0s, [0.514, 0.0, 0.913, 1.0]: talk to (e.g., self, a person, a group), watch (a person).", "At 1645.0s, [0.5, 0.011, 0.927, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1641.0s to 1644.0s, [0.528, 0.17, 0.908, 1.0]->[0.534, 0.067, 0.906, 1.0]->[0.535, 0.014, 0.92, 1.0]->[0.491, 0.016, 0.915, 1.0]: sit, listen to (a person), watch (a person).", "At 1640.0s, [0.51, 0.25, 0.931, 0.992]: sit, listen to (a person)."]}]}, {"video": "4ZpjKfu6Cl8.mkv", "clip": [1613, 1711], "conversations": [{"from": "human", "timestamps": 1643.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, 
[0.092, 0.001, 0.568, 1.0]: sit, talk to (e.g., self, a person, a group); [0.535, 0.014, 0.92, 1.0]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1711.0, "value": "What actions did the person at bbox [0.581, 0.012, 0.759, 0.431] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.581, 0.012, 0.759, 0.431]: walk, carry/hold (an object).", "From 1709.0s to 1710.0s, [0.282, 0.024, 0.425, 0.493]->[0.43, 0.04, 0.575, 0.355]: walk, carry/hold (an object).", "At 1708.0s, [0.07, 0.009, 0.258, 0.959]: stand, carry/hold (an object), lift/pick up.", "At 1707.0s, [0.093, 0.0, 0.468, 1.0]: bend/bow (at the waist)."]}]}, {"video": "4gVsDd8PV9U.mp4", "clip": [928, 997], "conversations": [{"from": "human", "timestamps": 958.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.039, 0.104, 0.601, 0.87]: watch (a person), sit."}, {"from": "human", "timestamps": 997.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.252, 0.077, 0.756, 0.887]?"}, {"from": "gpt", "value": ["In the current frame, [0.252, 0.077, 0.756, 0.887]: stand, talk to (e.g., self, a person, a group).", "From 994.0s to 996.0s, [0.218, 0.135, 0.783, 0.866]->[0.25, 0.082, 0.822, 0.869]->[0.215, 0.089, 0.777, 0.868]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "4gVsDd8PV9U.mp4", "clip": [971, 1002], "conversations": [{"from": "human", "timestamps": 1001.0, "value": "What actions did the person at bbox [0.291, 0.105, 0.782, 0.861] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.291, 0.105, 0.782, 0.861]: stand, talk to (e.g., self, a person, a group).", "From 994.0s to 1000.0s, [0.218, 0.135, 0.783, 0.866]->[0.25, 0.082, 0.822, 0.869]->[0.215, 0.089, 0.777, 0.868]->[0.252, 0.077, 0.756, 0.887]->[0.225, 0.07, 0.755, 0.88]->[0.219, 0.073, 0.784, 0.887]->[0.23, 0.084, 0.774, 0.876]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1002.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.264, 0.075, 0.789, 0.865]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "4gVsDd8PV9U.mp4", "clip": [1011, 1060], "conversations": [{"from": "human", "timestamps": 1041.0, "value": "What actions did the person at bbox [0.224, 0.131, 0.655, 0.885] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.224, 0.131, 0.655, 0.885]: sit, talk to (e.g., self, a person, a group).", "From 1037.0s to 1040.0s, [0.164, 0.104, 0.658, 0.863]->[0.152, 0.119, 0.661, 0.878]->[0.137, 0.109, 0.676, 0.881]->[0.132, 0.096, 0.675, 0.877]: sit, talk to (e.g., self, a person, a group).", "At 1036.0s, [0.218, 0.133, 0.651, 0.889]: sit, carry/hold (an object), crouch/kneel, talk to (e.g., self, a person, a group).", "From 1034.0s to 1035.0s, [0.318, 0.168, 0.619, 0.884]->[0.275, 0.166, 0.637, 0.885]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1033.0s, [0.343, 0.142, 0.625, 0.885]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1026.0s to 1032.0s, [0.257, 0.112, 0.645, 0.887]->[0.298, 0.13, 0.648, 0.888]->[0.217, 0.148, 0.638, 0.873]->[0.167, 0.137, 0.624, 0.865]->[0.191, 0.132, 0.61, 0.877]->[0.195, 0.14, 0.613, 0.867]->[0.149, 0.139, 0.621, 0.886]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1023.0s to 1025.0s, [0.134, 0.154, 0.646, 
0.876]->[0.117, 0.097, 0.658, 0.878]->[0.161, 0.117, 0.652, 0.872]: sit, talk to (e.g., self, a person, a group).", "At 1022.0s, [0.302, 0.187, 0.618, 0.899]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1060.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.092, 0.127, 0.356, 0.734]: martial art, fight/hit (a person); [0.532, 0.127, 0.801, 0.872]: watch (a person), martial art, fight/hit (a person)."}]}, {"video": "4gVsDd8PV9U.mp4", "clip": [1046, 1121], "conversations": [{"from": "human", "timestamps": 1076.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.198, 0.114, 0.744, 0.865]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1121.0, "value": "What actions is the person in bbox [0.0, 0.133, 0.174, 0.905] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.133, 0.174, 0.905]: stand, listen to (a person), watch (a person).", "From 1117.0s to 1120.0s, [0.016, 0.124, 0.332, 0.882]->[0.005, 0.115, 0.339, 0.883]->[0.011, 0.119, 0.336, 0.891]->[0.0, 0.096, 0.324, 0.885]: stand, listen to (a person), watch (a person).", "From 1114.0s to 1116.0s, [0.009, 0.116, 0.459, 0.878]->[0.016, 0.11, 0.438, 0.882]->[0.016, 0.122, 0.449, 0.867]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1111.0s to 1113.0s, [0.004, 0.116, 0.46, 0.883]->[0.006, 0.114, 0.462, 0.877]->[0.012, 0.111, 0.454, 0.876]: stand, talk to (e.g., self, a person, a group).", "From 1104.0s to 1110.0s, [0.012, 0.114, 0.459, 0.878]->[0.017, 0.108, 0.458, 0.88]->[0.013, 0.112, 0.453, 0.88]->[0.013, 0.115, 0.456, 0.884]->[0.014, 0.11, 0.45, 0.868]->[0.015, 0.111, 0.458, 0.88]->[0.017, 0.103, 0.454, 0.875]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1103.0s, [0.16, 0.162, 0.487, 0.882]: stand.", "From 1096.0s to 1102.0s, [0.004, 0.15, 0.43, 0.879]->[0.008, 0.161, 0.426, 0.879]->[0.005, 0.155, 0.414, 0.878]->[0.004, 0.154, 0.417, 0.879]->[0.004, 0.152, 0.431, 0.879]->[0.008, 0.15, 0.441, 0.88]->[0.032, 0.161, 0.449, 0.884]: stand, listen to (a person).", "At 1095.0s, [0.023, 0.169, 0.436, 0.876]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "From 1093.0s to 1094.0s, [0.028, 0.152, 0.425, 0.878]->[0.026, 0.163, 0.43, 0.878]: stand, talk to (e.g., self, a person, a group).", "At 1092.0s, [0.033, 0.164, 0.434, 0.879]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "From 1090.0s to 1091.0s, [0.031, 0.158, 0.446, 0.881]->[0.029, 0.152, 0.44, 0.876]: stand, listen to (a person)."]}]}, {"video": "4gVsDd8PV9U.mp4", "clip": [1140, 1228], "conversations": [{"from": "human", "timestamps": 1170.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.101, 0.108, 0.741, 0.876]: sit."}, {"from": "human", "timestamps": 1228.0, "value": "What actions is the person in bbox [0.112, 0.101, 0.742, 0.888] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.112, 0.101, 0.742, 0.888]: sit.", "At 1227.0s, [0.062, 0.1, 0.691, 0.879]: sit, talk to (e.g., self, a person, a group).", "At 1226.0s, [0.049, 0.102, 0.673, 0.87]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", 
"At 1225.0s, [0.09, 0.095, 0.711, 0.887]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "4gVsDd8PV9U.mp4", "clip": [1247, 1310], "conversations": [{"from": "human", "timestamps": 1277.0, "value": "What actions is the person in bbox [0.423, 0.157, 0.598, 0.863] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.423, 0.157, 0.598, 0.863]: run/jog.", "From 1275.0s to 1276.0s, [0.48, 0.346, 0.631, 0.862]->[0.438, 0.161, 0.636, 0.855]: run/jog."]}, {"from": "human", "timestamps": 1310.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.652, 0.202, 0.883, 0.887]: carry/hold (an object), watch (a person), walk."}]}, {"video": "4gVsDd8PV9U.mp4", "clip": [1267, 1323], "conversations": [{"from": "human", "timestamps": 1297.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.286, 0.121, 0.7, 0.892]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1323.0, "value": "For the person at bbox [0.706, 0.259, 0.906, 0.871], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.706, 0.259, 0.906, 0.871]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1322.0s, [0.706, 0.261, 0.887, 0.868]: stand, listen to (a person), watch (a person).", "At 1321.0s, [0.699, 0.235, 0.85, 0.862]: stand, carry/hold (an object), listen to (a person)."]}]}, {"video": "4gVsDd8PV9U.mp4", "clip": [1492, 1552], "conversations": [{"from": "human", "timestamps": 1522.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.108, 0.124, 0.428, 0.899]: listen to (a person), walk; [0.402, 0.274, 0.665, 0.874]: carry/hold (an object), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1552.0, "value": "Describe the actions of the person at bbox [0.571, 0.179, 0.96, 0.884] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.571, 0.179, 0.96, 0.884]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1548.0s to 1551.0s, [0.429, 0.133, 0.899, 0.866]->[0.477, 0.126, 0.94, 0.865]->[0.519, 0.121, 0.947, 0.862]->[0.522, 0.132, 0.952, 0.871]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "4gVsDd8PV9U.mp4", "clip": [1487, 1563], "conversations": [{"from": "human", "timestamps": 1517.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.227, 0.106, 0.934, 0.889]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1563.0, "value": "For the person at bbox [0.513, 0.281, 0.755, 0.874], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.513, 0.281, 0.755, 0.874]: stand, watch (a person).", "From 1560.0s to 1562.0s, [0.108, 0.086, 0.961, 0.878]->[0.476, 0.183, 0.847, 0.883]->[0.485, 0.2, 0.851, 0.866]: stand, watch (a person)."]}]}, {"video": "4gVsDd8PV9U.mp4", "clip": [1627, 1716], "conversations": [{"from": "human", "timestamps": 1657.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.281, 0.139, 0.447, 0.878]: martial art; [0.362, 0.231, 0.557, 0.876]: 
martial art, fight/hit (a person)."}, {"from": "human", "timestamps": 1716.0, "value": "What actions is the person in bbox [0.001, 0.11, 0.536, 0.9] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.001, 0.11, 0.536, 0.9]: stand, watch (a person).", "From 1712.0s to 1715.0s, [0.0, 0.131, 0.528, 0.9]->[0.002, 0.117, 0.536, 0.919]->[0.016, 0.111, 0.542, 0.926]->[0.02, 0.097, 0.541, 0.919]: stand, watch (a person).", "At 1710.0s, [0.523, 0.242, 0.746, 0.856]: martial art.", "At 1709.0s, [0.499, 0.282, 0.746, 0.879]: fight/hit (a person), martial art."]}]}, {"video": "4gVsDd8PV9U.mp4", "clip": [1672, 1710], "conversations": [{"from": "human", "timestamps": 1702.0, "value": "What actions is the person in bbox [0.395, 0.212, 0.848, 0.878] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.395, 0.212, 0.848, 0.878]: carry/hold (an object), fight/hit (a person), watch (a person), martial art.", "At 1701.0s, [0.612, 0.318, 0.835, 0.877]: fight/hit (a person), martial art.", "From 1699.0s to 1700.0s, [0.136, 0.226, 0.417, 0.873]->[0.334, 0.166, 0.483, 0.747]: carry/hold (an object), fight/hit (a person), martial art.", "At 1698.0s, [0.002, 0.222, 0.127, 0.456]: fight/hit (a person), watch (a person), martial art."]}, {"from": "human", "timestamps": 1710.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.523, 0.242, 0.746, 0.856]: martial art."}]}, {"video": "4gVsDd8PV9U.mp4", "clip": [1737, 1775], "conversations": [{"from": "human", "timestamps": 1767.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.171, 0.25, 0.452, 0.872]: martial art, fight/hit (a person); [0.458, 0.247, 0.72, 0.876]: carry/hold (an object), martial art, fight/hit (a person)."}, {"from": "human", "timestamps": 1775.0, "value": "For the person at bbox [0.368, 0.274, 0.589, 0.899], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.368, 0.274, 0.589, 0.899]: carry/hold (an object), fight/hit (a person), martial art.", "From 1772.0s to 1774.0s, [0.228, 0.398, 0.391, 0.846]->[0.186, 0.229, 0.365, 0.755]->[0.365, 0.281, 0.615, 0.863]: carry/hold (an object), fight/hit (a person), martial art."]}]}, {"video": "4k-rTF3oZKw.mp4", "clip": [888, 920], "conversations": [{"from": "human", "timestamps": 918.0, "value": "What actions is the person in bbox [0.022, 0.086, 0.894, 0.958] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.022, 0.086, 0.894, 0.958]: carry/hold (an object), talk to (e.g., self, a person, a group), lie/sleep.", "At 917.0s, [0.014, 0.087, 0.846, 0.965]: carry/hold (an object), talk to (e.g., self, a person, a group), lie/sleep.", "At 916.0s, [0.027, 0.08, 0.78, 0.966]: carry/hold (an object), lie/sleep."]}, {"from": "human", "timestamps": 920.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.021, 0.088, 0.888, 0.948]: carry/hold (an object), lie/sleep, talk to (e.g., self, a person, a group)."}]}, {"video": "4k-rTF3oZKw.mp4", "clip": [1056, 1118], "conversations": [{"from": "human", "timestamps": 1086.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": 
"At now, [0.046, 0.316, 0.391, 0.968]: listen to (a person), stand; [0.053, 0.368, 0.109, 0.483]: stand; [0.405, 0.273, 0.621, 0.951]: carry/hold (an object), stand, watch (a person); [0.568, 0.114, 0.983, 0.947]: stand."}, {"from": "human", "timestamps": 1118.0, "value": "Describe the actions of the person at bbox [0.37, 0.299, 0.723, 0.982] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.37, 0.299, 0.723, 0.982]: bend/bow (at the waist), put down.", "From 1107.0s to 1117.0s, [0.554, 0.295, 0.672, 0.674]->[0.526, 0.269, 0.644, 0.701]->[0.537, 0.247, 0.683, 0.744]->[0.502, 0.221, 0.68, 0.847]->[0.43, 0.155, 0.681, 0.943]->[0.46, 0.083, 0.832, 0.953]->[0.438, 0.04, 0.983, 0.98]->[0.386, 0.094, 0.883, 0.961]->[0.311, 0.173, 0.759, 0.957]->[0.431, 0.188, 0.73, 0.968]->[0.495, 0.235, 0.727, 0.953]: walk, carry/hold (an object).", "From 1103.0s to 1105.0s, [0.566, 0.418, 0.687, 0.855]->[0.559, 0.404, 0.672, 0.814]->[0.537, 0.371, 0.667, 0.752]: walk, carry/hold (an object).", "At 1101.0s, [0.609, 0.455, 0.793, 0.973]: walk, carry/hold (an object)."]}]}, {"video": "4k-rTF3oZKw.mp4", "clip": [1119, 1182], "conversations": [{"from": "human", "timestamps": 1149.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.786, 0.294, 0.924, 0.6]?"}, {"from": "gpt", "value": ["In the current frame, [0.786, 0.294, 0.924, 0.6]: stand, touch (an object), watch (a person).", "At 1148.0s, [0.729, 0.294, 0.832, 0.587]: stand, touch (an object), watch (a person)."]}, {"from": "human", "timestamps": 1182.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.265, 0.334, 0.34, 0.532]: carry/hold (an object), walk."}]}, {"video": "4k-rTF3oZKw.mp4", "clip": [1168, 1222], "conversations": [{"from": "human", "timestamps": 1198.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.016, 0.226, 0.748, 0.969]: watch (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1222.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.374, 0.091, 0.991, 0.968]?"}, {"from": "gpt", "value": ["In the current frame, [0.374, 0.091, 0.991, 0.968]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1221.0s, [0.355, 0.085, 0.99, 0.966]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1220.0s, [0.72, 0.273, 0.966, 0.942]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1212.0s to 1219.0s, [0.782, 0.268, 0.974, 0.945]->[0.776, 0.271, 0.982, 0.953]->[0.743, 0.274, 0.973, 0.95]->[0.676, 0.264, 0.959, 0.959]->[0.739, 0.274, 0.963, 0.947]->[0.807, 0.27, 0.978, 0.942]->[0.786, 0.265, 0.973, 0.944]->[0.765, 0.27, 0.962, 0.943]: stand, listen to (a person), watch (a person)."]}]}, {"video": "4k-rTF3oZKw.mp4", "clip": [1189, 1241], "conversations": [{"from": "human", "timestamps": 1219.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.047, 0.539, 0.238, 0.832]: listen to (a person), sit, watch (a person); [0.318, 0.279, 0.431, 0.729]: listen to (a person), stand, touch (an object), watch (a person); [0.52, 0.218, 0.769, 0.962]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.765, 0.27, 0.962, 0.943]: listen to (a person), stand, 
watch (a person)."}, {"from": "human", "timestamps": 1241.0, "value": "What actions is the person in bbox [0.018, 0.081, 0.971, 0.956] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.018, 0.081, 0.971, 0.956]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1238.0s to 1240.0s, [0.007, 0.08, 0.968, 0.971]->[0.019, 0.086, 0.959, 0.958]->[0.029, 0.059, 0.97, 0.972]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "4k-rTF3oZKw.mp4", "clip": [1272, 1304], "conversations": [{"from": "human", "timestamps": 1302.0, "value": "What actions is the person in bbox [0.503, 0.058, 0.997, 0.971] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.503, 0.058, 0.997, 0.971]: stand, talk to (e.g., self, a person, a group).", "At 1301.0s, [0.503, 0.066, 1.0, 0.982]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1304.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.042, 0.097, 0.846, 0.956]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "4k-rTF3oZKw.mp4", "clip": [1255, 1328], "conversations": [{"from": "human", "timestamps": 1285.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.062, 0.097, 0.839, 0.966]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1328.0, "value": "What actions did the person at bbox [0.139, 0.085, 0.885, 0.976] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.139, 0.085, 0.885, 0.976]: stand.", "From 1325.0s to 1327.0s, [0.168, 0.075, 0.9, 0.973]->[0.162, 0.075, 0.892, 0.978]->[0.16, 0.078, 0.891, 0.969]: stand.", "At 1324.0s, [0.149, 0.079, 0.91, 0.973]: stand, talk to (e.g., self, a person, a group).", "From 1322.0s to 1323.0s, [0.154, 0.077, 0.894, 0.984]->[0.147, 0.078, 0.88, 0.968]: stand, watch (a person)."]}]}, {"video": "4k-rTF3oZKw.mp4", "clip": [1328, 1365], "conversations": [{"from": "human", "timestamps": 1358.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.141, 0.127, 0.345, 0.95]: listen to (a person), stand, watch (a person); [0.337, 0.314, 0.584, 0.958]: watch (a person), stand, touch (an object); [0.442, 0.525, 0.702, 0.96]: listen to (a person), sit; [0.55, 0.24, 0.719, 0.615]: stand; [0.695, 0.189, 0.922, 0.96]: stand."}, {"from": "human", "timestamps": 1365.0, "value": "What actions is the person in bbox [0.128, 0.077, 0.904, 0.972] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.128, 0.077, 0.904, 0.972]: stand, talk to (e.g., self, a person, a group).", "From 1360.0s to 1364.0s, [0.165, 0.134, 0.346, 0.941]->[0.166, 0.126, 0.355, 0.931]->[0.159, 0.127, 0.343, 0.943]->[0.155, 0.14, 0.361, 0.941]->[0.156, 0.124, 0.348, 0.951]: stand.", "At 1359.0s, [0.16, 0.122, 0.354, 0.946]: stand, watch (a person).", "From 1355.0s to 1358.0s, [0.153, 0.121, 0.342, 0.942]->[0.159, 0.104, 0.349, 0.958]->[0.155, 0.12, 0.355, 0.948]->[0.141, 0.127, 0.345, 0.95]: stand, listen to (a person), watch (a person).", "At 1354.0s, [0.146, 0.109, 0.878, 0.964]: stand, watch (a person)."]}]}, {"video": "4k-rTF3oZKw.mp4", "clip": [1354, 1398], "conversations": [{"from": "human", "timestamps": 
1384.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.058, 0.067, 0.904, 0.968]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1398.0, "value": "What actions did the person at bbox [0.685, 0.197, 0.908, 0.957] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.685, 0.197, 0.908, 0.957]: stand, watch (a person).", "From 1394.0s to 1397.0s, [0.134, 0.09, 0.918, 0.983]->[0.684, 0.193, 0.906, 0.962]->[0.685, 0.197, 0.909, 0.958]->[0.683, 0.2, 0.907, 0.958]: stand.", "At 1393.0s, [0.15, 0.084, 0.909, 0.97]: stand, watch (a person)."]}]}, {"video": "4k-rTF3oZKw.mp4", "clip": [1384, 1463], "conversations": [{"from": "human", "timestamps": 1414.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.172, 0.09, 0.931, 0.978]: watch (a person), stand."}, {"from": "human", "timestamps": 1463.0, "value": "What actions did the person at bbox [0.289, 0.05, 0.989, 0.951] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.289, 0.05, 0.989, 0.951]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1462.0s, [0.573, 0.102, 0.789, 0.966]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1458.0s to 1461.0s, [0.541, 0.077, 0.808, 0.968]->[0.536, 0.077, 0.807, 0.96]->[0.541, 0.08, 0.81, 0.965]->[0.533, 0.085, 0.812, 0.957]: stand, listen to (a person)."]}]}, {"video": "4k-rTF3oZKw.mp4", "clip": [1490, 1551], "conversations": [{"from": "human", "timestamps": 1520.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.257, 0.026, 1.0, 0.984]: stand."}, {"from": "human", "timestamps": 1551.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.575, 0.094, 0.796, 0.921]?"}, {"from": "gpt", "value": ["At Now, [0.575, 0.094, 0.796, 0.921]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1548.0s to 1550.0s, [0.577, 0.099, 0.79, 0.922]->[0.57, 0.101, 0.796, 0.926]->[0.573, 0.092, 0.792, 0.886]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1547.0s, [0.647, 0.089, 0.794, 0.921]: stand, touch (an object), listen to (a person), watch (a person)."]}]}, {"video": "4k-rTF3oZKw.mp4", "clip": [1477, 1601], "conversations": [{"from": "human", "timestamps": 1507.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.007, 0.17, 0.437, 0.967]: carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.667, 0.036, 0.998, 0.956]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1601.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.021, 1.0, 1.0]?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.021, 1.0, 1.0]: sit.", "From 1599.0s to 1600.0s, [0.033, 0.024, 1.0, 1.0]->[0.0, 0.04, 1.0, 1.0]: stand."]}]}, {"video": "4k-rTF3oZKw.mp4", "clip": [1573, 1612], "conversations": [{"from": "human", "timestamps": 1603.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.005, 0.046, 1.0, 0.961]: stand."}, {"from": "human", "timestamps": 1612.0, "value": "What actions is the person in bbox [0.174, 0.053, 1.0, 
0.979] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.174, 0.053, 1.0, 0.979]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1611.0s, [0.072, 0.029, 1.0, 0.975]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "4k-rTF3oZKw.mp4", "clip": [1631, 1665], "conversations": [{"from": "human", "timestamps": 1661.0, "value": "Describe the actions of the person at bbox [0.016, 0.055, 0.938, 0.982] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.016, 0.055, 0.938, 0.982]: sit, talk to (e.g., self, a person, a group).", "From 1658.0s to 1660.0s, [0.0, 0.071, 0.935, 1.0]->[0.022, 0.084, 0.943, 0.96]->[0.025, 0.083, 0.941, 0.975]: sit, talk to (e.g., self, a person, a group).", "At 1657.0s, [0.007, 0.076, 0.924, 0.998]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1655.0s to 1656.0s, [0.007, 0.048, 0.397, 0.996]->[0.003, 0.051, 0.402, 0.981]: sit, watch (a person).", "From 1641.0s to 1653.0s, [0.021, 0.087, 0.635, 0.97]->[0.011, 0.07, 0.675, 0.981]->[0.026, 0.08, 0.668, 0.969]->[0.019, 0.101, 0.653, 0.968]->[0.013, 0.103, 0.649, 0.964]->[0.012, 0.077, 0.666, 0.97]->[0.02, 0.074, 0.642, 0.969]->[0.016, 0.068, 0.649, 0.968]->[0.015, 0.078, 0.658, 0.967]->[0.028, 0.061, 0.645, 0.969]->[0.006, 0.043, 0.367, 0.965]->[0.004, 0.062, 0.377, 0.993]->[0.001, 0.063, 0.414, 0.971]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1665.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.015, 0.095, 0.902, 0.969]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "4k-rTF3oZKw.mp4", "clip": [1646, 1711], "conversations": [{"from": "human", "timestamps": 1676.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.048, 0.437, 0.286, 0.952]: watch (a person), sit, touch (an object), talk to (e.g., self, a person, a group); [0.613, 0.056, 0.895, 0.979]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1711.0, "value": "For the person at bbox [0.593, 0.05, 0.936, 0.96], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.593, 0.05, 0.936, 0.96]: stand, listen to (a person).", "From 1706.0s to 1710.0s, [0.595, 0.053, 0.934, 0.963]->[0.595, 0.05, 0.936, 0.958]->[0.596, 0.051, 0.94, 0.982]->[0.594, 0.05, 0.935, 0.961]->[0.594, 0.049, 0.935, 0.964]: stand, listen to (a person)."]}]}, {"video": "4k-rTF3oZKw.mp4", "clip": [1653, 1719], "conversations": [{"from": "human", "timestamps": 1683.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.441, 0.512, 0.976]: listen to (a person), watch (a person); [0.39, 0.05, 0.984, 0.947]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1719.0, "value": "Describe the actions of the person at bbox [0.0, 0.452, 0.511, 0.962] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.452, 0.511, 0.962]: sit, watch (a person).", "At 1717.0s, [0.0, 0.427, 0.494, 0.966]: stand, watch (a person).", "At 1715.0s, [0.0, 0.422, 0.534, 0.97]: sit, watch (a person).", "At 1714.0s, [0.003, 
0.445, 0.534, 0.966]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1713.0s, [0.047, 0.437, 0.286, 0.956]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1709.0s, [0.051, 0.436, 0.284, 0.949]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1708.0s, [0.048, 0.437, 0.29, 0.953]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1703.0s to 1707.0s, [0.023, 0.064, 0.946, 0.982]->[0.032, 0.084, 0.941, 0.964]->[0.014, 0.078, 0.911, 0.984]->[0.049, 0.436, 0.284, 0.955]->[0.052, 0.435, 0.29, 0.956]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "4k-rTF3oZKw.mp4", "clip": [1735, 1780], "conversations": [{"from": "human", "timestamps": 1765.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.02, 0.106, 0.913, 0.993]?"}, {"from": "gpt", "value": ["In the current frame, [0.02, 0.106, 0.913, 0.993]: sit.", "From 1761.0s to 1764.0s, [0.026, 0.12, 0.762, 0.978]->[0.016, 0.13, 0.814, 0.966]->[0.048, 0.125, 0.843, 0.958]->[0.031, 0.13, 0.873, 0.978]: sit, talk to (e.g., self, a person, a group).", "At 1760.0s, [0.305, 0.382, 0.615, 0.783]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1759.0s, [0.31, 0.379, 0.613, 0.791]: sit, touch (an object), watch (a person).", "From 1756.0s to 1758.0s, [0.213, 0.393, 0.558, 0.799]->[0.251, 0.402, 0.597, 0.785]->[0.298, 0.384, 0.608, 0.794]: sit, carry/hold (an object), watch (a person).", "At 1755.0s, [0.161, 0.393, 0.502, 0.778]: sit, carry/hold (an object), listen to (a person).", "At 1754.0s, [0.378, 0.403, 0.482, 0.64]: sit, watch (a person).", "At 1753.0s, [0.374, 0.392, 0.685, 0.815]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1780.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.124, 0.062, 0.994, 0.982]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "5LrOQEt_XVM.mp4", "clip": [891, 1012], "conversations": [{"from": "human", "timestamps": 921.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.372, 0.049, 0.909, 0.996]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1012.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.546, 0.141, 0.776, 0.955]?"}, {"from": "gpt", "value": ["At Now, [0.546, 0.141, 0.776, 0.955]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1011.0s, [0.501, 0.169, 0.791, 0.944]: stand, carry/hold (an object).", "At 1010.0s, [0.66, 0.143, 0.976, 0.929]: walk, carry/hold (an object)."]}]}, {"video": "5LrOQEt_XVM.mp4", "clip": [961, 1011], "conversations": [{"from": "human", "timestamps": 991.0, "value": "What actions is the person in bbox [0.393, 0.059, 0.782, 0.994] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.393, 0.059, 0.782, 0.994]: stand, carry/hold (an object).", "At 990.0s, [0.39, 0.045, 0.779, 0.996]: stand, carry/hold (an object), watch (a person).", "From 988.0s to 989.0s, [0.416, 0.021, 0.768, 1.0]->[0.397, 0.031, 0.754, 0.991]: stand, carry/hold (an object), hug (a person), listen to (a person), watch (a person).", "From 986.0s to 987.0s, [0.412, 0.02, 0.772, 
1.0]->[0.454, 0.018, 0.766, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 985.0s, [0.335, 0.051, 0.748, 0.987]: stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1011.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.191, 0.185, 0.362, 0.751]: listen to (a person), stand, touch (an object), watch (a person); [0.501, 0.169, 0.791, 0.944]: carry/hold (an object), stand."}]}, {"video": "5LrOQEt_XVM.mp4", "clip": [1044, 1078], "conversations": [{"from": "human", "timestamps": 1074.0, "value": "For the person at bbox [0.566, 0.09, 0.881, 0.997], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.566, 0.09, 0.881, 0.997]: get up, listen to (a person), watch (a person).", "At 1073.0s, [0.628, 0.357, 0.956, 0.994]: get up, talk to (e.g., self, a person, a group), watch (a person).", "At 1072.0s, [0.335, 0.124, 0.819, 0.85]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1078.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.296, 0.336, 0.49, 0.822]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.579, 0.019, 0.878, 0.989]: listen to (a person), stand, watch (a person)."}]}, {"video": "5LrOQEt_XVM.mp4", "clip": [1003, 1104], "conversations": [{"from": "human", "timestamps": 1033.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.175, 0.084, 0.639, 0.805]: read, sit."}, {"from": "human", "timestamps": 1104.0, "value": "What actions is the person in bbox [0.447, 0.173, 0.963, 0.976] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.447, 0.173, 0.963, 0.976]: sit, touch (an object), play with kids, talk to (e.g., self, a person, a group).", "At 1103.0s, [0.386, 0.161, 0.981, 0.983]: sit, put down, touch (an object), talk to (e.g., self, a person, a group).", "At 1102.0s, [0.339, 0.175, 0.953, 0.958]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1101.0s, [0.413, 0.122, 0.997, 0.979]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1100.0s, [0.478, 0.146, 0.931, 0.963]: sit, touch (an object), play with kids, talk to (e.g., self, a person, a group)."]}]}, {"video": "5LrOQEt_XVM.mp4", "clip": [1138, 1169], "conversations": [{"from": "human", "timestamps": 1168.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.061, 0.375, 0.28, 0.872]: carry/hold (an object), sit, listen to (a person); [0.34, 0.059, 0.556, 0.555]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.347, 0.315, 0.515, 0.809]: listen to (a person), stand, watch (a person); [0.433, 0.376, 0.692, 0.814]: carry/hold (an object), sit, talk to (e.g., self, a person, a group); [0.665, 0.337, 0.861, 1.0]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1169.0, "value": "What actions did the person at bbox [0.665, 0.337, 0.875, 0.991] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.665, 0.337, 0.875, 0.991]: stand, listen to (a person), watch (a person).", "At 
1168.0s, [0.665, 0.337, 0.861, 1.0]: stand, listen to (a person), watch (a person).", "At 1167.0s, [0.66, 0.334, 0.873, 0.988]: stand, touch (an object), listen to (a person).", "From 1165.0s to 1166.0s, [0.649, 0.338, 0.873, 0.984]->[0.663, 0.335, 0.868, 0.988]: stand, touch (an object), listen to (a person), watch (a person).", "From 1163.0s to 1164.0s, [0.676, 0.324, 0.872, 0.986]->[0.67, 0.337, 0.874, 0.984]: stand, listen to (a person), watch (a person).", "From 1161.0s to 1162.0s, [0.684, 0.299, 0.865, 1.0]->[0.681, 0.309, 0.877, 1.0]: stand, touch (an object), listen to (a person), watch (a person).", "At 1160.0s, [0.679, 0.314, 0.877, 0.991]: stand, touch (an object), listen to (a person).", "At 1159.0s, [0.665, 0.32, 0.875, 0.987]: stand, carry/hold (an object), listen to (a person).", "At 1158.0s, [0.674, 0.326, 0.868, 0.98]: stand, listen to (a person), watch (a person).", "From 1153.0s to 1157.0s, [0.664, 0.323, 0.879, 0.987]->[0.663, 0.324, 0.872, 0.995]->[0.667, 0.319, 0.871, 0.983]->[0.677, 0.328, 0.87, 0.977]->[0.661, 0.328, 0.868, 0.969]: stand, touch (an object), listen to (a person), watch (a person).", "From 1149.0s to 1152.0s, [0.669, 0.344, 0.889, 0.992]->[0.675, 0.318, 0.886, 1.0]->[0.67, 0.314, 0.874, 1.0]->[0.664, 0.329, 0.883, 0.986]: stand, listen to (a person), watch (a person).", "At 1148.0s, [0.674, 0.336, 0.889, 0.973]: stand, touch (an object), listen to (a person).", "From 1146.0s to 1147.0s, [0.791, 0.359, 0.989, 0.987]->[0.692, 0.334, 0.916, 0.964]: stand, touch (an object), listen to (a person), watch (a person).", "At 1145.0s, [0.799, 0.342, 0.989, 0.996]: stand, listen to (a person), watch (a person).", "At 1144.0s, [0.558, 0.396, 0.794, 0.986]: stand, listen to (a person).", "At 1143.0s, [0.005, 0.416, 0.228, 0.997]: stand, listen to (a person), watch (a person)."]}]}, {"video": "5LrOQEt_XVM.mp4", "clip": [1159, 1218], "conversations": [{"from": "human", "timestamps": 1189.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.094, 0.317, 0.346, 0.886]: carry/hold (an object), sit, watch (a person), listen to (a person); [0.25, 0.007, 0.567, 0.861]: touch (an object), watch (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1218.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.232, 0.029, 0.501, 0.871]?"}, {"from": "gpt", "value": ["At Now, [0.232, 0.029, 0.501, 0.871]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1217.0s, [0.238, 0.024, 0.508, 0.882]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1216.0s, [0.227, 0.029, 0.588, 0.869]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1214.0s to 1215.0s, [0.261, 0.064, 0.576, 0.843]->[0.21, 0.018, 0.478, 0.87]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1213.0s, [0.249, 0.047, 0.507, 0.885]: stand, carry/hold (an object), touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1208.0s to 1212.0s, [0.243, 0.038, 0.505, 0.896]->[0.24, 0.048, 0.479, 0.893]->[0.247, 0.049, 0.488, 0.877]->[0.236, 0.014, 0.58, 0.896]->[0.232, 0.008, 0.551, 0.894]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a 
person, a group), watch (a person).", "From 1206.0s to 1207.0s, [0.208, 0.039, 0.542, 0.894]->[0.226, 0.046, 0.564, 0.885]: stand, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "From 1204.0s to 1205.0s, [0.229, 0.042, 0.513, 0.888]->[0.23, 0.067, 0.513, 0.901]: stand, touch (an object), listen to (a person), watch (a person).", "At 1203.0s, [0.246, 0.038, 0.517, 0.891]: stand, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "From 1200.0s to 1202.0s, [0.261, 0.035, 0.539, 0.906]->[0.262, 0.035, 0.52, 0.9]->[0.256, 0.037, 0.511, 0.898]: stand, touch (an object), listen to (a person), watch (a person).", "From 1197.0s to 1199.0s, [0.244, 0.047, 0.54, 0.891]->[0.228, 0.036, 0.566, 0.904]->[0.244, 0.05, 0.535, 0.893]: stand, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "From 1193.0s to 1196.0s, [0.243, 0.061, 0.506, 0.924]->[0.241, 0.053, 0.554, 0.918]->[0.243, 0.054, 0.49, 0.899]->[0.248, 0.047, 0.495, 0.912]: stand, touch (an object), listen to (a person), watch (a person).", "At 1192.0s, [0.249, 0.046, 0.577, 0.903]: stand, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1191.0s, [0.265, 0.037, 0.571, 0.917]: stand, carry/hold (an object), touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1189.0s to 1190.0s, [0.25, 0.007, 0.567, 0.861]->[0.231, 0.003, 0.557, 0.908]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1188.0s, [0.262, 0.05, 0.525, 0.808]: stand, carry/hold (an object), touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1186.0s to 1187.0s, [0.269, 0.065, 0.492, 0.77]->[0.271, 0.065, 0.468, 0.774]: stand, touch (an object), listen to (a person), watch (a person)."]}]}, {"video": "5LrOQEt_XVM.mp4", "clip": [1194, 1249], "conversations": [{"from": "human", "timestamps": 1224.0, "value": "What actions is the person in bbox [0.238, 0.036, 0.492, 0.846] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.238, 0.036, 0.492, 0.846]: stand, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "From 1222.0s to 1223.0s, [0.238, 0.036, 0.492, 0.85]->[0.236, 0.028, 0.49, 0.849]: stand, touch (an object), listen to (a person), watch (a person).", "At 1221.0s, [0.253, 0.035, 0.499, 0.845]: stand, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "At 1220.0s, [0.25, 0.034, 0.506, 0.856]: stand, touch (an object), listen to (a person), watch (a person).", "At 1219.0s, [0.249, 0.035, 0.485, 0.864]: stand, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1217.0s to 1218.0s, [0.238, 0.024, 0.508, 0.882]->[0.232, 0.029, 0.501, 0.871]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1216.0s, [0.227, 0.029, 0.588, 0.869]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1214.0s to 1215.0s, [0.261, 0.064, 0.576, 0.843]->[0.21, 0.018, 0.478, 0.87]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1213.0s, [0.249, 0.047, 0.507, 0.885]: stand, carry/hold (an object), touch (an object), 
listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1208.0s to 1212.0s, [0.243, 0.038, 0.505, 0.896]->[0.24, 0.048, 0.479, 0.893]->[0.247, 0.049, 0.488, 0.877]->[0.236, 0.014, 0.58, 0.896]->[0.232, 0.008, 0.551, 0.894]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1206.0s to 1207.0s, [0.208, 0.039, 0.542, 0.894]->[0.226, 0.046, 0.564, 0.885]: stand, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "From 1204.0s to 1205.0s, [0.229, 0.042, 0.513, 0.888]->[0.23, 0.067, 0.513, 0.901]: stand, touch (an object), listen to (a person), watch (a person).", "At 1203.0s, [0.246, 0.038, 0.517, 0.891]: stand, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "From 1200.0s to 1202.0s, [0.261, 0.035, 0.539, 0.906]->[0.262, 0.035, 0.52, 0.9]->[0.256, 0.037, 0.511, 0.898]: stand, touch (an object), listen to (a person), watch (a person).", "From 1197.0s to 1199.0s, [0.244, 0.047, 0.54, 0.891]->[0.228, 0.036, 0.566, 0.904]->[0.244, 0.05, 0.535, 0.893]: stand, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "From 1193.0s to 1196.0s, [0.243, 0.061, 0.506, 0.924]->[0.241, 0.053, 0.554, 0.918]->[0.243, 0.054, 0.49, 0.899]->[0.248, 0.047, 0.495, 0.912]: stand, touch (an object), listen to (a person), watch (a person).", "At 1192.0s, [0.249, 0.046, 0.577, 0.903]: stand, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1191.0s, [0.265, 0.037, 0.571, 0.917]: stand, carry/hold (an object), touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1189.0s to 1190.0s, [0.25, 0.007, 0.567, 0.861]->[0.231, 0.003, 0.557, 0.908]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1188.0s, [0.262, 0.05, 0.525, 0.808]: stand, carry/hold (an object), touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1186.0s to 1187.0s, [0.269, 0.065, 0.492, 0.77]->[0.271, 0.065, 0.468, 0.774]: stand, touch (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1249.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.03, 0.26, 0.104, 0.487]: listen to (a person), stand, watch (a person); [0.1, 0.257, 0.177, 0.5]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.189, 0.268, 0.29, 0.707]: listen to (a person), stand, watch (a person); [0.291, 0.133, 0.548, 1.0]: carry/hold (an object), stand, touch (an object), talk to (e.g., self, a person, a group); [0.688, 0.308, 0.858, 0.726]: listen to (a person), sit, watch (a person)."}]}, {"video": "5LrOQEt_XVM.mp4", "clip": [1262, 1293], "conversations": [{"from": "human", "timestamps": 1292.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.038, 0.07, 0.523, 0.97]: listen to (a person), sit, touch (an object), watch (a person); [0.518, 0.108, 0.953, 0.982]: watch (a person), sit, touch (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1293.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.589, 0.158, 0.969, 0.978]?"}, {"from": "gpt", "value": ["At Now, 
[0.589, 0.158, 0.969, 0.978]: sit, talk to (e.g., self, a person, a group).", "At 1292.0s, [0.518, 0.108, 0.953, 0.982]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "5LrOQEt_XVM.mp4", "clip": [1256, 1330], "conversations": [{"from": "human", "timestamps": 1286.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.022, 0.303, 0.104, 0.538]: watch (a person), stand; [0.096, 0.295, 0.178, 0.543]: watch (a person), stand; [0.156, 0.092, 0.528, 0.978]: carry/hold (an object), walk; [0.16, 0.293, 0.242, 0.58]: watch (a person), stand; [0.705, 0.353, 0.92, 0.836]: watch (a person), sit."}, {"from": "human", "timestamps": 1330.0, "value": "Describe the actions of the person at bbox [0.519, 0.049, 0.899, 0.985] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.519, 0.049, 0.899, 0.985]: stand, listen to (a person), watch (a person).", "At 1329.0s, [0.475, 0.079, 0.837, 0.993]: stand, carry/hold (an object), watch (a person).", "At 1328.0s, [0.469, 0.089, 0.818, 0.993]: stand, carry/hold (an object), hug (a person), watch (a person).", "At 1327.0s, [0.475, 0.116, 0.822, 0.998]: stand, carry/hold (an object), watch (a person)."]}]}, {"video": "5LrOQEt_XVM.mp4", "clip": [1302, 1372], "conversations": [{"from": "human", "timestamps": 1332.0, "value": "Describe the actions of the person at bbox [0.058, 0.088, 0.413, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.058, 0.088, 0.413, 1.0]: stand, carry/hold (an object), hand shake, talk to (e.g., self, a person, a group), watch (a person).", "From 1330.0s to 1331.0s, [0.116, 0.082, 0.477, 0.992]->[0.069, 0.116, 0.424, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1329.0s, [0.264, 0.178, 0.431, 0.719]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1372.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.159, 0.03, 0.483, 1.0]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.544, 0.12, 0.851, 0.992]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "5LrOQEt_XVM.mp4", "clip": [1387, 1430], "conversations": [{"from": "human", "timestamps": 1417.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.26, 0.067, 0.923, 0.973]?"}, {"from": "gpt", "value": ["At Now, [0.26, 0.067, 0.923, 0.973]: stand, talk to (e.g., self, a person, a group).", "From 1412.0s to 1416.0s, [0.325, 0.047, 0.908, 0.98]->[0.302, 0.067, 0.914, 0.978]->[0.287, 0.053, 0.909, 0.953]->[0.327, 0.035, 0.905, 0.977]->[0.282, 0.077, 0.903, 0.973]: stand, talk to (e.g., self, a person, a group).", "At 1411.0s, [0.329, 0.045, 0.927, 0.96]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1430.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.196, 0.16, 0.525, 0.999]: carry/hold (an object), stand, listen to (a person); [0.54, 0.011, 0.837, 0.99]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "5LrOQEt_XVM.mp4", "clip": [1412, 1455], "conversations": [{"from": "human", "timestamps": 1442.0, "value": "Can you tell me the current 
and past actions of the person located in bbox [0.18, 0.105, 0.573, 0.979]?"}, {"from": "gpt", "value": ["In the current frame, [0.18, 0.105, 0.573, 0.979]: stand, talk to (e.g., self, a person, a group).", "From 1439.0s to 1441.0s, [0.162, 0.078, 0.579, 0.984]->[0.169, 0.076, 0.58, 0.991]->[0.177, 0.08, 0.617, 0.995]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1455.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.056, 0.183, 0.293, 0.867]: watch (a person), stand, touch (an object), talk to (e.g., self, a person, a group); [0.49, 0.183, 0.689, 0.999]: watch (a person), listen to (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.69, 0.177, 0.859, 0.966]: carry/hold (an object), stand, watch (a person), listen to (a person)."}]}, {"video": "5LrOQEt_XVM.mp4", "clip": [1499, 1540], "conversations": [{"from": "human", "timestamps": 1529.0, "value": "For the person at bbox [0.674, 0.094, 0.986, 0.979], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.674, 0.094, 0.986, 0.979]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1528.0s, [0.615, 0.086, 0.953, 0.962]: stand, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "At 1527.0s, [0.646, 0.07, 0.982, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1540.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.327, 0.232, 0.689, 0.989]: stand, close (e.g., a door, a box), talk to (e.g., self, a person, a group)."}]}, {"video": "5LrOQEt_XVM.mp4", "clip": [1524, 1582], "conversations": [{"from": "human", "timestamps": 1554.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.166, 0.133, 0.52, 0.95]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1582.0, "value": "What actions is the person in bbox [0.401, 0.22, 0.595, 0.705] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.401, 0.22, 0.595, 0.705]: bend/bow (at the waist), carry/hold (an object), touch (an object).", "At 1581.0s, [0.372, 0.199, 0.546, 0.695]: walk, carry/hold (an object), touch (an object).", "At 1580.0s, [0.428, 0.388, 0.569, 0.856]: bend/bow (at the waist), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1579.0s, [0.394, 0.439, 0.561, 0.989]: bend/bow (at the waist), talk to (e.g., self, a person, a group).", "At 1578.0s, [0.457, 0.473, 0.573, 0.882]: bend/bow (at the waist), carry/hold (an object)."]}]}, {"video": "5LrOQEt_XVM.mp4", "clip": [1535, 1594], "conversations": [{"from": "human", "timestamps": 1565.0, "value": "What actions is the person in bbox [0.404, 0.109, 0.612, 0.916] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.404, 0.109, 0.612, 0.916]: stand, carry/hold (an object).", "From 1563.0s to 1564.0s, [0.308, 0.092, 0.531, 0.963]->[0.402, 0.088, 0.609, 0.94]: stand, carry/hold (an object).", "At 1562.0s, [0.345, 0.1, 0.548, 0.88]: walk, carry/hold (an object).", "From 1559.0s to 1561.0s, [0.465, 0.114, 0.674, 
0.871]->[0.37, 0.1, 0.531, 0.885]->[0.377, 0.104, 0.535, 0.84]: stand, carry/hold (an object).", "From 1557.0s to 1558.0s, [0.688, 0.117, 0.88, 0.83]->[0.488, 0.099, 0.799, 0.983]: walk, carry/hold (an object).", "From 1555.0s to 1556.0s, [0.561, 0.093, 0.808, 0.981]->[0.562, 0.097, 0.804, 1.0]: stand, carry/hold (an object)."]}, {"from": "human", "timestamps": 1594.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.465, 0.203, 0.642, 0.71]: carry/hold (an object), stand."}]}, {"video": "5LrOQEt_XVM.mp4", "clip": [1621, 1732], "conversations": [{"from": "human", "timestamps": 1651.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.104, 0.202, 0.417, 1.0]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.528, 0.043, 0.861, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1732.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.09, 0.32, 0.454, 0.861]?"}, {"from": "gpt", "value": ["In the current frame, [0.09, 0.32, 0.454, 0.861]: sit, touch (an object), write, listen to (a person).", "From 1730.0s to 1731.0s, [0.098, 0.348, 0.464, 0.862]->[0.101, 0.349, 0.464, 0.857]: sit, write, listen to (a person).", "At 1729.0s, [0.064, 0.345, 0.496, 0.863]: sit, touch (an object), write, listen to (a person).", "At 1728.0s, [0.12, 0.32, 0.445, 0.848]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1724.0s to 1727.0s, [0.124, 0.294, 0.45, 0.862]->[0.121, 0.315, 0.453, 0.856]->[0.136, 0.288, 0.457, 0.856]->[0.12, 0.331, 0.453, 0.852]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1723.0s, [0.103, 0.289, 0.452, 0.855]: sit, carry/hold (an object), touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1722.0s, [0.064, 0.363, 0.459, 0.861]: sit, write, listen to (a person), talk to (e.g., self, a person, a group).", "At 1721.0s, [0.084, 0.322, 0.438, 0.839]: sit, write, listen to (a person), watch (a person).", "From 1719.0s to 1720.0s, [0.125, 0.264, 0.613, 0.823]->[0.087, 0.278, 0.433, 0.847]: sit, touch (an object).", "At 1718.0s, [0.14, 0.286, 0.416, 0.825]: sit, watch (a person).", "At 1717.0s, [0.035, 0.298, 0.362, 0.849]: sit, carry/hold (an object), touch (an object).", "At 1716.0s, [0.07, 0.32, 0.361, 0.861]: sit, touch (an object), write.", "At 1715.0s, [0.066, 0.324, 0.362, 0.863]: sit, press, write, talk to (e.g., self, a person, a group)."]}]}, {"video": "5LrOQEt_XVM.mp4", "clip": [1655, 1696], "conversations": [{"from": "human", "timestamps": 1685.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.473, 0.264, 0.62, 0.828]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1696.0, "value": "What actions did the person at bbox [0.501, 0.253, 0.728, 0.747] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.501, 0.253, 0.728, 0.747]: sit, carry/hold (an object), touch (an object), listen to (a person).", "At 1695.0s, [0.5, 0.289, 0.756, 0.76]: stand, carry/hold (an object), touch (an object), listen to (a person).", "At 1694.0s, [0.506, 0.26, 0.734, 0.748]: sit, carry/hold (an object), touch (an object), listen to (a person).", "At 1693.0s, [0.537, 0.273, 
0.769, 0.763]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1692.0s, [0.516, 0.252, 0.784, 0.778]: stand, carry/hold (an object), touch (an object).", "At 1691.0s, [0.627, 0.301, 0.877, 0.817]: sit, carry/hold (an object), touch (an object), listen to (a person).", "At 1690.0s, [0.624, 0.315, 0.88, 0.819]: sit, touch (an object), listen to (a person).", "At 1689.0s, [0.632, 0.317, 0.877, 0.818]: stand, touch (an object)."]}]}, {"video": "5LrOQEt_XVM.mp4", "clip": [1713, 1786], "conversations": [{"from": "human", "timestamps": 1743.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.085, 0.03, 0.614, 0.993]: sit, touch (an object), watch (a person), carry/hold (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1786.0, "value": "What actions did the person at bbox [0.356, 0.045, 0.736, 0.985] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.356, 0.045, 0.736, 0.985]: stand, carry/hold (an object), close (e.g., a door, a box), touch (an object).", "At 1785.0s, [0.352, 0.007, 0.671, 0.977]: stand, carry/hold (an object), watch (a person).", "At 1784.0s, [0.274, 0.11, 0.709, 0.923]: stand, carry/hold (an object), touch (an object)."]}]}, {"video": "5MxjqHfkWFI.mkv", "clip": [930, 988], "conversations": [{"from": "human", "timestamps": 960.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.355, 0.175, 0.877, 0.983]: stand."}, {"from": "human", "timestamps": 988.0, "value": "What actions did the person at bbox [0.352, 0.089, 0.995, 0.963] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.352, 0.089, 0.995, 0.963]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 987.0s, [0.346, 0.073, 0.997, 0.957]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "5MxjqHfkWFI.mkv", "clip": [955, 989], "conversations": [{"from": "human", "timestamps": 985.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.184, 0.102, 0.93, 0.977]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 989.0, "value": "For the person at bbox [0.34, 0.103, 0.999, 0.957], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.34, 0.103, 0.999, 0.957]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 987.0s to 988.0s, [0.346, 0.073, 0.997, 0.957]->[0.352, 0.089, 0.995, 0.963]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "5MxjqHfkWFI.mkv", "clip": [1002, 1036], "conversations": [{"from": "human", "timestamps": 1032.0, "value": "What actions is the person in bbox [0.235, 0.197, 0.368, 0.774] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.235, 0.197, 0.368, 0.774]: stand.", "At 1031.0s, [0.27, 0.2, 0.388, 0.661]: stand.", "From 1027.0s to 1030.0s, [0.468, 0.205, 0.559, 0.744]->[0.39, 0.195, 0.519, 0.831]->[0.341, 0.195, 0.478, 0.884]->[0.327, 0.182, 0.435, 0.8]: stand, carry/hold (an object)."]}, {"from": "human", "timestamps": 1036.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": 
"gpt", "value": "In the current frame, [0.145, 0.091, 0.865, 0.957]: stand."}]}, {"video": "5MxjqHfkWFI.mkv", "clip": [1146, 1186], "conversations": [{"from": "human", "timestamps": 1176.0, "value": "What actions is the person in bbox [0.344, 0.401, 0.511, 0.969] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.344, 0.401, 0.511, 0.969]: sit, carry/hold (an object).", "From 1172.0s to 1175.0s, [0.115, 0.215, 0.562, 0.976]->[0.199, 0.293, 0.691, 0.964]->[0.249, 0.341, 0.539, 0.965]->[0.303, 0.376, 0.565, 0.976]: sit, carry/hold (an object).", "At 1171.0s, [0.065, 0.193, 0.615, 0.965]: sit.", "At 1170.0s, [0.008, 0.126, 0.703, 0.95]: stand, carry/hold (an object).", "From 1168.0s to 1169.0s, [0.052, 0.091, 0.793, 0.908]->[0.0, 0.022, 0.737, 0.961]: stand."]}, {"from": "human", "timestamps": 1186.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.39, 0.452, 0.524, 0.888]: carry/hold (an object), sit; [0.481, 0.358, 0.512, 0.495]: walk; [0.57, 0.406, 0.616, 0.573]: walk."}]}, {"video": "5MxjqHfkWFI.mkv", "clip": [1121, 1213], "conversations": [{"from": "human", "timestamps": 1151.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.01, 0.193, 0.461, 0.974]: stand; [0.386, 0.107, 1.0, 0.976]: carry/hold (an object), stand, watch (a person)."}, {"from": "human", "timestamps": 1213.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.075, 0.225, 0.216, 0.691]?"}, {"from": "gpt", "value": ["At Now, [0.075, 0.225, 0.216, 0.691]: stand, watch (a person).", "At 1212.0s, [0.056, 0.23, 0.192, 0.686]: stand, watch (a person).", "From 1208.0s to 1211.0s, [0.109, 0.258, 0.213, 0.64]->[0.081, 0.256, 0.198, 0.659]->[0.068, 0.254, 0.194, 0.669]->[0.073, 0.238, 0.214, 0.682]: walk, watch (a person).", "From 1200.0s to 1207.0s, [0.28, 0.306, 0.373, 0.585]->[0.247, 0.313, 0.31, 0.577]->[0.203, 0.293, 0.275, 0.606]->[0.186, 0.299, 0.261, 0.59]->[0.155, 0.33, 0.244, 0.619]->[0.156, 0.333, 0.247, 0.605]->[0.145, 0.282, 0.248, 0.604]->[0.144, 0.279, 0.253, 0.632]: walk."]}]}, {"video": "5MxjqHfkWFI.mkv", "clip": [1229, 1263], "conversations": [{"from": "human", "timestamps": 1259.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.375, 0.188, 0.786, 0.971]: walk."}, {"from": "human", "timestamps": 1263.0, "value": "What actions did the person at bbox [0.068, 0.025, 1.0, 0.955] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.068, 0.025, 1.0, 0.955]: stand.", "At 1262.0s, [0.129, 0.049, 0.971, 0.956]: stand.", "From 1260.0s to 1261.0s, [0.291, 0.124, 0.997, 0.972]->[0.218, 0.139, 0.991, 0.96]: stand, watch (a person)."]}]}, {"video": "5MxjqHfkWFI.mkv", "clip": [1266, 1303], "conversations": [{"from": "human", "timestamps": 1296.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.191, 0.817, 0.998]: watch (a person), sit, talk to (e.g., self, a person, a group), drink."}, {"from": "human", "timestamps": 1303.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.014, 0.046, 0.666, 0.953]?"}, {"from": "gpt", "value": ["At Now, [0.014, 0.046, 0.666, 0.953]: stand, talk to (e.g., self, a person, a group).", "At 1302.0s, [0.011, 
0.107, 0.579, 0.969]: stand, walk, talk to (e.g., self, a person, a group).", "From 1299.0s to 1301.0s, [0.049, 0.103, 0.468, 0.981]->[0.036, 0.095, 0.45, 0.972]->[0.024, 0.095, 0.447, 0.967]: stand, talk to (e.g., self, a person, a group).", "At 1298.0s, [0.019, 0.093, 0.476, 0.976]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "5MxjqHfkWFI.mkv", "clip": [1269, 1325], "conversations": [{"from": "human", "timestamps": 1299.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.049, 0.103, 0.468, 0.981]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1325.0, "value": "Describe the actions of the person at bbox [0.103, 0.135, 0.662, 0.975] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.103, 0.135, 0.662, 0.975]: sit.", "From 1323.0s to 1324.0s, [0.068, 0.12, 0.66, 0.973]->[0.067, 0.119, 0.688, 0.977]: sit."]}]}, {"video": "5MxjqHfkWFI.mkv", "clip": [1369, 1420], "conversations": [{"from": "human", "timestamps": 1399.0, "value": "For the person at bbox [0.061, 0.138, 0.989, 0.955], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.061, 0.138, 0.989, 0.955]: sit, talk to (e.g., self, a person, a group).", "From 1397.0s to 1398.0s, [0.048, 0.136, 0.996, 0.956]->[0.043, 0.144, 0.982, 0.961]: sit, talk to (e.g., self, a person, a group).", "At 1396.0s, [0.037, 0.16, 0.991, 0.96]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1420.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.02, 0.235, 0.839, 0.959]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "5MxjqHfkWFI.mkv", "clip": [1431, 1470], "conversations": [{"from": "human", "timestamps": 1461.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.052, 0.199, 0.923, 0.953]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1470.0, "value": "What actions is the person in bbox [0.016, 0.151, 0.985, 0.971] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.016, 0.151, 0.985, 0.971]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1469.0s, [0.046, 0.171, 0.971, 0.959]: sit, listen to (a person), watch (a person)."]}]}, {"video": "5MxjqHfkWFI.mkv", "clip": [1472, 1541], "conversations": [{"from": "human", "timestamps": 1502.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.054, 0.09, 0.747, 0.968]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1541.0, "value": "What actions is the person in bbox [0.316, 0.111, 0.989, 0.957] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.316, 0.111, 0.989, 0.957]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1540.0s, [0.274, 0.072, 0.979, 0.973]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "5MxjqHfkWFI.mkv", "clip": [1557, 1597], "conversations": [{"from": "human", "timestamps": 1587.0, "value": "Can you tell me the current and past actions of 
the person located in bbox [0.225, 0.13, 0.984, 0.97]?"}, {"from": "gpt", "value": ["In the current frame, [0.225, 0.13, 0.984, 0.97]: stand, talk to (e.g., self, a person, a group).", "At 1586.0s, [0.371, 0.079, 1.0, 0.963]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1597.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.038, 0.013, 0.942, 0.971]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "5MxjqHfkWFI.mkv", "clip": [1601, 1662], "conversations": [{"from": "human", "timestamps": 1631.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.059, 0.088, 0.945, 0.988]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1662.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.054, 0.072, 0.721, 0.973]?"}, {"from": "gpt", "value": ["In the current frame, [0.054, 0.072, 0.721, 0.973]: stand, carry/hold (an object).", "From 1658.0s to 1661.0s, [0.019, 0.205, 0.689, 0.964]->[0.019, 0.152, 0.674, 0.979]->[0.03, 0.132, 0.704, 0.983]->[0.062, 0.074, 0.729, 0.972]: stand, carry/hold (an object).", "From 1656.0s to 1657.0s, [0.013, 0.224, 0.864, 0.978]->[0.007, 0.137, 0.967, 0.968]: get up.", "From 1654.0s to 1655.0s, [0.04, 0.166, 0.949, 0.959]->[0.059, 0.123, 0.961, 0.964]: sit.", "From 1652.0s to 1653.0s, [0.07, 0.128, 0.901, 0.971]->[0.028, 0.135, 0.99, 0.978]: sit, watch (a person)."]}]}, {"video": "5MxjqHfkWFI.mkv", "clip": [1595, 1712], "conversations": [{"from": "human", "timestamps": 1625.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.03, 0.134, 0.951, 0.98]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1712.0, "value": "Describe the actions of the person at bbox [0.024, 0.033, 0.995, 0.958] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.024, 0.033, 0.995, 0.958]: stand, carry/hold (an object).", "At 1711.0s, [0.019, 0.058, 0.968, 0.965]: stand, carry/hold (an object)."]}]}, {"video": "5MxjqHfkWFI.mkv", "clip": [1741, 1772], "conversations": [{"from": "human", "timestamps": 1771.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.078, 0.292, 0.34, 0.968]: watch (a person), stand; [0.659, 0.378, 0.997, 0.971]: carry/hold (an object), stand, watch (a person)."}, {"from": "human", "timestamps": 1772.0, "value": "What actions did the person at bbox [0.573, 0.35, 0.971, 0.981] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.573, 0.35, 0.971, 0.981]: walk, carry/hold (an object), watch (a person).", "From 1768.0s to 1771.0s, [0.369, 0.382, 0.972, 0.975]->[0.453, 0.377, 0.99, 0.968]->[0.614, 0.363, 0.992, 0.97]->[0.659, 0.378, 0.997, 0.971]: stand, carry/hold (an object), watch (a person).", "From 1766.0s to 1767.0s, [0.441, 0.374, 0.894, 0.998]->[0.398, 0.377, 0.895, 0.971]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1765.0s, [0.41, 0.362, 0.863, 0.971]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "5YPjcdLbs5g.mkv", "clip": [897, 954], 
"conversations": [{"from": "human", "timestamps": 927.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.04, 0.034, 0.782, 0.984]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 954.0, "value": "What actions did the person at bbox [0.162, 0.029, 1.0, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.162, 0.029, 1.0, 1.0]: sit, stand, talk to (e.g., self, a person, a group).", "At 953.0s, [0.11, 0.043, 0.972, 0.986]: sit, stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "5YPjcdLbs5g.mkv", "clip": [906, 991], "conversations": [{"from": "human", "timestamps": 936.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.133, 0.046, 0.837, 1.0]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 991.0, "value": "Describe the actions of the person at bbox [0.08, 0.025, 0.978, 0.981] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.08, 0.025, 0.978, 0.981]: sit, talk to (e.g., self, a person, a group).", "At 990.0s, [0.055, 0.028, 0.991, 0.988]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "5YPjcdLbs5g.mkv", "clip": [933, 977], "conversations": [{"from": "human", "timestamps": 963.0, "value": "What actions is the person in bbox [0.108, 0.044, 1.0, 0.993] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.108, 0.044, 1.0, 0.993]: sit, talk to (e.g., self, a person, a group).", "At 962.0s, [0.061, 0.03, 0.988, 0.997]: sit, stand, talk to (e.g., self, a person, a group).", "At 961.0s, [0.13, 0.048, 1.0, 0.996]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 977.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.019, 0.005, 0.879, 0.988]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "5YPjcdLbs5g.mkv", "clip": [1073, 1136], "conversations": [{"from": "human", "timestamps": 1103.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.363, 0.095, 0.695, 0.991]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1136.0, "value": "What actions did the person at bbox [0.22, 0.167, 0.407, 0.867] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.22, 0.167, 0.407, 0.867]: walk, carry/hold (an object), listen to (a person).", "At 1135.0s, [0.173, 0.203, 0.375, 0.888]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1134.0s, [0.112, 0.27, 0.341, 0.826]: walk, carry/hold (an object), listen to (a person).", "At 1133.0s, [0.085, 0.318, 0.298, 0.794]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1132.0s, [0.118, 0.303, 0.264, 0.77]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1131.0s, [0.021, 0.328, 0.191, 0.785]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1130.0s, [0.034, 0.391, 0.144, 0.833]: walk, carry/hold (an object), pull (an object), take (an object) from (a person), talk to (e.g., self, a person, a group).", "At 1129.0s, [0.103, 0.436, 0.214, 0.857]: walk, 
carry/hold (an object)."]}]}, {"video": "5YPjcdLbs5g.mkv", "clip": [1187, 1221], "conversations": [{"from": "human", "timestamps": 1217.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.215, 0.029, 0.938, 0.996]?"}, {"from": "gpt", "value": ["At Now, [0.215, 0.029, 0.938, 0.996]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1216.0s, [0.142, 0.107, 0.96, 0.986]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1221.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.026, 0.349, 0.236, 0.823]: carry/hold (an object), sit, talk to (e.g., self, a person, a group), listen to (a person); [0.15, 0.375, 0.41, 0.771]: sit, touch (an object); [0.165, 0.035, 1.0, 0.992]: sit, smoke, sing to (e.g., self, a person, a group)."}]}, {"video": "5YPjcdLbs5g.mkv", "clip": [1499, 1542], "conversations": [{"from": "human", "timestamps": 1529.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.283, 0.157, 0.589, 0.972]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1542.0, "value": "What actions is the person in bbox [0.129, 0.106, 0.301, 0.96] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.129, 0.106, 0.301, 0.96]: stand, carry/hold (an object).", "At 1541.0s, [0.095, 0.165, 0.311, 0.997]: stand, carry/hold (an object).", "From 1528.0s to 1540.0s, [0.332, 0.177, 0.641, 0.974]->[0.283, 0.157, 0.589, 0.972]->[0.306, 0.193, 0.558, 0.985]->[0.252, 0.233, 0.518, 0.975]->[0.279, 0.237, 0.562, 0.982]->[0.288, 0.19, 0.533, 0.991]->[0.291, 0.21, 0.526, 0.971]->[0.281, 0.191, 0.568, 0.99]->[0.273, 0.189, 0.58, 0.971]->[0.182, 0.171, 0.474, 0.977]->[0.202, 0.151, 0.45, 0.978]->[0.14, 0.148, 0.387, 0.969]->[0.057, 0.158, 0.291, 0.991]: walk, carry/hold (an object).", "From 1520.0s to 1527.0s, [0.334, 0.128, 0.68, 0.976]->[0.347, 0.129, 0.672, 0.982]->[0.368, 0.128, 0.693, 0.995]->[0.375, 0.133, 0.707, 0.987]->[0.371, 0.151, 0.713, 0.979]->[0.383, 0.147, 0.709, 0.982]->[0.379, 0.151, 0.697, 0.985]->[0.344, 0.149, 0.681, 0.97]: stand, carry/hold (an object).", "From 1514.0s to 1519.0s, [0.531, 0.233, 0.73, 0.995]->[0.454, 0.187, 0.708, 0.97]->[0.453, 0.166, 0.674, 0.982]->[0.436, 0.163, 0.649, 0.983]->[0.424, 0.168, 0.679, 0.981]->[0.334, 0.157, 0.672, 0.998]: walk, carry/hold (an object)."]}]}, {"video": "5YPjcdLbs5g.mkv", "clip": [1491, 1617], "conversations": [{"from": "human", "timestamps": 1521.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.347, 0.129, 0.672, 0.982]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1617.0, "value": "For the person at bbox [0.0, 0.127, 0.526, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.127, 0.526, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1615.0s to 1616.0s, [0.0, 0.137, 0.462, 1.0]->[0.0, 0.122, 0.458, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1613.0s to 1614.0s, [0.201, 0.394, 0.338, 0.92]->[0.0, 0.149, 0.438, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1612.0s, [0.23, 0.397, 0.344, 0.899]: stand, 
carry/hold (an object), watch (a person).", "At 1611.0s, [0.228, 0.399, 0.338, 0.909]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1608.0s to 1610.0s, [0.23, 0.394, 0.365, 0.99]->[0.247, 0.401, 0.362, 0.952]->[0.243, 0.402, 0.358, 0.914]: stand, carry/hold (an object), watch (a person).", "From 1606.0s to 1607.0s, [0.09, 0.463, 0.297, 0.991]->[0.196, 0.423, 0.359, 0.999]: stand, carry/hold (an object).", "At 1605.0s, [0.003, 0.562, 0.148, 0.993]: stand."]}]}, {"video": "5YPjcdLbs5g.mkv", "clip": [1605, 1669], "conversations": [{"from": "human", "timestamps": 1635.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.202, 0.092, 1.0, 0.996]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1669.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.271, 0.08, 0.996, 0.987]?"}, {"from": "gpt", "value": ["At Now, [0.271, 0.08, 0.996, 0.987]: stand, talk to (e.g., self, a person, a group).", "From 1666.0s to 1668.0s, [0.193, 0.067, 1.0, 0.989]->[0.251, 0.06, 1.0, 0.977]->[0.271, 0.046, 1.0, 1.0]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "5YPjcdLbs5g.mkv", "clip": [1652, 1704], "conversations": [{"from": "human", "timestamps": 1682.0, "value": "What actions did the person at bbox [0.006, 0.093, 0.714, 0.994] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.006, 0.093, 0.714, 0.994]: stand.", "At 1681.0s, [0.019, 0.08, 0.721, 0.993]: stand, listen to (a person)."]}, {"from": "human", "timestamps": 1704.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.152, 0.103, 1.0, 1.0]: stand."}]}, {"video": "5YPjcdLbs5g.mkv", "clip": [1734, 1775], "conversations": [{"from": "human", "timestamps": 1764.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.003, 0.291, 0.133, 0.894]: carry/hold (an object), run/jog; [0.129, 0.236, 0.332, 0.967]: carry/hold (an object), sing to (e.g., self, a person, a group), walk; [0.368, 0.262, 0.521, 0.918]: carry/hold (an object), sing to (e.g., self, a person, a group), run/jog; [0.504, 0.105, 0.776, 0.989]: carry/hold (an object), sing to (e.g., self, a person, a group), walk; [0.742, 0.241, 0.997, 0.993]: carry/hold (an object), sing to (e.g., self, a person, a group), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1775.0, "value": "What actions is the person in bbox [0.46, 0.193, 0.677, 0.755] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.46, 0.193, 0.677, 0.755]: walk, carry/hold (an object), watch (a person).", "At 1774.0s, [0.322, 0.126, 0.595, 0.704]: walk, carry/hold (an object).", "At 1773.0s, [0.113, 0.038, 0.373, 0.878]: walk, carry/hold (an object), watch (a person)."]}]}, {"video": "5milLu-6bWI.mp4", "clip": [878, 912], "conversations": [{"from": "human", "timestamps": 908.0, "value": "For the person at bbox [0.602, 0.359, 0.715, 0.813], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.602, 0.359, 0.715, 0.813]: walk, carry/hold (an object).", "From 904.0s to 907.0s, [0.743, 0.357, 0.83, 0.686]->[0.809, 0.341, 0.907, 0.672]->[0.737, 
0.35, 0.861, 0.764]->[0.728, 0.359, 0.831, 0.793]: walk, carry/hold (an object)."]}, {"from": "human", "timestamps": 912.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.291, 0.441, 0.368, 0.688]: carry/hold (an object), walk."}]}, {"video": "5milLu-6bWI.mp4", "clip": [889, 1012], "conversations": [{"from": "human", "timestamps": 919.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.263, 0.256, 0.435, 0.87]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1012.0, "value": "What actions is the person in bbox [0.1, 0.052, 0.54, 0.973] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.1, 0.052, 0.54, 0.973]: stand, talk to (e.g., self, a person, a group).", "At 1011.0s, [0.112, 0.059, 0.535, 0.97]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "5milLu-6bWI.mp4", "clip": [956, 1004], "conversations": [{"from": "human", "timestamps": 986.0, "value": "What actions is the person in bbox [0.414, 0.267, 0.621, 0.744] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.414, 0.267, 0.621, 0.744]: walk, talk to (e.g., self, a person, a group).", "From 984.0s to 985.0s, [0.302, 0.199, 0.548, 0.736]->[0.379, 0.247, 0.583, 0.619]: walk, talk to (e.g., self, a person, a group).", "From 981.0s to 983.0s, [0.255, 0.222, 0.433, 0.836]->[0.275, 0.193, 0.413, 0.771]->[0.243, 0.202, 0.458, 0.74]: walk.", "From 979.0s to 980.0s, [0.261, 0.272, 0.432, 0.889]->[0.28, 0.253, 0.43, 0.827]: walk, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1004.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.071, 0.067, 0.544, 0.967]: stand, talk to (e.g., self, a person, a group); [0.319, 0.093, 0.832, 0.966]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "5milLu-6bWI.mp4", "clip": [1016, 1047], "conversations": [{"from": "human", "timestamps": 1046.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.105, 0.061, 0.549, 0.987]: stand; [0.559, 0.07, 0.921, 0.969]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1047.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.563, 0.074, 0.923, 0.965]?"}, {"from": "gpt", "value": ["In the current frame, [0.563, 0.074, 0.923, 0.965]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1046.0s, [0.559, 0.07, 0.921, 0.969]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "5milLu-6bWI.mp4", "clip": [1029, 1084], "conversations": [{"from": "human", "timestamps": 1059.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.332, 0.126, 0.891, 0.945]: stand."}, {"from": "human", "timestamps": 1084.0, "value": "What actions is the person in bbox [0.066, 0.311, 0.238, 0.96] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.066, 0.311, 0.238, 0.96]: stand, carry/hold (an object), talk to 
(e.g., self, a person, a group).", "From 1082.0s to 1083.0s, [0.061, 0.321, 0.233, 0.966]->[0.063, 0.324, 0.21, 0.963]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1081.0s, [0.062, 0.313, 0.213, 0.967]: stand, carry/hold (an object).", "At 1080.0s, [0.06, 0.324, 0.234, 0.962]: stand, carry/hold (an object), listen to (a person).", "At 1079.0s, [0.061, 0.327, 0.206, 0.964]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1078.0s, [0.063, 0.312, 0.28, 0.967]: stand, carry/hold (an object), touch (an object), listen to (a person)."]}]}, {"video": "5milLu-6bWI.mp4", "clip": [1061, 1120], "conversations": [{"from": "human", "timestamps": 1091.0, "value": "Describe the actions of the person at bbox [0.066, 0.323, 0.202, 0.95] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.066, 0.323, 0.202, 0.95]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 1089.0s to 1090.0s, [0.058, 0.324, 0.206, 0.959]->[0.069, 0.326, 0.203, 0.953]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1087.0s to 1088.0s, [0.052, 0.32, 0.197, 0.961]->[0.053, 0.31, 0.19, 0.957]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1086.0s, [0.05, 0.319, 0.192, 0.964]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1085.0s, [0.056, 0.32, 0.216, 0.971]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1084.0s, [0.066, 0.311, 0.238, 0.96]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1082.0s to 1083.0s, [0.061, 0.321, 0.233, 0.966]->[0.063, 0.324, 0.21, 0.963]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1081.0s, [0.062, 0.313, 0.213, 0.967]: stand, carry/hold (an object).", "At 1080.0s, [0.06, 0.324, 0.234, 0.962]: stand, carry/hold (an object), listen to (a person).", "At 1079.0s, [0.061, 0.327, 0.206, 0.964]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1078.0s, [0.063, 0.312, 0.28, 0.967]: stand, carry/hold (an object), touch (an object), listen to (a person)."]}, {"from": "human", "timestamps": 1120.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.257, 0.18, 0.629, 0.978]: carry/hold (an object), talk to (e.g., self, a person, a group), walk."}]}, {"video": "5milLu-6bWI.mp4", "clip": [1140, 1185], "conversations": [{"from": "human", "timestamps": 1170.0, "value": "What actions did the person at bbox [0.202, 0.159, 0.623, 0.959] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.202, 0.159, 0.623, 0.959]: stand, listen to (a person), watch (a person).", "At 1169.0s, [0.158, 0.155, 0.622, 0.972]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1185.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.371, 0.098, 0.776, 0.966]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "5milLu-6bWI.mp4", "clip": [1135, 1225], "conversations": [{"from": "human", "timestamps": 1165.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.452, 0.168, 0.829, 
0.974]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1225.0, "value": "Describe the actions of the person at bbox [0.0, 0.039, 0.207, 0.961] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.0, 0.039, 0.207, 0.961]: stand, watch (a person).", "At 1224.0s, [0.0, 0.029, 0.291, 0.948]: stand.", "From 1220.0s to 1223.0s, [0.027, 0.163, 0.544, 0.945]->[0.006, 0.121, 0.374, 0.932]->[0.0, 0.076, 0.296, 0.912]->[0.0, 0.056, 0.277, 0.97]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "5milLu-6bWI.mp4", "clip": [1200, 1250], "conversations": [{"from": "human", "timestamps": 1230.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.612, 0.248, 0.786, 0.955]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1250.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.439, 0.213, 0.857, 0.996]?"}, {"from": "gpt", "value": ["At Now, [0.439, 0.213, 0.857, 0.996]: stand, listen to (a person), watch (a person).", "From 1248.0s to 1249.0s, [0.544, 0.206, 0.814, 0.963]->[0.514, 0.213, 0.771, 0.95]: stand, listen to (a person), watch (a person).", "At 1247.0s, [0.458, 0.232, 0.719, 0.961]: stand, watch (a person).", "At 1246.0s, [0.424, 0.233, 0.711, 0.963]: stand, listen to (a person), watch (a person).", "From 1234.0s to 1245.0s, [0.419, 0.239, 0.725, 0.969]->[0.491, 0.312, 0.777, 0.97]->[0.445, 0.304, 0.762, 0.968]->[0.435, 0.275, 0.745, 0.97]->[0.413, 0.269, 0.694, 0.964]->[0.396, 0.263, 0.669, 0.977]->[0.389, 0.279, 0.656, 0.962]->[0.383, 0.249, 0.701, 0.983]->[0.396, 0.248, 0.776, 0.97]->[0.474, 0.233, 0.783, 0.957]->[0.456, 0.231, 0.763, 0.975]->[0.431, 0.241, 0.731, 0.968]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1228.0s to 1233.0s, [0.731, 0.271, 0.914, 0.858]->[0.657, 0.228, 0.815, 0.917]->[0.612, 0.248, 0.786, 0.955]->[0.539, 0.279, 0.764, 0.944]->[0.509, 0.121, 0.875, 0.945]->[0.209, 0.342, 0.568, 0.959]: walk, talk to (e.g., self, a person, a group).", "From 1226.0s to 1227.0s, [0.908, 0.613, 0.99, 0.974]->[0.825, 0.411, 0.96, 0.963]: walk."]}]}, {"video": "5milLu-6bWI.mp4", "clip": [1235, 1297], "conversations": [{"from": "human", "timestamps": 1265.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.071, 0.081, 0.51, 0.963]: listen to (a person), stand, watch (a person); [0.437, 0.249, 0.92, 0.98]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1297.0, "value": "Describe the actions of the person at bbox [0.438, 0.059, 0.953, 0.972] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.438, 0.059, 0.953, 0.972]: walk, talk to (e.g., self, a person, a group).", "From 1294.0s to 1296.0s, [0.085, 0.116, 0.653, 0.97]->[0.127, 0.105, 0.663, 0.992]->[0.152, 0.045, 0.679, 0.97]: stand, talk to (e.g., self, a person, a group).", "From 1292.0s to 1293.0s, [0.461, 0.174, 0.623, 0.765]->[0.457, 0.176, 0.613, 0.741]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1291.0s, [0.435, 0.18, 0.561, 0.78]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1290.0s, [0.411, 0.198, 0.538, 0.764]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": 
"5milLu-6bWI.mp4", "clip": [1239, 1354], "conversations": [{"from": "human", "timestamps": 1269.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.192, 0.122, 0.595, 0.949]: stand, talk to (e.g., self, a person, a group); [0.443, 0.296, 0.789, 0.959]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1354.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.35, 0.093, 0.851, 0.86]?"}, {"from": "gpt", "value": ["In the current frame, [0.35, 0.093, 0.851, 0.86]: stand, talk to (e.g., self, a person, a group).", "At 1353.0s, [0.196, 0.062, 0.836, 0.999]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "5milLu-6bWI.mp4", "clip": [1370, 1421], "conversations": [{"from": "human", "timestamps": 1400.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.227, 0.375, 0.504, 0.837]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.332, 0.359, 0.412, 0.594]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.514, 0.203, 0.682, 0.777]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1421.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.444, 0.215, 0.544, 0.677]?"}, {"from": "gpt", "value": ["In the current frame, [0.444, 0.215, 0.544, 0.677]: stand, carry/hold (an object), listen to (a person).", "At 1420.0s, [0.462, 0.216, 0.59, 0.688]: walk, carry/hold (an object), listen to (a person).", "At 1419.0s, [0.464, 0.241, 0.571, 0.676]: walk, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "5milLu-6bWI.mp4", "clip": [1367, 1455], "conversations": [{"from": "human", "timestamps": 1397.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.149, 0.411, 0.491, 0.893]: listen to (a person), sit, watch (a person); [0.386, 0.365, 0.458, 0.607]: carry/hold (an object), stand; [0.477, 0.184, 0.635, 0.789]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1455.0, "value": "What actions is the person in bbox [0.458, 0.184, 1.0, 0.981] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.458, 0.184, 1.0, 0.981]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1443.0s to 1454.0s, [0.55, 0.259, 0.809, 0.822]->[0.493, 0.249, 0.81, 0.821]->[0.496, 0.252, 0.81, 0.822]->[0.494, 0.248, 0.806, 0.822]->[0.498, 0.239, 0.818, 0.803]->[0.493, 0.243, 0.822, 0.811]->[0.491, 0.246, 0.804, 0.795]->[0.493, 0.245, 0.801, 0.8]->[0.488, 0.247, 0.802, 0.805]->[0.493, 0.242, 0.797, 0.782]->[0.465, 0.244, 0.802, 0.813]->[0.424, 0.189, 0.997, 0.976]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1440.0s to 1442.0s, [0.548, 0.26, 0.804, 0.82]->[0.55, 0.253, 0.808, 0.821]->[0.549, 0.256, 0.808, 0.82]: sit, listen to (a person), watch (a person)."]}]}, {"video": "5milLu-6bWI.mp4", "clip": [1411, 1447], "conversations": [{"from": "human", "timestamps": 1441.0, "value": "What actions did the person at bbox [0.55, 0.253, 0.808, 0.821] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.55, 0.253, 0.808, 0.821]: sit, listen to (a person), watch (a person).", "At 1440.0s, 
[0.548, 0.26, 0.804, 0.82]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1447.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.289, 0.253, 0.561, 0.804]: listen to (a person), sit; [0.498, 0.239, 0.818, 0.803]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "5milLu-6bWI.mp4", "clip": [1481, 1547], "conversations": [{"from": "human", "timestamps": 1511.0, "value": "Describe the actions of the person at bbox [0.401, 0.162, 0.996, 0.978] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.401, 0.162, 0.996, 0.978]: sit, listen to (a person), watch (a person).", "From 1508.0s to 1510.0s, [0.345, 0.156, 1.0, 0.976]->[0.414, 0.162, 1.0, 0.977]->[0.3, 0.161, 0.994, 0.975]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1547.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.297, 0.026, 1.0, 0.98]: sit."}]}, {"video": "5milLu-6bWI.mp4", "clip": [1501, 1592], "conversations": [{"from": "human", "timestamps": 1531.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.277, 0.035, 0.985, 0.952]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1592.0, "value": "What actions did the person at bbox [0.569, 0.274, 0.667, 0.701] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.569, 0.274, 0.667, 0.701]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1589.0s to 1591.0s, [0.556, 0.279, 0.666, 0.699]->[0.557, 0.278, 0.666, 0.696]->[0.558, 0.284, 0.664, 0.698]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1588.0s, [0.554, 0.263, 0.664, 0.699]: sit, watch (a person)."]}]}, {"video": "5milLu-6bWI.mp4", "clip": [1553, 1617], "conversations": [{"from": "human", "timestamps": 1583.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.153, 0.022, 0.879, 0.972]: sit; [0.877, 0.039, 0.999, 0.95]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1617.0, "value": "What actions did the person at bbox [0.034, 0.088, 0.757, 0.992] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.034, 0.088, 0.757, 0.992]: sit, talk to (e.g., self, a person, a group).", "From 1615.0s to 1616.0s, [0.089, 0.121, 0.801, 0.975]->[0.044, 0.111, 0.787, 0.959]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "5milLu-6bWI.mp4", "clip": [1605, 1727], "conversations": [{"from": "human", "timestamps": 1635.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.465, 0.06, 1.0, 0.997]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1727.0, "value": "For the person at bbox [0.0, 0.219, 0.223, 0.954], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.219, 0.223, 0.954]: sit, listen to (a person), watch (a person).", "From 1725.0s to 1726.0s, [0.0, 0.178, 0.203, 0.999]->[0.001, 0.211, 0.204, 0.974]: sit, listen to (a person).", "At 1724.0s, [0.0, 0.224, 
0.249, 0.961]: sit, listen to (a person), watch (a person).", "From 1722.0s to 1723.0s, [0.165, 0.057, 1.0, 0.957]->[0.179, 0.06, 1.0, 0.976]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "5milLu-6bWI.mp4", "clip": [1664, 1723], "conversations": [{"from": "human", "timestamps": 1694.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.32, 0.074, 0.833, 0.993]: sit, talk to (e.g., self, a person, a group); [0.697, 0.052, 0.843, 0.288]: carry/hold (an object), sit, read."}, {"from": "human", "timestamps": 1723.0, "value": "What actions did the person at bbox [0.179, 0.06, 1.0, 0.976] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.179, 0.06, 1.0, 0.976]: sit, talk to (e.g., self, a person, a group).", "At 1722.0s, [0.165, 0.057, 1.0, 0.957]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "5milLu-6bWI.mp4", "clip": [1721, 1766], "conversations": [{"from": "human", "timestamps": 1751.0, "value": "For the person at bbox [0.699, 0.264, 0.787, 0.449], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.699, 0.264, 0.787, 0.449]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1750.0s, [0.683, 0.27, 0.786, 0.463]: sit."]}, {"from": "human", "timestamps": 1766.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.235, 0.058, 1.0, 0.964]: listen to (a person), sit."}]}, {"video": "7YpF6DntOYw.mkv", "clip": [908, 954], "conversations": [{"from": "human", "timestamps": 938.0, "value": "Describe the actions of the person at bbox [0.242, 0.079, 0.557, 0.982] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.242, 0.079, 0.557, 0.982]: stand, watch (a person).", "At 937.0s, [0.23, 0.112, 0.506, 0.997]: walk, listen to (a person)."]}, {"from": "human", "timestamps": 954.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.0, 0.014, 0.728, 0.999]: watch (a person), stand, kiss (a person); [0.596, 0.129, 0.997, 0.998]: watch (a person), stand, kiss (a person)."}]}, {"video": "7YpF6DntOYw.mkv", "clip": [876, 969], "conversations": [{"from": "human", "timestamps": 906.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.31, 0.436, 0.491, 0.99]: walk; [0.433, 0.399, 0.531, 0.955]: carry/hold (an object), walk; [0.528, 0.402, 0.654, 0.993]: carry/hold (an object), walk; [0.664, 0.478, 0.759, 0.895]: carry/hold (an object), grab (a person), walk; [0.753, 0.466, 0.827, 0.889]: walk; [0.768, 0.441, 0.995, 1.0]: walk."}, {"from": "human", "timestamps": 969.0, "value": "Describe the actions of the person at bbox [0.011, 0.0, 0.704, 0.975] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.011, 0.0, 0.704, 0.975]: stand, listen to (a person), watch (a person).", "At 968.0s, [0.0, 0.0, 0.715, 0.972]: stand, listen to (a person), watch (a person).", "At 967.0s, [0.012, 0.0, 0.63, 0.941]: stand, listen to (a person)."]}]}, {"video": "7YpF6DntOYw.mkv", "clip": [1020, 1124], "conversations": [{"from": "human", "timestamps": 1050.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": 
"gpt", "value": "At now, [0.42, 0.248, 0.559, 0.772]: listen to (a person), walk; [0.519, 0.302, 0.645, 0.81]: walk."}, {"from": "human", "timestamps": 1124.0, "value": "What actions did the person at bbox [0.207, 0.003, 0.982, 0.975] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.207, 0.003, 0.982, 0.975]: sit, carry/hold (an object).", "At 1123.0s, [0.032, 0.103, 0.519, 0.795]: sit, carry/hold (an object).", "At 1122.0s, [0.012, 0.085, 0.451, 0.833]: sit, carry/hold (an object), drink.", "From 1120.0s to 1121.0s, [0.01, 0.092, 0.43, 0.84]->[0.03, 0.078, 0.425, 0.837]: sit, drink.", "From 1118.0s to 1119.0s, [0.029, 0.092, 0.441, 0.85]->[0.025, 0.093, 0.437, 0.845]: sit, carry/hold (an object), drink.", "At 1117.0s, [0.036, 0.128, 0.444, 0.85]: sit, drink.", "At 1116.0s, [0.069, 0.132, 0.443, 0.802]: sit, carry/hold (an object), drink.", "At 1115.0s, [0.101, 0.108, 0.448, 0.876]: sit, carry/hold (an object), listen to (a person)."]}]}, {"video": "7YpF6DntOYw.mkv", "clip": [1163, 1249], "conversations": [{"from": "human", "timestamps": 1193.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.167, 0.0, 0.941, 0.975]: carry/hold (an object), sit."}, {"from": "human", "timestamps": 1249.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.545, 0.018, 0.636, 0.307]?"}, {"from": "gpt", "value": ["At Now, [0.545, 0.018, 0.636, 0.307]: stand, carry/hold (an object).", "At 1248.0s, [0.561, 0.021, 0.664, 0.309]: stand, carry/hold (an object), watch (a person).", "At 1247.0s, [0.591, 0.052, 0.702, 0.3]: bend/bow (at the waist), carry/hold (an object), watch (a person)."]}]}, {"video": "7YpF6DntOYw.mkv", "clip": [1205, 1240], "conversations": [{"from": "human", "timestamps": 1235.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.325, 0.307, 0.486, 0.998]: carry/hold (an object), walk; [0.525, 0.27, 0.655, 0.977]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1240.0, "value": "What actions did the person at bbox [0.44, 0.359, 0.59, 0.99] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.44, 0.359, 0.59, 0.99]: walk, carry/hold (an object), listen to (a person).", "At 1239.0s, [0.427, 0.364, 0.575, 0.994]: walk, carry/hold (an object), listen to (a person)."]}]}, {"video": "7YpF6DntOYw.mkv", "clip": [1241, 1297], "conversations": [{"from": "human", "timestamps": 1271.0, "value": "Describe the actions of the person at bbox [0.543, 0.0, 0.868, 0.987] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.543, 0.0, 0.868, 0.987]: stand, carry/hold (an object), watch (a person).", "At 1270.0s, [0.447, 0.0, 0.876, 0.976]: stand, listen to (a person), watch (a person).", "From 1268.0s to 1269.0s, [0.466, 0.057, 0.898, 0.993]->[0.456, 0.064, 0.869, 0.984]: bend/bow (at the waist), touch (an object), listen to (a person), watch (a person).", "At 1267.0s, [0.479, 0.034, 0.922, 0.988]: bend/bow (at the waist), stand, touch (an object), listen to (a person), watch (a person).", "At 1266.0s, [0.166, 0.241, 0.609, 0.993]: bend/bow (at the waist), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1265.0s, [0.142, 0.247, 0.643, 1.0]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1264.0s, [0.116, 0.004, 0.508, 1.0]: 
stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1297.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.161, 0.0, 0.875, 0.976]: stand, answer phone."}]}, {"video": "7YpF6DntOYw.mkv", "clip": [1317, 1352], "conversations": [{"from": "human", "timestamps": 1347.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.092, 0.0, 0.756, 0.987]: carry/hold (an object), stand, watch (a person)."}, {"from": "human", "timestamps": 1352.0, "value": "What actions is the person in bbox [0.12, 0.029, 0.648, 0.975] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.12, 0.029, 0.648, 0.975]: stand, play with pets, watch (a person).", "From 1347.0s to 1351.0s, [0.092, 0.0, 0.756, 0.987]->[0.014, 0.081, 0.42, 0.97]->[0.0, 0.054, 0.446, 1.0]->[0.0, 0.044, 0.398, 0.98]->[0.0, 0.028, 0.326, 0.979]: stand, carry/hold (an object), watch (a person).", "At 1346.0s, [0.23, 0.0, 0.796, 0.963]: stand, play with pets.", "At 1345.0s, [0.219, 0.0, 0.755, 0.974]: stand, play with pets, talk to (e.g., self, a person, a group).", "At 1344.0s, [0.186, 0.06, 0.807, 0.983]: stand, carry/hold (an object), play with pets, listen to (a person), watch (a person).", "At 1343.0s, [0.015, 0.052, 0.488, 1.0]: stand, carry/hold (an object), listen to (a person).", "From 1341.0s to 1342.0s, [0.026, 0.069, 0.502, 0.983]->[0.008, 0.077, 0.485, 0.975]: stand, carry/hold (an object).", "At 1340.0s, [0.029, 0.097, 0.5, 0.984]: stand, carry/hold (an object), play with pets.", "From 1338.0s to 1339.0s, [0.092, 0.092, 0.511, 0.975]->[0.036, 0.09, 0.513, 0.98]: sit, play with pets.", "At 1337.0s, [0.078, 0.051, 0.51, 0.987]: sit.", "At 1336.0s, [0.091, 0.071, 0.502, 0.985]: stand."]}]}, {"video": "7YpF6DntOYw.mkv", "clip": [1416, 1472], "conversations": [{"from": "human", "timestamps": 1446.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.437, 0.042, 0.981, 0.981]: listen to (a person), sit, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1472.0, "value": "What actions did the person at bbox [0.434, 0.048, 0.973, 0.981] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.434, 0.048, 0.973, 0.981]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1471.0s, [0.441, 0.035, 0.974, 0.98]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1470.0s, [0.441, 0.042, 0.979, 0.977]: sit, listen to (a person), watch (a person).", "At 1469.0s, [0.455, 0.039, 0.987, 0.981]: sit, stand, listen to (a person), watch (a person)."]}]}, {"video": "7YpF6DntOYw.mkv", "clip": [1479, 1569], "conversations": [{"from": "human", "timestamps": 1509.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.495, 0.057, 0.94, 0.968]: sit."}, {"from": "human", "timestamps": 1569.0, "value": "For the person at bbox [0.749, 0.215, 0.823, 0.534], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.749, 0.215, 0.823, 0.534]: walk, carry/hold (an object).", "From 1566.0s to 1568.0s, [0.841, 0.223, 0.905, 0.493]->[0.8, 0.222, 0.868, 0.51]->[0.771, 0.217, 0.842, 
0.522]: walk, carry/hold (an object)."]}]}, {"video": "7YpF6DntOYw.mkv", "clip": [1549, 1604], "conversations": [{"from": "human", "timestamps": 1579.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.308, 0.206, 0.534, 0.957]: carry/hold (an object), walk; [0.839, 0.222, 0.909, 0.551]: walk."}, {"from": "human", "timestamps": 1604.0, "value": "What actions is the person in bbox [0.897, 0.363, 0.987, 0.794] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.897, 0.363, 0.987, 0.794]: walk, carry/hold (an object).", "At 1603.0s, [0.79, 0.148, 0.844, 0.367]: sit."]}]}, {"video": "7YpF6DntOYw.mkv", "clip": [1621, 1735], "conversations": [{"from": "human", "timestamps": 1651.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.007, 0.0, 0.574, 0.98]: listen to (a person), stand; [0.152, 0.221, 0.258, 0.458]: walk; [0.498, 0.183, 0.932, 0.998]: stand, talk to (e.g., self, a person, a group); [0.864, 0.237, 0.985, 0.922]: watch (a person), stand."}, {"from": "human", "timestamps": 1735.0, "value": "Describe the actions of the person at bbox [0.526, 0.03, 0.996, 0.973] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.526, 0.03, 0.996, 0.973]: stand, listen to (a person).", "At 1734.0s, [0.546, 0.007, 1.0, 0.974]: stand, listen to (a person).", "At 1733.0s, [0.481, 0.007, 1.0, 0.992]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1731.0s to 1732.0s, [0.615, 0.09, 0.985, 0.97]->[0.598, 0.007, 0.999, 0.986]: walk, talk to (e.g., self, a person, a group).", "At 1730.0s, [0.666, 0.111, 0.947, 0.992]: walk, carry/hold (an object).", "From 1727.0s to 1729.0s, [0.544, 0.12, 0.708, 0.779]->[0.619, 0.104, 0.8, 0.78]->[0.709, 0.106, 0.928, 0.945]: walk.", "At 1726.0s, [0.54, 0.108, 0.696, 0.706]: stand, watch (a person).", "At 1725.0s, [0.546, 0.104, 0.711, 0.706]: stand, listen to (a person), watch (a person)."]}]}, {"video": "7YpF6DntOYw.mkv", "clip": [1666, 1732], "conversations": [{"from": "human", "timestamps": 1696.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.043, 0.137, 0.156, 0.971]: watch (a person), stand; [0.292, 0.323, 0.507, 0.87]: sit; [0.33, 0.119, 0.421, 0.464]: watch (a person), stand; [0.504, 0.073, 0.661, 0.655]: listen to (a person), stand, watch (a person); [0.731, 0.435, 1.0, 0.994]: listen to (a person), sit."}, {"from": "human", "timestamps": 1732.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.117, 0.016, 0.648, 1.0]?"}, {"from": "gpt", "value": ["In the current frame, [0.117, 0.016, 0.648, 1.0]: walk, listen to (a person).", "At 1731.0s, [0.156, 0.03, 0.589, 0.988]: walk, listen to (a person).", "At 1730.0s, [0.182, 0.071, 0.569, 0.998]: walk, carry/hold (an object), listen to (a person).", "At 1729.0s, [0.07, 0.037, 0.403, 0.981]: walk, carry/hold (an object).", "At 1728.0s, [0.144, 0.045, 0.356, 0.995]: walk.", "From 1725.0s to 1727.0s, [0.259, 0.067, 0.424, 0.934]->[0.235, 0.057, 0.423, 0.988]->[0.19, 0.074, 0.407, 0.989]: stand, listen to (a person), watch (a person)."]}]}, {"video": "7YpF6DntOYw.mkv", "clip": [1713, 1754], "conversations": [{"from": "human", "timestamps": 1743.0, "value": "What actions is each person performing, 
and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.001, 0.296, 0.094, 0.707]: watch (a person), stand; [0.016, 0.086, 0.474, 0.992]: listen to (a person), stand, watch (a person); [0.403, 0.088, 0.849, 0.983]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.416, 0.57, 0.475, 0.922]: sit; [0.937, 0.283, 1.0, 0.987]: carry/hold (an object), stand, watch (a person)."}, {"from": "human", "timestamps": 1754.0, "value": "Describe the actions of the person at bbox [0.277, 0.0, 0.996, 0.986] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.277, 0.0, 0.996, 0.986]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person).", "At 1753.0s, [0.272, 0.001, 1.0, 1.0]: bend/bow (at the waist), stand, talk to (e.g., self, a person, a group).", "At 1752.0s, [0.175, 0.0, 0.958, 0.998]: bend/bow (at the waist), talk to (e.g., self, a person, a group)."]}]}, {"video": "7g37N3eoQ9s.mkv", "clip": [876, 931], "conversations": [{"from": "human", "timestamps": 906.0, "value": "Describe the actions of the person at bbox [0.185, 0.089, 0.731, 0.984] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.185, 0.089, 0.731, 0.984]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 902.0s to 905.0s, [0.178, 0.107, 0.772, 0.983]->[0.183, 0.076, 0.75, 0.984]->[0.18, 0.088, 0.744, 0.969]->[0.181, 0.085, 0.722, 1.0]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 931.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.173, 0.072, 0.583, 0.969]: carry/hold (an object), sit."}]}, {"video": "7g37N3eoQ9s.mkv", "clip": [916, 964], "conversations": [{"from": "human", "timestamps": 946.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.539, 0.115, 0.879, 0.987]: stand."}, {"from": "human", "timestamps": 964.0, "value": "Describe the actions of the person at bbox [0.419, 0.44, 0.558, 0.855] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.419, 0.44, 0.558, 0.855]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 961.0s to 963.0s, [0.42, 0.432, 0.562, 0.81]->[0.434, 0.432, 0.548, 0.835]->[0.436, 0.438, 0.55, 0.805]: sit, listen to (a person).", "At 960.0s, [0.432, 0.439, 0.558, 0.787]: sit, listen to (a person), watch (a person).", "At 959.0s, [0.435, 0.417, 0.56, 0.816]: sit, listen to (a person).", "At 958.0s, [0.373, 0.409, 0.557, 0.823]: sit, talk to (e.g., self, a person, a group).", "At 957.0s, [0.427, 0.191, 0.567, 0.933]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "7g37N3eoQ9s.mkv", "clip": [941, 989], "conversations": [{"from": "human", "timestamps": 971.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.42, 0.446, 0.569, 0.877]: sit, talk to (e.g., self, a person, a group); [0.571, 0.533, 0.657, 0.787]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 989.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.494, 0.544, 0.674, 0.902]?"}, {"from": "gpt", "value": ["In the current frame, [0.494, 0.544, 0.674, 0.902]: sit, listen to (a person).", "At 988.0s, [0.498, 0.522, 0.652, 0.961]: sit, listen to (a 
person).", "From 985.0s to 987.0s, [0.505, 0.528, 0.679, 0.965]->[0.538, 0.552, 0.675, 0.948]->[0.542, 0.547, 0.683, 0.961]: sit, watch (a person)."]}]}, {"video": "7g37N3eoQ9s.mkv", "clip": [1017, 1051], "conversations": [{"from": "human", "timestamps": 1047.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.134, 0.011, 0.645, 0.996]: stand."}, {"from": "human", "timestamps": 1051.0, "value": "What actions did the person at bbox [0.433, 0.286, 0.579, 0.93] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.433, 0.286, 0.579, 0.93]: stand.", "At 1050.0s, [0.418, 0.337, 0.57, 0.856]: stand.", "At 1049.0s, [0.521, 0.378, 0.704, 0.992]: get up."]}]}, {"video": "7g37N3eoQ9s.mkv", "clip": [1020, 1094], "conversations": [{"from": "human", "timestamps": 1050.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.037, 0.586, 0.98]: watch (a person), stand; [0.418, 0.337, 0.57, 0.856]: stand."}, {"from": "human", "timestamps": 1094.0, "value": "For the person at bbox [0.338, 0.012, 0.974, 0.977], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.338, 0.012, 0.974, 0.977]: sit, talk to (e.g., self, a person, a group).", "At 1093.0s, [0.275, 0.002, 0.904, 0.997]: sit, talk to (e.g., self, a person, a group).", "At 1092.0s, [0.161, 0.0, 0.858, 0.974]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "7g37N3eoQ9s.mkv", "clip": [1072, 1119], "conversations": [{"from": "human", "timestamps": 1102.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.361, 0.19, 0.4, 0.33]: walk."}, {"from": "human", "timestamps": 1119.0, "value": "For the person at bbox [0.175, 0.09, 0.563, 0.984], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.175, 0.09, 0.563, 0.984]: walk.", "From 1117.0s to 1118.0s, [0.131, 0.098, 0.51, 0.985]->[0.166, 0.056, 0.556, 0.987]: walk, watch (a person).", "At 1116.0s, [0.091, 0.0, 0.475, 0.984]: walk.", "From 1112.0s to 1115.0s, [0.146, 0.063, 0.482, 0.987]->[0.147, 0.112, 0.52, 0.987]->[0.081, 0.164, 0.467, 0.985]->[0.073, 0.054, 0.465, 0.99]: walk, watch (a person).", "At 1111.0s, [0.215, 0.061, 0.527, 0.99]: walk.", "From 1109.0s to 1110.0s, [0.314, 0.158, 0.484, 0.973]->[0.25, 0.127, 0.504, 0.983]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 1102.0s to 1108.0s, [0.361, 0.19, 0.4, 0.33]->[0.36, 0.181, 0.4, 0.371]->[0.366, 0.189, 0.415, 0.433]->[0.377, 0.153, 0.44, 0.453]->[0.362, 0.128, 0.439, 0.531]->[0.327, 0.16, 0.429, 0.687]->[0.298, 0.119, 0.431, 0.886]: walk."]}]}, {"video": "7g37N3eoQ9s.mkv", "clip": [1119, 1160], "conversations": [{"from": "human", "timestamps": 1149.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.289, 0.321, 0.501, 0.994]: listen to (a person), stand; [0.451, 0.145, 0.713, 0.99]: stand, talk to (e.g., self, a person, a group); [0.658, 0.251, 0.975, 0.991]: stand."}, {"from": "human", "timestamps": 1160.0, "value": "What actions is the person in bbox [0.587, 0.263, 0.643, 0.617] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the 
current frame, [0.587, 0.263, 0.643, 0.617]: stand.", "At 1159.0s, [0.561, 0.265, 0.627, 0.655]: stand, listen to (a person).", "At 1158.0s, [0.569, 0.249, 0.642, 0.673]: stand.", "At 1157.0s, [0.552, 0.313, 0.634, 0.81]: stand, watch (a person).", "From 1154.0s to 1156.0s, [0.464, 0.312, 0.584, 0.993]->[0.5, 0.308, 0.592, 0.956]->[0.521, 0.284, 0.611, 0.851]: stand.", "From 1149.0s to 1153.0s, [0.289, 0.321, 0.501, 0.994]->[0.13, 0.423, 0.376, 0.988]->[0.206, 0.45, 0.431, 0.995]->[0.278, 0.377, 0.481, 0.992]->[0.374, 0.375, 0.518, 0.998]: stand, listen to (a person)."]}]}, {"video": "7g37N3eoQ9s.mkv", "clip": [1165, 1245], "conversations": [{"from": "human", "timestamps": 1195.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.167, 0.06, 0.362, 0.987]: stand, talk to (e.g., self, a person, a group); [0.466, 0.341, 0.879, 0.99]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1245.0, "value": "What actions did the person at bbox [0.564, 0.256, 0.863, 0.99] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.564, 0.256, 0.863, 0.99]: stand, listen to (a person), watch (a person).", "From 1232.0s to 1244.0s, [0.556, 0.281, 0.859, 0.983]->[0.552, 0.288, 0.86, 0.98]->[0.572, 0.262, 0.866, 1.0]->[0.565, 0.266, 0.867, 0.989]->[0.568, 0.264, 0.864, 0.985]->[0.57, 0.25, 0.867, 0.986]->[0.558, 0.252, 0.861, 0.994]->[0.557, 0.27, 0.868, 0.987]->[0.554, 0.248, 0.862, 0.992]->[0.558, 0.251, 0.862, 0.985]->[0.56, 0.255, 0.856, 0.987]->[0.555, 0.247, 0.852, 0.996]->[0.561, 0.259, 0.859, 0.988]: stand, listen to (a person), watch (a person).", "At 1231.0s, [0.556, 0.273, 0.864, 0.99]: stand, listen to (a person).", "From 1227.0s to 1230.0s, [0.532, 0.282, 0.863, 0.995]->[0.538, 0.275, 0.85, 0.991]->[0.536, 0.276, 0.856, 0.998]->[0.561, 0.263, 0.86, 0.988]: stand, listen to (a person), watch (a person).", "At 1226.0s, [0.607, 0.264, 0.912, 0.99]: stand, watch (a person).", "At 1225.0s, [0.705, 0.265, 0.996, 0.984]: stand, listen to (a person).", "At 1224.0s, [0.729, 0.275, 1.0, 0.995]: stand.", "At 1223.0s, [0.718, 0.272, 1.0, 0.99]: stand, listen to (a person).", "From 1221.0s to 1222.0s, [0.61, 0.28, 0.917, 1.0]->[0.682, 0.275, 0.976, 0.983]: stand.", "From 1216.0s to 1220.0s, [0.448, 0.097, 0.917, 0.988]->[0.428, 0.102, 0.919, 0.991]->[0.428, 0.106, 0.918, 0.996]->[0.46, 0.105, 0.928, 1.0]->[0.6, 0.282, 0.914, 0.981]: stand, talk to (e.g., self, a person, a group).", "From 1213.0s to 1215.0s, [0.418, 0.09, 0.947, 0.989]->[0.438, 0.083, 0.924, 0.995]->[0.427, 0.104, 0.922, 0.999]: stand.", "From 1202.0s to 1212.0s, [0.449, 0.089, 0.981, 0.988]->[0.42, 0.098, 0.957, 0.982]->[0.415, 0.099, 0.949, 0.975]->[0.396, 0.106, 0.985, 0.967]->[0.398, 0.102, 1.0, 0.988]->[0.41, 0.1, 0.991, 0.995]->[0.431, 0.083, 0.984, 0.987]->[0.439, 0.101, 0.98, 0.988]->[0.437, 0.088, 0.968, 0.985]->[0.411, 0.088, 0.96, 0.996]->[0.421, 0.084, 0.951, 0.995]: stand, talk to (e.g., self, a person, a group).", "From 1200.0s to 1201.0s, [0.45, 0.091, 0.978, 0.987]->[0.457, 0.102, 0.985, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1195.0s to 1199.0s, [0.466, 0.341, 0.879, 0.99]->[0.339, 0.352, 0.87, 0.983]->[0.358, 0.337, 0.877, 0.988]->[0.399, 0.333, 0.885, 0.987]->[0.387, 0.33, 0.886, 0.971]: stand, listen to (a person), watch (a person).", "From 1191.0s to 1194.0s, [0.44, 0.091, 0.976, 0.994]->[0.445, 0.089, 0.994, 0.997]->[0.443, 0.101, 
0.985, 0.988]->[0.434, 0.097, 0.975, 0.994]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1188.0s to 1190.0s, [0.573, 0.327, 0.969, 0.995]->[0.54, 0.337, 0.927, 0.977]->[0.385, 0.33, 0.9, 0.986]: stand, listen to (a person), watch (a person).", "At 1187.0s, [0.492, 0.337, 0.962, 0.98]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1186.0s, [0.547, 0.246, 0.963, 0.986]: stand, listen to (a person), watch (a person).", "From 1182.0s to 1185.0s, [0.558, 0.257, 0.99, 0.984]->[0.54, 0.215, 0.959, 0.98]->[0.472, 0.185, 0.935, 0.993]->[0.512, 0.172, 0.947, 0.988]: stand, watch (a person).", "At 1181.0s, [0.516, 0.261, 0.992, 0.989]: stand.", "From 1179.0s to 1180.0s, [0.416, 0.326, 0.979, 0.991]->[0.658, 0.194, 0.996, 0.979]: stand, listen to (a person), watch (a person).", "From 1177.0s to 1178.0s, [0.809, 0.372, 1.0, 0.985]->[0.752, 0.34, 0.997, 0.984]: stand, watch (a person).", "At 1176.0s, [0.765, 0.301, 0.999, 0.98]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1174.0s to 1175.0s, [0.123, 0.104, 0.466, 0.978]->[0.638, 0.186, 0.995, 0.993]: stand, watch (a person).", "From 1171.0s to 1173.0s, [0.0, 0.0, 0.321, 0.983]->[0.01, 0.057, 0.365, 0.977]->[0.085, 0.099, 0.394, 0.993]: walk."]}]}, {"video": "7g37N3eoQ9s.mkv", "clip": [1190, 1246], "conversations": [{"from": "human", "timestamps": 1220.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.149, 0.038, 0.364, 0.943]: listen to (a person), stand, watch (a person); [0.6, 0.282, 0.914, 0.981]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1246.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.007, 0.007, 0.413, 0.98]?"}, {"from": "gpt", "value": ["In the current frame, [0.007, 0.007, 0.413, 0.98]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1245.0s, [0.102, 0.0, 0.428, 0.972]: stand, talk to (e.g., self, a person, a group).", "From 1232.0s to 1244.0s, [0.114, 0.008, 0.428, 0.941]->[0.115, 0.004, 0.432, 0.959]->[0.111, 0.0, 0.424, 0.948]->[0.109, 0.002, 0.424, 0.935]->[0.114, 0.0, 0.425, 0.953]->[0.112, 0.0, 0.426, 0.93]->[0.114, 0.0, 0.415, 0.973]->[0.111, 0.0, 0.41, 0.972]->[0.117, 0.0, 0.432, 0.951]->[0.118, 0.0, 0.424, 0.924]->[0.114, 0.0, 0.407, 0.952]->[0.122, 0.0, 0.419, 0.911]->[0.107, 0.0, 0.409, 0.966]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1230.0s to 1231.0s, [0.103, 0.011, 0.413, 0.962]->[0.129, 0.014, 0.433, 0.951]: stand, talk to (e.g., self, a person, a group).", "From 1228.0s to 1229.0s, [0.128, 0.0, 0.424, 0.963]->[0.115, 0.013, 0.419, 0.941]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1227.0s, [0.127, 0.023, 0.446, 0.974]: stand.", "At 1226.0s, [0.111, 0.0, 0.417, 0.98]: walk, watch (a person).", "From 1223.0s to 1225.0s, [0.041, 0.057, 0.301, 1.0]->[0.001, 0.047, 0.29, 0.975]->[0.138, 0.02, 0.454, 0.976]: walk.", "At 1222.0s, [0.002, 0.093, 0.267, 1.0]: walk, talk to (e.g., self, a person, a group).", "At 1221.0s, [0.104, 0.046, 0.337, 0.994]: stand, watch (a person).", "At 1220.0s, [0.149, 0.038, 0.364, 0.943]: stand, listen to (a person), watch (a person)."]}]}, {"video": "7g37N3eoQ9s.mkv", "clip": [1252, 1315], "conversations": [{"from": "human", "timestamps": 1282.0, "value": "What actions did the person at bbox [0.15, 0.0, 
0.954, 0.994] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.15, 0.0, 0.954, 0.994]: stand.", "From 1278.0s to 1281.0s, [0.106, 0.0, 0.916, 0.991]->[0.149, 0.012, 0.922, 0.962]->[0.133, 0.0, 0.913, 0.99]->[0.144, 0.01, 0.931, 0.974]: stand.", "From 1276.0s to 1277.0s, [0.051, 0.0, 0.942, 0.995]->[0.115, 0.0, 0.91, 0.989]: stand, talk to (e.g., self, a person, a group).", "From 1269.0s to 1275.0s, [0.292, 0.123, 0.45, 0.904]->[0.307, 0.12, 0.461, 0.904]->[0.32, 0.123, 0.472, 0.91]->[0.32, 0.119, 0.471, 0.913]->[0.318, 0.128, 0.47, 0.913]->[0.317, 0.126, 0.469, 0.91]->[0.319, 0.129, 0.47, 0.913]: stand.", "From 1264.0s to 1268.0s, [0.384, 0.154, 0.612, 0.969]->[0.359, 0.155, 0.558, 0.976]->[0.298, 0.14, 0.5, 0.975]->[0.319, 0.118, 0.48, 0.921]->[0.265, 0.12, 0.451, 0.911]: walk.", "At 1263.0s, [0.375, 0.075, 0.655, 0.893]: walk, watch (a person).", "From 1260.0s to 1262.0s, [0.573, 0.124, 0.974, 0.969]->[0.488, 0.093, 0.813, 0.964]->[0.389, 0.091, 0.698, 0.96]: walk, listen to (a person), watch (a person).", "At 1259.0s, [0.743, 0.222, 1.0, 0.984]: walk.", "From 1255.0s to 1258.0s, [0.502, 0.086, 0.999, 0.982]->[0.508, 0.075, 1.0, 0.984]->[0.535, 0.091, 0.994, 0.986]->[0.502, 0.094, 0.99, 1.0]: stand.", "At 1254.0s, [0.476, 0.095, 0.998, 0.98]: stand, touch (an object).", "From 1252.0s to 1253.0s, [0.454, 0.095, 1.0, 0.978]->[0.454, 0.125, 0.976, 0.982]: stand.", "From 1248.0s to 1251.0s, [0.414, 0.178, 0.682, 0.856]->[0.816, 0.187, 0.997, 0.801]->[0.504, 0.111, 0.996, 0.93]->[0.439, 0.083, 0.996, 0.945]: stand, watch (a person).", "From 1232.0s to 1247.0s, [0.556, 0.281, 0.859, 0.983]->[0.552, 0.288, 0.86, 0.98]->[0.572, 0.262, 0.866, 1.0]->[0.565, 0.266, 0.867, 0.989]->[0.568, 0.264, 0.864, 0.985]->[0.57, 0.25, 0.867, 0.986]->[0.558, 0.252, 0.861, 0.994]->[0.557, 0.27, 0.868, 0.987]->[0.554, 0.248, 0.862, 0.992]->[0.558, 0.251, 0.862, 0.985]->[0.56, 0.255, 0.856, 0.987]->[0.555, 0.247, 0.852, 0.996]->[0.561, 0.259, 0.859, 0.988]->[0.564, 0.256, 0.863, 0.99]->[0.594, 0.266, 0.877, 0.989]->[0.526, 0.312, 0.917, 0.987]: stand, listen to (a person), watch (a person).", "At 1231.0s, [0.556, 0.273, 0.864, 0.99]: stand, listen to (a person).", "From 1227.0s to 1230.0s, [0.532, 0.282, 0.863, 0.995]->[0.538, 0.275, 0.85, 0.991]->[0.536, 0.276, 0.856, 0.998]->[0.561, 0.263, 0.86, 0.988]: stand, listen to (a person), watch (a person).", "At 1226.0s, [0.607, 0.264, 0.912, 0.99]: stand, watch (a person).", "At 1225.0s, [0.705, 0.265, 0.996, 0.984]: stand, listen to (a person).", "At 1224.0s, [0.729, 0.275, 1.0, 0.995]: stand.", "At 1223.0s, [0.718, 0.272, 1.0, 0.99]: stand, listen to (a person).", "From 1221.0s to 1222.0s, [0.61, 0.28, 0.917, 1.0]->[0.682, 0.275, 0.976, 0.983]: stand.", "From 1216.0s to 1220.0s, [0.448, 0.097, 0.917, 0.988]->[0.428, 0.102, 0.919, 0.991]->[0.428, 0.106, 0.918, 0.996]->[0.46, 0.105, 0.928, 1.0]->[0.6, 0.282, 0.914, 0.981]: stand, talk to (e.g., self, a person, a group).", "From 1213.0s to 1215.0s, [0.418, 0.09, 0.947, 0.989]->[0.438, 0.083, 0.924, 0.995]->[0.427, 0.104, 0.922, 0.999]: stand.", "From 1202.0s to 1212.0s, [0.449, 0.089, 0.981, 0.988]->[0.42, 0.098, 0.957, 0.982]->[0.415, 0.099, 0.949, 0.975]->[0.396, 0.106, 0.985, 0.967]->[0.398, 0.102, 1.0, 0.988]->[0.41, 0.1, 0.991, 0.995]->[0.431, 0.083, 0.984, 0.987]->[0.439, 0.101, 0.98, 0.988]->[0.437, 0.088, 0.968, 0.985]->[0.411, 0.088, 0.96, 0.996]->[0.421, 0.084, 0.951, 0.995]: stand, talk to (e.g., self, a person, a group).", "From 1200.0s to 1201.0s, [0.45, 0.091, 0.978, 
0.987]->[0.457, 0.102, 0.985, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1195.0s to 1199.0s, [0.466, 0.341, 0.879, 0.99]->[0.339, 0.352, 0.87, 0.983]->[0.358, 0.337, 0.877, 0.988]->[0.399, 0.333, 0.885, 0.987]->[0.387, 0.33, 0.886, 0.971]: stand, listen to (a person), watch (a person).", "From 1191.0s to 1194.0s, [0.44, 0.091, 0.976, 0.994]->[0.445, 0.089, 0.994, 0.997]->[0.443, 0.101, 0.985, 0.988]->[0.434, 0.097, 0.975, 0.994]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1188.0s to 1190.0s, [0.573, 0.327, 0.969, 0.995]->[0.54, 0.337, 0.927, 0.977]->[0.385, 0.33, 0.9, 0.986]: stand, listen to (a person), watch (a person).", "At 1187.0s, [0.492, 0.337, 0.962, 0.98]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1186.0s, [0.547, 0.246, 0.963, 0.986]: stand, listen to (a person), watch (a person).", "From 1182.0s to 1185.0s, [0.558, 0.257, 0.99, 0.984]->[0.54, 0.215, 0.959, 0.98]->[0.472, 0.185, 0.935, 0.993]->[0.512, 0.172, 0.947, 0.988]: stand, watch (a person).", "At 1181.0s, [0.516, 0.261, 0.992, 0.989]: stand.", "From 1179.0s to 1180.0s, [0.416, 0.326, 0.979, 0.991]->[0.658, 0.194, 0.996, 0.979]: stand, listen to (a person), watch (a person).", "From 1177.0s to 1178.0s, [0.809, 0.372, 1.0, 0.985]->[0.752, 0.34, 0.997, 0.984]: stand, watch (a person).", "At 1176.0s, [0.765, 0.301, 0.999, 0.98]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1174.0s to 1175.0s, [0.123, 0.104, 0.466, 0.978]->[0.638, 0.186, 0.995, 0.993]: stand, watch (a person).", "From 1171.0s to 1173.0s, [0.0, 0.0, 0.321, 0.983]->[0.01, 0.057, 0.365, 0.977]->[0.085, 0.099, 0.394, 0.993]: walk."]}, {"from": "human", "timestamps": 1315.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.251, 0.282, 0.972, 0.974]: lie/sleep."}]}, {"video": "7g37N3eoQ9s.mkv", "clip": [1313, 1368], "conversations": [{"from": "human", "timestamps": 1343.0, "value": "What actions is the person in bbox [0.084, 0.095, 0.543, 0.962] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.084, 0.095, 0.543, 0.962]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1340.0s to 1342.0s, [0.018, 0.076, 0.541, 0.997]->[0.285, 0.096, 0.549, 0.94]->[0.06, 0.088, 0.533, 0.968]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1339.0s, [0.091, 0.061, 0.554, 0.998]: stand, carry/hold (an object), throw."]}, {"from": "human", "timestamps": 1368.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.048, 0.324, 0.432, 0.991]: listen to (a person), watch (a person), walk; [0.381, 0.148, 0.965, 0.992]: carry/hold (an object), talk to (e.g., self, a person, a group), walk."}]}, {"video": "7g37N3eoQ9s.mkv", "clip": [1355, 1459], "conversations": [{"from": "human", "timestamps": 1385.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.284, 0.25, 0.413, 0.893]: carry/hold (an object), walk; [0.611, 0.287, 0.747, 0.842]: carry/hold (an object), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1459.0, "value": "For the person at bbox [0.193, 0.113, 0.328, 0.768], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", 
"value": ["In the current frame, [0.193, 0.113, 0.328, 0.768]: stand, smoke.", "At 1458.0s, [0.19, 0.107, 0.321, 0.788]: stand, smoke."]}]}, {"video": "7g37N3eoQ9s.mkv", "clip": [1424, 1468], "conversations": [{"from": "human", "timestamps": 1454.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.197, 0.099, 0.32, 0.778]: listen to (a person), stand."}, {"from": "human", "timestamps": 1468.0, "value": "For the person at bbox [0.193, 0.105, 0.324, 0.771], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.193, 0.105, 0.324, 0.771]: stand, smoke, watch (a person).", "At 1467.0s, [0.174, 0.106, 0.325, 0.792]: stand, smoke.", "At 1466.0s, [0.174, 0.117, 0.323, 0.781]: stand, smoke, talk to (e.g., self, a person, a group).", "At 1465.0s, [0.17, 0.113, 0.323, 0.793]: stand, smoke, listen to (a person), talk to (e.g., self, a person, a group).", "From 1463.0s to 1464.0s, [0.176, 0.129, 0.325, 0.796]->[0.172, 0.132, 0.326, 0.813]: stand, talk to (e.g., self, a person, a group).", "At 1462.0s, [0.173, 0.119, 0.325, 0.783]: stand, smoke, talk to (e.g., self, a person, a group).", "From 1458.0s to 1461.0s, [0.19, 0.107, 0.321, 0.788]->[0.193, 0.113, 0.328, 0.768]->[0.177, 0.11, 0.327, 0.779]->[0.177, 0.116, 0.325, 0.779]: stand, smoke."]}]}, {"video": "7g37N3eoQ9s.mkv", "clip": [1497, 1574], "conversations": [{"from": "human", "timestamps": 1527.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.419, 0.51, 0.652, 0.981]: walk; [0.44, 0.164, 0.599, 0.65]: open (e.g., a window, a car door), walk."}, {"from": "human", "timestamps": 1574.0, "value": "What actions did the person at bbox [0.501, 0.609, 0.774, 0.992] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.501, 0.609, 0.774, 0.992]: sit, listen to (a person).", "From 1572.0s to 1573.0s, [0.505, 0.602, 0.769, 0.99]->[0.506, 0.605, 0.77, 0.989]: sit, listen to (a person).", "At 1571.0s, [0.507, 0.601, 0.776, 0.982]: sit.", "From 1569.0s to 1570.0s, [0.508, 0.601, 0.778, 0.98]->[0.505, 0.601, 0.766, 0.986]: sit, talk to (e.g., self, a person, a group).", "From 1567.0s to 1568.0s, [0.516, 0.607, 0.785, 0.989]->[0.508, 0.598, 0.781, 0.987]: sit."]}]}, {"video": "7g37N3eoQ9s.mkv", "clip": [1591, 1636], "conversations": [{"from": "human", "timestamps": 1621.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.547, 0.068, 0.894, 0.996]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1636.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.725, 0.293, 0.869, 0.954]?"}, {"from": "gpt", "value": ["At Now, [0.725, 0.293, 0.869, 0.954]: stand.", "From 1633.0s to 1635.0s, [0.695, 0.332, 0.862, 0.975]->[0.709, 0.328, 0.868, 0.926]->[0.727, 0.316, 0.866, 0.928]: stand."]}]}, {"video": "7g37N3eoQ9s.mkv", "clip": [1612, 1733], "conversations": [{"from": "human", "timestamps": 1642.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.586, 0.042, 0.903, 0.996]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1733.0, "value": "Describe the actions of the person at bbox [0.229, 0.075, 0.877, 0.988] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.229, 0.075, 
0.877, 0.988]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1725.0s to 1732.0s, [0.303, 0.068, 0.878, 0.98]->[0.241, 0.072, 0.884, 0.977]->[0.252, 0.056, 0.943, 0.974]->[0.276, 0.077, 0.89, 0.979]->[0.246, 0.067, 0.868, 0.979]->[0.273, 0.068, 0.874, 0.977]->[0.177, 0.07, 0.877, 0.973]->[0.223, 0.061, 0.986, 0.993]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "7nHkh4sP5Ks.mkv", "clip": [874, 927], "conversations": [{"from": "human", "timestamps": 904.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.24, 0.292, 0.57, 0.698]: listen to (a person), talk to (e.g., self, a person, a group), walk; [0.584, 0.352, 0.898, 0.819]: listen to (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 927.0, "value": "What actions did the person at bbox [0.084, 0.173, 0.863, 0.927] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.084, 0.173, 0.863, 0.927]: stand, carry/hold (an object).", "From 925.0s to 926.0s, [0.051, 0.145, 0.607, 0.956]->[0.015, 0.0, 0.655, 0.988]: stand, open (e.g., a window, a car door).", "From 920.0s to 924.0s, [0.253, 0.191, 0.595, 0.645]->[0.404, 0.212, 0.687, 0.581]->[0.376, 0.201, 0.567, 0.745]->[0.185, 0.218, 0.516, 0.872]->[0.006, 0.231, 0.469, 0.967]: walk.", "From 918.0s to 919.0s, [0.0, 0.138, 0.432, 0.722]->[0.071, 0.12, 0.454, 0.696]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 917.0s, [0.0, 0.143, 0.457, 0.713]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 915.0s to 916.0s, [0.007, 0.151, 0.445, 0.742]->[0.01, 0.147, 0.446, 0.718]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "7nHkh4sP5Ks.mkv", "clip": [875, 997], "conversations": [{"from": "human", "timestamps": 905.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.205, 0.235, 0.54, 0.733]: listen to (a person), touch (an object), walk; [0.579, 0.297, 0.998, 0.83]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 997.0, "value": "Describe the actions of the person at bbox [0.271, 0.094, 0.994, 0.986] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.271, 0.094, 0.994, 0.986]: sit, answer phone.", "From 994.0s to 996.0s, [0.307, 0.124, 0.992, 0.987]->[0.337, 0.109, 0.993, 0.981]->[0.282, 0.11, 1.0, 0.985]: sit, answer phone.", "At 993.0s, [0.309, 0.122, 0.994, 0.978]: sit, answer phone, lie/sleep.", "From 990.0s to 992.0s, [0.359, 0.124, 0.989, 0.971]->[0.339, 0.11, 0.996, 0.971]->[0.298, 0.128, 0.988, 0.987]: sit, answer phone.", "From 987.0s to 989.0s, [0.295, 0.141, 0.934, 0.961]->[0.315, 0.0, 0.978, 0.984]->[0.371, 0.106, 0.99, 0.981]: answer phone, lie/sleep.", "At 986.0s, [0.32, 0.176, 0.953, 0.957]: sit, answer phone.", "From 970.0s to 984.0s, [0.332, 0.298, 0.968, 0.962]->[0.382, 0.234, 0.724, 0.804]->[0.378, 0.247, 0.712, 0.781]->[0.366, 0.231, 0.719, 0.783]->[0.299, 0.242, 0.704, 0.874]->[0.297, 0.245, 0.73, 0.912]->[0.28, 0.253, 0.914, 0.967]->[0.235, 0.277, 0.823, 0.975]->[0.257, 0.283, 0.822, 0.983]->[0.187, 0.255, 0.828, 0.983]->[0.128, 0.26, 0.938, 0.985]->[0.154, 0.231, 0.985, 0.978]->[0.256, 0.231, 0.978, 0.974]->[0.323, 0.215, 0.96, 0.972]->[0.332, 0.179, 0.976, 0.968]: answer phone, lie/sleep.", "From 962.0s to 
968.0s, [0.461, 0.32, 0.796, 0.683]->[0.441, 0.316, 0.679, 0.633]->[0.438, 0.303, 0.677, 0.644]->[0.418, 0.291, 0.666, 0.647]->[0.4, 0.267, 0.672, 0.669]->[0.462, 0.232, 0.926, 0.693]->[0.381, 0.245, 0.697, 0.76]: answer phone, lie/sleep."]}]}, {"video": "7nHkh4sP5Ks.mkv", "clip": [943, 996], "conversations": [{"from": "human", "timestamps": 973.0, "value": "Describe the actions of the person at bbox [0.366, 0.231, 0.719, 0.783] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.366, 0.231, 0.719, 0.783]: answer phone, lie/sleep.", "From 970.0s to 972.0s, [0.332, 0.298, 0.968, 0.962]->[0.382, 0.234, 0.724, 0.804]->[0.378, 0.247, 0.712, 0.781]: answer phone, lie/sleep.", "From 962.0s to 968.0s, [0.461, 0.32, 0.796, 0.683]->[0.441, 0.316, 0.679, 0.633]->[0.438, 0.303, 0.677, 0.644]->[0.418, 0.291, 0.666, 0.647]->[0.4, 0.267, 0.672, 0.669]->[0.462, 0.232, 0.926, 0.693]->[0.381, 0.245, 0.697, 0.76]: answer phone, lie/sleep."]}, {"from": "human", "timestamps": 996.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.282, 0.11, 1.0, 0.985]: sit, answer phone."}]}, {"video": "7nHkh4sP5Ks.mkv", "clip": [1014, 1056], "conversations": [{"from": "human", "timestamps": 1044.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.039, 0.023, 0.65, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1056.0, "value": "For the person at bbox [0.0, 0.027, 0.652, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.027, 0.652, 1.0]: sit, stand, talk to (e.g., self, a person, a group).", "At 1055.0s, [0.026, 0.012, 0.676, 0.992]: stand, talk to (e.g., self, a person, a group).", "At 1054.0s, [0.048, 0.0, 0.661, 1.0]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "7nHkh4sP5Ks.mkv", "clip": [1009, 1085], "conversations": [{"from": "human", "timestamps": 1039.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.239, 0.164, 0.999, 1.0]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1085.0, "value": "Describe the actions of the person at bbox [0.513, 0.265, 0.766, 0.984] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.513, 0.265, 0.766, 0.984]: walk, listen to (a person).", "At 1084.0s, [0.527, 0.246, 0.796, 0.986]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1083.0s, [0.616, 0.191, 0.958, 0.99]: walk, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "7nHkh4sP5Ks.mkv", "clip": [1075, 1113], "conversations": [{"from": "human", "timestamps": 1105.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.491, 0.188, 0.714, 0.972]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.689, 0.276, 0.891, 0.982]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1113.0, "value": "Describe the actions of the person at bbox [0.009, 0.08, 0.643, 0.982] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.009, 0.08, 0.643, 0.982]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1106.0s to 1111.0s, 
[0.488, 0.184, 0.711, 0.973]->[0.491, 0.179, 0.71, 0.98]->[0.488, 0.18, 0.709, 0.975]->[0.487, 0.182, 0.71, 0.969]->[0.487, 0.183, 0.711, 0.974]->[0.488, 0.176, 0.711, 0.979]: stand, listen to (a person), watch (a person).", "From 1104.0s to 1105.0s, [0.491, 0.193, 0.708, 0.974]->[0.491, 0.188, 0.714, 0.972]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1103.0s, [0.497, 0.19, 0.705, 0.975]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1102.0s, [0.484, 0.181, 0.708, 0.974]: stand, listen to (a person), watch (a person).", "At 1101.0s, [0.476, 0.19, 0.707, 0.976]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1100.0s, [0.512, 0.195, 0.751, 0.974]: stand, listen to (a person), watch (a person).", "From 1097.0s to 1098.0s, [0.404, 0.131, 0.833, 0.965]->[0.449, 0.175, 0.764, 0.975]: walk, listen to (a person), watch (a person).", "At 1096.0s, [0.13, 0.0, 0.861, 0.984]: walk."]}]}, {"video": "7nHkh4sP5Ks.mkv", "clip": [1138, 1188], "conversations": [{"from": "human", "timestamps": 1168.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.033, 0.059, 0.993, 0.988]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1188.0, "value": "What actions did the person at bbox [0.014, 0.106, 0.423, 0.982] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.014, 0.106, 0.423, 0.982]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1187.0s, [0.011, 0.108, 0.407, 0.991]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1186.0s, [0.014, 0.079, 0.428, 0.994]: stand, talk to (e.g., self, a person, a group).", "At 1185.0s, [0.013, 0.1, 0.442, 0.985]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1180.0s to 1184.0s, [0.01, 0.141, 0.436, 0.987]->[0.014, 0.148, 0.418, 0.992]->[0.008, 0.128, 0.426, 0.996]->[0.008, 0.119, 0.409, 0.989]->[0.017, 0.097, 0.442, 0.989]: stand, talk to (e.g., self, a person, a group).", "At 1179.0s, [0.017, 0.159, 0.437, 0.991]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1178.0s, [0.056, 0.172, 0.441, 0.982]: stand, talk to (e.g., self, a person, a group).", "At 1177.0s, [0.041, 0.183, 0.448, 0.992]: stand, listen to (a person).", "At 1175.0s, [0.04, 0.223, 0.411, 0.991]: stand, listen to (a person).", "At 1174.0s, [0.144, 0.187, 0.46, 0.995]: stand, carry/hold (an object), give/serve (an object) to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1173.0s, [0.164, 0.194, 0.428, 0.985]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1172.0s, [0.103, 0.159, 0.379, 0.935]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "7nHkh4sP5Ks.mkv", "clip": [1209, 1252], "conversations": [{"from": "human", "timestamps": 1239.0, "value": "Describe the actions of the person at bbox [0.324, 0.219, 0.506, 0.872] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.324, 0.219, 0.506, 0.872]: walk, listen to (a person), watch (a person).", "At 1238.0s, [0.26, 0.246, 0.472, 0.992]: walk, listen to (a person), watch (a person).", "From 1236.0s to 1237.0s, [0.198, 0.246, 0.464, 0.991]->[0.188, 0.247, 0.461, 0.989]: walk.", "At 1235.0s, [0.423, 0.112, 0.757, 0.964]: walk, talk to (e.g., self, a 
person, a group).", "At 1234.0s, [0.614, 0.02, 0.999, 0.992]: walk, listen to (a person)."]}, {"from": "human", "timestamps": 1252.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.195, 0.188, 0.704, 0.984]: sit, talk to (e.g., self, a person, a group); [0.535, 0.044, 1.0, 1.0]: listen to (a person), sit, watch (a person)."}]}, {"video": "7nHkh4sP5Ks.mkv", "clip": [1269, 1303], "conversations": [{"from": "human", "timestamps": 1299.0, "value": "What actions is the person in bbox [0.328, 0.115, 0.991, 0.982] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.328, 0.115, 0.991, 0.982]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1298.0s, [0.298, 0.06, 0.992, 0.985]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1297.0s, [0.377, 0.07, 0.991, 0.976]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1303.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.134, 0.093, 0.868, 0.978]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "7nHkh4sP5Ks.mkv", "clip": [1237, 1328], "conversations": [{"from": "human", "timestamps": 1267.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.042, 0.167, 0.516, 0.953]: listen to (a person), sit, watch (a person); [0.646, 0.204, 0.953, 0.706]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1328.0, "value": "Describe the actions of the person at bbox [0.011, 0.104, 0.713, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.011, 0.104, 0.713, 1.0]: sit.", "At 1327.0s, [0.019, 0.106, 0.674, 0.971]: sit, watch (a person).", "At 1326.0s, [0.01, 0.117, 0.734, 0.986]: sit, listen to (a person), watch (a person)."]}]}, {"video": "7nHkh4sP5Ks.mkv", "clip": [1296, 1345], "conversations": [{"from": "human", "timestamps": 1326.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.01, 0.117, 0.734, 0.986]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1345.0, "value": "For the person at bbox [0.013, 0.05, 0.768, 0.977], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.013, 0.05, 0.768, 0.977]: sit, talk to (e.g., self, a person, a group).", "At 1344.0s, [0.018, 0.061, 0.775, 0.985]: sit, talk to (e.g., self, a person, a group).", "At 1343.0s, [0.005, 0.028, 0.499, 0.984]: sit."]}]}, {"video": "7nHkh4sP5Ks.mkv", "clip": [1370, 1474], "conversations": [{"from": "human", "timestamps": 1400.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.002, 0.76, 0.996]: listen to (a person), stand, watch (a person); [0.08, 0.091, 0.956, 0.973]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1474.0, "value": "What actions did the person at bbox [0.348, 0.313, 0.644, 0.931] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.348, 0.313, 0.644, 0.931]: walk.", "At 1471.0s, [0.345, 0.171, 1.0, 0.986]: walk.", "From 1468.0s to 1470.0s, [0.246, 0.07, 
1.0, 0.975]->[0.246, 0.067, 0.99, 0.978]->[0.184, 0.114, 0.951, 0.975]: stand, carry/hold (an object).", "From 1465.0s to 1467.0s, [0.372, 0.143, 0.933, 0.982]->[0.481, 0.2, 0.869, 0.983]->[0.393, 0.102, 0.892, 0.979]: walk.", "At 1463.0s, [0.129, 0.005, 0.708, 0.971]: walk."]}]}, {"video": "7nHkh4sP5Ks.mkv", "clip": [1478, 1511], "conversations": [{"from": "human", "timestamps": 1508.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.095, 0.429, 0.521, 0.985]: walk."}, {"from": "human", "timestamps": 1511.0, "value": "Describe the actions of the person at bbox [0.653, 0.18, 0.877, 0.979] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.653, 0.18, 0.877, 0.979]: walk, enter.", "From 1507.0s to 1510.0s, [0.02, 0.177, 0.678, 0.976]->[0.095, 0.429, 0.521, 0.985]->[0.121, 0.313, 0.387, 0.991]->[0.659, 0.233, 0.809, 0.865]: walk.", "From 1501.0s to 1506.0s, [0.004, 0.111, 0.581, 1.0]->[0.0, 0.114, 0.517, 0.997]->[0.011, 0.178, 0.585, 1.0]->[0.002, 0.165, 0.598, 0.996]->[0.002, 0.161, 0.662, 0.997]->[0.007, 0.176, 0.698, 1.0]: stand."]}]}, {"video": "7nHkh4sP5Ks.mkv", "clip": [1507, 1591], "conversations": [{"from": "human", "timestamps": 1537.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.237, 0.117, 1.0, 0.979]: stand, answer phone."}, {"from": "human", "timestamps": 1591.0, "value": "What actions did the person at bbox [0.079, 0.091, 0.907, 0.976] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.079, 0.091, 0.907, 0.976]: stand, answer phone.", "From 1586.0s to 1590.0s, [0.077, 0.165, 0.77, 1.0]->[0.105, 0.091, 0.864, 0.984]->[0.181, 0.088, 0.939, 0.971]->[0.184, 0.087, 0.918, 0.986]->[0.096, 0.104, 0.986, 0.968]: stand, answer phone.", "At 1585.0s, [0.025, 0.179, 0.788, 0.968]: stand, answer phone, talk to (e.g., self, a person, a group).", "From 1579.0s to 1584.0s, [0.039, 0.077, 0.948, 1.0]->[0.044, 0.114, 0.961, 0.993]->[0.0, 0.082, 0.918, 0.983]->[0.007, 0.079, 0.905, 0.99]->[0.024, 0.08, 0.954, 0.987]->[0.0, 0.0, 0.815, 0.977]: stand, answer phone."]}]}, {"video": "7nHkh4sP5Ks.mkv", "clip": [1531, 1584], "conversations": [{"from": "human", "timestamps": 1561.0, "value": "What actions is the person in bbox [0.018, 0.14, 0.717, 0.971] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.018, 0.14, 0.717, 0.971]: sit, answer phone.", "At 1560.0s, [0.026, 0.114, 0.763, 0.984]: sit, answer phone.", "At 1559.0s, [0.0, 0.101, 0.784, 0.984]: sit, stand, answer phone.", "At 1558.0s, [0.017, 0.086, 0.753, 0.986]: sit, answer phone.", "At 1556.0s, [0.086, 0.184, 0.84, 0.994]: bend/bow (at the waist), answer phone."]}, {"from": "human", "timestamps": 1584.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.0, 0.815, 0.977]: stand, answer phone."}]}, {"video": "7nHkh4sP5Ks.mkv", "clip": [1590, 1626], "conversations": [{"from": "human", "timestamps": 1620.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.553, 0.233, 0.896, 0.65]: sit, answer phone."}, {"from": "human", "timestamps": 1626.0, "value": "For the person at bbox [0.577, 0.213, 0.922, 0.637], what are their actions in the current frame and in prior frames?"}, 
{"from": "gpt", "value": ["At Now, [0.577, 0.213, 0.922, 0.637]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1625.0s, [0.582, 0.212, 0.938, 0.622]: sit, carry/hold (an object).", "From 1623.0s to 1624.0s, [0.532, 0.22, 0.91, 0.687]->[0.592, 0.202, 0.956, 0.652]: sit, carry/hold (an object), put down.", "At 1622.0s, [0.561, 0.23, 0.896, 0.651]: sit, carry/hold (an object).", "From 1620.0s to 1621.0s, [0.553, 0.233, 0.896, 0.65]->[0.56, 0.232, 0.893, 0.709]: sit, answer phone.", "From 1601.0s to 1617.0s, [0.578, 0.177, 0.857, 0.544]->[0.575, 0.185, 0.844, 0.53]->[0.601, 0.214, 0.827, 0.52]->[0.608, 0.247, 0.833, 0.533]->[0.596, 0.29, 0.979, 0.801]->[0.604, 0.312, 0.975, 0.818]->[0.621, 0.325, 0.968, 0.807]->[0.447, 0.218, 0.88, 0.816]->[0.386, 0.196, 0.85, 0.701]->[0.416, 0.213, 0.857, 0.717]->[0.447, 0.214, 0.871, 0.729]->[0.445, 0.225, 0.868, 0.716]->[0.472, 0.224, 0.881, 0.701]->[0.496, 0.221, 0.891, 0.694]->[0.513, 0.232, 0.886, 0.674]->[0.536, 0.227, 0.899, 0.658]->[0.531, 0.23, 0.893, 0.662]: sit, answer phone."]}]}, {"video": "7nHkh4sP5Ks.mkv", "clip": [1633, 1699], "conversations": [{"from": "human", "timestamps": 1663.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.423, 0.163, 0.931, 0.976]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1699.0, "value": "What actions did the person at bbox [0.219, 0.27, 0.546, 0.878] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.219, 0.27, 0.546, 0.878]: sit.", "At 1698.0s, [0.246, 0.285, 0.548, 0.867]: sit, listen to (a person), watch (a person).", "At 1697.0s, [0.248, 0.282, 0.56, 0.875]: sit, listen to (a person).", "From 1695.0s to 1696.0s, [0.244, 0.28, 0.558, 0.868]->[0.244, 0.285, 0.557, 0.866]: sit, listen to (a person), watch (a person)."]}]}, {"video": "7nHkh4sP5Ks.mkv", "clip": [1678, 1729], "conversations": [{"from": "human", "timestamps": 1708.0, "value": "For the person at bbox [0.522, 0.38, 0.905, 0.948], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.522, 0.38, 0.905, 0.948]: sit, listen to (a person), watch (a person).", "From 1706.0s to 1707.0s, [0.522, 0.382, 0.902, 0.959]->[0.531, 0.382, 0.903, 0.947]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1729.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.156, 0.095, 0.545, 0.983]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "7nHkh4sP5Ks.mkv", "clip": [1737, 1771], "conversations": [{"from": "human", "timestamps": 1767.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.012, 0.11, 0.807, 0.992]?"}, {"from": "gpt", "value": ["At Now, [0.012, 0.11, 0.807, 0.992]: sit, talk to (e.g., self, a person, a group).", "From 1763.0s to 1766.0s, [0.001, 0.079, 0.802, 0.973]->[0.014, 0.072, 0.823, 0.986]->[0.0, 0.071, 0.805, 0.982]->[0.0, 0.092, 0.797, 0.971]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1771.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.013, 0.105, 0.794, 0.984]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "8JSxLhDMGtE.mkv", "clip": [1123, 1166], "conversations": [{"from": "human", "timestamps": 1153.0, "value": "Describe the locations and 
actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.588, 0.476, 0.634, 0.57]: watch (a person), sit; [0.659, 0.265, 0.776, 0.902]: touch (an object), close (e.g., a door, a box), walk; [0.78, 0.338, 0.841, 0.59]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1166.0, "value": "Describe the actions of the person at bbox [0.074, 0.077, 0.615, 0.93] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.074, 0.077, 0.615, 0.93]: sit, answer phone, touch (an object).", "From 1158.0s to 1165.0s, [0.005, 0.07, 0.608, 0.923]->[0.0, 0.075, 0.625, 0.929]->[0.036, 0.084, 0.609, 0.929]->[0.095, 0.066, 0.595, 0.937]->[0.086, 0.075, 0.613, 0.932]->[0.088, 0.079, 0.621, 0.928]->[0.056, 0.08, 0.618, 0.923]->[0.093, 0.071, 0.611, 0.925]: sit, answer phone, touch (an object).", "At 1157.0s, [0.125, 0.073, 0.59, 0.913]: sit, answer phone."]}]}, {"video": "8JSxLhDMGtE.mkv", "clip": [1622, 1699], "conversations": [{"from": "human", "timestamps": 1652.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.068, 0.183, 0.263, 0.955]: carry/hold (an object), touch (an object), walk."}, {"from": "human", "timestamps": 1699.0, "value": "Describe the actions of the person at bbox [0.448, 0.148, 0.914, 0.995] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.448, 0.148, 0.914, 0.995]: run/jog, carry/hold (an object).", "At 1698.0s, [0.22, 0.108, 0.652, 0.968]: run/jog, carry/hold (an object)."]}]}, {"video": "8JSxLhDMGtE.mkv", "clip": [1751, 1787], "conversations": [{"from": "human", "timestamps": 1781.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.299, 0.122, 0.466, 0.855]: carry/hold (an object), stand; [0.429, 0.061, 0.663, 0.988]: watch (a person), talk to (e.g., self, a person, a group), walk; [0.632, 0.135, 0.784, 0.989]: carry/hold (an object), listen to (a person), watch (a person), walk; [0.766, 0.062, 0.971, 0.969]: listen to (a person), watch (a person), walk."}, {"from": "human", "timestamps": 1787.0, "value": "Describe the actions of the person at bbox [0.562, 0.234, 0.825, 0.991] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.562, 0.234, 0.825, 0.991]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1786.0s, [0.506, 0.246, 0.773, 0.962]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1785.0s, [0.64, 0.321, 0.878, 0.982]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1784.0s, [0.569, 0.286, 0.772, 0.983]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1783.0s, [0.589, 0.247, 0.774, 0.985]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1782.0s, [0.603, 0.226, 0.784, 0.988]: walk, carry/hold (an object), listen to (a person).", "From 1780.0s to 1781.0s, [0.577, 0.241, 0.736, 0.998]->[0.632, 0.135, 0.784, 0.989]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1779.0s, [0.528, 0.182, 0.709, 0.989]: walk, listen to (a person).", "At 1778.0s, [0.479, 0.324, 0.623, 0.993]: walk, listen to (a person), watch (a person).", "At 1777.0s, 
[0.506, 0.301, 0.614, 0.976]: walk, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "8VZEwOCQ8bc.mkv", "clip": [879, 920], "conversations": [{"from": "human", "timestamps": 909.0, "value": "What actions is the person in bbox [0.081, 0.162, 0.464, 0.995] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.081, 0.162, 0.464, 0.995]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 906.0s to 908.0s, [0.093, 0.178, 0.421, 0.978]->[0.078, 0.166, 0.437, 0.984]->[0.08, 0.163, 0.44, 0.991]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 905.0s, [0.077, 0.147, 0.432, 1.0]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 903.0s to 904.0s, [0.076, 0.147, 0.422, 0.992]->[0.087, 0.171, 0.412, 0.994]: sit, talk to (e.g., self, a person, a group).", "At 902.0s, [0.083, 0.136, 0.437, 0.976]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 920.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.107, 0.148, 0.437, 0.742]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.453, 0.117, 0.909, 0.981]: carry/hold (an object), sit, listen to (a person)."}]}, {"video": "8VZEwOCQ8bc.mkv", "clip": [908, 1004], "conversations": [{"from": "human", "timestamps": 938.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.208, 0.156, 0.558, 0.93]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.524, 0.069, 0.977, 0.979]: carry/hold (an object), sit, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1004.0, "value": "For the person at bbox [0.134, 0.232, 0.388, 0.938], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.134, 0.232, 0.388, 0.938]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1003.0s, [0.163, 0.263, 0.431, 0.99]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1002.0s, [0.159, 0.269, 0.368, 0.973]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1001.0s, [0.233, 0.294, 0.472, 0.993]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 999.0s to 1000.0s, [0.262, 0.283, 0.494, 0.98]->[0.256, 0.283, 0.463, 0.988]: stand, carry/hold (an object), listen to (a person).", "From 996.0s to 998.0s, [0.278, 0.235, 0.539, 0.99]->[0.266, 0.236, 0.491, 0.962]->[0.268, 0.252, 0.469, 0.996]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 995.0s, [0.286, 0.252, 0.509, 0.987]: stand, listen to (a person), watch (a person).", "From 993.0s to 994.0s, [0.308, 0.247, 0.525, 0.976]->[0.319, 0.26, 0.504, 0.976]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "8VZEwOCQ8bc.mkv", "clip": [1119, 1158], "conversations": [{"from": "human", "timestamps": 1149.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.022, 0.172, 0.466, 0.979]: listen to (a person), watch (a person); [0.753, 0.205, 1.0, 1.0]: walk."}, {"from": "human", "timestamps": 1158.0, "value": "What actions is the person in bbox [0.432, 
0.258, 0.889, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.432, 0.258, 0.889, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1155.0s to 1157.0s, [0.486, 0.249, 0.911, 0.981]->[0.438, 0.256, 0.904, 0.984]->[0.429, 0.258, 0.9, 0.985]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1154.0s, [0.509, 0.258, 0.923, 1.0]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1152.0s to 1153.0s, [0.572, 0.229, 1.0, 1.0]->[0.502, 0.237, 0.91, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1151.0s, [0.607, 0.235, 0.992, 0.996]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1149.0s to 1150.0s, [0.753, 0.205, 1.0, 1.0]->[0.705, 0.251, 1.0, 1.0]: walk.", "At 1148.0s, [0.645, 0.197, 1.0, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1146.0s to 1147.0s, [0.518, 0.102, 0.996, 0.967]->[0.768, 0.175, 1.0, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "8VZEwOCQ8bc.mkv", "clip": [1112, 1218], "conversations": [{"from": "human", "timestamps": 1142.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.223, 0.061, 0.643, 0.965]: listen to (a person), watch (a person), walk; [0.588, 0.099, 1.0, 0.982]: carry/hold (an object), watch (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1218.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.295, 0.172, 0.842, 0.987]?"}, {"from": "gpt", "value": ["In the current frame, [0.295, 0.172, 0.842, 0.987]: walk, carry/hold (an object).", "At 1217.0s, [0.027, 0.149, 0.518, 0.996]: walk, carry/hold (an object), dress/put on clothing.", "From 1215.0s to 1216.0s, [0.204, 0.217, 0.602, 0.991]->[0.191, 0.213, 0.575, 1.0]: stand, carry/hold (an object).", "At 1214.0s, [0.196, 0.224, 0.604, 0.982]: stand, press, touch (an object).", "At 1213.0s, [0.183, 0.238, 0.537, 1.0]: stand, carry/hold (an object).", "At 1212.0s, [0.226, 0.2, 0.632, 0.989]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1211.0s, [0.007, 0.078, 0.452, 0.974]: walk, carry/hold (an object)."]}]}, {"video": "8VZEwOCQ8bc.mkv", "clip": [1175, 1245], "conversations": [{"from": "human", "timestamps": 1205.0, "value": "For the person at bbox [0.738, 0.23, 0.914, 0.856], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.738, 0.23, 0.914, 0.856]: walk.", "From 1202.0s to 1204.0s, [0.85, 0.233, 0.998, 0.976]->[0.819, 0.257, 0.992, 0.981]->[0.796, 0.237, 0.997, 0.93]: walk."]}, {"from": "human", "timestamps": 1245.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.457, 0.207, 0.619, 0.805]: carry/hold (an object), stand."}]}, {"video": "8VZEwOCQ8bc.mkv", "clip": [1252, 1319], "conversations": [{"from": "human", "timestamps": 1282.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.267, 0.204, 0.85, 0.958]: carry/hold (an object), sit."}, {"from": "human", "timestamps": 
1319.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.389, 0.034, 0.932, 1.0]?"}, {"from": "gpt", "value": ["In the current frame, [0.389, 0.034, 0.932, 1.0]: stand.", "At 1318.0s, [0.387, 0.095, 0.948, 0.995]: stand.", "From 1316.0s to 1317.0s, [0.243, 0.102, 0.644, 0.994]->[0.397, 0.23, 0.821, 0.994]: walk.", "At 1315.0s, [0.277, 0.07, 0.923, 1.0]: stand, walk, press.", "At 1314.0s, [0.436, 0.226, 0.896, 0.989]: walk."]}]}, {"video": "8VZEwOCQ8bc.mkv", "clip": [1361, 1491], "conversations": [{"from": "human", "timestamps": 1391.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.17, 0.107, 0.863, 0.982]: stand."}, {"from": "human", "timestamps": 1491.0, "value": "Describe the actions of the person at bbox [0.311, 0.128, 1.0, 0.963] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.311, 0.128, 1.0, 0.963]: sit, ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group).", "From 1489.0s to 1490.0s, [0.559, 0.117, 1.0, 0.868]->[0.312, 0.126, 1.0, 0.97]: sit, ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group), watch (a person).", "From 1487.0s to 1488.0s, [0.535, 0.14, 1.0, 0.882]->[0.447, 0.124, 0.999, 0.996]: sit, ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group).", "From 1483.0s to 1486.0s, [0.33, 0.107, 1.0, 0.961]->[0.546, 0.125, 1.0, 0.873]->[0.556, 0.12, 1.0, 0.879]->[0.583, 0.143, 1.0, 0.872]: sit, ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group), watch (a person).", "At 1482.0s, [0.554, 0.114, 1.0, 0.855]: sit, ride (e.g., a bike, a car, a horse), listen to (a person), watch (a person)."]}]}, {"video": "8VZEwOCQ8bc.mkv", "clip": [1513, 1576], "conversations": [{"from": "human", "timestamps": 1543.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.065, 0.028, 0.698, 0.971]: ride (e.g., a bike, a car, a horse), sit."}, {"from": "human", "timestamps": 1576.0, "value": "What actions is the person in bbox [0.034, 0.0, 0.559, 0.956] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.034, 0.0, 0.559, 0.956]: stand, answer phone.", "From 1573.0s to 1575.0s, [0.029, 0.0, 0.591, 0.946]->[0.024, 0.0, 0.599, 0.93]->[0.013, 0.028, 0.683, 0.994]: stand, answer phone."]}]}, {"video": "8VZEwOCQ8bc.mkv", "clip": [1627, 1665], "conversations": [{"from": "human", "timestamps": 1657.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.126, 0.102, 1.0, 0.976]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1665.0, "value": "What actions did the person at bbox [0.035, 0.376, 0.779, 0.99] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.035, 0.376, 0.779, 0.99]: sit, hand shake, listen to (a person), watch (a person).", "At 1664.0s, [0.001, 0.144, 0.421, 0.994]: sit, listen to (a person), watch (a person)."]}]}, {"video": "8VZEwOCQ8bc.mkv", "clip": [1625, 1693], "conversations": [{"from": "human", "timestamps": 1655.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.055, 0.099, 0.982, 0.974]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1693.0, "value": 
"Can you tell me the current and past actions of the person located in bbox [0.229, 0.043, 1.0, 0.999]?"}, {"from": "gpt", "value": ["In the current frame, [0.229, 0.043, 1.0, 0.999]: sit, talk to (e.g., self, a person, a group).", "From 1684.0s to 1692.0s, [0.339, 0.004, 1.0, 0.998]->[0.331, 0.0, 1.0, 0.994]->[0.262, 0.025, 1.0, 1.0]->[0.273, 0.012, 1.0, 1.0]->[0.256, 0.023, 1.0, 0.994]->[0.223, 0.03, 1.0, 0.995]->[0.239, 0.044, 1.0, 1.0]->[0.242, 0.038, 1.0, 0.995]->[0.194, 0.033, 1.0, 1.0]: sit, talk to (e.g., self, a person, a group).", "From 1681.0s to 1683.0s, [0.353, 0.177, 0.987, 0.973]->[0.381, 0.095, 1.0, 1.0]->[0.339, 0.027, 1.0, 1.0]: sit.", "From 1679.0s to 1680.0s, [0.358, 0.364, 0.942, 0.972]->[0.414, 0.276, 0.961, 0.979]: sit, watch (a person).", "At 1678.0s, [0.594, 0.444, 0.996, 0.993]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1677.0s, [0.664, 0.442, 0.994, 0.994]: sit, listen to (a person).", "At 1676.0s, [0.711, 0.442, 0.995, 0.991]: sit, listen to (a person), watch (a person)."]}]}, {"video": "8VZEwOCQ8bc.mkv", "clip": [1651, 1720], "conversations": [{"from": "human", "timestamps": 1681.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.353, 0.177, 0.987, 0.973]: sit."}, {"from": "human", "timestamps": 1720.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.125, 0.2, 0.688, 0.962]?"}, {"from": "gpt", "value": ["At Now, [0.125, 0.2, 0.688, 0.962]: sit, listen to (a person), watch (a person).", "From 1718.0s to 1719.0s, [0.134, 0.236, 0.75, 0.981]->[0.142, 0.205, 0.72, 0.982]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1716.0s to 1717.0s, [0.125, 0.214, 0.791, 0.987]->[0.092, 0.227, 0.769, 1.0]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1713.0s to 1715.0s, [0.056, 0.114, 0.876, 0.999]->[0.072, 0.167, 0.854, 0.985]->[0.052, 0.18, 0.814, 0.978]: sit, talk to (e.g., self, a person, a group).", "At 1712.0s, [0.026, 0.114, 0.9, 0.987]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1700.0s to 1711.0s, [0.03, 0.105, 0.923, 0.999]->[0.014, 0.142, 0.941, 0.989]->[0.02, 0.114, 0.944, 1.0]->[0.032, 0.148, 0.924, 0.987]->[0.028, 0.131, 0.91, 0.992]->[0.037, 0.131, 0.902, 0.987]->[0.032, 0.153, 0.908, 0.983]->[0.031, 0.139, 0.92, 0.998]->[0.02, 0.157, 0.921, 0.982]->[0.01, 0.134, 0.915, 0.999]->[0.03, 0.15, 0.906, 0.98]->[0.02, 0.129, 0.913, 0.989]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "8VZEwOCQ8bc.mkv", "clip": [1741, 1795], "conversations": [{"from": "human", "timestamps": 1771.0, "value": "For the person at bbox [0.044, 0.094, 0.827, 0.992], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.044, 0.094, 0.827, 0.992]: sit, write, talk to (e.g., self, a person, a group).", "From 1768.0s to 1770.0s, [0.045, 0.06, 0.837, 0.986]->[0.055, 0.078, 0.834, 0.991]->[0.044, 0.063, 0.829, 0.98]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1795.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.087, 0.056, 0.725, 0.985]: carry/hold (an object), stand, listen to (a person); [0.71, 0.193, 0.992, 0.834]: carry/hold (an object), stand, watch (a person)."}]}, {"video": "8aMv-ZGD4ic.mkv", 
"clip": [903, 952], "conversations": [{"from": "human", "timestamps": 933.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.085, 0.072, 0.874, 0.988]?"}, {"from": "gpt", "value": ["In the current frame, [0.085, 0.072, 0.874, 0.988]: stand, listen (e.g., to music), sing to (e.g., self, a person, a group).", "At 932.0s, [0.068, 0.055, 0.827, 0.985]: stand, listen (e.g., to music), sing to (e.g., self, a person, a group).", "At 931.0s, [0.075, 0.104, 0.888, 0.995]: sit, listen (e.g., to music), sing to (e.g., self, a person, a group).", "From 927.0s to 930.0s, [0.12, 0.322, 0.347, 0.671]->[0.109, 0.313, 0.367, 0.661]->[0.143, 0.308, 0.364, 0.668]->[0.124, 0.323, 0.371, 0.669]: sit, carry/hold (an object), listen (e.g., to music), sing to (e.g., self, a person, a group).", "From 923.0s to 926.0s, [0.106, 0.304, 0.311, 0.644]->[0.108, 0.307, 0.325, 0.661]->[0.113, 0.323, 0.334, 0.655]->[0.112, 0.321, 0.33, 0.651]: sit, carry/hold (an object), listen (e.g., to music).", "From 921.0s to 922.0s, [0.108, 0.294, 0.329, 0.651]->[0.093, 0.308, 0.285, 0.631]: sit, carry/hold (an object), listen (e.g., to music), sing to (e.g., self, a person, a group).", "At 920.0s, [0.146, 0.302, 0.338, 0.617]: sit, listen (e.g., to music), sing to (e.g., self, a person, a group).", "At 919.0s, [0.162, 0.278, 0.299, 0.508]: sit, listen (e.g., to music), listen to (a person).", "At 918.0s, [0.186, 0.286, 0.3, 0.518]: sit, carry/hold (an object), listen (e.g., to music), sing to (e.g., self, a person, a group).", "At 917.0s, [0.177, 0.277, 0.281, 0.49]: sit, listen (e.g., to music), hand wave, sing to (e.g., self, a person, a group).", "From 915.0s to 916.0s, [0.129, 0.268, 0.286, 0.499]->[0.168, 0.262, 0.279, 0.475]: sit, listen (e.g., to music).", "At 914.0s, [0.263, 0.351, 0.372, 0.516]: sit, listen (e.g., to music), sing to (e.g., self, a person, a group).", "From 910.0s to 913.0s, [0.263, 0.352, 0.367, 0.51]->[0.262, 0.352, 0.375, 0.509]->[0.265, 0.353, 0.377, 0.509]->[0.263, 0.359, 0.373, 0.511]: sit, listen (e.g., to music).", "At 909.0s, [0.262, 0.346, 0.384, 0.513]: sit, listen (e.g., to music), sing to (e.g., self, a person, a group).", "At 908.0s, [0.264, 0.352, 0.347, 0.51]: sit, carry/hold (an object), listen (e.g., to music), talk to (e.g., self, a person, a group).", "At 907.0s, [0.264, 0.349, 0.37, 0.51]: sit, listen (e.g., to music).", "At 906.0s, [0.267, 0.342, 0.378, 0.516]: sit, listen (e.g., to music), sing to (e.g., self, a person, a group), watch (a person).", "From 903.0s to 905.0s, [0.262, 0.351, 0.351, 0.513]->[0.264, 0.35, 0.388, 0.512]->[0.265, 0.347, 0.381, 0.513]: sit, listen (e.g., to music), sing to (e.g., self, a person, a group).", "At 902.0s, [0.249, 0.351, 0.354, 0.511]: sit, listen (e.g., to music)."]}, {"from": "human", "timestamps": 952.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.152, 0.306, 0.394, 0.709]: carry/hold (an object), sit, listen to (a person); [0.438, 0.103, 0.823, 0.986]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "8aMv-ZGD4ic.mkv", "clip": [883, 1020], "conversations": [{"from": "human", "timestamps": 913.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.263, 0.359, 0.373, 0.511]: listen (e.g., to music), sit; [0.501, 0.341, 0.661, 0.676]: carry/hold (an object), bend/bow (at the waist)."}, {"from": "human", "timestamps": 
1020.0, "value": "What actions did the person at bbox [0.391, 0.13, 0.638, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.391, 0.13, 0.638, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1019.0s, [0.428, 0.091, 0.761, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1016.0s to 1017.0s, [0.366, 0.119, 0.69, 1.0]->[0.362, 0.05, 0.637, 1.0]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1013.0s to 1015.0s, [0.405, 0.095, 0.825, 1.0]->[0.391, 0.105, 0.688, 1.0]->[0.251, 0.063, 0.595, 0.998]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1005.0s to 1011.0s, [0.189, 0.083, 0.648, 0.991]->[0.127, 0.077, 0.568, 1.0]->[0.119, 0.077, 0.541, 0.997]->[0.074, 0.071, 0.575, 0.996]->[0.198, 0.085, 0.69, 1.0]->[0.188, 0.079, 0.657, 1.0]->[0.351, 0.078, 0.881, 0.994]: stand, talk to (e.g., self, a person, a group).", "From 997.0s to 1004.0s, [0.326, 0.078, 0.847, 0.973]->[0.251, 0.065, 0.739, 0.989]->[0.209, 0.066, 0.63, 0.983]->[0.213, 0.082, 0.58, 0.961]->[0.133, 0.055, 0.566, 0.973]->[0.133, 0.046, 0.667, 0.979]->[0.167, 0.053, 0.656, 0.979]->[0.145, 0.07, 0.597, 0.848]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 996.0s, [0.311, 0.132, 0.854, 0.98]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 995.0s, [0.207, 0.0, 0.628, 0.984]: stand, carry/hold (an object), fight/hit (a person), watch (a person).", "At 994.0s, [0.249, 0.067, 0.457, 1.0]: walk."]}]}, {"video": "8aMv-ZGD4ic.mkv", "clip": [978, 1016], "conversations": [{"from": "human", "timestamps": 1008.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.074, 0.071, 0.575, 0.996]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1016.0, "value": "For the person at bbox [0.366, 0.119, 0.69, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.366, 0.119, 0.69, 1.0]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1013.0s to 1015.0s, [0.405, 0.095, 0.825, 1.0]->[0.391, 0.105, 0.688, 1.0]->[0.251, 0.063, 0.595, 0.998]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1005.0s to 1011.0s, [0.189, 0.083, 0.648, 0.991]->[0.127, 0.077, 0.568, 1.0]->[0.119, 0.077, 0.541, 0.997]->[0.074, 0.071, 0.575, 0.996]->[0.198, 0.085, 0.69, 1.0]->[0.188, 0.079, 0.657, 1.0]->[0.351, 0.078, 0.881, 0.994]: stand, talk to (e.g., self, a person, a group).", "From 997.0s to 1004.0s, [0.326, 0.078, 0.847, 0.973]->[0.251, 0.065, 0.739, 0.989]->[0.209, 0.066, 0.63, 0.983]->[0.213, 0.082, 0.58, 0.961]->[0.133, 0.055, 0.566, 0.973]->[0.133, 0.046, 0.667, 0.979]->[0.167, 0.053, 0.656, 0.979]->[0.145, 0.07, 0.597, 0.848]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 996.0s, [0.311, 0.132, 0.854, 0.98]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 995.0s, [0.207, 0.0, 0.628, 0.984]: stand, carry/hold (an object), fight/hit (a person), watch (a person).", "At 994.0s, [0.249, 0.067, 0.457, 1.0]: walk."]}]}, {"video": "8aMv-ZGD4ic.mkv", "clip": [1047, 1083], "conversations": [{"from": "human", "timestamps": 1077.0, "value": "For the current frame, show each individual's position and 
actions."}, {"from": "gpt", "value": "At now, [0.017, 0.082, 0.724, 0.989]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1083.0, "value": "What actions is the person in bbox [0.121, 0.102, 0.931, 0.926] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.121, 0.102, 0.931, 0.926]: stand, talk to (e.g., self, a person, a group).", "At 1082.0s, [0.225, 0.124, 0.978, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "8aMv-ZGD4ic.mkv", "clip": [1051, 1100], "conversations": [{"from": "human", "timestamps": 1081.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.069, 0.086, 0.766, 0.997]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1100.0, "value": "What actions is the person in bbox [0.155, 0.152, 0.969, 0.993] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.155, 0.152, 0.969, 0.993]: stand, talk to (e.g., self, a person, a group).", "From 1095.0s to 1099.0s, [0.088, 0.055, 0.95, 0.959]->[0.178, 0.06, 0.986, 0.975]->[0.139, 0.047, 0.949, 0.978]->[0.166, 0.103, 1.0, 0.976]->[0.175, 0.066, 0.951, 0.999]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "8aMv-ZGD4ic.mkv", "clip": [1146, 1178], "conversations": [{"from": "human", "timestamps": 1176.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.006, 0.072, 0.333, 1.0]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.808, 0.328, 0.999, 1.0]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1178.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.011, 0.037, 0.928, 0.997]?"}, {"from": "gpt", "value": ["At Now, [0.011, 0.037, 0.928, 0.997]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1176.0s to 1177.0s, [0.006, 0.072, 0.333, 1.0]->[0.0, 0.072, 0.385, 0.984]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1174.0s to 1175.0s, [0.163, 0.073, 0.482, 0.994]->[0.087, 0.037, 0.451, 0.99]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1173.0s, [0.088, 0.038, 0.47, 0.991]: stand, carry/hold (an object), listen (e.g., to music), sing to (e.g., self, a person, a group).", "At 1172.0s, [0.145, 0.08, 0.505, 0.993]: carry/hold (an object), listen (e.g., to music), dance, sing to (e.g., self, a person, a group)."]}]}, {"video": "8aMv-ZGD4ic.mkv", "clip": [1259, 1301], "conversations": [{"from": "human", "timestamps": 1289.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.238, 0.002, 0.63, 1.0]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1301.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.28, 0.0, 0.968, 0.99]?"}, {"from": "gpt", "value": ["In the current frame, [0.28, 0.0, 0.968, 0.99]: stand, talk to (e.g., self, a person, a group).", "At 1300.0s, [0.306, 0.0, 1.0, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1299.0s, [0.315, 0.0, 0.984, 0.985]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 
1298.0s, [0.674, 0.118, 0.883, 0.983]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "8aMv-ZGD4ic.mkv", "clip": [1264, 1353], "conversations": [{"from": "human", "timestamps": 1294.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.214, 0.029, 0.716, 0.993]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1353.0, "value": "Describe the actions of the person at bbox [0.0, 0.035, 0.79, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.0, 0.035, 0.79, 1.0]: stand, listen to (a person).", "From 1351.0s to 1352.0s, [0.0, 0.038, 0.748, 0.983]->[0.0, 0.086, 0.701, 0.978]: stand.", "At 1349.0s, [0.0, 0.032, 0.817, 0.984]: stand.", "At 1348.0s, [0.0, 0.015, 0.792, 0.985]: stand, listen to (a person), watch (a person)."]}]}, {"video": "8aMv-ZGD4ic.mkv", "clip": [1357, 1433], "conversations": [{"from": "human", "timestamps": 1387.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.029, 0.041, 0.817, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1433.0, "value": "For the person at bbox [0.537, 0.253, 0.734, 0.795], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.537, 0.253, 0.734, 0.795]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1427.0s to 1432.0s, [0.544, 0.258, 0.791, 0.964]->[0.599, 0.247, 0.838, 0.863]->[0.624, 0.248, 0.849, 0.872]->[0.56, 0.229, 0.817, 0.923]->[0.461, 0.253, 0.768, 0.821]->[0.496, 0.285, 0.812, 0.958]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1424.0s to 1426.0s, [0.614, 0.218, 0.858, 0.99]->[0.603, 0.215, 0.838, 0.978]->[0.627, 0.234, 0.866, 0.976]: stand, listen to (a person), watch (a person).", "At 1423.0s, [0.635, 0.24, 0.87, 0.924]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1422.0s, [0.568, 0.219, 0.844, 0.958]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1419.0s to 1421.0s, [0.565, 0.248, 0.807, 0.769]->[0.629, 0.24, 0.862, 0.954]->[0.619, 0.236, 0.846, 0.929]: stand, listen to (a person), watch (a person).", "From 1414.0s to 1418.0s, [0.578, 0.289, 0.833, 0.739]->[0.55, 0.299, 0.821, 0.755]->[0.617, 0.301, 0.87, 0.738]->[0.523, 0.285, 0.776, 0.738]->[0.562, 0.274, 0.832, 0.741]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1413.0s, [0.602, 0.295, 0.849, 0.738]: stand, walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1412.0s, [0.516, 0.265, 0.765, 0.816]: walk, talk to (e.g., self, a person, a group).", "At 1411.0s, [0.395, 0.279, 0.596, 0.714]: stand, talk to (e.g., self, a person, a group).", "From 1407.0s to 1410.0s, [0.218, 0.272, 0.413, 0.93]->[0.248, 0.265, 0.427, 0.644]->[0.362, 0.253, 0.573, 0.702]->[0.381, 0.27, 0.521, 0.689]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1406.0s, [0.232, 0.258, 0.4, 0.573]: stand, listen to (a person), watch (a person)."]}]}, {"video": "8aMv-ZGD4ic.mkv", "clip": [1381, 1468], "conversations": [{"from": "human", "timestamps": 1411.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.395, 0.279, 0.596, 0.714]: stand, talk to (e.g., 
self, a person, a group); [0.56, 0.194, 0.731, 0.94]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1468.0, "value": "Describe the actions of the person at bbox [0.271, 0.175, 0.388, 0.494] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.271, 0.175, 0.388, 0.494]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1464.0s to 1467.0s, [0.474, 0.144, 0.627, 0.487]->[0.462, 0.171, 0.576, 0.478]->[0.397, 0.162, 0.504, 0.546]->[0.313, 0.193, 0.453, 0.469]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1463.0s, [0.538, 0.195, 0.652, 0.588]: walk, carry/hold (an object)."]}]}, {"video": "8aMv-ZGD4ic.mkv", "clip": [1421, 1456], "conversations": [{"from": "human", "timestamps": 1451.0, "value": "What actions did the person at bbox [0.483, 0.238, 0.683, 0.693] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.483, 0.238, 0.683, 0.693]: walk.", "From 1448.0s to 1450.0s, [0.408, 0.198, 0.693, 0.713]->[0.389, 0.208, 0.627, 0.793]->[0.429, 0.221, 0.675, 0.77]: walk, talk to (e.g., self, a person, a group).", "From 1445.0s to 1447.0s, [0.446, 0.237, 0.753, 0.99]->[0.38, 0.195, 0.696, 0.984]->[0.362, 0.237, 0.61, 0.993]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1456.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.383, 0.163, 0.604, 0.959]: carry/hold (an object), talk to (e.g., self, a person, a group), walk."}]}, {"video": "8aMv-ZGD4ic.mkv", "clip": [1476, 1550], "conversations": [{"from": "human", "timestamps": 1506.0, "value": "What actions is the person in bbox [0.184, 0.095, 0.862, 0.973] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.184, 0.095, 0.862, 0.973]: sit, listen to (a person), watch (a person).", "At 1505.0s, [0.176, 0.091, 0.867, 0.972]: sit, listen to (a person), watch (a person).", "At 1504.0s, [0.166, 0.082, 0.862, 0.961]: sit, listen to (a person)."]}, {"from": "human", "timestamps": 1550.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.14, 0.119, 0.968, 0.973]: lie/sleep, talk to (e.g., self, a person, a group)."}]}, {"video": "8aMv-ZGD4ic.mkv", "clip": [1490, 1569], "conversations": [{"from": "human", "timestamps": 1520.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.131, 0.245, 0.467, 0.95]: watch (a person), talk to (e.g., self, a person, a group), bend/bow (at the waist); [0.535, 0.35, 0.684, 0.753]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.689, 0.419, 0.899, 0.609]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1569.0, "value": "Describe the actions of the person at bbox [0.151, 0.072, 0.884, 0.972] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.151, 0.072, 0.884, 0.972]: sit.", "At 1567.0s, [0.157, 0.101, 0.857, 0.974]: sit.", "At 1566.0s, [0.181, 0.098, 0.862, 0.97]: sit, listen to (a person), watch (a person)."]}]}, {"video": "8aMv-ZGD4ic.mkv", "clip": [1558, 1605], "conversations": [{"from": "human", "timestamps": 1588.0, "value": "What actions is the person in bbox [0.038, 0.047, 0.984, 1.0] doing in the current 
frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At now, [0.038, 0.047, 0.984, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1587.0s, [0.02, 0.062, 1.0, 1.0]: talk to (e.g., self, a person, a group), lie/sleep, watch (a person)."]}, {"from": "human", "timestamps": 1605.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.139, 0.063, 0.876, 0.983]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "8aMv-ZGD4ic.mkv", "clip": [1604, 1659], "conversations": [{"from": "human", "timestamps": 1634.0, "value": "For the person at bbox [0.488, 0.337, 0.639, 0.783], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.488, 0.337, 0.639, 0.783]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1633.0s, [0.496, 0.344, 0.646, 0.804]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1659.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.493, 0.41, 0.765, 0.819]: watch (e.g., TV), sit; [0.78, 0.076, 0.955, 0.721]: walk."}]}, {"video": "8aMv-ZGD4ic.mkv", "clip": [1633, 1689], "conversations": [{"from": "human", "timestamps": 1663.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.304, 0.099, 0.953, 0.989]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1689.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.249, 0.148, 0.923, 0.972]?"}, {"from": "gpt", "value": ["In the current frame, [0.249, 0.148, 0.923, 0.972]: stand, talk to (e.g., self, a person, a group).", "At 1688.0s, [0.233, 0.119, 0.891, 0.97]: stand, talk to (e.g., self, a person, a group).", "At 1687.0s, [0.296, 0.085, 1.0, 0.985]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1685.0s, [0.136, 0.102, 0.917, 0.975]: stand.", "At 1684.0s, [0.111, 0.103, 0.921, 0.972]: stand, listen to (a person), watch (a person).", "From 1682.0s to 1683.0s, [0.118, 0.098, 0.887, 0.981]->[0.123, 0.1, 0.918, 0.977]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "8aMv-ZGD4ic.mkv", "clip": [1648, 1725], "conversations": [{"from": "human", "timestamps": 1678.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.05, 0.092, 0.925, 0.989]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1725.0, "value": "What actions did the person at bbox [0.003, 0.029, 0.726, 0.999] perform now and earlier?"}, {"from": "gpt", "value": ["At now, [0.003, 0.029, 0.726, 0.999]: stand, talk to (e.g., self, a person, a group).", "From 1723.0s to 1724.0s, [0.068, 0.066, 0.78, 0.987]->[0.06, 0.076, 0.794, 0.99]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "8aMv-ZGD4ic.mkv", "clip": [1729, 1788], "conversations": [{"from": "human", "timestamps": 1759.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.052, 0.128, 0.975, 0.955]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1788.0, "value": "What actions is the person in bbox 
[0.013, 0.091, 0.894, 0.966] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At now, [0.013, 0.091, 0.894, 0.966]: stand, talk to (e.g., self, a person, a group).", "From 1786.0s to 1787.0s, [0.02, 0.076, 0.938, 0.893]->[0.144, 0.102, 0.887, 0.996]: stand, talk to (e.g., self, a person, a group).", "At 1785.0s, [0.166, 0.153, 0.884, 0.966]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "8nO5FFbIAog.webm", "clip": [999, 1035], "conversations": [{"from": "human", "timestamps": 1029.0, "value": "What actions did the person at bbox [0.007, 0.025, 0.686, 0.978] perform now and earlier?"}, {"from": "gpt", "value": ["At now, [0.007, 0.025, 0.686, 0.978]: stand.", "At 1028.0s, [0.0, 0.037, 0.655, 0.977]: stand."]}, {"from": "human", "timestamps": 1035.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.042, 0.041, 0.797, 0.981]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "8nO5FFbIAog.webm", "clip": [1133, 1178], "conversations": [{"from": "human", "timestamps": 1163.0, "value": "For the person at bbox [0.599, 0.515, 0.837, 0.981], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.599, 0.515, 0.837, 0.981]: stand, carry/hold (an object).", "At 1162.0s, [0.631, 0.568, 0.858, 0.994]: stand, carry/hold (an object).", "At 1161.0s, [0.776, 0.557, 0.993, 0.994]: stand.", "From 1159.0s to 1160.0s, [0.653, 0.59, 0.864, 0.981]->[0.724, 0.629, 0.967, 0.995]: walk."]}, {"from": "human", "timestamps": 1178.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.0, 0.019, 0.585, 0.992]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "8nO5FFbIAog.webm", "clip": [1332, 1372], "conversations": [{"from": "human", "timestamps": 1362.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.538, 0.199, 0.979, 0.989]: sit."}, {"from": "human", "timestamps": 1372.0, "value": "What actions is the person in bbox [0.138, 0.028, 0.4, 0.963] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.138, 0.028, 0.4, 0.963]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1369.0s to 1371.0s, [0.129, 0.048, 0.429, 0.981]->[0.142, 0.045, 0.403, 0.966]->[0.138, 0.046, 0.401, 0.964]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1366.0s to 1368.0s, [0.149, 0.071, 0.433, 0.976]->[0.123, 0.074, 0.434, 0.982]->[0.103, 0.089, 0.54, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1364.0s to 1365.0s, [0.111, 0.04, 0.425, 0.98]->[0.21, 0.074, 0.455, 0.953]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1363.0s, [0.015, 0.01, 0.282, 0.934]: walk, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "8nO5FFbIAog.webm", "clip": [1362, 1427], "conversations": [{"from": "human", "timestamps": 1392.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.396, 0.25, 0.508, 0.637]: stand, talk to (e.g., self, a person, a group); 
[0.74, 0.024, 0.985, 1.0]: ride (e.g., a bike, a car, a horse), sit, watch (a person)."}, {"from": "human", "timestamps": 1427.0, "value": "For the person at bbox [0.267, 0.126, 0.987, 0.999], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.267, 0.126, 0.987, 0.999]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1426.0s, [0.391, 0.081, 0.989, 0.979]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "8nO5FFbIAog.webm", "clip": [1364, 1482], "conversations": [{"from": "human", "timestamps": 1394.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.012, 0.008, 0.574, 0.987]: listen to (a person), stand."}, {"from": "human", "timestamps": 1482.0, "value": "For the person at bbox [0.077, 0.112, 0.423, 0.886], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.077, 0.112, 0.423, 0.886]: sit.", "At 1481.0s, [0.064, 0.107, 0.425, 0.885]: sit."]}]}, {"video": "8nO5FFbIAog.webm", "clip": [1474, 1592], "conversations": [{"from": "human", "timestamps": 1504.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.002, 0.086, 0.64, 0.991]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1592.0, "value": "For the person at bbox [0.586, 0.128, 0.804, 0.892], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.586, 0.128, 0.804, 0.892]: stand, watch (a person).", "From 1589.0s to 1591.0s, [0.484, 0.084, 0.777, 0.895]->[0.584, 0.124, 0.787, 0.893]->[0.585, 0.115, 0.8, 0.893]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "8nO5FFbIAog.webm", "clip": [1557, 1604], "conversations": [{"from": "human", "timestamps": 1587.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.22, 0.202, 0.367, 0.93]: listen to (a person), stand, watch (a person); [0.538, 0.125, 0.958, 0.883]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1604.0, "value": "What actions is the person in bbox [0.063, 0.0, 0.732, 1.0] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At now, [0.063, 0.0, 0.732, 1.0]: sit, drive (e.g., a car, a truck).", "At 1603.0s, [0.045, 0.01, 0.694, 0.993]: sit, drive (e.g., a car, a truck)."]}]}, {"video": "8nO5FFbIAog.webm", "clip": [1651, 1685], "conversations": [{"from": "human", "timestamps": 1681.0, "value": "Describe the actions of the person at bbox [0.244, 0.042, 0.898, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At now, [0.244, 0.042, 0.898, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1680.0s, [0.179, 0.064, 0.876, 0.982]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1685.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.16, 0.072, 0.952, 0.973]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "8nO5FFbIAog.webm", "clip": [1743, 1791], "conversations": [{"from": "human", "timestamps": 1773.0, "value": "For the person at bbox [0.182, 0.353, 
0.25, 0.571], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At now, [0.182, 0.353, 0.25, 0.571]: stand, watch (a person).", "From 1771.0s to 1772.0s, [0.195, 0.142, 0.522, 0.996]->[0.17, 0.363, 0.249, 0.57]: stand, watch (a person).", "At 1770.0s, [0.092, 0.251, 0.491, 0.986]: get up, watch (a person).", "At 1767.0s, [0.001, 0.275, 0.23, 0.992]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1791.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.448, 0.36, 0.589, 0.824]: run/jog."}]}, {"video": "9HOMUW7QNFc.mkv", "clip": [888, 954], "conversations": [{"from": "human", "timestamps": 918.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.482, 0.04, 0.993, 0.982]: watch (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 954.0, "value": "What actions is the person in bbox [0.113, 0.067, 0.712, 1.0] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.113, 0.067, 0.712, 1.0]: walk.", "From 949.0s to 953.0s, [0.0, 0.015, 0.378, 0.992]->[0.443, 0.036, 0.903, 1.0]->[0.379, 0.014, 0.877, 0.98]->[0.213, 0.077, 0.982, 0.98]->[0.205, 0.054, 0.924, 0.995]: walk, talk to (e.g., self, a person, a group).", "At 948.0s, [0.0, 0.029, 0.338, 0.981]: stand, touch (an object), watch (a person).", "From 942.0s to 947.0s, [0.003, 0.052, 0.372, 0.994]->[0.0, 0.066, 0.357, 0.986]->[0.002, 0.028, 0.358, 0.988]->[0.002, 0.024, 0.341, 0.98]->[0.001, 0.017, 0.348, 0.979]->[0.001, 0.016, 0.335, 0.979]: stand, touch (an object), listen to (a person), watch (a person).", "From 939.0s to 941.0s, [0.0, 0.0, 0.325, 0.98]->[0.0, 0.049, 0.329, 0.951]->[0.0, 0.036, 0.343, 0.983]: stand, listen to (a person), watch (a person).", "At 938.0s, [0.005, 0.013, 0.323, 0.992]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 936.0s to 937.0s, [0.0, 0.031, 0.331, 0.99]->[0.006, 0.013, 0.326, 0.992]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 935.0s, [0.0, 0.039, 0.323, 0.999]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "9HOMUW7QNFc.mkv", "clip": [944, 998], "conversations": [{"from": "human", "timestamps": 974.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.013, 0.169, 0.367, 1.0]: carry/hold (an object), sit, watch (a person), listen to (a person); [0.276, 0.012, 0.721, 0.978]: listen to (a person), sit, watch (a person), talk to (e.g., self, a person, a group); [0.424, 0.002, 0.525, 0.148]: carry/hold (an object), stand; [0.597, 0.228, 0.997, 0.993]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 998.0, "value": "For the person at bbox [0.449, 0.227, 0.762, 0.979], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.449, 0.227, 0.762, 0.979]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 997.0s, [0.223, 0.153, 0.589, 0.967]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "9HOMUW7QNFc.mkv", "clip": [993, 1025], "conversations": 
[{"from": "human", "timestamps": 1023.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.149, 0.052, 1.0, 0.987]?"}, {"from": "gpt", "value": ["At Now, [0.149, 0.052, 1.0, 0.987]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1022.0s, [0.054, 0.036, 0.99, 0.987]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1021.0s, [0.608, 0.17, 0.858, 0.758]: sit, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1025.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.051, 0.031, 0.972, 0.969]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "9HOMUW7QNFc.mkv", "clip": [1018, 1103], "conversations": [{"from": "human", "timestamps": 1048.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.141, 0.0, 1.0, 0.988]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.211, 0.312, 0.367, 0.714]: stand."}, {"from": "human", "timestamps": 1103.0, "value": "What actions is the person in bbox [0.179, 0.013, 0.968, 0.999] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.179, 0.013, 0.968, 0.999]: stand, watch (a person).", "At 1102.0s, [0.188, 0.022, 0.982, 0.977]: stand, talk to (e.g., self, a person, a group).", "At 1101.0s, [0.142, 0.013, 0.994, 0.993]: stand.", "At 1100.0s, [0.209, 0.033, 1.0, 0.992]: stand, talk to (e.g., self, a person, a group).", "From 1098.0s to 1099.0s, [0.132, 0.0, 0.973, 0.993]->[0.192, 0.0, 0.991, 0.987]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "9HOMUW7QNFc.mkv", "clip": [1113, 1174], "conversations": [{"from": "human", "timestamps": 1143.0, "value": "Describe the actions of the person at bbox [0.526, 0.082, 1.0, 0.996] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.526, 0.082, 1.0, 0.996]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1142.0s, [0.599, 0.062, 1.0, 0.988]: stand, carry/hold (an object), listen to (a person).", "From 1140.0s to 1141.0s, [0.501, 0.094, 0.967, 0.996]->[0.464, 0.127, 1.0, 0.992]: walk, carry/hold (an object).", "At 1139.0s, [0.437, 0.098, 0.849, 0.98]: stand, carry/hold (an object), listen to (a person), take (an object) from (a person).", "At 1138.0s, [0.387, 0.104, 0.825, 0.984]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1137.0s, [0.535, 0.099, 0.863, 1.0]: stand, watch (a person).", "At 1136.0s, [0.501, 0.161, 0.933, 1.0]: bend/bow (at the waist), watch (a person).", "From 1134.0s to 1135.0s, [0.512, 0.102, 0.881, 0.987]->[0.501, 0.143, 0.917, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1132.0s to 1133.0s, [0.459, 0.1, 1.0, 0.984]->[0.524, 0.076, 1.0, 0.986]: walk, watch (a person).", "At 1131.0s, [0.0, 0.029, 0.808, 1.0]: walk, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1174.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.525, 0.348, 0.94, 0.899]: carry/hold (an object), lie/sleep."}]}, {"video": "9HOMUW7QNFc.mkv", "clip": [1169, 1257], "conversations": [{"from": "human", "timestamps": 1199.0, "value": "What actions is each 
person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.215, 0.049, 0.816, 0.957]: lie/sleep, talk to (e.g., self, a person, a group); [0.475, 0.43, 0.96, 0.995]: lie/sleep."}, {"from": "human", "timestamps": 1257.0, "value": "What actions did the person at bbox [0.253, 0.025, 1.0, 0.987] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.253, 0.025, 1.0, 0.987]: stand, talk to (e.g., self, a person, a group).", "At 1256.0s, [0.235, 0.019, 1.0, 0.977]: stand, talk to (e.g., self, a person, a group).", "At 1255.0s, [0.203, 0.021, 1.0, 0.983]: stand."]}]}, {"video": "9HOMUW7QNFc.mkv", "clip": [1248, 1346], "conversations": [{"from": "human", "timestamps": 1278.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.156, 0.009, 1.0, 0.976]: watch (a person), sit."}, {"from": "human", "timestamps": 1346.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.142, 0.03, 1.0, 0.979]?"}, {"from": "gpt", "value": ["In the current frame, [0.142, 0.03, 1.0, 0.979]: lie/sleep, watch (a person).", "At 1345.0s, [0.424, 0.072, 1.0, 0.998]: lie/sleep, watch (a person)."]}]}, {"video": "9HOMUW7QNFc.mkv", "clip": [1301, 1369], "conversations": [{"from": "human", "timestamps": 1331.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.005, 0.609, 0.982]: sit, smoke, talk to (e.g., self, a person, a group); [0.562, 0.195, 0.996, 0.986]: listen to (a person), lie/sleep, watch (a person)."}, {"from": "human", "timestamps": 1369.0, "value": "For the person at bbox [0.338, 0.097, 0.698, 0.985], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.338, 0.097, 0.698, 0.985]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1368.0s, [0.343, 0.127, 0.641, 0.987]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "9HOMUW7QNFc.mkv", "clip": [1357, 1428], "conversations": [{"from": "human", "timestamps": 1387.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.028, 0.128, 0.574, 0.996]: stand, kiss (a person)."}, {"from": "human", "timestamps": 1428.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.023, 0.533, 0.977]?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.023, 0.533, 0.977]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1426.0s to 1427.0s, [0.0, 0.0, 0.584, 0.995]->[0.012, 0.001, 0.589, 0.989]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1425.0s, [0.0, 0.005, 0.521, 0.987]: stand, watch (a person)."]}]}, {"video": "9HOMUW7QNFc.mkv", "clip": [1390, 1485], "conversations": [{"from": "human", "timestamps": 1420.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.001, 0.078, 0.781, 0.994]: stand, kiss (a person); [0.364, 0.058, 0.941, 0.987]: stand, kiss (a person)."}, {"from": "human", "timestamps": 1485.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.007, 0.009, 0.835, 1.0]?"}, {"from": "gpt", "value": ["At now, [0.007, 0.009, 0.835, 1.0]: stand, watch (a person).", 
"At 1484.0s, [0.087, 0.005, 0.965, 1.0]: stand, watch (a person).", "From 1482.0s to 1483.0s, [0.188, 0.122, 0.969, 0.991]->[0.089, 0.024, 0.97, 1.0]: stand.", "From 1479.0s to 1481.0s, [0.091, 0.032, 0.944, 0.971]->[0.02, 0.152, 0.602, 1.0]->[0.252, 0.053, 0.979, 0.913]: stand, talk to (e.g., self, a person, a group).", "At 1478.0s, [0.0, 0.059, 0.869, 0.996]: walk, talk to (e.g., self, a person, a group)."]}]}, {"video": "9HOMUW7QNFc.mkv", "clip": [1435, 1480], "conversations": [{"from": "human", "timestamps": 1465.0, "value": "What actions is the person in bbox [0.503, 0.043, 0.99, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.503, 0.043, 0.99, 1.0]: walk, talk to (e.g., self, a person, a group).", "From 1462.0s to 1464.0s, [0.472, 0.044, 0.975, 0.996]->[0.727, 0.063, 0.993, 0.989]->[0.672, 0.034, 0.989, 1.0]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1461.0s, [0.652, 0.058, 0.939, 0.982]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1460.0s, [0.577, 0.103, 1.0, 0.971]: walk, talk to (e.g., self, a person, a group).", "From 1458.0s to 1459.0s, [0.64, 0.069, 0.937, 0.987]->[0.717, 0.061, 0.937, 0.979]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1457.0s, [0.733, 0.113, 0.959, 0.956]: walk, listen to (a person), talk to (e.g., self, a person, a group).", "At 1456.0s, [0.703, 0.087, 1.0, 0.984]: walk, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1480.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.02, 0.152, 0.602, 1.0]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "9HOMUW7QNFc.mkv", "clip": [1493, 1587], "conversations": [{"from": "human", "timestamps": 1523.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.279, 0.128, 0.955, 0.99]: watch (a person), touch (an object), walk."}, {"from": "human", "timestamps": 1587.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.454, 0.042, 0.995, 1.0]?"}, {"from": "gpt", "value": ["At Now, [0.454, 0.042, 0.995, 1.0]: sit, talk to (e.g., self, a person, a group).", "At 1586.0s, [0.447, 0.053, 0.997, 0.986]: sit, talk to (e.g., self, a person, a group).", "At 1585.0s, [0.434, 0.05, 1.0, 0.972]: sit.", "At 1584.0s, [0.433, 0.049, 0.992, 0.973]: sit, listen to (a person).", "At 1583.0s, [0.437, 0.06, 0.996, 0.975]: sit, talk to (e.g., self, a person, a group).", "At 1582.0s, [0.441, 0.046, 0.999, 1.0]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "From 1580.0s to 1581.0s, [0.426, 0.065, 0.999, 0.979]->[0.413, 0.069, 0.995, 0.975]: sit, listen to (a person), watch (a person).", "From 1575.0s to 1579.0s, [0.372, 0.039, 1.0, 0.994]->[0.404, 0.039, 0.989, 0.97]->[0.395, 0.042, 0.985, 0.967]->[0.372, 0.041, 0.99, 0.981]->[0.405, 0.041, 0.983, 0.987]: sit, listen to (a person)."]}]}, {"video": "9HOMUW7QNFc.mkv", "clip": [1584, 1617], "conversations": [{"from": "human", "timestamps": 1614.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.054, 0.367, 1.0]: listen to (a person), sit; [0.384, 0.038, 0.976, 0.981]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", 
"timestamps": 1617.0, "value": "What actions is the person in bbox [0.0, 0.049, 0.398, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.049, 0.398, 1.0]: sit, listen to (a person).", "At 1616.0s, [0.0, 0.056, 0.41, 1.0]: sit, touch (an object), listen to (a person).", "From 1604.0s to 1615.0s, [0.006, 0.077, 0.46, 0.996]->[0.0, 0.082, 0.434, 0.995]->[0.0, 0.077, 0.429, 0.997]->[0.0, 0.082, 0.406, 1.0]->[0.0, 0.056, 0.398, 0.999]->[0.0, 0.062, 0.39, 1.0]->[0.002, 0.056, 0.405, 0.996]->[0.0, 0.049, 0.408, 1.0]->[0.0, 0.065, 0.424, 0.995]->[0.0, 0.031, 0.393, 0.995]->[0.0, 0.054, 0.367, 1.0]->[0.001, 0.064, 0.428, 0.995]: sit, listen to (a person).", "From 1601.0s to 1603.0s, [0.0, 0.066, 0.415, 1.0]->[0.003, 0.105, 0.436, 0.984]->[0.006, 0.085, 0.432, 0.986]: sit, talk to (e.g., self, a person, a group).", "From 1591.0s to 1600.0s, [0.005, 0.069, 0.512, 0.995]->[0.008, 0.071, 0.53, 0.993]->[0.006, 0.07, 0.531, 0.987]->[0.008, 0.07, 0.531, 0.994]->[0.008, 0.075, 0.519, 0.99]->[0.004, 0.064, 0.523, 0.99]->[0.002, 0.08, 0.491, 0.993]->[0.0, 0.088, 0.477, 0.992]->[0.0, 0.093, 0.486, 0.989]->[0.0, 0.076, 0.387, 0.989]: sit, listen to (a person).", "At 1590.0s, [0.016, 0.089, 0.489, 0.992]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1589.0s, [0.018, 0.065, 0.55, 1.0]: sit, talk to (e.g., self, a person, a group).", "At 1588.0s, [0.02, 0.08, 0.569, 0.997]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1586.0s to 1587.0s, [0.016, 0.06, 0.589, 0.993]->[0.014, 0.076, 0.56, 1.0]: sit, listen to (a person).", "At 1585.0s, [0.016, 0.056, 0.59, 1.0]: sit.", "At 1584.0s, [0.013, 0.07, 0.582, 0.994]: sit, touch (an object), listen to (a person).", "At 1583.0s, [0.016, 0.067, 0.59, 1.0]: sit, listen to (a person).", "At 1582.0s, [0.024, 0.052, 0.562, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1581.0s, [0.031, 0.06, 0.565, 0.992]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1580.0s, [0.037, 0.071, 0.608, 1.0]: sit, talk to (e.g., self, a person, a group).", "At 1579.0s, [0.004, 0.067, 0.5, 0.986]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1575.0s to 1578.0s, [0.011, 0.065, 0.623, 0.995]->[0.008, 0.059, 0.58, 0.982]->[0.021, 0.081, 0.611, 1.0]->[0.012, 0.055, 0.561, 0.995]: sit, talk to (e.g., self, a person, a group).", "At 1574.0s, [0.381, 0.003, 1.0, 1.0]: stand, listen to (a person), watch (a person).", "At 1573.0s, [0.346, 0.041, 1.0, 1.0]: stand, watch (a person).", "From 1569.0s to 1572.0s, [0.348, 0.0, 1.0, 0.984]->[0.405, 0.0, 1.0, 0.988]->[0.211, 0.092, 0.947, 1.0]->[0.344, 0.02, 1.0, 0.987]: stand, talk to (e.g., self, a person, a group).", "At 1568.0s, [0.208, 0.214, 0.987, 0.996]: bend/bow (at the waist).", "At 1567.0s, [0.225, 0.355, 0.757, 0.982]: carry/hold (an object).", "At 1566.0s, [0.345, 0.133, 0.993, 0.984]: stand, carry/hold (an object).", "From 1561.0s to 1565.0s, [0.0, 0.339, 0.552, 0.987]->[0.367, 0.22, 0.719, 0.97]->[0.01, 0.175, 0.487, 0.983]->[0.153, 0.096, 0.46, 0.775]->[0.067, 0.228, 0.714, 0.987]: run/jog.", "From 1559.0s to 1560.0s, [0.34, 0.037, 0.856, 0.997]->[0.403, 0.07, 0.855, 0.997]: stand."]}]}, {"video": "9HOMUW7QNFc.mkv", "clip": [1612, 1676], "conversations": [{"from": "human", "timestamps": 1642.0, "value": "What actions did the person at bbox [0.114, 0.105, 0.539, 0.98] perform now and earlier?"}, {"from": "gpt", 
"value": ["In the current frame, [0.114, 0.105, 0.539, 0.98]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1641.0s, [0.417, 0.105, 0.687, 0.863]: walk, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1676.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.173, 0.01, 1.0, 0.976]: watch (a person), stand."}]}, {"video": "9HOMUW7QNFc.mkv", "clip": [1593, 1721], "conversations": [{"from": "human", "timestamps": 1623.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.416, 0.063, 0.97, 0.97]: listen to (a person), sit, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1721.0, "value": "For the person at bbox [0.0, 0.107, 0.437, 0.984], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.107, 0.437, 0.984]: stand, carry/hold (an object), watch (a person).", "At 1720.0s, [0.0, 0.092, 0.436, 0.985]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1719.0s, [0.0, 0.101, 0.434, 0.987]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1718.0s, [0.0, 0.092, 0.424, 0.981]: stand, carry/hold (an object), listen to (a person).", "At 1717.0s, [0.0, 0.101, 0.428, 0.983]: stand, listen to (a person), watch (a person)."]}]}, {"video": "9HOMUW7QNFc.mkv", "clip": [1690, 1733], "conversations": [{"from": "human", "timestamps": 1720.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.092, 0.436, 0.985]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.606, 0.013, 1.0, 0.996]: listen to (a person), stand."}, {"from": "human", "timestamps": 1733.0, "value": "What actions is the person in bbox [0.587, 0.115, 0.842, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.587, 0.115, 0.842, 1.0]: walk.", "At 1732.0s, [0.509, 0.128, 0.829, 0.99]: walk."]}]}, {"video": "9HOMUW7QNFc.mkv", "clip": [1716, 1772], "conversations": [{"from": "human", "timestamps": 1746.0, "value": "What actions is the person in bbox [0.051, 0.046, 0.773, 0.995] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.051, 0.046, 0.773, 0.995]: walk.", "From 1736.0s to 1745.0s, [0.287, 0.589, 0.406, 0.898]->[0.296, 0.543, 0.391, 0.796]->[0.045, 0.111, 0.455, 0.882]->[0.0, 0.09, 0.418, 0.971]->[0.117, 0.116, 0.533, 0.759]->[0.0, 0.145, 0.577, 0.976]->[0.09, 0.061, 0.744, 0.979]->[0.004, 0.108, 0.677, 0.984]->[0.061, 0.045, 0.66, 0.995]->[0.125, 0.202, 0.717, 0.988]: walk."]}, {"from": "human", "timestamps": 1772.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.237, 0.107, 0.773]: walk; [0.833, 0.245, 0.999, 0.915]: carry/hold (an object), listen to (a person), walk."}]}, {"video": "9IF8uTRrWAM.mkv", "clip": [886, 995], "conversations": [{"from": "human", "timestamps": 916.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, 
[0.025, 0.018, 0.794, 0.999]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 995.0, "value": "Describe the actions of the person at bbox [0.227, 0.023, 0.988, 0.981] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At now, [0.227, 0.023, 0.988, 0.981]: sit, talk to (e.g., self, a person, a group).", "At 994.0s, [0.355, 0.005, 1.0, 0.981]: sit, listen to (a person), talk to (e.g., self, a person, a group)."]}]}, {"video": "9IF8uTRrWAM.mkv", "clip": [1040, 1076], "conversations": [{"from": "human", "timestamps": 1070.0, "value": "For the person at bbox [0.008, 0.0, 0.988, 0.964], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.008, 0.0, 0.988, 0.964]: sit, talk to (e.g., self, a person, a group).", "At 1069.0s, [0.001, 0.023, 0.967, 0.982]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1076.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.198, 0.241, 0.494, 0.933]: sit, watch (a person), listen to (a person), carry/hold (an object), eat; [0.565, 0.219, 0.818, 0.671]: carry/hold (an object), sit, cut, talk to (e.g., self, a person, a group)."}]}, {"video": "9IF8uTRrWAM.mkv", "clip": [994, 1089], "conversations": [{"from": "human", "timestamps": 1024.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.017, 0.016, 0.92, 0.985]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1089.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.044, 0.957, 0.986]?"}, {"from": "gpt", "value": ["At now, [0.0, 0.044, 0.957, 0.986]: sit, talk to (e.g., self, a person, a group).", "From 1087.0s to 1088.0s, [0.0, 0.0, 0.9, 0.984]->[0.0, 0.021, 0.962, 0.993]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "9IF8uTRrWAM.mkv", "clip": [1069, 1113], "conversations": [{"from": "human", "timestamps": 1099.0, "value": "For the person at bbox [0.179, 0.003, 1.0, 0.994], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.179, 0.003, 1.0, 0.994]: sit, talk to (e.g., self, a person, a group).", "At 1098.0s, [0.158, 0.015, 0.978, 0.992]: sit, stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1113.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.264, 0.0, 1.0, 0.983]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "9IF8uTRrWAM.mkv", "clip": [1190, 1236], "conversations": [{"from": "human", "timestamps": 1220.0, "value": "Describe the actions of the person at bbox [0.213, 0.022, 0.877, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At now, [0.213, 0.022, 0.877, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1219.0s, [0.234, 0.008, 0.974, 1.0]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1236.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.049, 0.047, 0.978, 0.985]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "9IF8uTRrWAM.mkv", 
"clip": [1310, 1372], "conversations": [{"from": "human", "timestamps": 1340.0, "value": "For the person at bbox [0.017, 0.088, 0.956, 0.981], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.017, 0.088, 0.956, 0.981]: carry/hold (an object), dance.", "From 1338.0s to 1339.0s, [0.012, 0.027, 0.437, 0.977]->[0.018, 0.061, 0.695, 0.909]: dance.", "At 1337.0s, [0.15, 0.0, 0.687, 0.993]: carry/hold (an object), dance.", "From 1335.0s to 1336.0s, [0.21, 0.015, 0.824, 0.968]->[0.024, 0.009, 0.779, 0.996]: dance.", "At 1334.0s, [0.101, 0.0, 0.729, 1.0]: stand.", "At 1333.0s, [0.237, 0.0, 0.84, 1.0]: stand, walk.", "At 1332.0s, [0.125, 0.051, 0.743, 0.98]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1372.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.003, 0.385, 0.928, 0.996]: lie/sleep, talk to (e.g., self, a person, a group); [0.075, 0.015, 0.689, 0.888]: stand."}]}, {"video": "9IF8uTRrWAM.mkv", "clip": [1364, 1398], "conversations": [{"from": "human", "timestamps": 1394.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.014, 0.067, 0.965, 0.987]: carry/hold (an object), sit."}, {"from": "human", "timestamps": 1398.0, "value": "What actions did the person at bbox [0.151, 0.204, 0.922, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.151, 0.204, 0.922, 1.0]: sit.", "From 1396.0s to 1397.0s, [0.142, 0.207, 0.918, 1.0]->[0.157, 0.217, 0.936, 1.0]: sit.", "At 1395.0s, [0.131, 0.103, 0.944, 0.962]: sit, touch (an object), get up.", "At 1394.0s, [0.014, 0.067, 0.965, 0.987]: sit, carry/hold (an object).", "From 1386.0s to 1392.0s, [0.027, 0.199, 0.929, 0.959]->[0.051, 0.192, 0.985, 0.996]->[0.058, 0.206, 0.982, 0.939]->[0.041, 0.203, 0.99, 0.953]->[0.038, 0.204, 0.99, 0.979]->[0.011, 0.199, 0.997, 1.0]->[0.234, 0.199, 0.967, 0.984]: lie/sleep.", "From 1383.0s to 1385.0s, [0.0, 0.159, 0.947, 0.993]->[0.059, 0.169, 0.938, 0.997]->[0.037, 0.185, 0.945, 1.0]: touch (an object), lie/sleep.", "At 1382.0s, [0.022, 0.175, 0.858, 0.979]: lie/sleep."]}]}, {"video": "9IF8uTRrWAM.mkv", "clip": [1483, 1525], "conversations": [{"from": "human", "timestamps": 1513.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.004, 0.945, 1.0]: talk to (e.g., self, a person, a group), bend/bow (at the waist)."}, {"from": "human", "timestamps": 1525.0, "value": "For the person at bbox [0.269, 0.006, 0.829, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.269, 0.006, 0.829, 1.0]: bend/bow (at the waist), carry/hold (an object).", "At 1524.0s, [0.249, 0.005, 0.835, 0.995]: bend/bow (at the waist), carry/hold (an object), listen to (a person).", "At 1523.0s, [0.301, 0.0, 0.9, 0.971]: stand, carry/hold (an object).", "At 1522.0s, [0.397, 0.004, 0.929, 0.964]: stand, carry/hold (an object), put down, talk to (e.g., self, a person, a group)."]}]}, {"video": "9IF8uTRrWAM.mkv", "clip": [1488, 1576], "conversations": [{"from": "human", "timestamps": 1518.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.072, 0.019, 0.932, 1.0]: listen to (a person), lie/sleep."}, {"from": "human", "timestamps": 1576.0, "value": "Describe the actions 
of the person at bbox [0.05, 0.157, 0.632, 0.991] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At now, [0.05, 0.157, 0.632, 0.991]: bend/bow (at the waist), watch (a person).", "At 1575.0s, [0.396, 0.252, 0.996, 0.996]: bend/bow (at the waist), watch (a person)."]}]}, {"video": "9IF8uTRrWAM.mkv", "clip": [1532, 1594], "conversations": [{"from": "human", "timestamps": 1562.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.245, 0.0, 0.991, 1.0]: watch (a person), lie/sleep."}, {"from": "human", "timestamps": 1594.0, "value": "Describe the actions of the person at bbox [0.452, 0.205, 0.927, 0.742] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At now, [0.452, 0.205, 0.927, 0.742]: grab (a person), listen to (a person), lie/sleep.", "At 1593.0s, [0.405, 0.268, 0.707, 0.677]: listen to (a person), lie/sleep.", "At 1592.0s, [0.358, 0.284, 0.643, 0.667]: listen to (a person), lie/sleep, watch (a person)."]}]}, {"video": "9IF8uTRrWAM.mkv", "clip": [1625, 1674], "conversations": [{"from": "human", "timestamps": 1655.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.224, 0.164, 0.525, 0.809]: sit, grab (a person)."}, {"from": "human", "timestamps": 1674.0, "value": "For the person at bbox [0.128, 0.413, 0.555, 0.999], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At now, [0.128, 0.413, 0.555, 0.999]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1673.0s, [0.148, 0.394, 0.588, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1672.0s, [0.125, 0.393, 0.577, 0.991]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1668.0s to 1671.0s, [0.173, 0.391, 0.556, 0.995]->[0.154, 0.406, 0.533, 1.0]->[0.146, 0.398, 0.552, 1.0]->[0.173, 0.393, 0.655, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1667.0s, [0.169, 0.374, 0.569, 0.991]: sit, watch (a person)."]}]}, {"video": "9IF8uTRrWAM.mkv", "clip": [1735, 1781], "conversations": [{"from": "human", "timestamps": 1765.0, "value": "Describe the actions of the person at bbox [0.224, 0.192, 0.814, 0.999] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At now, [0.224, 0.192, 0.814, 0.999]: stand, talk to (e.g., self, a person, a group).", "From 1761.0s to 1764.0s, [0.013, 0.069, 0.932, 0.996]->[0.102, 0.043, 0.875, 0.978]->[0.181, 0.148, 0.793, 0.986]->[0.231, 0.184, 0.963, 0.998]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1781.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.297, 0.166, 0.637, 0.992]: listen to (a person), stand."}]}, {"video": "9QbzS8bZXFE.mkv", "clip": [910, 999], "conversations": [{"from": "human", "timestamps": 940.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.103, 0.581, 0.874]: listen to (a person), stand, watch (a person); [0.048, 0.114, 0.927, 0.863]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 999.0, "value": "Describe the actions of the person at bbox [0.387, 0.403, 0.474, 0.831] in the current frame and the previous frames."}, 
{"from": "gpt", "value": ["At Now, [0.387, 0.403, 0.474, 0.831]: walk, press.", "At 998.0s, [0.431, 0.283, 0.561, 0.849]: walk."]}]}, {"video": "9QbzS8bZXFE.mkv", "clip": [933, 1016], "conversations": [{"from": "human", "timestamps": 963.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.249, 0.508, 0.314, 0.842]: listen to (a person), stand, watch (a person); [0.303, 0.505, 0.375, 0.833]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1016.0, "value": "Describe the actions of the person at bbox [0.377, 0.222, 0.858, 0.872] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.377, 0.222, 0.858, 0.872]: stand, answer phone.", "From 1009.0s to 1015.0s, [0.424, 0.209, 0.866, 0.877]->[0.409, 0.21, 0.847, 0.879]->[0.448, 0.209, 0.866, 0.883]->[0.441, 0.211, 0.869, 0.876]->[0.423, 0.212, 0.86, 0.879]->[0.39, 0.218, 0.84, 0.86]->[0.411, 0.207, 0.864, 0.876]: stand, answer phone.", "At 1008.0s, [0.463, 0.278, 0.852, 0.874]: walk, carry/hold (an object).", "At 1007.0s, [0.167, 0.175, 0.644, 0.876]: stand, carry/hold (an object).", "From 1004.0s to 1006.0s, [0.378, 0.459, 0.45, 0.822]->[0.374, 0.462, 0.454, 0.822]->[0.13, 0.163, 0.587, 0.868]: stand, answer phone.", "At 1003.0s, [0.379, 0.435, 0.452, 0.819]: stand, answer phone, text on/look at a cellphone.", "At 1002.0s, [0.372, 0.438, 0.454, 0.821]: stand, answer phone, carry/hold (an object), text on/look at a cellphone.", "At 1001.0s, [0.378, 0.458, 0.468, 0.82]: stand, text on/look at a cellphone.", "At 1000.0s, [0.372, 0.444, 0.465, 0.821]: stand.", "At 999.0s, [0.387, 0.403, 0.474, 0.831]: walk, press.", "At 998.0s, [0.431, 0.283, 0.561, 0.849]: walk."]}]}, {"video": "9QbzS8bZXFE.mkv", "clip": [995, 1033], "conversations": [{"from": "human", "timestamps": 1025.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.002, 0.089, 0.944, 0.866]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1033.0, "value": "For the person at bbox [0.128, 0.466, 0.239, 0.672], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.128, 0.466, 0.239, 0.672]: sit, carry/hold (an object), listen to (a person).", "At 1031.0s, [0.002, 0.472, 0.104, 0.694]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "9QbzS8bZXFE.mkv", "clip": [1042, 1129], "conversations": [{"from": "human", "timestamps": 1072.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.186, 0.223, 0.46, 0.878]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.272, 0.187, 0.774, 0.871]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1129.0, "value": "What actions is the person in bbox [0.435, 0.318, 0.821, 0.869] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.435, 0.318, 0.821, 0.869]: sit, carry/hold (an object), eat, listen to (a person), watch (a person).", "From 1127.0s to 1128.0s, [0.492, 0.327, 0.802, 0.875]->[0.5, 0.321, 0.811, 0.874]: sit, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "At 1126.0s, [0.49, 0.323, 
0.797, 0.878]: sit, touch (an object), listen to (a person), watch (a person).", "At 1125.0s, [0.479, 0.326, 0.794, 0.878]: sit, drink, touch (an object), listen to (a person), watch (a person).", "From 1123.0s to 1124.0s, [0.475, 0.331, 0.783, 0.877]->[0.479, 0.329, 0.787, 0.879]: sit, touch (an object), listen to (a person), watch (a person).", "At 1122.0s, [0.472, 0.333, 0.777, 0.879]: sit, listen to (a person), watch (a person).", "From 1120.0s to 1121.0s, [0.471, 0.335, 0.768, 0.883]->[0.476, 0.332, 0.77, 0.879]: sit, touch (an object), listen to (a person), watch (a person).", "From 1118.0s to 1119.0s, [0.468, 0.349, 0.768, 0.878]->[0.471, 0.342, 0.766, 0.88]: sit, drink, touch (an object), listen to (a person), watch (a person).", "At 1117.0s, [0.471, 0.351, 0.766, 0.879]: sit, carry/hold (an object), listen to (a person).", "At 1116.0s, [0.474, 0.35, 0.762, 0.875]: sit, carry/hold (an object), touch (an object), listen to (a person).", "At 1115.0s, [0.474, 0.333, 0.746, 0.877]: sit, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1114.0s, [0.469, 0.348, 0.755, 0.878]: sit, listen to (a person), watch (a person).", "At 1113.0s, [0.467, 0.358, 0.762, 0.873]: sit, drink, eat, listen to (a person), watch (a person).", "At 1112.0s, [0.477, 0.356, 0.756, 0.873]: sit, touch (an object), listen to (a person), watch (a person).", "From 1106.0s to 1111.0s, [0.575, 0.116, 0.999, 0.855]->[0.274, 0.172, 0.966, 0.834]->[0.221, 0.177, 0.955, 0.875]->[0.479, 0.358, 0.748, 0.872]->[0.478, 0.356, 0.746, 0.871]->[0.465, 0.362, 0.752, 0.871]: sit, listen to (a person), watch (a person).", "From 1104.0s to 1105.0s, [0.6, 0.112, 0.973, 0.87]->[0.606, 0.11, 1.0, 0.874]: stand, listen to (a person), watch (a person).", "From 1102.0s to 1103.0s, [0.551, 0.169, 0.993, 0.862]->[0.582, 0.146, 0.993, 0.881]: sit, listen to (a person), watch (a person).", "From 1100.0s to 1101.0s, [0.294, 0.151, 0.972, 0.839]->[0.283, 0.176, 0.969, 0.875]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1096.0s to 1099.0s, [0.529, 0.156, 0.997, 0.87]->[0.659, 0.177, 0.995, 0.871]->[0.61, 0.166, 0.997, 0.888]->[0.537, 0.149, 0.995, 0.866]: stand, listen to (a person), watch (a person)."]}]}, {"video": "9QbzS8bZXFE.mkv", "clip": [1088, 1127], "conversations": [{"from": "human", "timestamps": 1118.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.083, 0.393, 0.393, 0.885]: watch (a person), sit, touch (an object), talk to (e.g., self, a person, a group); [0.468, 0.349, 0.768, 0.878]: sit, touch (an object), drink, listen to (a person), watch (a person)."}, {"from": "human", "timestamps": 1127.0, "value": "Describe the actions of the person at bbox [0.115, 0.39, 0.395, 0.875] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.115, 0.39, 0.395, 0.875]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1114.0s to 1126.0s, [0.137, 0.419, 0.394, 0.863]->[0.146, 0.404, 0.374, 0.862]->[0.07, 0.405, 0.376, 0.874]->[0.137, 0.415, 0.376, 0.882]->[0.083, 0.393, 0.393, 0.885]->[0.058, 0.385, 0.379, 0.876]->[0.084, 0.373, 0.384, 0.878]->[0.129, 0.388, 0.382, 0.843]->[0.142, 0.377, 0.38, 0.838]->[0.137, 0.385, 0.403, 0.842]->[0.139, 0.379, 0.402, 0.863]->[0.131, 0.379, 0.405, 0.858]->[0.121, 0.376, 0.39, 0.871]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a 
person).", "At 1113.0s, [0.143, 0.407, 0.37, 0.856]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1112.0s, [0.144, 0.407, 0.399, 0.859]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1111.0s, [0.143, 0.402, 0.39, 0.877]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1110.0s, [0.083, 0.411, 0.364, 0.871]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1109.0s, [0.118, 0.411, 0.375, 0.875]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1108.0s, [0.0, 0.134, 0.29, 0.891]: sit, watch (a person).", "From 1106.0s to 1107.0s, [0.189, 0.204, 0.68, 0.883]->[0.0, 0.177, 0.401, 0.875]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1105.0s, [0.214, 0.211, 0.677, 0.89]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1104.0s, [0.222, 0.208, 0.703, 0.875]: talk to (e.g., self, a person, a group), watch (a person).", "At 1103.0s, [0.239, 0.267, 0.702, 0.865]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1102.0s, [0.195, 0.221, 0.67, 0.852]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "9QbzS8bZXFE.mkv", "clip": [1123, 1230], "conversations": [{"from": "human", "timestamps": 1153.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.054, 0.205, 0.527, 0.86]: sit, touch (an object), watch (a person), carry/hold (an object), talk to (e.g., self, a person, a group); [0.616, 0.147, 0.991, 0.872]: listen to (a person), sit, touch (an object), watch (a person)."}, {"from": "human", "timestamps": 1230.0, "value": "Describe the actions of the person at bbox [0.095, 0.132, 0.527, 0.819] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.095, 0.132, 0.527, 0.819]: sit, drive (e.g., a car, a truck).", "From 1228.0s to 1229.0s, [0.017, 0.128, 0.587, 0.891]->[0.051, 0.136, 0.662, 0.818]: sit, drive (e.g., a car, a truck)."]}]}, {"video": "9QbzS8bZXFE.mkv", "clip": [1183, 1255], "conversations": [{"from": "human", "timestamps": 1213.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.014, 0.176, 0.483, 0.844]: listen to (a person), sit, drive (e.g., a car, a truck)."}, {"from": "human", "timestamps": 1255.0, "value": "For the person at bbox [0.487, 0.224, 0.67, 0.609], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.487, 0.224, 0.67, 0.609]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1254.0s, [0.512, 0.224, 0.672, 0.6]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1253.0s, [0.514, 0.218, 0.682, 0.592]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "9QbzS8bZXFE.mkv", "clip": [1251, 1318], "conversations": [{"from": "human", "timestamps": 1281.0, "value": "What actions did the person at bbox [0.157, 0.306, 0.521, 0.868] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.157, 0.306, 0.521, 0.868]: stand, hug (a person).", "At 1280.0s, [0.213, 0.244, 0.606, 0.876]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1318.0, "value": "Where is each 
person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.151, 0.361, 0.486, 0.78]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.384, 0.354, 0.828, 0.778]: listen to (a person), sit."}]}, {"video": "9QbzS8bZXFE.mkv", "clip": [1361, 1438], "conversations": [{"from": "human", "timestamps": 1391.0, "value": "What actions did the person at bbox [0.177, 0.298, 0.489, 0.889] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.177, 0.298, 0.489, 0.889]: sit, talk to (e.g., self, a person, a group).", "At 1390.0s, [0.178, 0.301, 0.492, 0.892]: sit, talk to (e.g., self, a person, a group).", "From 1379.0s to 1389.0s, [0.016, 0.42, 0.458, 0.891]->[0.19, 0.306, 0.481, 0.889]->[0.182, 0.303, 0.485, 0.893]->[0.191, 0.307, 0.486, 0.888]->[0.192, 0.308, 0.478, 0.885]->[0.194, 0.32, 0.484, 0.883]->[0.2, 0.318, 0.496, 0.883]->[0.185, 0.316, 0.482, 0.886]->[0.175, 0.295, 0.493, 0.894]->[0.175, 0.298, 0.497, 0.889]->[0.181, 0.305, 0.496, 0.889]: sit.", "From 1373.0s to 1377.0s, [0.46, 0.174, 0.893, 0.879]->[0.46, 0.183, 0.888, 0.865]->[0.435, 0.17, 0.884, 0.873]->[0.414, 0.174, 0.88, 0.877]->[0.137, 0.146, 0.726, 0.882]: stand.", "At 1372.0s, [0.461, 0.179, 0.903, 0.876]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1438.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.198, 0.314, 0.307, 0.789]: listen to (a person), stand; [0.367, 0.393, 0.465, 0.782]: watch (a person), stand, touch (an object), talk to (e.g., self, a person, a group)."}]}, {"video": "9QbzS8bZXFE.mkv", "clip": [1456, 1487], "conversations": [{"from": "human", "timestamps": 1486.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.139, 0.273, 0.463, 0.781]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.425, 0.313, 0.698, 0.836]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1487.0, "value": "Describe the actions of the person at bbox [0.421, 0.324, 0.7, 0.848] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At now, [0.421, 0.324, 0.7, 0.848]: sit, listen to (a person), watch (a person).", "From 1485.0s to 1486.0s, [0.415, 0.303, 0.692, 0.842]->[0.425, 0.313, 0.698, 0.836]: sit, listen to (a person), watch (a person).", "From 1483.0s to 1484.0s, [0.433, 0.302, 0.701, 0.864]->[0.414, 0.298, 0.696, 0.852]: sit, touch (an object), listen to (a person), watch (a person).", "At 1482.0s, [0.448, 0.297, 0.706, 0.871]: sit, listen to (a person), watch (a person).", "From 1480.0s to 1481.0s, [0.44, 0.297, 0.709, 0.876]->[0.445, 0.298, 0.709, 0.888]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1479.0s, [0.439, 0.301, 0.705, 0.848]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1476.0s to 1478.0s, [0.449, 0.301, 0.703, 0.878]->[0.447, 0.294, 0.705, 0.874]->[0.441, 0.294, 0.703, 0.875]: sit, listen to (a person), watch (a person).", "At 1475.0s, [0.453, 0.291, 0.704, 0.879]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1474.0s, [0.451, 0.289, 0.7, 0.872]: sit, touch (an object), listen to (a person), watch (a person).", "At 1473.0s, [0.448, 0.281, 0.692, 0.864]: sit, listen to (a person), watch (a person).", "At 1472.0s, [0.441, 0.279, 0.694, 
0.862]: sit, listen to (a person).", "At 1471.0s, [0.444, 0.277, 0.707, 0.822]: sit, touch (an object), listen to (a person).", "From 1469.0s to 1470.0s, [0.454, 0.269, 0.703, 0.87]->[0.45, 0.274, 0.709, 0.873]: sit, listen to (a person).", "At 1468.0s, [0.446, 0.26, 0.715, 0.851]: sit, touch (an object), listen to (a person).", "At 1467.0s, [0.471, 0.272, 0.72, 0.868]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "From 1461.0s to 1466.0s, [0.483, 0.251, 0.747, 0.849]->[0.491, 0.254, 0.753, 0.822]->[0.491, 0.253, 0.736, 0.864]->[0.479, 0.252, 0.726, 0.855]->[0.467, 0.259, 0.735, 0.87]->[0.465, 0.271, 0.725, 0.875]: sit, talk to (e.g., self, a person, a group).", "At 1459.0s, [0.434, 0.407, 0.728, 0.868]: stand, touch (an object).", "At 1458.0s, [0.221, 0.209, 0.66, 0.882]: stand.", "From 1454.0s to 1457.0s, [0.268, 0.137, 0.682, 0.869]->[0.28, 0.149, 0.657, 0.871]->[0.249, 0.169, 0.677, 0.872]->[0.217, 0.185, 0.661, 0.873]: stand, talk to (e.g., self, a person, a group).", "At 1453.0s, [0.211, 0.19, 0.66, 0.878]: stand.", "At 1452.0s, [0.203, 0.193, 0.66, 0.875]: stand, watch (a person).", "From 1450.0s to 1451.0s, [0.001, 0.122, 0.415, 0.872]->[0.0, 0.127, 0.351, 0.883]: bend/bow (at the waist), watch (a person).", "At 1449.0s, [0.009, 0.127, 0.381, 0.907]: bend/bow (at the waist), listen to (a person), watch (a person).", "At 1448.0s, [0.007, 0.13, 0.34, 0.873]: stand, watch (a person).", "At 1447.0s, [0.0, 0.115, 0.373, 0.891]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1446.0s, [0.003, 0.111, 0.333, 0.89]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "9QbzS8bZXFE.mkv", "clip": [1499, 1557], "conversations": [{"from": "human", "timestamps": 1529.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.01, 0.22, 0.735, 0.893]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.598, 0.129, 1.0, 0.892]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1557.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.32, 0.131, 0.97, 0.866]?"}, {"from": "gpt", "value": ["In the current frame, [0.32, 0.131, 0.97, 0.866]: sit, talk to (e.g., self, a person, a group).", "From 1555.0s to 1556.0s, [0.323, 0.121, 0.963, 0.869]->[0.352, 0.124, 0.955, 0.853]: sit."]}]}, {"video": "9QbzS8bZXFE.mkv", "clip": [1484, 1603], "conversations": [{"from": "human", "timestamps": 1514.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.199, 0.491, 0.902]: listen to (a person), sit; [0.429, 0.211, 0.872, 0.872]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1603.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.119, 0.129, 0.775, 0.869]?"}, {"from": "gpt", "value": ["In the current frame, [0.119, 0.129, 0.775, 0.869]: stand, eat, talk to (e.g., self, a person, a group).", "At 1602.0s, [0.084, 0.143, 0.666, 0.884]: stand, eat, talk to (e.g., self, a person, a group).", "At 1601.0s, [0.045, 0.131, 0.616, 0.885]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1600.0s, [0.069, 0.158, 0.851, 0.884]: sit, carry/hold (an object), take (an object) from (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1599.0s, [0.378, 0.614, 0.498, 0.875]: 
sit, listen to (a person), watch (a person).", "At 1598.0s, [0.377, 0.614, 0.505, 0.872]: sit, touch (an object), listen to (a person), watch (a person).", "From 1596.0s to 1597.0s, [0.372, 0.61, 0.536, 0.875]->[0.366, 0.617, 0.515, 0.874]: sit, eat, touch (an object), listen to (a person), watch (a person).", "From 1594.0s to 1595.0s, [0.366, 0.606, 0.502, 0.878]->[0.369, 0.608, 0.529, 0.875]: sit, eat, listen to (a person), watch (a person).", "At 1593.0s, [0.365, 0.609, 0.503, 0.879]: sit, eat, watch (a person)."]}]}, {"video": "9QbzS8bZXFE.mkv", "clip": [1567, 1604], "conversations": [{"from": "human", "timestamps": 1597.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.366, 0.617, 0.515, 0.874]: sit, touch (an object), listen to (a person), watch (a person), eat; [0.603, 0.616, 0.714, 0.874]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1604.0, "value": "What actions did the person at bbox [0.194, 0.151, 0.654, 0.879] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.194, 0.151, 0.654, 0.879]: sit, talk to (e.g., self, a person, a group).", "From 1602.0s to 1603.0s, [0.084, 0.143, 0.666, 0.884]->[0.119, 0.129, 0.775, 0.869]: stand, eat, talk to (e.g., self, a person, a group).", "At 1601.0s, [0.045, 0.131, 0.616, 0.885]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1600.0s, [0.069, 0.158, 0.851, 0.884]: sit, carry/hold (an object), take (an object) from (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1599.0s, [0.378, 0.614, 0.498, 0.875]: sit, listen to (a person), watch (a person).", "At 1598.0s, [0.377, 0.614, 0.505, 0.872]: sit, touch (an object), listen to (a person), watch (a person).", "From 1596.0s to 1597.0s, [0.372, 0.61, 0.536, 0.875]->[0.366, 0.617, 0.515, 0.874]: sit, eat, touch (an object), listen to (a person), watch (a person).", "From 1594.0s to 1595.0s, [0.366, 0.606, 0.502, 0.878]->[0.369, 0.608, 0.529, 0.875]: sit, eat, listen to (a person), watch (a person).", "At 1593.0s, [0.365, 0.609, 0.503, 0.879]: sit, eat, watch (a person)."]}]}, {"video": "9QbzS8bZXFE.mkv", "clip": [1601, 1643], "conversations": [{"from": "human", "timestamps": 1631.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.425, 0.125, 0.996, 0.868]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1643.0, "value": "For the person at bbox [0.0, 0.117, 0.311, 0.895], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.117, 0.311, 0.895]: sit, listen to (a person), watch (a person).", "At 1642.0s, [0.0, 0.17, 0.31, 0.882]: listen to (a person), watch (a person).", "At 1641.0s, [0.061, 0.122, 0.641, 0.878]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1640.0s, [0.165, 0.144, 0.662, 0.875]: stand, talk to (e.g., self, a person, a group).", "At 1639.0s, [0.084, 0.155, 0.621, 0.893]: stand, carry/hold (an object).", "From 1637.0s to 1638.0s, [0.093, 0.157, 0.611, 0.902]->[0.064, 0.152, 0.618, 0.896]: sit.", "At 1636.0s, [0.094, 0.161, 0.579, 0.877]: sit, drink, watch (a person).", "At 1635.0s, [0.064, 0.15, 0.751, 0.861]: sit, drink, listen to (a person), watch (a person)."]}]}, {"video": "9QbzS8bZXFE.mkv", "clip": [1668, 1713], 
"conversations": [{"from": "human", "timestamps": 1698.0, "value": "Describe the actions of the person at bbox [0.212, 0.167, 0.665, 0.88] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.212, 0.167, 0.665, 0.88]: sit, talk to (e.g., self, a person, a group).", "At 1697.0s, [0.211, 0.161, 0.664, 0.892]: sit, stand, talk to (e.g., self, a person, a group).", "At 1696.0s, [0.228, 0.159, 0.69, 0.865]: sit, talk to (e.g., self, a person, a group).", "At 1695.0s, [0.229, 0.163, 0.695, 0.861]: sit, stand, talk to (e.g., self, a person, a group).", "At 1694.0s, [0.239, 0.162, 0.706, 0.877]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1713.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.334, 0.657, 0.456, 0.873]: sit, touch (an object)."}]}, {"video": "9QbzS8bZXFE.mkv", "clip": [1726, 1787], "conversations": [{"from": "human", "timestamps": 1756.0, "value": "For the person at bbox [0.331, 0.381, 0.461, 0.747], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.331, 0.381, 0.461, 0.747]: stand, carry/hold (an object), watch (a person).", "From 1751.0s to 1755.0s, [0.172, 0.319, 0.31, 0.745]->[0.165, 0.327, 0.312, 0.746]->[0.155, 0.317, 0.297, 0.738]->[0.159, 0.326, 0.32, 0.747]->[0.29, 0.356, 0.431, 0.744]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1787.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.169, 0.164, 0.942, 0.898]: sit."}]}, {"video": "9Rcxr3IEX4E.mkv", "clip": [882, 931], "conversations": [{"from": "human", "timestamps": 912.0, "value": "For the person at bbox [0.158, 0.0, 1.0, 0.995], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.158, 0.0, 1.0, 0.995]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 911.0s, [0.155, 0.011, 1.0, 0.98]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 931.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.166, 0.073, 1.0, 0.989]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "9Rcxr3IEX4E.mkv", "clip": [941, 983], "conversations": [{"from": "human", "timestamps": 971.0, "value": "What actions did the person at bbox [0.121, 0.054, 0.821, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.121, 0.054, 0.821, 1.0]: sit, listen to (a person), watch (a person).", "At 970.0s, [0.031, 0.058, 1.0, 0.978]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 968.0s to 969.0s, [0.015, 0.052, 1.0, 0.982]->[0.023, 0.045, 1.0, 0.977]: sit, talk to (e.g., self, a person, a group).", "From 966.0s to 967.0s, [0.04, 0.044, 1.0, 0.983]->[0.019, 0.059, 1.0, 0.987]: stand, talk to (e.g., self, a person, a group).", "From 964.0s to 965.0s, [0.048, 0.052, 1.0, 0.973]->[0.035, 0.068, 1.0, 0.979]: sit, talk to (e.g., self, a person, a group).", "At 963.0s, [0.033, 0.055, 1.0, 0.976]: sit, stand, talk to (e.g., self, a person, a group).", "From 961.0s to 962.0s, [0.043, 0.06, 1.0, 0.977]->[0.043, 0.049, 1.0, 0.993]: sit, talk to (e.g., self, a person, a 
group)."]}, {"from": "human", "timestamps": 983.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.261, 0.053, 0.985, 0.994]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "9Rcxr3IEX4E.mkv", "clip": [1025, 1112], "conversations": [{"from": "human", "timestamps": 1055.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.399, 0.079, 0.953, 0.961]: watch (a person), dance."}, {"from": "human", "timestamps": 1112.0, "value": "What actions is the person in bbox [0.477, 0.191, 0.749, 0.975] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.477, 0.191, 0.749, 0.975]: stand, listen to (a person), watch (a person).", "At 1111.0s, [0.479, 0.202, 0.745, 0.986]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1106.0s to 1110.0s, [0.467, 0.182, 0.707, 0.97]->[0.475, 0.204, 0.714, 0.973]->[0.469, 0.21, 0.72, 0.978]->[0.467, 0.195, 0.719, 0.982]->[0.489, 0.188, 0.737, 0.972]: stand, listen to (a person), watch (a person).", "From 1104.0s to 1105.0s, [0.468, 0.19, 0.726, 0.988]->[0.46, 0.197, 0.724, 0.972]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "9Rcxr3IEX4E.mkv", "clip": [1283, 1314], "conversations": [{"from": "human", "timestamps": 1313.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.138, 0.207, 0.302, 0.651]: walk; [0.285, 0.27, 0.346, 0.553]: walk; [0.332, 0.31, 0.44, 0.628]: watch (a person), stand; [0.425, 0.22, 0.547, 0.59]: watch (a person), walk."}, {"from": "human", "timestamps": 1314.0, "value": "Describe the actions of the person at bbox [0.361, 0.227, 0.537, 0.629] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.361, 0.227, 0.537, 0.629]: walk, watch (a person).", "At 1313.0s, [0.425, 0.22, 0.547, 0.59]: walk, watch (a person)."]}]}, {"video": "9Rcxr3IEX4E.mkv", "clip": [1234, 1372], "conversations": [{"from": "human", "timestamps": 1264.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.089, 0.117, 0.674, 0.986]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1372.0, "value": "What actions did the person at bbox [0.024, 0.01, 0.895, 0.887] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.024, 0.01, 0.895, 0.887]: talk to (e.g., self, a person, a group), lie/sleep.", "At 1371.0s, [0.011, 0.037, 0.819, 0.888]: talk to (e.g., self, a person, a group), lie/sleep."]}]}, {"video": "9Rcxr3IEX4E.mkv", "clip": [1349, 1427], "conversations": [{"from": "human", "timestamps": 1379.0, "value": "What actions is the person in bbox [0.154, 0.049, 0.877, 0.972] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.154, 0.049, 0.877, 0.972]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1378.0s, [0.162, 0.056, 0.885, 0.977]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1375.0s to 1377.0s, [0.164, 0.05, 0.837, 0.96]->[0.146, 0.052, 0.835, 0.971]->[0.163, 0.058, 0.877, 0.963]: stand, talk to (e.g., self, 
a person, a group).", "From 1373.0s to 1374.0s, [0.159, 0.067, 0.855, 0.96]->[0.146, 0.054, 0.837, 0.968]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1427.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.173, 0.0, 0.954, 0.98]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "9Rcxr3IEX4E.mkv", "clip": [1375, 1447], "conversations": [{"from": "human", "timestamps": 1405.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.181, 0.011, 0.981, 0.972]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1447.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.101, 1.0, 0.994]?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.101, 1.0, 0.994]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1446.0s, [0.0, 0.106, 1.0, 0.999]: sit, stand, talk to (e.g., self, a person, a group).", "At 1445.0s, [0.012, 0.087, 0.985, 0.994]: sit, talk to (e.g., self, a person, a group).", "At 1444.0s, [0.039, 0.079, 0.963, 0.968]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1443.0s, [0.001, 0.074, 0.986, 0.979]: sit, listen to (a person), talk to (e.g., self, a person, a group)."]}]}, {"video": "9Rcxr3IEX4E.mkv", "clip": [1449, 1492], "conversations": [{"from": "human", "timestamps": 1479.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.509, 0.421, 0.574, 0.631]?"}, {"from": "gpt", "value": ["At Now, [0.509, 0.421, 0.574, 0.631]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1477.0s, [0.511, 0.422, 0.572, 0.635]: stand, talk to (e.g., self, a person, a group).", "From 1475.0s to 1476.0s, [0.506, 0.421, 0.574, 0.643]->[0.516, 0.422, 0.572, 0.639]: stand.", "At 1474.0s, [0.515, 0.426, 0.577, 0.643]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1492.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.083, 0.167, 0.304, 0.962]: listen to (a person), walk; [0.271, 0.16, 0.517, 0.952]: talk to (e.g., self, a person, a group), walk; [0.594, 0.203, 0.878, 0.966]: walk; [0.856, 0.177, 0.993, 0.954]: carry/hold (an object), listen to (a person), walk."}]}, {"video": "9Rcxr3IEX4E.mkv", "clip": [1473, 1513], "conversations": [{"from": "human", "timestamps": 1503.0, "value": "What actions did the person at bbox [0.321, 0.072, 0.918, 0.975] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.321, 0.072, 0.918, 0.975]: walk, talk to (e.g., self, a person, a group).", "From 1501.0s to 1502.0s, [0.337, 0.066, 0.945, 0.993]->[0.374, 0.099, 0.935, 0.982]: walk, talk to (e.g., self, a person, a group).", "At 1500.0s, [0.367, 0.113, 0.955, 0.992]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1499.0s, [0.374, 0.135, 0.94, 0.986]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 1495.0s to 1498.0s, [0.308, 0.141, 0.686, 0.982]->[0.346, 0.155, 0.743, 0.984]->[0.368, 0.16, 0.862, 0.981]->[0.395, 0.163, 0.927, 0.993]: walk, talk to (e.g., self, a person, a group).", "At 1494.0s, [0.297, 0.16, 0.582, 0.983]: walk, talk to (e.g., self, a 
person, a group), watch (a person).", "From 1492.0s to 1493.0s, [0.271, 0.16, 0.517, 0.952]->[0.275, 0.13, 0.529, 0.967]: walk, talk to (e.g., self, a person, a group).", "At 1491.0s, [0.263, 0.183, 0.482, 0.945]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 1489.0s to 1490.0s, [0.237, 0.247, 0.463, 0.921]->[0.252, 0.214, 0.445, 0.939]: walk, talk to (e.g., self, a person, a group).", "At 1488.0s, [0.234, 0.299, 0.427, 0.92]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 1486.0s to 1487.0s, [0.222, 0.37, 0.379, 0.892]->[0.227, 0.349, 0.408, 0.927]: walk, talk to (e.g., self, a person, a group).", "At 1485.0s, [0.232, 0.378, 0.371, 0.888]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 1483.0s to 1484.0s, [0.233, 0.4, 0.364, 0.858]->[0.219, 0.392, 0.361, 0.872]: walk, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1513.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.222, 0.441, 0.996]: listen to (a person), walk; [0.406, 0.116, 1.0, 0.983]: listen to (a person), talk to (e.g., self, a person, a group), walk."}]}, {"video": "9Rcxr3IEX4E.mkv", "clip": [1617, 1650], "conversations": [{"from": "human", "timestamps": 1647.0, "value": "For the person at bbox [0.396, 0.048, 0.637, 0.813], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.396, 0.048, 0.637, 0.813]: stand, carry/hold (an object), listen to (a person).", "At 1646.0s, [0.396, 0.055, 0.641, 0.811]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1643.0s to 1645.0s, [0.392, 0.052, 0.654, 0.648]->[0.393, 0.05, 0.635, 0.725]->[0.397, 0.058, 0.644, 0.774]: stand, carry/hold (an object), listen to (a person).", "From 1639.0s to 1642.0s, [0.413, 0.058, 0.642, 0.866]->[0.4, 0.057, 0.632, 0.71]->[0.396, 0.06, 0.632, 0.814]->[0.399, 0.055, 0.624, 0.861]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1650.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.446, 0.038, 0.716, 0.739]: carry/hold (an object), stand, watch (a person); [0.72, 0.069, 0.997, 0.979]: carry/hold (an object), stand, watch (a person)."}]}, {"video": "9Rcxr3IEX4E.mkv", "clip": [1611, 1681], "conversations": [{"from": "human", "timestamps": 1641.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.19, 0.334, 0.528, 0.988]: listen to (a person), stand, watch (a person); [0.396, 0.06, 0.632, 0.814]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.623, 0.061, 0.995, 0.99]: carry/hold (an object), stand, listen to (a person); [0.822, 0.003, 0.948, 0.327]: carry/hold (an object), stand, listen to (a person)."}, {"from": "human", "timestamps": 1681.0, "value": "Describe the actions of the person at bbox [0.332, 0.226, 0.492, 0.805] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.332, 0.226, 0.492, 0.805]: dance, watch (a person).", "From 1679.0s to 1680.0s, [0.317, 0.237, 0.488, 0.822]->[0.316, 0.248, 0.441, 0.752]: dance, watch (a person).", "From 1677.0s to 1678.0s, [0.73, 0.555, 0.81, 0.853]->[0.729, 0.556, 0.813, 0.889]: stand, carry/hold (an object)."]}]}, {"video": 
"9Rcxr3IEX4E.mkv", "clip": [1687, 1718], "conversations": [{"from": "human", "timestamps": 1717.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.143, 0.009, 0.986, 0.977]: dance."}, {"from": "human", "timestamps": 1718.0, "value": "For the person at bbox [0.002, 0.052, 0.906, 0.982], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.002, 0.052, 0.906, 0.982]: crouch/kneel.", "At 1717.0s, [0.143, 0.009, 0.986, 0.977]: dance."]}]}, {"video": "9bK05eBt1GM.mp4", "clip": [886, 946], "conversations": [{"from": "human", "timestamps": 916.0, "value": "What actions is the person in bbox [0.009, 0.278, 0.386, 0.903] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.009, 0.278, 0.386, 0.903]: stand, watch (e.g., TV), listen to (a person), watch (a person).", "From 914.0s to 915.0s, [0.064, 0.311, 0.282, 0.758]->[0.0, 0.26, 0.196, 0.815]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 946.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.341, 0.139, 0.912]: talk to (e.g., self, a person, a group), walk; [0.188, 0.361, 0.312, 0.908]: walk; [0.301, 0.377, 0.4, 0.881]: walk; [0.387, 0.35, 0.607, 0.914]: carry/hold (an object), walk; [0.569, 0.369, 0.639, 0.794]: walk; [0.629, 0.38, 0.707, 0.781]: walk; [0.772, 0.467, 0.821, 0.691]: walk; [0.822, 0.471, 0.908, 0.716]: walk; [0.885, 0.452, 0.96, 0.717]: watch (a person), walk."}]}, {"video": "9bK05eBt1GM.mp4", "clip": [983, 1015], "conversations": [{"from": "human", "timestamps": 1013.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.198, 0.092, 1.0, 0.924]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1015.0, "value": "What actions did the person at bbox [0.221, 0.064, 0.984, 0.904] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.221, 0.064, 0.984, 0.904]: stand, talk to (e.g., self, a person, a group).", "At 1014.0s, [0.172, 0.11, 0.982, 0.924]: stand, talk to (e.g., self, a person, a group).", "At 1013.0s, [0.198, 0.092, 1.0, 0.924]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "9bK05eBt1GM.mp4", "clip": [1115, 1165], "conversations": [{"from": "human", "timestamps": 1145.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.19, 0.21, 0.454, 0.925]: carry/hold (an object), watch (a person), walk; [0.734, 0.14, 0.994, 0.834]: walk."}, {"from": "human", "timestamps": 1165.0, "value": "What actions is the person in bbox [0.279, 0.229, 0.699, 0.935] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.279, 0.229, 0.699, 0.935]: stand, talk to (e.g., self, a person, a group).", "At 1164.0s, [0.369, 0.245, 0.666, 0.89]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1163.0s, [0.342, 0.246, 0.626, 0.918]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1162.0s, [0.386, 0.243, 0.665, 0.913]: stand, listen to (a person), watch (a person).", "At 1161.0s, [0.354, 0.228, 0.665, 0.926]: stand, talk to (e.g., self, a person, a group), watch (a 
person).", "From 1159.0s to 1160.0s, [0.548, 0.262, 0.674, 0.657]->[0.35, 0.216, 0.654, 0.93]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "9bK05eBt1GM.mp4", "clip": [1134, 1224], "conversations": [{"from": "human", "timestamps": 1164.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.05, 0.233, 0.323, 0.909]: listen to (a person), stand, watch (a person); [0.369, 0.245, 0.666, 0.89]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.637, 0.132, 0.948, 0.911]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1224.0, "value": "What actions did the person at bbox [0.07, 0.118, 0.89, 0.915] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.07, 0.118, 0.89, 0.915]: sit, talk to (e.g., self, a person, a group).", "At 1223.0s, [0.038, 0.049, 0.943, 0.925]: sit, talk to (e.g., self, a person, a group).", "At 1222.0s, [0.132, 0.092, 1.0, 0.905]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "9bK05eBt1GM.mp4", "clip": [1200, 1235], "conversations": [{"from": "human", "timestamps": 1230.0, "value": "For the person at bbox [0.088, 0.128, 0.862, 0.919], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.088, 0.128, 0.862, 0.919]: sit, talk to (e.g., self, a person, a group).", "At 1229.0s, [0.091, 0.128, 0.901, 0.92]: sit, stand, talk to (e.g., self, a person, a group).", "At 1228.0s, [0.127, 0.113, 0.79, 0.927]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1235.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.166, 0.118, 0.965, 0.904]: stand."}]}, {"video": "9bK05eBt1GM.mp4", "clip": [1376, 1480], "conversations": [{"from": "human", "timestamps": 1406.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.153, 0.137, 1.0, 0.935]: sit."}, {"from": "human", "timestamps": 1480.0, "value": "What actions did the person at bbox [0.316, 0.288, 0.542, 0.811] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.316, 0.288, 0.542, 0.811]: sit.", "At 1479.0s, [0.319, 0.285, 0.54, 0.824]: sit, carry/hold (an object).", "At 1478.0s, [0.328, 0.288, 0.54, 0.786]: sit, touch (an object).", "From 1476.0s to 1477.0s, [0.319, 0.288, 0.531, 0.81]->[0.323, 0.283, 0.543, 0.804]: sit.", "At 1475.0s, [0.321, 0.297, 0.535, 0.807]: sit, touch (an object).", "From 1467.0s to 1474.0s, [0.006, 0.076, 0.93, 0.92]->[0.305, 0.296, 0.535, 0.809]->[0.321, 0.288, 0.537, 0.804]->[0.32, 0.292, 0.538, 0.81]->[0.314, 0.289, 0.531, 0.807]->[0.321, 0.293, 0.535, 0.79]->[0.317, 0.298, 0.536, 0.807]->[0.32, 0.305, 0.536, 0.8]: sit."]}]}, {"video": "9bK05eBt1GM.mp4", "clip": [1505, 1553], "conversations": [{"from": "human", "timestamps": 1535.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.255, 0.095, 1.0, 0.9]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1553.0, "value": "What actions is the person in bbox [0.299, 0.092, 0.995, 0.938] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.299, 0.092, 0.995, 0.938]: stand, talk to (e.g., self, a person, a group), watch 
(a person).", "At 1552.0s, [0.289, 0.091, 0.944, 0.918]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "9bK05eBt1GM.mp4", "clip": [1475, 1616], "conversations": [{"from": "human", "timestamps": 1505.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.279, 0.145, 0.564, 0.9]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1616.0, "value": "Describe the actions of the person at bbox [0.142, 0.099, 0.947, 0.921] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.142, 0.099, 0.947, 0.921]: sit, talk to (e.g., self, a person, a group).", "At 1615.0s, [0.064, 0.106, 0.812, 0.914]: sit, stand, talk to (e.g., self, a person, a group).", "From 1613.0s to 1614.0s, [0.001, 0.107, 0.854, 0.903]->[0.086, 0.094, 0.932, 0.917]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "9bK05eBt1GM.mp4", "clip": [1590, 1632], "conversations": [{"from": "human", "timestamps": 1620.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.162, 0.181, 0.934, 0.911]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1632.0, "value": "What actions is the person in bbox [0.226, 0.168, 0.944, 0.917] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.226, 0.168, 0.944, 0.917]: sit, talk to (e.g., self, a person, a group).", "From 1630.0s to 1631.0s, [0.195, 0.167, 0.927, 0.921]->[0.204, 0.144, 1.0, 0.908]: sit, talk to (e.g., self, a person, a group).", "At 1628.0s, [0.649, 0.272, 0.849, 0.715]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1627.0s, [0.677, 0.275, 0.842, 0.588]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "9bK05eBt1GM.mp4", "clip": [1601, 1735], "conversations": [{"from": "human", "timestamps": 1631.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.204, 0.144, 1.0, 0.908]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1735.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.093, 0.127, 0.828, 0.902]?"}, {"from": "gpt", "value": ["At Now, [0.093, 0.127, 0.828, 0.902]: sit, talk to (e.g., self, a person, a group).", "From 1733.0s to 1734.0s, [0.121, 0.117, 0.855, 0.92]->[0.135, 0.129, 0.851, 0.928]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1732.0s, [0.141, 0.106, 0.913, 0.935]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "9eAOr_ttXp0.mkv", "clip": [897, 940], "conversations": [{"from": "human", "timestamps": 927.0, "value": "Describe the actions of the person at bbox [0.567, 0.333, 0.869, 0.992] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.567, 0.333, 0.869, 0.992]: walk.", "From 925.0s to 926.0s, [0.547, 0.441, 0.94, 0.993]->[0.602, 0.396, 0.977, 0.989]: bend/bow (at the waist)."]}, {"from": "human", "timestamps": 940.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.238, 0.038, 0.657, 0.969]: carry/hold (an object), watch (a person), bend/bow (at the waist), walk."}]}, {"video": "9eAOr_ttXp0.mkv", "clip": [1008, 1071], 
"conversations": [{"from": "human", "timestamps": 1038.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.242, 0.195, 0.922, 0.983]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1071.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.084, 0.269, 0.355, 0.972]?"}, {"from": "gpt", "value": ["At Now, [0.084, 0.269, 0.355, 0.972]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1070.0s, [0.287, 0.201, 0.602, 0.993]: walk, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "9eAOr_ttXp0.mkv", "clip": [1019, 1112], "conversations": [{"from": "human", "timestamps": 1049.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.025, 0.097, 0.808, 0.983]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1112.0, "value": "For the person at bbox [0.034, 0.191, 0.606, 0.992], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.034, 0.191, 0.606, 0.992]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1111.0s, [0.035, 0.187, 0.621, 0.975]: stand, carry/hold (an object), touch (an object).", "At 1110.0s, [0.028, 0.196, 0.518, 0.975]: stand, touch (an object).", "At 1109.0s, [0.035, 0.206, 0.554, 0.972]: stand, put down.", "At 1108.0s, [0.042, 0.272, 0.478, 0.981]: stand, carry/hold (an object).", "At 1107.0s, [0.023, 0.268, 0.515, 1.0]: stand, carry/hold (an object), listen (e.g., to music).", "At 1106.0s, [0.024, 0.259, 0.584, 0.985]: stand, carry/hold (an object).", "At 1105.0s, [0.034, 0.264, 0.472, 0.989]: sit, answer phone, carry/hold (an object), listen (e.g., to music), talk to (e.g., self, a person, a group).", "At 1104.0s, [0.035, 0.265, 0.486, 0.988]: stand, answer phone, carry/hold (an object).", "From 1102.0s to 1103.0s, [0.032, 0.274, 0.513, 0.991]->[0.041, 0.262, 0.509, 0.99]: stand, answer phone.", "At 1101.0s, [0.026, 0.272, 0.5, 0.986]: stand, answer phone, carry/hold (an object).", "From 1098.0s to 1100.0s, [0.025, 0.264, 0.508, 0.984]->[0.037, 0.273, 0.503, 0.989]->[0.031, 0.266, 0.49, 0.988]: stand, answer phone.", "At 1097.0s, [0.032, 0.269, 0.488, 0.986]: stand, answer phone, carry/hold (an object).", "At 1096.0s, [0.027, 0.251, 0.481, 0.984]: stand, answer phone, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1095.0s, [0.015, 0.245, 0.474, 0.979]: stand, carry/hold (an object), listen (e.g., to music), talk to (e.g., self, a person, a group).", "At 1094.0s, [0.017, 0.235, 0.476, 0.984]: stand, carry/hold (an object), listen (e.g., to music), watch (a person).", "At 1093.0s, [0.024, 0.261, 0.455, 1.0]: stand, carry/hold (an object).", "From 1090.0s to 1092.0s, [0.003, 0.002, 0.44, 0.976]->[0.0, 0.086, 0.413, 0.981]->[0.027, 0.261, 0.488, 0.988]: stand, answer phone."]}]}, {"video": "9eAOr_ttXp0.mkv", "clip": [1065, 1103], "conversations": [{"from": "human", "timestamps": 1095.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.015, 0.245, 0.474, 0.979]: carry/hold (an object), stand, listen (e.g., to music), talk to (e.g., self, a person, a group); [0.493, 0.048, 0.979, 0.977]: listen to (a 
person), stand, watch (a person)."}, {"from": "human", "timestamps": 1103.0, "value": "What actions did the person at bbox [0.48, 0.046, 0.973, 0.978] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.48, 0.046, 0.973, 0.978]: stand, listen to (a person), watch (a person).", "From 1091.0s to 1102.0s, [0.443, 0.002, 0.986, 0.99]->[0.503, 0.056, 0.977, 0.977]->[0.503, 0.05, 0.979, 0.983]->[0.499, 0.043, 0.972, 0.973]->[0.493, 0.048, 0.979, 0.977]->[0.485, 0.034, 0.972, 0.976]->[0.447, 0.042, 0.974, 0.982]->[0.447, 0.045, 0.964, 0.983]->[0.466, 0.051, 0.969, 0.99]->[0.482, 0.05, 0.975, 0.982]->[0.47, 0.044, 0.967, 0.989]->[0.476, 0.043, 0.965, 0.987]: stand, listen to (a person), watch (a person)."]}]}, {"video": "9eAOr_ttXp0.mkv", "clip": [1134, 1175], "conversations": [{"from": "human", "timestamps": 1164.0, "value": "For the person at bbox [0.059, 0.005, 0.613, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.059, 0.005, 0.613, 1.0]: walk.", "At 1163.0s, [0.167, 0.089, 0.58, 0.978]: walk."]}, {"from": "human", "timestamps": 1175.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.485, 0.19, 0.802, 1.0]: stand, close (e.g., a door, a box)."}]}, {"video": "9eAOr_ttXp0.mkv", "clip": [1137, 1211], "conversations": [{"from": "human", "timestamps": 1167.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.302, 0.386, 0.487, 0.976]: watch (a person), stand, smoke, touch (an object); [0.644, 0.187, 0.81, 0.989]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1211.0, "value": "What actions is the person in bbox [0.397, 0.064, 0.978, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.397, 0.064, 0.978, 1.0]: sit, carry/hold (an object).", "From 1205.0s to 1210.0s, [0.321, 0.051, 0.97, 1.0]->[0.383, 0.071, 0.948, 0.984]->[0.422, 0.079, 0.952, 1.0]->[0.418, 0.069, 0.974, 0.998]->[0.419, 0.067, 0.985, 1.0]->[0.406, 0.047, 0.979, 0.976]: sit, carry/hold (an object).", "From 1203.0s to 1204.0s, [0.226, 0.055, 0.921, 0.916]->[0.146, 0.051, 0.95, 0.979]: sit, carry/hold (an object), listen to (a person).", "From 1199.0s to 1202.0s, [0.343, 0.008, 0.981, 0.975]->[0.393, 0.0, 0.975, 0.991]->[0.401, 0.0, 0.969, 0.983]->[0.421, 0.001, 0.912, 0.989]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1198.0s, [0.394, 0.037, 0.981, 0.954]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1196.0s to 1197.0s, [0.431, 0.024, 0.989, 0.981]->[0.428, 0.043, 0.995, 0.974]: sit, carry/hold (an object), watch (a person)."]}]}, {"video": "9eAOr_ttXp0.mkv", "clip": [1254, 1302], "conversations": [{"from": "human", "timestamps": 1284.0, "value": "Describe the actions of the person at bbox [0.529, 0.228, 0.64, 0.783] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.529, 0.228, 0.64, 0.783]: bend/bow (at the waist), carry/hold (an object).", "From 1281.0s to 1283.0s, [0.526, 0.215, 0.651, 0.706]->[0.518, 0.202, 0.646, 0.731]->[0.524, 0.205, 0.631, 0.742]: walk, carry/hold (an object).", "At 1280.0s, [0.236, 0.074, 0.719, 0.979]: stand, walk, touch (an object).", "At 1279.0s, [0.059, 0.04, 0.585, 0.993]: stand, open (e.g., a window, a car 
door), touch (an object), watch (a person).", "At 1278.0s, [0.103, 0.006, 0.954, 0.993]: stand, lift/pick up, watch (a person)."]}, {"from": "human", "timestamps": 1302.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.082, 0.083, 0.481, 0.93]: watch (a person), stand; [0.637, 0.06, 0.983, 0.993]: watch (a person), stand."}]}, {"video": "9eAOr_ttXp0.mkv", "clip": [1538, 1599], "conversations": [{"from": "human", "timestamps": 1568.0, "value": "For the person at bbox [0.214, 0.301, 0.569, 0.874], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.214, 0.301, 0.569, 0.874]: carry/hold (an object), crouch/kneel.", "From 1566.0s to 1567.0s, [0.314, 0.285, 0.606, 0.893]->[0.206, 0.283, 0.556, 0.881]: bend/bow (at the waist), carry/hold (an object)."]}, {"from": "human", "timestamps": 1599.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.004, 0.25, 0.994, 0.991]: ride (e.g., a bike, a car, a horse), lie/sleep."}]}, {"video": "9eAOr_ttXp0.mkv", "clip": [1608, 1701], "conversations": [{"from": "human", "timestamps": 1638.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.036, 0.157, 0.21, 0.983]: watch (a person), stand, smoke, talk to (e.g., self, a person, a group); [0.647, 0.101, 0.946, 0.97]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1701.0, "value": "For the person at bbox [0.53, 0.003, 0.982, 0.856], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.53, 0.003, 0.982, 0.856]: sit, watch (a person).", "At 1700.0s, [0.518, 0.001, 0.985, 0.832]: sit, listen to (a person), watch (a person)."]}]}, {"video": "9eAOr_ttXp0.mkv", "clip": [1653, 1684], "conversations": [{"from": "human", "timestamps": 1683.0, "value": "What actions did the person at bbox [0.523, 0.146, 0.966, 0.813] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.523, 0.146, 0.966, 0.813]: sit, drink, listen to (a person), watch (a person).", "From 1677.0s to 1682.0s, [0.52, 0.138, 0.947, 0.763]->[0.473, 0.148, 0.958, 0.908]->[0.421, 0.137, 0.946, 0.943]->[0.552, 0.153, 0.966, 0.825]->[0.528, 0.15, 0.968, 0.813]->[0.53, 0.154, 0.975, 0.841]: sit, drink, listen to (a person), watch (a person).", "From 1675.0s to 1676.0s, [0.542, 0.147, 0.96, 0.803]->[0.538, 0.142, 0.952, 0.775]: sit, drink, listen to (a person).", "At 1674.0s, [0.534, 0.148, 0.972, 0.844]: sit, listen to (a person), watch (a person).", "From 1672.0s to 1673.0s, [0.527, 0.138, 0.946, 0.786]->[0.54, 0.148, 0.963, 0.808]: sit, drink, listen to (a person), watch (a person).", "From 1670.0s to 1671.0s, [0.495, 0.145, 0.933, 0.78]->[0.519, 0.144, 0.942, 0.749]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1669.0s, [0.508, 0.152, 0.939, 0.788]: sit, drink, talk to (e.g., self, a person, a group), watch (a person).", "At 1668.0s, [0.502, 0.146, 0.939, 0.795]: sit, carry/hold (an object), drink, talk to (e.g., self, a person, a group), watch (a person).", "From 1665.0s to 1667.0s, [0.496, 0.011, 0.974, 0.886]->[0.51, 0.159, 0.916, 0.811]->[0.504, 0.146, 0.942, 0.792]: sit, drink, talk to (e.g., self, a person, a group), watch (a person).", "At 1664.0s, [0.512, 0.012, 0.98, 0.917]: sit, drink, talk to 
(e.g., self, a person, a group).", "At 1663.0s, [0.523, 0.041, 0.978, 0.941]: sit, drink, listen to (a person), watch (a person).", "At 1662.0s, [0.514, 0.04, 0.982, 0.969]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1659.0s to 1661.0s, [0.531, 0.029, 0.979, 0.969]->[0.53, 0.029, 0.983, 0.985]->[0.517, 0.036, 0.981, 0.94]: sit, listen to (a person), watch (a person).", "From 1656.0s to 1658.0s, [0.527, 0.04, 0.982, 0.946]->[0.525, 0.04, 0.98, 0.954]->[0.528, 0.031, 0.988, 0.972]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1655.0s, [0.53, 0.031, 0.979, 0.946]: sit, drink, talk to (e.g., self, a person, a group), watch (a person).", "At 1654.0s, [0.577, 0.04, 0.987, 0.969]: sit, carry/hold (an object), drink, talk to (e.g., self, a person, a group).", "At 1653.0s, [0.594, 0.018, 0.984, 0.996]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1652.0s, [0.493, 0.051, 0.97, 0.854]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 1650.0s to 1651.0s, [0.537, 0.072, 0.977, 0.978]->[0.528, 0.066, 0.987, 0.907]: sit, carry/hold (an object), listen to (a person).", "At 1649.0s, [0.542, 0.064, 0.982, 1.0]: sit, carry/hold (an object), touch (an object), listen to (a person).", "From 1645.0s to 1648.0s, [0.441, 0.04, 0.989, 0.984]->[0.441, 0.036, 0.993, 0.988]->[0.469, 0.06, 0.989, 0.974]->[0.548, 0.071, 0.984, 0.997]: sit, carry/hold (an object), listen to (a person).", "At 1644.0s, [0.45, 0.005, 0.984, 0.967]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1643.0s, [0.535, 0.014, 0.986, 0.966]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1684.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.091, 0.054, 0.505, 0.963]: sit, talk to (e.g., self, a person, a group), drink; [0.528, 0.143, 0.967, 0.79]: sit, drink, listen to (a person), watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "9eAOr_ttXp0.mkv", "clip": [1708, 1757], "conversations": [{"from": "human", "timestamps": 1738.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.16, 0.211, 0.368, 0.99]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.588, 0.063, 0.81, 0.984]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1757.0, "value": "What actions did the person at bbox [0.0, 0.07, 0.593, 0.971] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.07, 0.593, 0.971]: stand, watch (a person).", "From 1752.0s to 1756.0s, [0.223, 0.02, 0.758, 0.984]->[0.137, 0.241, 0.326, 0.806]->[0.139, 0.231, 0.329, 0.813]->[0.146, 0.254, 0.347, 0.811]->[0.0, 0.039, 0.498, 0.995]: stand, watch (a person)."]}]}, {"video": "9mLYmkonWZQ.mkv", "clip": [1038, 1104], "conversations": [{"from": "human", "timestamps": 1068.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.442, 0.086, 0.838, 0.904]: ride (e.g., a bike, a car, a horse), sit."}, {"from": "human", "timestamps": 1104.0, "value": "For the person at bbox [0.195, 0.115, 0.962, 0.966], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.195, 0.115, 0.962, 0.966]: sit, stand, 
carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1101.0s to 1103.0s, [0.118, 0.108, 0.982, 0.973]->[0.157, 0.093, 0.954, 0.965]->[0.172, 0.066, 0.965, 0.977]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "9mLYmkonWZQ.mkv", "clip": [1212, 1262], "conversations": [{"from": "human", "timestamps": 1242.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.547, 0.241, 0.976, 0.767]: watch (a person), talk to (e.g., self, a person, a group), bend/bow (at the waist)."}, {"from": "human", "timestamps": 1262.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.022, 0.635, 0.986]?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.022, 0.635, 0.986]: stand, listen to (a person).", "At 1261.0s, [0.006, 0.004, 0.644, 0.982]: stand.", "From 1258.0s to 1260.0s, [0.012, 0.021, 0.641, 0.979]->[0.009, 0.016, 0.621, 0.978]->[0.0, 0.007, 0.626, 0.983]: stand, talk to (e.g., self, a person, a group).", "At 1257.0s, [0.543, 0.221, 0.755, 0.578]: sit, stand, carry/hold (an object), drink, listen to (a person), watch (a person).", "At 1256.0s, [0.54, 0.218, 0.754, 0.588]: stand, drink, touch (an object).", "From 1254.0s to 1255.0s, [0.543, 0.228, 0.753, 0.58]->[0.54, 0.225, 0.755, 0.581]: stand, drink.", "At 1253.0s, [0.541, 0.209, 0.754, 0.589]: bend/bow (at the waist), carry/hold (an object), drink.", "At 1252.0s, [0.545, 0.188, 0.755, 0.581]: stand, carry/hold (an object), listen to (a person).", "At 1251.0s, [0.541, 0.165, 0.768, 0.585]: stand, carry/hold (an object)."]}]}, {"video": "9mLYmkonWZQ.mkv", "clip": [1245, 1294], "conversations": [{"from": "human", "timestamps": 1275.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.004, 0.011, 0.644, 0.981]: listen to (a person), stand; [0.588, 0.438, 0.774, 0.905]: watch (a person), stand; [0.712, 0.358, 0.827, 0.641]: carry/hold (an object), sit, watch (a person), stand."}, {"from": "human", "timestamps": 1294.0, "value": "Describe the actions of the person at bbox [0.031, 0.043, 0.87, 0.976] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.031, 0.043, 0.87, 0.976]: stand, watch (a person).", "From 1292.0s to 1293.0s, [0.028, 0.029, 0.863, 0.986]->[0.032, 0.044, 0.874, 0.981]: stand.", "At 1291.0s, [0.028, 0.039, 0.857, 0.977]: stand, listen to (a person)."]}]}, {"video": "9mLYmkonWZQ.mkv", "clip": [1257, 1344], "conversations": [{"from": "human", "timestamps": 1287.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.106, 0.0, 1.0, 0.998]: watch (a person), stand."}, {"from": "human", "timestamps": 1344.0, "value": "For the person at bbox [0.125, 0.028, 0.973, 0.974], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.125, 0.028, 0.973, 0.974]: stand, watch (a person).", "From 1342.0s to 1343.0s, [0.0, 0.326, 0.106, 0.789]->[0.119, 0.032, 0.957, 0.986]: stand, watch (a person).", "At 1341.0s, [0.0, 0.401, 0.094, 0.813]: stand."]}]}, {"video": "9mLYmkonWZQ.mkv", "clip": [1339, 1375], "conversations": [{"from": "human", "timestamps": 1369.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.581, 0.249, 0.863, 0.985]?"}, {"from": "gpt", "value": ["In the current frame, [0.581, 0.249, 0.863, 
0.985]: walk, close (e.g., a door, a box), enter, watch (a person).", "At 1368.0s, [0.519, 0.255, 0.789, 0.971]: walk, enter, open (e.g., a window, a car door), touch (an object), watch (a person)."]}, {"from": "human", "timestamps": 1375.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.0, 0.007, 0.848, 0.988]: watch (a person), stand."}]}, {"video": "9mLYmkonWZQ.mkv", "clip": [1389, 1426], "conversations": [{"from": "human", "timestamps": 1419.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.017, 0.297, 0.468, 0.983]: listen to (a person), sit, drink; [0.316, 0.051, 1.0, 0.981]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1426.0, "value": "For the person at bbox [0.01, 0.306, 0.454, 0.99], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.01, 0.306, 0.454, 0.99]: sit, eat, talk to (e.g., self, a person, a group).", "From 1424.0s to 1425.0s, [0.03, 0.31, 0.471, 0.998]->[0.024, 0.3, 0.462, 0.99]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1423.0s, [0.042, 0.296, 0.495, 0.995]: sit, carry/hold (an object), eat.", "At 1422.0s, [0.041, 0.302, 0.473, 0.99]: sit, stand, carry/hold (an object), eat, listen to (a person).", "At 1421.0s, [0.05, 0.298, 0.486, 0.99]: sit, carry/hold (an object), listen to (a person).", "At 1420.0s, [0.047, 0.299, 0.471, 0.991]: sit, carry/hold (an object), drink, listen to (a person).", "At 1419.0s, [0.017, 0.297, 0.468, 0.983]: sit, drink, listen to (a person).", "At 1418.0s, [0.041, 0.314, 0.467, 0.987]: sit, carry/hold (an object), listen to (a person).", "At 1417.0s, [0.008, 0.318, 0.439, 0.988]: stand, carry/hold (an object), listen to (a person)."]}]}, {"video": "9mLYmkonWZQ.mkv", "clip": [1397, 1458], "conversations": [{"from": "human", "timestamps": 1427.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.019, 0.305, 0.465, 0.99]: sit, eat; [0.311, 0.017, 1.0, 0.991]: listen to (a person), stand."}, {"from": "human", "timestamps": 1458.0, "value": "For the person at bbox [0.0, 0.072, 0.683, 0.981], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.072, 0.683, 0.981]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1457.0s, [0.0, 0.066, 0.709, 0.987]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1456.0s, [0.037, 0.083, 0.727, 0.978]: stand, talk to (e.g., self, a person, a group).", "From 1454.0s to 1455.0s, [0.038, 0.114, 0.738, 0.968]->[0.055, 0.126, 0.725, 0.976]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1453.0s, [0.035, 0.126, 0.717, 0.967]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "9mLYmkonWZQ.mkv", "clip": [1422, 1475], "conversations": [{"from": "human", "timestamps": 1452.0, "value": "What actions is the person in bbox [0.115, 0.037, 0.991, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.115, 0.037, 0.991, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1449.0s to 1451.0s, [0.053, 0.0, 0.964, 1.0]->[0.014, 0.01, 0.975, 1.0]->[0.085, 0.004, 0.987, 1.0]: stand, talk to (e.g., self, a 
person, a group).", "At 1448.0s, [0.03, 0.008, 0.964, 0.986]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1475.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.021, 0.044, 0.698, 0.992]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "9mLYmkonWZQ.mkv", "clip": [1483, 1524], "conversations": [{"from": "human", "timestamps": 1513.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.266, 0.051, 1.0, 0.978]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1524.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.415, 0.032, 1.0, 0.975]?"}, {"from": "gpt", "value": ["At Now, [0.415, 0.032, 1.0, 0.975]: stand.", "From 1522.0s to 1523.0s, [0.175, 0.017, 0.997, 0.974]->[0.25, 0.008, 1.0, 0.966]: stand.", "At 1521.0s, [0.129, 0.026, 0.992, 0.973]: stand, carry/hold (an object).", "From 1519.0s to 1520.0s, [0.221, 0.052, 0.989, 0.995]->[0.154, 0.029, 0.979, 0.997]: stand.", "From 1515.0s to 1518.0s, [0.242, 0.059, 1.0, 0.983]->[0.26, 0.074, 1.0, 0.979]->[0.254, 0.04, 1.0, 0.978]->[0.248, 0.053, 1.0, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1510.0s to 1514.0s, [0.574, 0.111, 0.831, 0.851]->[0.577, 0.111, 0.829, 0.867]->[0.576, 0.114, 0.859, 0.797]->[0.266, 0.051, 1.0, 0.978]->[0.237, 0.043, 1.0, 0.986]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1509.0s, [0.583, 0.104, 0.846, 0.882]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1508.0s, [0.591, 0.104, 0.827, 0.443]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1506.0s to 1507.0s, [0.611, 0.111, 0.849, 0.802]->[0.62, 0.111, 0.862, 0.845]: stand, carry/hold (an object), give/serve (an object) to (a person), watch (a person).", "At 1505.0s, [0.598, 0.112, 0.824, 0.436]: stand, carry/hold (an object), give/serve (an object) to (a person).", "At 1504.0s, [0.561, 0.134, 0.802, 0.748]: stand, carry/hold (an object), watch (a person)."]}]}, {"video": "9mLYmkonWZQ.mkv", "clip": [1554, 1616], "conversations": [{"from": "human", "timestamps": 1584.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.088, 0.401, 0.63, 1.0]: listen to (a person), sit; [0.44, 0.029, 0.794, 0.917]: carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.807, 0.462, 0.909, 0.889]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1616.0, "value": "What actions did the person at bbox [0.406, 0.178, 0.579, 0.911] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.406, 0.178, 0.579, 0.911]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1614.0s to 1615.0s, [0.305, 0.084, 0.67, 0.977]->[0.417, 0.195, 0.696, 0.988]: walk, carry/hold (an object)."]}]}, {"video": "9mLYmkonWZQ.mkv", "clip": [1730, 1766], "conversations": [{"from": "human", "timestamps": 1760.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.316, 0.059, 0.937, 0.974]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1766.0, "value": "Can 
you tell me the current and past actions of the person located in bbox [0.244, 0.127, 0.763, 0.975]?"}, {"from": "gpt", "value": ["In the current frame, [0.244, 0.127, 0.763, 0.975]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1765.0s, [0.247, 0.13, 0.761, 0.985]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1764.0s, [0.253, 0.071, 0.704, 0.997]: sit, carry/hold (an object).", "From 1761.0s to 1763.0s, [0.236, 0.056, 0.718, 0.996]->[0.25, 0.065, 0.755, 0.991]->[0.254, 0.06, 0.762, 0.99]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "9tyiDEYiWiA.mkv", "clip": [900, 953], "conversations": [{"from": "human", "timestamps": 930.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.397, 0.006, 0.879, 0.999]: watch (a person), stand."}, {"from": "human", "timestamps": 953.0, "value": "What actions is the person in bbox [0.246, 0.035, 1.0, 0.953] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.246, 0.035, 1.0, 0.953]: stand, fight/hit (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 952.0s, [0.176, 0.004, 0.882, 1.0]: stand, fight/hit (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "9tyiDEYiWiA.mkv", "clip": [890, 1014], "conversations": [{"from": "human", "timestamps": 920.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.414, 0.023, 0.998, 0.976]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1014.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.71, 0.194, 0.906, 0.997]?"}, {"from": "gpt", "value": ["At Now, [0.71, 0.194, 0.906, 0.997]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1010.0s to 1013.0s, [0.339, 0.075, 0.958, 0.989]->[0.344, 0.074, 0.929, 0.984]->[0.744, 0.0, 0.998, 0.992]->[0.466, 0.0, 0.975, 0.979]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "9tyiDEYiWiA.mkv", "clip": [1043, 1075], "conversations": [{"from": "human", "timestamps": 1073.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.431, 0.042, 0.996, 0.976]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1075.0, "value": "What actions is the person in bbox [0.391, 0.039, 0.99, 0.985] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.391, 0.039, 0.99, 0.985]: stand, talk to (e.g., self, a person, a group).", "At 1074.0s, [0.4, 0.088, 0.983, 0.988]: stand, talk to (e.g., self, a person, a group).", "At 1073.0s, [0.431, 0.042, 0.996, 0.976]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "9tyiDEYiWiA.mkv", "clip": [1077, 1124], "conversations": [{"from": "human", "timestamps": 1107.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.354, 0.119, 0.983, 0.979]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1124.0, "value": "For the person at bbox [0.041, 0.014, 0.658, 0.968], what are their actions in the 
current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.041, 0.014, 0.658, 0.968]: stand.", "At 1123.0s, [0.029, 0.029, 0.698, 0.993]: stand."]}]}, {"video": "9tyiDEYiWiA.mkv", "clip": [1118, 1171], "conversations": [{"from": "human", "timestamps": 1148.0, "value": "What actions is the person in bbox [0.395, 0.084, 0.972, 0.977] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.395, 0.084, 0.972, 0.977]: stand.", "At 1147.0s, [0.386, 0.053, 0.955, 0.984]: stand."]}, {"from": "human", "timestamps": 1171.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.019, 0.579, 0.981]: listen to (a person), stand, watch (a person)."}]}, {"video": "9tyiDEYiWiA.mkv", "clip": [1239, 1302], "conversations": [{"from": "human", "timestamps": 1269.0, "value": "What actions did the person at bbox [0.23, 0.002, 0.983, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.23, 0.002, 0.983, 1.0]: sit.", "At 1268.0s, [0.282, 0.0, 0.977, 0.993]: sit, stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1267.0s, [0.234, 0.0, 1.0, 0.995]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1302.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.262, 0.007, 0.96, 0.996]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "9tyiDEYiWiA.mkv", "clip": [1275, 1347], "conversations": [{"from": "human", "timestamps": 1305.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.098, 0.0, 0.789, 0.99]: stand."}, {"from": "human", "timestamps": 1347.0, "value": "What actions did the person at bbox [0.007, 0.0, 0.708, 0.982] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.007, 0.0, 0.708, 0.982]: stand.", "From 1344.0s to 1346.0s, [0.0, 0.007, 0.755, 0.997]->[0.013, 0.004, 0.714, 0.994]->[0.007, 0.003, 0.724, 0.992]: stand."]}]}, {"video": "9tyiDEYiWiA.mkv", "clip": [1387, 1469], "conversations": [{"from": "human", "timestamps": 1417.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.551, 0.436, 0.641, 0.794]: sit."}, {"from": "human", "timestamps": 1469.0, "value": "What actions did the person at bbox [0.014, 0.0, 0.753, 0.998] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.014, 0.0, 0.753, 0.998]: stand, read.", "From 1467.0s to 1468.0s, [0.001, 0.0, 0.747, 1.0]->[0.009, 0.009, 0.773, 1.0]: stand, read."]}]}, {"video": "9tyiDEYiWiA.mkv", "clip": [1573, 1614], "conversations": [{"from": "human", "timestamps": 1603.0, "value": "What actions is the person in bbox [0.004, 0.074, 0.341, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.004, 0.074, 0.341, 1.0]: stand, listen to (a person), watch (a person).", "From 1601.0s to 1602.0s, [0.114, 0.019, 0.732, 0.983]->[0.03, 0.03, 0.639, 0.982]: stand, watch (a person).", "At 1600.0s, [0.502, 0.401, 0.623, 0.966]: stand, listen to (a person), watch (a person).", "At 1599.0s, [0.534, 0.393, 0.645, 0.953]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1614.0, "value": "For the current 
frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.532, 0.0, 0.998, 0.998]: stand."}]}, {"video": "9tyiDEYiWiA.mkv", "clip": [1614, 1652], "conversations": [{"from": "human", "timestamps": 1644.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.078, 0.032, 0.669, 0.984]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1652.0, "value": "What actions is the person in bbox [0.244, 0.038, 0.872, 0.992] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.244, 0.038, 0.872, 0.992]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1648.0s to 1651.0s, [0.245, 0.049, 0.949, 0.981]->[0.286, 0.0, 0.879, 0.992]->[0.251, 0.017, 0.914, 0.994]->[0.273, 0.016, 0.952, 0.989]: stand, talk to (e.g., self, a person, a group).", "At 1647.0s, [0.269, 0.016, 0.879, 0.986]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "9tyiDEYiWiA.mkv", "clip": [1759, 1795], "conversations": [{"from": "human", "timestamps": 1789.0, "value": "Describe the actions of the person at bbox [0.016, 0.02, 0.76, 0.989] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.016, 0.02, 0.76, 0.989]: sit, talk to (e.g., self, a person, a group).", "From 1787.0s to 1788.0s, [0.035, 0.025, 0.697, 1.0]->[0.027, 0.029, 0.744, 0.99]: sit, talk to (e.g., self, a person, a group).", "At 1786.0s, [0.028, 0.018, 0.652, 0.982]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1783.0s to 1785.0s, [0.034, 0.009, 0.64, 0.984]->[0.03, 0.024, 0.693, 0.984]->[0.033, 0.003, 0.65, 0.989]: sit, carry/hold (an object)."]}, {"from": "human", "timestamps": 1795.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.025, 0.022, 0.679, 0.982]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "AN07xQokfiE.mp4", "clip": [901, 1011], "conversations": [{"from": "human", "timestamps": 931.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.159, 0.001, 1.0, 0.981]: stand."}, {"from": "human", "timestamps": 1011.0, "value": "For the person at bbox [0.565, 0.3, 0.979, 0.991], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.565, 0.3, 0.979, 0.991]: stand, listen to (a person).", "From 1009.0s to 1010.0s, [0.575, 0.305, 0.982, 0.977]->[0.574, 0.305, 0.978, 0.985]: stand, listen to (a person)."]}]}, {"video": "AN07xQokfiE.mp4", "clip": [997, 1109], "conversations": [{"from": "human", "timestamps": 1027.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.24, 0.072, 1.0, 0.981]: listen to (a person), stand."}, {"from": "human", "timestamps": 1109.0, "value": "For the person at bbox [0.354, 0.142, 0.661, 0.994], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.354, 0.142, 0.661, 0.994]: walk, carry/hold (an object).", "From 1104.0s to 1108.0s, [0.1, 0.109, 0.524, 0.749]->[0.02, 0.064, 0.431, 0.973]->[0.009, 0.073, 0.446, 0.986]->[0.153, 0.1, 0.491, 0.973]->[0.223, 0.132, 0.542, 
0.911]: walk, talk to (e.g., self, a person, a group).", "At 1103.0s, [0.432, 0.108, 0.691, 0.846]: stand, talk to (e.g., self, a person, a group).", "At 1102.0s, [0.549, 0.079, 0.835, 0.993]: walk.", "At 1101.0s, [0.47, 0.069, 0.794, 0.992]: stand, listen to (a person).", "At 1100.0s, [0.442, 0.082, 0.758, 0.976]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1099.0s, [0.484, 0.066, 0.746, 0.826]: stand, listen to (a person).", "At 1098.0s, [0.427, 0.077, 0.644, 0.743]: walk, listen to (a person).", "At 1097.0s, [0.305, 0.107, 0.557, 0.767]: walk, talk to (e.g., self, a person, a group).", "At 1096.0s, [0.249, 0.085, 0.523, 0.932]: walk, listen to (a person), talk to (e.g., self, a person, a group).", "At 1095.0s, [0.118, 0.076, 0.442, 0.956]: walk, listen to (a person), watch (a person).", "From 1093.0s to 1094.0s, [0.255, 0.065, 0.529, 0.956]->[0.208, 0.071, 0.495, 0.95]: stand, listen to (a person).", "At 1092.0s, [0.268, 0.075, 0.536, 0.949]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "From 1090.0s to 1091.0s, [0.49, 0.125, 0.708, 0.946]->[0.379, 0.078, 0.617, 0.695]: walk, listen to (a person).", "From 1087.0s to 1089.0s, [0.624, 0.084, 0.973, 0.974]->[0.571, 0.092, 0.86, 0.969]->[0.539, 0.112, 0.804, 0.981]: stand, listen to (a person).", "From 1085.0s to 1086.0s, [0.382, 0.091, 0.958, 0.986]->[0.619, 0.058, 0.925, 0.897]: stand, carry/hold (an object), give/serve (an object) to (a person), listen to (a person), watch (a person).", "At 1084.0s, [0.423, 0.109, 0.945, 0.986]: stand, carry/hold (an object), read.", "At 1083.0s, [0.3, 0.115, 0.903, 0.981]: stand, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "From 1081.0s to 1082.0s, [0.031, 0.042, 0.653, 0.991]->[0.123, 0.107, 0.978, 0.983]: walk, read.", "At 1080.0s, [0.025, 0.014, 0.655, 1.0]: stand.", "From 1078.0s to 1079.0s, [0.007, 0.0, 0.648, 0.996]->[0.006, 0.0, 0.664, 0.993]: stand, talk to (e.g., self, a person, a group).", "At 1077.0s, [0.004, 0.0, 0.58, 0.994]: stand."]}]}, {"video": "AN07xQokfiE.mp4", "clip": [1095, 1127], "conversations": [{"from": "human", "timestamps": 1125.0, "value": "What actions is the person in bbox [0.081, 0.018, 0.804, 0.947] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.081, 0.018, 0.804, 0.947]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1121.0s to 1124.0s, [0.136, 0.078, 0.637, 0.946]->[0.141, 0.085, 0.64, 0.947]->[0.144, 0.076, 0.64, 0.941]->[0.15, 0.078, 0.651, 0.945]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1127.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.155, 0.094, 0.864, 0.969]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "AN07xQokfiE.mp4", "clip": [1172, 1251], "conversations": [{"from": "human", "timestamps": 1202.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.311, 0.088, 0.732, 0.916]: watch (a person), stand, touch (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1251.0, "value": "What actions is the person in bbox [0.002, 0.084, 0.905, 0.966] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.002, 0.084, 0.905, 0.966]: sit, talk to (e.g., self, a person, a 
group).", "At 1250.0s, [0.0, 0.09, 0.919, 0.971]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "AN07xQokfiE.mp4", "clip": [1237, 1305], "conversations": [{"from": "human", "timestamps": 1267.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.007, 0.085, 0.973, 0.979]?"}, {"from": "gpt", "value": ["In the current frame, [0.007, 0.085, 0.973, 0.979]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1266.0s, [0.029, 0.071, 0.998, 0.991]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1305.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.003, 0.072, 0.934, 0.969]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "AN07xQokfiE.mp4", "clip": [1231, 1374], "conversations": [{"from": "human", "timestamps": 1261.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.012, 0.091, 0.903, 0.97]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1374.0, "value": "Describe the actions of the person at bbox [0.032, 0.082, 0.979, 0.978] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.032, 0.082, 0.979, 0.978]: sit, talk to (e.g., self, a person, a group).", "From 1372.0s to 1373.0s, [0.044, 0.047, 0.976, 0.986]->[0.036, 0.084, 0.973, 0.984]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "AN07xQokfiE.mp4", "clip": [1311, 1361], "conversations": [{"from": "human", "timestamps": 1341.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.084, 0.909, 0.965]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1361.0, "value": "For the person at bbox [0.0, 0.085, 0.919, 0.965], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.085, 0.919, 0.965]: sit, talk to (e.g., self, a person, a group).", "From 1357.0s to 1360.0s, [0.01, 0.056, 0.901, 0.974]->[0.002, 0.059, 0.907, 0.977]->[0.003, 0.051, 0.908, 0.97]->[0.009, 0.07, 0.906, 0.977]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "AN07xQokfiE.mp4", "clip": [1400, 1449], "conversations": [{"from": "human", "timestamps": 1430.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.1, 0.057, 0.994, 0.994]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1449.0, "value": "For the person at bbox [0.008, 0.061, 0.637, 0.978], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.008, 0.061, 0.637, 0.978]: stand, talk to (e.g., self, a person, a group).", "From 1447.0s to 1448.0s, [0.004, 0.054, 0.641, 0.981]->[0.008, 0.055, 0.633, 0.98]: stand, talk to (e.g., self, a person, a group).", "At 1446.0s, [0.003, 0.063, 0.629, 0.98]: stand.", "From 1440.0s to 1445.0s, [0.006, 0.073, 0.622, 0.975]->[0.009, 0.073, 0.625, 0.978]->[0.0, 0.069, 0.621, 0.977]->[0.009, 0.075, 0.623, 0.979]->[0.005, 0.062, 0.622, 1.0]->[0.009, 0.069, 0.628, 0.981]: stand, talk to (e.g., self, a person, a group).", "At 1439.0s, [0.007, 0.083, 0.654, 0.977]: sit, talk to (e.g., self, a person, a group).", "From 1436.0s to 1438.0s, [0.019, 0.096, 0.878, 
0.987]->[0.03, 0.041, 0.974, 0.975]->[0.019, 0.196, 0.7, 0.997]: stand.", "At 1435.0s, [0.018, 0.074, 0.849, 0.989]: stand, listen to (a person)."]}]}, {"video": "AN07xQokfiE.mp4", "clip": [1484, 1532], "conversations": [{"from": "human", "timestamps": 1514.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.58, 0.505, 0.939, 1.0]: listen to (a person), sit."}, {"from": "human", "timestamps": 1532.0, "value": "What actions did the person at bbox [0.145, 0.079, 0.923, 0.984] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.145, 0.079, 0.923, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1531.0s, [0.151, 0.069, 0.941, 0.981]: stand, talk to (e.g., self, a person, a group).", "At 1530.0s, [0.152, 0.075, 0.931, 0.984]: sit, talk to (e.g., self, a person, a group).", "From 1527.0s to 1529.0s, [0.131, 0.078, 0.935, 0.983]->[0.131, 0.072, 0.927, 0.983]->[0.154, 0.065, 0.948, 0.983]: stand, talk to (e.g., self, a person, a group).", "At 1526.0s, [0.122, 0.079, 0.926, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "AN07xQokfiE.mp4", "clip": [1480, 1594], "conversations": [{"from": "human", "timestamps": 1510.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.183, 0.054, 0.601, 1.0]: stand; [0.58, 0.499, 0.937, 1.0]: listen to (a person), sit."}, {"from": "human", "timestamps": 1594.0, "value": "What actions did the person at bbox [0.147, 0.068, 0.979, 0.98] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.147, 0.068, 0.979, 0.98]: stand, talk to (e.g., self, a person, a group).", "At 1593.0s, [0.139, 0.062, 0.998, 0.993]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "AN07xQokfiE.mp4", "clip": [1539, 1580], "conversations": [{"from": "human", "timestamps": 1569.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.142, 0.074, 0.983, 0.978]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1580.0, "value": "What actions did the person at bbox [0.165, 0.077, 0.973, 0.986] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.165, 0.077, 0.973, 0.986]: stand, talk to (e.g., self, a person, a group).", "From 1575.0s to 1579.0s, [0.319, 0.126, 0.966, 0.987]->[0.073, 0.143, 0.834, 0.984]->[0.231, 0.128, 0.991, 0.981]->[0.201, 0.121, 0.99, 0.99]->[0.179, 0.095, 0.978, 0.99]: stand, talk to (e.g., self, a person, a group).", "From 1572.0s to 1574.0s, [0.319, 0.052, 0.992, 0.98]->[0.362, 0.051, 0.938, 0.987]->[0.327, 0.125, 0.869, 0.993]: walk, talk to (e.g., self, a person, a group).", "From 1568.0s to 1571.0s, [0.183, 0.099, 0.976, 0.986]->[0.142, 0.074, 0.983, 0.978]->[0.219, 0.092, 0.979, 0.972]->[0.386, 0.104, 0.984, 0.972]: stand, talk to (e.g., self, a person, a group).", "From 1566.0s to 1567.0s, [0.026, 0.068, 0.627, 0.982]->[0.202, 0.054, 0.98, 0.982]: stand."]}]}, {"video": "AN07xQokfiE.mp4", "clip": [1615, 1653], "conversations": [{"from": "human", "timestamps": 1645.0, "value": "Describe the actions of the person at bbox [0.162, 0.184, 0.421, 0.987] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.162, 0.184, 0.421, 0.987]: stand, listen to (a 
person).", "From 1640.0s to 1644.0s, [0.621, 0.073, 0.981, 0.933]->[0.51, 0.249, 0.837, 0.996]->[0.467, 0.352, 0.864, 0.975]->[0.267, 0.281, 0.592, 0.98]->[0.225, 0.216, 0.497, 0.979]: walk."]}, {"from": "human", "timestamps": 1653.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.246, 0.1, 0.887, 0.988]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "AN07xQokfiE.mp4", "clip": [1673, 1729], "conversations": [{"from": "human", "timestamps": 1703.0, "value": "What actions did the person at bbox [0.433, 0.022, 0.998, 0.997] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.433, 0.022, 0.998, 0.997]: sit, listen to (a person).", "From 1701.0s to 1702.0s, [0.462, 0.0, 0.996, 1.0]->[0.439, 0.049, 0.996, 0.991]: sit, listen to (a person).", "At 1700.0s, [0.451, 0.0, 0.999, 0.99]: sit, ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group).", "From 1698.0s to 1699.0s, [0.451, 0.0, 0.99, 0.998]->[0.41, 0.0, 0.992, 0.986]: sit, talk to (e.g., self, a person, a group).", "At 1697.0s, [0.481, 0.0, 0.978, 0.997]: sit, listen to (a person)."]}, {"from": "human", "timestamps": 1729.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.026, 0.182, 0.587, 0.977]: listen to (a person), sit; [0.419, 0.0, 0.995, 0.997]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "AN07xQokfiE.mp4", "clip": [1719, 1778], "conversations": [{"from": "human", "timestamps": 1749.0, "value": "Describe the actions of the person at bbox [0.769, 0.307, 0.975, 0.985] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.769, 0.307, 0.975, 0.985]: walk, talk to (e.g., self, a person, a group).", "From 1746.0s to 1748.0s, [0.667, 0.356, 0.948, 0.969]->[0.672, 0.351, 0.981, 1.0]->[0.728, 0.292, 0.984, 0.98]: walk.", "From 1744.0s to 1745.0s, [0.664, 0.346, 0.901, 0.792]->[0.661, 0.368, 0.868, 0.946]: stand, close (e.g., a door, a box)."]}, {"from": "human", "timestamps": 1778.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.023, 0.184, 0.575, 0.987]: listen to (a person), stand, watch (a person); [0.519, 0.1, 0.977, 1.0]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "AYebXQ8eUkM.mkv", "clip": [909, 966], "conversations": [{"from": "human", "timestamps": 939.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.006, 0.115, 0.489, 0.867]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.505, 0.105, 1.0, 0.871]: listen to (a person), stand."}, {"from": "human", "timestamps": 966.0, "value": "For the person at bbox [0.419, 0.115, 0.932, 0.9], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.419, 0.115, 0.932, 0.9]: sit, watch (a person).", "At 965.0s, [0.464, 0.107, 0.992, 0.891]: sit, listen to (a person), watch (a person).", "At 964.0s, [0.457, 0.138, 0.984, 0.905]: sit, listen to (a person).", "At 963.0s, [0.383, 0.145, 0.722, 0.884]: listen to (a person)."]}]}, {"video": "AYebXQ8eUkM.mkv", "clip": [998, 1041], "conversations": [{"from": "human", "timestamps": 1028.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.673, 0.137, 0.904, 
0.88]?"}, {"from": "gpt", "value": ["At Now, [0.673, 0.137, 0.904, 0.88]: walk.", "From 1024.0s to 1027.0s, [0.753, 0.169, 0.923, 0.827]->[0.711, 0.147, 0.926, 0.835]->[0.719, 0.161, 0.924, 0.849]->[0.712, 0.167, 0.906, 0.861]: walk.", "From 1022.0s to 1023.0s, [0.756, 0.169, 0.935, 0.806]->[0.745, 0.192, 0.93, 0.812]: walk, watch (a person).", "From 1019.0s to 1021.0s, [0.813, 0.216, 0.99, 0.793]->[0.799, 0.224, 0.992, 0.805]->[0.799, 0.187, 0.97, 0.82]: walk.", "At 1018.0s, [0.169, 0.1, 0.774, 0.878]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1041.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.275, 0.379, 0.4, 0.869]: sit; [0.43, 0.309, 0.525, 0.721]: carry/hold (an object), stand, push (an object), watch (a person)."}]}, {"video": "AYebXQ8eUkM.mkv", "clip": [1133, 1184], "conversations": [{"from": "human", "timestamps": 1163.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.001, 0.111, 0.192, 0.882]: listen to (a person), stand, watch (a person); [0.15, 0.127, 0.631, 0.87]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.65, 0.129, 1.0, 0.881]: listen to (a person), stand."}, {"from": "human", "timestamps": 1184.0, "value": "For the person at bbox [0.841, 0.452, 1.0, 0.883], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.841, 0.452, 1.0, 0.883]: sit, listen to (a person), watch (a person).", "At 1183.0s, [0.799, 0.429, 0.988, 0.896]: sit, listen to (a person), watch (a person).", "At 1182.0s, [0.778, 0.462, 0.983, 0.882]: sit, listen to (a person).", "At 1181.0s, [0.788, 0.46, 0.977, 0.887]: sit, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "AYebXQ8eUkM.mkv", "clip": [1194, 1226], "conversations": [{"from": "human", "timestamps": 1224.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.037, 0.256, 0.283, 0.892]: listen to (a person), sit, smoke, watch (a person); [0.227, 0.198, 0.435, 0.757]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.369, 0.275, 0.419, 0.434]: sit; [0.522, 0.251, 0.721, 0.679]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.703, 0.239, 0.947, 0.882]: listen to (a person), sit, touch (an object), watch (a person)."}, {"from": "human", "timestamps": 1226.0, "value": "For the person at bbox [0.031, 0.268, 0.245, 0.893], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.031, 0.268, 0.245, 0.893]: sit, smoke, listen to (a person), watch (a person).", "At 1225.0s, [0.035, 0.269, 0.335, 0.887]: sit, smoke, listen to (a person).", "From 1223.0s to 1224.0s, [0.035, 0.283, 0.259, 0.891]->[0.037, 0.256, 0.283, 0.892]: sit, smoke, listen to (a person), watch (a person).", "At 1222.0s, [0.024, 0.246, 0.336, 0.886]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1221.0s, [0.031, 0.212, 0.326, 0.894]: sit, carry/hold (an object), smoke, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1220.0s, [0.032, 0.234, 0.35, 0.883]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1219.0s, [0.037, 0.248, 0.349, 0.895]: sit, listen to (a person), watch (a 
person).", "At 1218.0s, [0.032, 0.237, 0.33, 0.883]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1215.0s to 1217.0s, [0.037, 0.27, 0.267, 0.884]->[0.035, 0.281, 0.25, 0.891]->[0.037, 0.283, 0.261, 0.888]: sit, smoke, listen to (a person), watch (a person).", "At 1214.0s, [0.037, 0.245, 0.357, 0.887]: sit, carry/hold (an object), smoke, listen to (a person), watch (a person).", "At 1213.0s, [0.033, 0.22, 0.344, 0.877]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1212.0s, [0.035, 0.238, 0.353, 0.887]: sit, smoke, listen to (a person), watch (a person).", "At 1211.0s, [0.034, 0.269, 0.596, 0.891]: sit, put down, smoke, listen to (a person).", "From 1209.0s to 1210.0s, [0.037, 0.256, 0.345, 0.87]->[0.055, 0.222, 0.305, 0.877]: sit, carry/hold (an object), smoke, listen to (a person).", "At 1208.0s, [0.035, 0.264, 0.281, 0.878]: sit, carry/hold (an object), smoke."]}]}, {"video": "AYebXQ8eUkM.mkv", "clip": [1238, 1298], "conversations": [{"from": "human", "timestamps": 1268.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.507, 0.221, 0.683, 0.625]?"}, {"from": "gpt", "value": ["At Now, [0.507, 0.221, 0.683, 0.625]: sit, carry/hold (an object), eat, listen to (a person), watch (a person).", "At 1267.0s, [0.497, 0.228, 0.695, 0.633]: sit, eat, listen to (a person), watch (a person).", "At 1266.0s, [0.52, 0.197, 0.714, 0.651]: sit, listen to (a person).", "At 1265.0s, [0.493, 0.21, 0.727, 0.655]: sit, eat, listen to (a person).", "At 1264.0s, [0.505, 0.22, 0.73, 0.656]: sit, carry/hold (an object), listen to (a person).", "At 1263.0s, [0.499, 0.216, 0.743, 0.662]: sit, carry/hold (an object), eat, listen to (a person)."]}, {"from": "human", "timestamps": 1298.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.033, 0.257, 0.352, 0.859]: listen to (a person), sit; [0.312, 0.169, 0.535, 0.62]: listen to (a person), sit, watch (a person); [0.343, 0.213, 0.398, 0.342]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.509, 0.214, 0.708, 0.613]: listen to (a person), sit, watch (a person); [0.695, 0.199, 0.925, 0.851]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "AYebXQ8eUkM.mkv", "clip": [1266, 1344], "conversations": [{"from": "human", "timestamps": 1296.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.175, 0.09, 0.814, 0.914]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1344.0, "value": "What actions did the person at bbox [0.594, 0.152, 0.93, 0.871] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.594, 0.152, 0.93, 0.871]: stand, watch (a person).", "At 1343.0s, [0.597, 0.16, 0.924, 0.869]: stand, listen to (a person), watch (a person).", "From 1336.0s to 1342.0s, [0.65, 0.186, 1.0, 0.898]->[0.604, 0.19, 0.936, 0.869]->[0.56, 0.171, 0.879, 0.855]->[0.61, 0.173, 0.924, 0.851]->[0.594, 0.16, 0.919, 0.854]->[0.602, 0.169, 0.921, 0.862]->[0.597, 0.147, 0.921, 0.861]: stand, listen to (a person).", "From 1334.0s to 1335.0s, [0.553, 0.174, 0.973, 0.884]->[0.644, 0.159, 1.0, 0.87]: stand, listen to (a person), watch (a person).", "From 1331.0s to 1333.0s, [0.514, 0.192, 0.968, 0.861]->[0.527, 0.183, 0.965, 0.876]->[0.532, 0.19, 0.963, 0.881]: stand, listen to (a person)."]}]}, {"video": 
"AYebXQ8eUkM.mkv", "clip": [1380, 1419], "conversations": [{"from": "human", "timestamps": 1410.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.089, 0.144, 0.462, 0.882]: listen to (a person), stand, watch (a person); [0.53, 0.107, 0.862, 0.875]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1419.0, "value": "What actions is the person in bbox [0.03, 0.15, 0.416, 0.875] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.03, 0.15, 0.416, 0.875]: stand, listen to (a person), watch (a person).", "From 1414.0s to 1418.0s, [0.129, 0.108, 0.555, 0.888]->[0.104, 0.132, 0.569, 0.893]->[0.106, 0.135, 0.561, 0.884]->[0.159, 0.137, 0.535, 0.894]->[0.077, 0.148, 0.592, 0.896]: stand, listen to (a person)."]}]}, {"video": "AYebXQ8eUkM.mkv", "clip": [1383, 1483], "conversations": [{"from": "human", "timestamps": 1413.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.075, 0.085, 0.784, 0.875]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1483.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.307, 0.123, 0.894, 0.88]?"}, {"from": "gpt", "value": ["In the current frame, [0.307, 0.123, 0.894, 0.88]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1482.0s, [0.342, 0.138, 0.912, 0.891]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1478.0s to 1481.0s, [0.07, 0.112, 0.78, 0.9]->[0.142, 0.133, 0.746, 0.872]->[0.441, 0.129, 0.854, 0.9]->[0.428, 0.135, 0.91, 0.896]: stand, talk to (e.g., self, a person, a group).", "At 1477.0s, [0.244, 0.145, 0.869, 0.871]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "AYebXQ8eUkM.mkv", "clip": [1450, 1497], "conversations": [{"from": "human", "timestamps": 1480.0, "value": "Describe the actions of the person at bbox [0.441, 0.129, 0.854, 0.9] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.441, 0.129, 0.854, 0.9]: stand, talk to (e.g., self, a person, a group).", "From 1478.0s to 1479.0s, [0.07, 0.112, 0.78, 0.9]->[0.142, 0.133, 0.746, 0.872]: stand, talk to (e.g., self, a person, a group).", "At 1477.0s, [0.244, 0.145, 0.869, 0.871]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1497.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.227, 0.147, 0.895, 0.857]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "AYebXQ8eUkM.mkv", "clip": [1480, 1558], "conversations": [{"from": "human", "timestamps": 1510.0, "value": "For the person at bbox [0.205, 0.124, 0.922, 0.86], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.205, 0.124, 0.922, 0.86]: stand, talk to (e.g., self, a person, a group).", "At 1509.0s, [0.223, 0.122, 0.923, 0.89]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1558.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.286, 0.116, 0.915, 0.884]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "AYebXQ8eUkM.mkv", 
"clip": [1504, 1569], "conversations": [{"from": "human", "timestamps": 1534.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.28, 0.145, 0.575, 0.886]: listen to (a person), stand, watch (a person); [0.496, 0.164, 0.866, 0.902]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1569.0, "value": "For the person at bbox [0.485, 0.199, 0.595, 0.829], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.485, 0.199, 0.595, 0.829]: stand, watch (a person).", "From 1566.0s to 1568.0s, [0.482, 0.204, 0.594, 0.826]->[0.486, 0.2, 0.6, 0.836]->[0.482, 0.197, 0.597, 0.83]: stand."]}]}, {"video": "AYebXQ8eUkM.mkv", "clip": [1547, 1590], "conversations": [{"from": "human", "timestamps": 1577.0, "value": "What actions did the person at bbox [0.094, 0.1, 0.624, 0.879] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.094, 0.1, 0.624, 0.879]: stand, play musical instrument.", "At 1576.0s, [0.125, 0.117, 0.673, 0.877]: play musical instrument.", "From 1570.0s to 1575.0s, [0.576, 0.205, 0.676, 0.878]->[0.576, 0.198, 0.678, 0.882]->[0.122, 0.114, 0.755, 0.888]->[0.056, 0.107, 0.694, 0.89]->[0.101, 0.109, 0.741, 0.881]->[0.096, 0.122, 0.768, 0.889]: stand, play musical instrument.", "From 1566.0s to 1569.0s, [0.575, 0.217, 0.698, 0.882]->[0.577, 0.211, 0.69, 0.877]->[0.576, 0.202, 0.686, 0.882]->[0.579, 0.201, 0.679, 0.881]: stand, carry/hold (an object)."]}, {"from": "human", "timestamps": 1590.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.11, 0.123, 0.682, 0.891]: stand, play musical instrument."}]}, {"video": "AYebXQ8eUkM.mkv", "clip": [1596, 1629], "conversations": [{"from": "human", "timestamps": 1626.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.538, 0.097, 0.997, 0.874]: listen to (a person), walk."}, {"from": "human", "timestamps": 1629.0, "value": "What actions did the person at bbox [0.701, 0.174, 1.0, 0.886] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.701, 0.174, 1.0, 0.886]: stand, close (e.g., a door, a box).", "At 1628.0s, [0.387, 0.133, 0.919, 0.884]: stand.", "At 1627.0s, [0.235, 0.128, 0.729, 0.851]: stand, grab (a person), watch (a person)."]}]}, {"video": "Ag-pXiLrd48.mp4", "clip": [906, 951], "conversations": [{"from": "human", "timestamps": 936.0, "value": "What actions did the person at bbox [0.001, 0.108, 0.569, 0.903] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.001, 0.108, 0.569, 0.903]: stand, carry/hold (an object).", "At 935.0s, [0.043, 0.117, 0.577, 0.91]: sit."]}, {"from": "human", "timestamps": 951.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.557, 0.128, 0.908, 0.896]: sit."}]}, {"video": "Ag-pXiLrd48.mp4", "clip": [924, 984], "conversations": [{"from": "human", "timestamps": 954.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.025, 0.071, 0.472, 0.906]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 984.0, "value": "Can you tell me the current and past actions of the person located in bbox 
[0.41, 0.175, 0.823, 0.875]?"}, {"from": "gpt", "value": ["In the current frame, [0.41, 0.175, 0.823, 0.875]: sit.", "From 980.0s to 983.0s, [0.438, 0.126, 0.868, 0.881]->[0.43, 0.116, 0.86, 0.883]->[0.427, 0.126, 0.856, 0.879]->[0.405, 0.116, 0.837, 0.887]: sit."]}]}, {"video": "Ag-pXiLrd48.mp4", "clip": [965, 1013], "conversations": [{"from": "human", "timestamps": 995.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.417, 0.087, 0.895, 0.922]: sit."}, {"from": "human", "timestamps": 1013.0, "value": "What actions is the person in bbox [0.561, 0.143, 0.813, 0.678] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.561, 0.143, 0.813, 0.678]: sit, carry/hold (an object), watch (e.g., TV).", "At 1012.0s, [0.565, 0.153, 0.817, 0.712]: sit, smoke.", "At 1011.0s, [0.543, 0.156, 0.795, 0.71]: sit, carry/hold (an object), touch (an object), watch (e.g., TV).", "At 1010.0s, [0.52, 0.158, 0.738, 0.698]: stand, carry/hold (an object), watch (e.g., TV)."]}]}, {"video": "Ag-pXiLrd48.mp4", "clip": [1116, 1167], "conversations": [{"from": "human", "timestamps": 1146.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.415, 0.126, 0.978, 0.893]?"}, {"from": "gpt", "value": ["At Now, [0.415, 0.126, 0.978, 0.893]: sit, answer phone.", "At 1145.0s, [0.436, 0.121, 0.969, 0.87]: sit, answer phone.", "At 1144.0s, [0.424, 0.124, 0.985, 0.875]: sit, text on/look at a cellphone."]}, {"from": "human", "timestamps": 1167.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.451, 0.122, 0.99, 0.885]: sit, work on a computer."}]}, {"video": "Ag-pXiLrd48.mp4", "clip": [1211, 1248], "conversations": [{"from": "human", "timestamps": 1241.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.133, 0.106, 0.763, 0.869]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1248.0, "value": "Describe the actions of the person at bbox [0.158, 0.146, 0.779, 0.924] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.158, 0.146, 0.779, 0.924]: stand, answer phone, carry/hold (an object).", "From 1246.0s to 1247.0s, [0.161, 0.2, 0.788, 0.895]->[0.174, 0.191, 0.789, 0.903]: stand, answer phone, carry/hold (an object)."]}]}, {"video": "Ag-pXiLrd48.mp4", "clip": [1245, 1279], "conversations": [{"from": "human", "timestamps": 1275.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.444, 0.119, 1.0, 0.883]: sit, answer phone."}, {"from": "human", "timestamps": 1279.0, "value": "What actions did the person at bbox [0.402, 0.116, 0.994, 0.866] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.402, 0.116, 0.994, 0.866]: sit, answer phone.", "From 1277.0s to 1278.0s, [0.42, 0.094, 1.0, 0.871]->[0.425, 0.108, 1.0, 0.866]: sit, answer phone.", "At 1276.0s, [0.417, 0.121, 0.999, 0.883]: answer phone.", "At 1275.0s, [0.444, 0.119, 1.0, 0.883]: sit, answer phone."]}]}, {"video": "Ag-pXiLrd48.mp4", "clip": [1285, 1322], "conversations": [{"from": "human", "timestamps": 1315.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.108, 0.109, 0.529, 0.872]: stand, 
answer phone."}, {"from": "human", "timestamps": 1322.0, "value": "What actions did the person at bbox [0.325, 0.134, 0.796, 0.84] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.325, 0.134, 0.796, 0.84]: stand, listen to (a person), watch (a person).", "At 1321.0s, [0.41, 0.12, 0.884, 0.837]: stand, listen to (a person), watch (a person)."]}]}, {"video": "Ag-pXiLrd48.mp4", "clip": [1306, 1348], "conversations": [{"from": "human", "timestamps": 1336.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.228, 0.115, 0.686, 0.856]?"}, {"from": "gpt", "value": ["At Now, [0.228, 0.115, 0.686, 0.856]: stand, answer phone.", "At 1335.0s, [0.251, 0.117, 0.72, 0.862]: stand, answer phone, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1348.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.257, 0.131, 0.695, 0.856]: stand, answer phone."}]}, {"video": "Ag-pXiLrd48.mp4", "clip": [1371, 1434], "conversations": [{"from": "human", "timestamps": 1401.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.295, 0.167, 0.727, 0.854]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1434.0, "value": "What actions is the person in bbox [0.521, 0.125, 0.904, 0.874] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.521, 0.125, 0.904, 0.874]: stand.", "At 1433.0s, [0.528, 0.131, 0.903, 0.875]: stand.", "At 1432.0s, [0.532, 0.129, 0.904, 0.889]: stand, listen to (a person)."]}]}, {"video": "Ag-pXiLrd48.mp4", "clip": [1437, 1494], "conversations": [{"from": "human", "timestamps": 1467.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.09, 0.109, 0.627, 0.877]: listen to (a person), stand."}, {"from": "human", "timestamps": 1494.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.233, 0.159, 0.705, 0.885]?"}, {"from": "gpt", "value": ["In the current frame, [0.233, 0.159, 0.705, 0.885]: sit.", "From 1481.0s to 1493.0s, [0.778, 0.641, 0.877, 0.823]->[0.778, 0.61, 0.871, 0.816]->[0.769, 0.614, 0.86, 0.819]->[0.767, 0.609, 0.857, 0.815]->[0.762, 0.605, 0.854, 0.818]->[0.276, 0.12, 0.717, 0.89]->[0.277, 0.122, 0.715, 0.888]->[0.278, 0.117, 0.719, 0.887]->[0.277, 0.118, 0.717, 0.888]->[0.277, 0.121, 0.718, 0.888]->[0.278, 0.12, 0.72, 0.887]->[0.281, 0.119, 0.719, 0.887]->[0.225, 0.154, 0.708, 0.889]: sit."]}]}, {"video": "Ag-pXiLrd48.mp4", "clip": [1476, 1548], "conversations": [{"from": "human", "timestamps": 1506.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.277, 0.116, 0.716, 0.903]: sit; [0.689, 0.143, 1.0, 0.899]: watch (a person), sit."}, {"from": "human", "timestamps": 1548.0, "value": "Describe the actions of the person at bbox [0.356, 0.099, 0.881, 0.884] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.356, 0.099, 0.881, 0.884]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1545.0s to 1547.0s, [0.358, 0.101, 0.869, 0.889]->[0.366, 0.1, 0.889, 0.907]->[0.357, 0.095, 0.886, 0.884]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": 
"Ag-pXiLrd48.mp4", "clip": [1520, 1574], "conversations": [{"from": "human", "timestamps": 1550.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.179, 0.099, 0.699, 0.875]: listen to (a person), sit."}, {"from": "human", "timestamps": 1574.0, "value": "For the person at bbox [0.611, 0.091, 1.0, 0.876], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.611, 0.091, 1.0, 0.876]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1566.0s to 1573.0s, [0.682, 0.122, 1.0, 0.897]->[0.71, 0.14, 1.0, 0.884]->[0.693, 0.111, 1.0, 0.887]->[0.67, 0.11, 1.0, 0.885]->[0.668, 0.124, 1.0, 0.885]->[0.689, 0.13, 1.0, 0.876]->[0.353, 0.102, 0.867, 0.88]->[0.356, 0.102, 0.867, 0.88]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1563.0s to 1565.0s, [0.692, 0.121, 0.997, 0.882]->[0.693, 0.117, 0.999, 0.881]->[0.688, 0.109, 1.0, 0.885]: sit, listen to (a person)."]}]}, {"video": "Ag-pXiLrd48.mp4", "clip": [1542, 1610], "conversations": [{"from": "human", "timestamps": 1572.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.001, 0.101, 0.399, 0.904]: listen to (a person), sit; [0.353, 0.102, 0.867, 0.88]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1610.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.001, 0.116, 0.566, 0.893]?"}, {"from": "gpt", "value": ["In the current frame, [0.001, 0.116, 0.566, 0.893]: sit.", "At 1609.0s, [0.003, 0.102, 0.524, 0.907]: sit.", "At 1608.0s, [0.0, 0.076, 0.508, 0.896]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "Ag-pXiLrd48.mp4", "clip": [1632, 1700], "conversations": [{"from": "human", "timestamps": 1662.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.194, 0.079, 0.75, 0.889]: sit."}, {"from": "human", "timestamps": 1700.0, "value": "For the person at bbox [0.297, 0.115, 0.596, 0.737], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.297, 0.115, 0.596, 0.737]: sit, listen to (a person), watch (a person).", "From 1696.0s to 1699.0s, [0.283, 0.113, 0.604, 0.903]->[0.283, 0.119, 0.619, 0.908]->[0.288, 0.118, 0.596, 0.89]->[0.283, 0.121, 0.596, 0.897]: sit, listen to (a person), watch (a person).", "At 1695.0s, [0.28, 0.11, 0.609, 0.897]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "Ag-pXiLrd48.mp4", "clip": [1693, 1733], "conversations": [{"from": "human", "timestamps": 1723.0, "value": "What actions is the person in bbox [0.297, 0.116, 0.616, 0.893] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.297, 0.116, 0.616, 0.893]: sit.", "At 1722.0s, [0.3, 0.122, 0.619, 0.899]: sit, watch (a person)."]}, {"from": "human", "timestamps": 1733.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.373, 0.112, 0.83, 0.888]: sit, touch (an object)."}]}, {"video": "B1MAUxpKaV8.mkv", "clip": [891, 1007], "conversations": [{"from": "human", "timestamps": 921.0, "value": "Where is each person positioned, and what actions are they performing in the 
current frame?"}, {"from": "gpt", "value": "At now, [0.499, 0.235, 0.797, 0.969]: stand."}, {"from": "human", "timestamps": 1007.0, "value": "What actions is the person in bbox [0.224, 0.002, 0.916, 0.982] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.224, 0.002, 0.916, 0.982]: stand, talk to (e.g., self, a person, a group).", "From 1004.0s to 1006.0s, [0.226, 0.049, 0.931, 0.964]->[0.152, 0.031, 0.96, 0.957]->[0.108, 0.015, 0.909, 0.973]: stand, talk to (e.g., self, a person, a group).", "At 1003.0s, [0.204, 0.035, 0.902, 0.982]: talk to (e.g., self, a person, a group).", "From 1000.0s to 1002.0s, [0.146, 0.011, 0.852, 0.972]->[0.366, 0.033, 0.926, 0.972]->[0.217, 0.018, 0.945, 0.991]: stand, talk to (e.g., self, a person, a group).", "At 999.0s, [0.152, 0.023, 0.855, 0.966]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 997.0s to 998.0s, [0.166, 0.057, 0.81, 0.977]->[0.132, 0.026, 0.822, 0.954]: stand, talk to (e.g., self, a person, a group).", "From 995.0s to 996.0s, [0.052, 0.049, 1.0, 0.956]->[0.21, 0.058, 0.943, 0.975]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 994.0s, [0.081, 0.04, 0.882, 0.96]: stand, talk to (e.g., self, a person, a group).", "At 993.0s, [0.108, 0.045, 0.954, 0.951]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 992.0s, [0.179, 0.052, 0.961, 0.957]: stand.", "From 990.0s to 991.0s, [0.164, 0.034, 0.85, 0.973]->[0.094, 0.042, 0.909, 0.969]: stand, talk to (e.g., self, a person, a group).", "At 989.0s, [0.141, 0.01, 0.839, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 986.0s to 988.0s, [0.593, 0.092, 0.958, 0.975]->[0.578, 0.091, 0.958, 0.966]->[0.065, 0.033, 0.83, 0.969]: stand.", "From 984.0s to 985.0s, [0.613, 0.053, 0.948, 0.984]->[0.573, 0.088, 0.968, 0.964]: stand, carry/hold (an object)."]}]}, {"video": "B1MAUxpKaV8.mkv", "clip": [989, 1022], "conversations": [{"from": "human", "timestamps": 1019.0, "value": "For the person at bbox [0.091, 0.043, 0.972, 0.957], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.091, 0.043, 0.972, 0.957]: sit, talk to (e.g., self, a person, a group).", "At 1018.0s, [0.032, 0.01, 0.991, 0.959]: stand, talk to (e.g., self, a person, a group).", "At 1017.0s, [0.186, 0.093, 0.909, 0.966]: talk to (e.g., self, a person, a group).", "At 1016.0s, [0.113, 0.04, 0.986, 0.962]: stand, talk to (e.g., self, a person, a group).", "At 1015.0s, [0.169, 0.019, 0.951, 0.963]: sit, talk to (e.g., self, a person, a group).", "At 1014.0s, [0.064, 0.029, 0.916, 0.951]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1013.0s, [0.255, 0.05, 1.0, 0.978]: sit, talk to (e.g., self, a person, a group).", "At 1012.0s, [0.233, 0.014, 0.937, 0.974]: stand, talk to (e.g., self, a person, a group).", "At 1011.0s, [0.223, 0.024, 0.966, 0.973]: sit, talk to (e.g., self, a person, a group).", "At 1010.0s, [0.316, 0.03, 0.972, 0.983]: stand, talk to (e.g., self, a person, a group).", "From 1008.0s to 1009.0s, [0.145, 0.055, 1.0, 0.972]->[0.189, 0.017, 0.965, 0.956]: sit, talk to (e.g., self, a person, a group).", "From 1004.0s to 1007.0s, [0.226, 0.049, 0.931, 0.964]->[0.152, 0.031, 0.96, 0.957]->[0.108, 0.015, 0.909, 0.973]->[0.224, 0.002, 0.916, 0.982]: stand, talk to (e.g., self, a person, a group).", "At 1003.0s, [0.204, 0.035, 0.902, 0.982]: talk to (e.g., self, a person, a group).", 
"From 1000.0s to 1002.0s, [0.146, 0.011, 0.852, 0.972]->[0.366, 0.033, 0.926, 0.972]->[0.217, 0.018, 0.945, 0.991]: stand, talk to (e.g., self, a person, a group).", "At 999.0s, [0.152, 0.023, 0.855, 0.966]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 997.0s to 998.0s, [0.166, 0.057, 0.81, 0.977]->[0.132, 0.026, 0.822, 0.954]: stand, talk to (e.g., self, a person, a group).", "From 995.0s to 996.0s, [0.052, 0.049, 1.0, 0.956]->[0.21, 0.058, 0.943, 0.975]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 994.0s, [0.081, 0.04, 0.882, 0.96]: stand, talk to (e.g., self, a person, a group).", "At 993.0s, [0.108, 0.045, 0.954, 0.951]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 992.0s, [0.179, 0.052, 0.961, 0.957]: stand.", "From 990.0s to 991.0s, [0.164, 0.034, 0.85, 0.973]->[0.094, 0.042, 0.909, 0.969]: stand, talk to (e.g., self, a person, a group).", "At 989.0s, [0.141, 0.01, 0.839, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 986.0s to 988.0s, [0.593, 0.092, 0.958, 0.975]->[0.578, 0.091, 0.958, 0.966]->[0.065, 0.033, 0.83, 0.969]: stand.", "From 984.0s to 985.0s, [0.613, 0.053, 0.948, 0.984]->[0.573, 0.088, 0.968, 0.964]: stand, carry/hold (an object)."]}, {"from": "human", "timestamps": 1022.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.188, 0.023, 0.97, 0.977]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "B1MAUxpKaV8.mkv", "clip": [1016, 1052], "conversations": [{"from": "human", "timestamps": 1046.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.199, 0.024, 0.483, 0.871]: listen to (a person), stand, watch (a person); [0.627, 0.109, 0.891, 0.839]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1052.0, "value": "What actions is the person in bbox [0.634, 0.095, 0.88, 0.882] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.634, 0.095, 0.88, 0.882]: stand, listen to (a person), watch (a person).", "At 1051.0s, [0.608, 0.098, 0.878, 0.926]: stand, carry/hold (an object), listen to (a person).", "At 1050.0s, [0.631, 0.106, 0.89, 0.911]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1049.0s, [0.552, 0.097, 0.885, 0.948]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1048.0s, [0.559, 0.08, 0.941, 0.949]: stand, talk to (e.g., self, a person, a group).", "At 1047.0s, [0.577, 0.109, 0.904, 0.923]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1046.0s, [0.627, 0.109, 0.891, 0.839]: stand, talk to (e.g., self, a person, a group).", "From 1044.0s to 1045.0s, [0.643, 0.105, 0.898, 0.887]->[0.64, 0.097, 0.903, 0.875]: stand, listen to (a person).", "At 1043.0s, [0.639, 0.106, 0.887, 0.88]: stand, smoke, listen to (a person).", "At 1042.0s, [0.634, 0.056, 0.97, 0.975]: stand, listen to (a person).", "From 1040.0s to 1041.0s, [0.639, 0.106, 0.906, 0.952]->[0.632, 0.107, 0.909, 0.944]: stand, smoke, listen to (a person).", "From 1037.0s to 1039.0s, [0.663, 0.115, 0.901, 0.837]->[0.635, 0.092, 0.889, 0.903]->[0.62, 0.085, 0.93, 0.962]: stand, listen to (a person)."]}]}, {"video": "B1MAUxpKaV8.mkv", "clip": [1052, 1119], "conversations": [{"from": "human", "timestamps": 1082.0, 
"value": "For the person at bbox [0.586, 0.07, 0.977, 0.958], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.586, 0.07, 0.977, 0.958]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1081.0s, [0.538, 0.06, 0.992, 0.953]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1079.0s to 1080.0s, [0.503, 0.051, 0.973, 0.961]->[0.532, 0.064, 0.983, 0.955]: stand, carry/hold (an object), give/serve (an object) to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1077.0s to 1078.0s, [0.658, 0.072, 0.994, 0.983]->[0.547, 0.049, 0.983, 0.95]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1076.0s, [0.724, 0.069, 0.997, 0.968]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1075.0s, [0.7, 0.093, 0.993, 0.97]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1074.0s, [0.656, 0.064, 0.998, 0.985]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1073.0s, [0.724, 0.072, 0.991, 0.988]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1072.0s, [0.612, 0.1, 0.987, 0.963]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1071.0s, [0.616, 0.14, 0.971, 0.95]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1067.0s to 1070.0s, [0.539, 0.077, 0.989, 0.984]->[0.584, 0.084, 0.974, 0.954]->[0.662, 0.086, 0.948, 0.926]->[0.629, 0.109, 0.962, 0.964]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1066.0s, [0.631, 0.082, 0.99, 0.975]: stand, listen to (a person).", "From 1059.0s to 1065.0s, [0.592, 0.051, 0.951, 0.974]->[0.585, 0.069, 0.98, 0.959]->[0.573, 0.069, 0.976, 0.966]->[0.605, 0.077, 0.985, 0.97]->[0.693, 0.073, 0.978, 0.981]->[0.531, 0.074, 0.975, 0.957]->[0.703, 0.08, 0.983, 0.974]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1057.0s to 1058.0s, [0.67, 0.09, 0.956, 0.954]->[0.689, 0.063, 0.936, 0.922]: stand, listen to (a person), watch (a person).", "At 1056.0s, [0.675, 0.078, 0.95, 0.971]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1055.0s, [0.643, 0.053, 0.947, 0.943]: stand, listen to (a person)."]}, {"from": "human", "timestamps": 1119.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.165, 0.05, 0.522, 0.952]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.298, 0.043, 0.97, 0.919]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "B1MAUxpKaV8.mkv", "clip": [1161, 1197], "conversations": [{"from": "human", "timestamps": 1191.0, "value": "For the person at bbox [0.287, 0.127, 0.514, 0.936], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.287, 0.127, 0.514, 0.936]: stand, listen to (a person), watch (a person).", "At 1190.0s, [0.294, 0.136, 0.512, 0.943]: stand, listen to (a person), watch (a person).", "From 1185.0s to 1189.0s, [0.27, 0.095, 0.523, 0.935]->[0.27, 0.107, 0.568, 0.852]->[0.284, 0.113, 0.512, 0.927]->[0.283, 0.125, 0.512, 0.899]->[0.289, 0.125, 0.517, 0.928]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1184.0s, [0.271, 0.065, 0.569, 0.931]: stand, talk to 
(e.g., self, a person, a group).", "From 1181.0s to 1183.0s, [0.223, 0.074, 0.513, 0.896]->[0.237, 0.055, 0.509, 0.85]->[0.257, 0.063, 0.502, 0.878]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1180.0s, [0.253, 0.101, 0.521, 0.916]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1179.0s, [0.259, 0.084, 0.517, 0.927]: stand, listen to (a person), watch (a person).", "At 1178.0s, [0.267, 0.151, 0.542, 0.98]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1197.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.303, 0.1, 0.538, 0.903]: stand, talk to (e.g., self, a person, a group); [0.606, 0.039, 0.84, 0.944]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.768, 0.03, 0.908, 0.722]: run/jog."}]}, {"video": "B1MAUxpKaV8.mkv", "clip": [1275, 1343], "conversations": [{"from": "human", "timestamps": 1305.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.178, 0.167, 0.625]: watch (a person), run/jog, talk to (e.g., self, a person, a group); [0.033, 0.223, 0.617, 0.946]: walk; [0.342, 0.106, 0.538, 0.571]: walk; [0.589, 0.216, 0.999, 0.93]: run/jog; [0.603, 0.091, 0.753, 0.725]: carry/hold (an object), watch (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1343.0, "value": "Describe the actions of the person at bbox [0.219, 0.042, 0.871, 0.978] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.219, 0.042, 0.871, 0.978]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1342.0s, [0.305, 0.041, 0.988, 0.981]: walk, talk to (e.g., self, a person, a group)."]}]}, {"video": "B1MAUxpKaV8.mkv", "clip": [1295, 1375], "conversations": [{"from": "human", "timestamps": 1325.0, "value": "What actions is the person in bbox [0.214, 0.025, 0.846, 0.92] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.214, 0.025, 0.846, 0.92]: stand, talk to (e.g., self, a person, a group).", "At 1324.0s, [0.44, 0.094, 0.941, 0.959]: stand, talk to (e.g., self, a person, a group).", "From 1322.0s to 1323.0s, [0.605, 0.121, 0.954, 0.972]->[0.543, 0.089, 0.987, 0.954]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1375.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.045, 0.027, 0.63, 0.977]: stand; [0.387, 0.041, 0.949, 0.965]: watch (a person), stand."}]}, {"video": "B1MAUxpKaV8.mkv", "clip": [1375, 1453], "conversations": [{"from": "human", "timestamps": 1405.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.046, 0.07, 0.847, 0.988]: fall down."}, {"from": "human", "timestamps": 1453.0, "value": "What actions is the person in bbox [0.033, 0.037, 0.746, 0.796] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.033, 0.037, 0.746, 0.796]: lie/sleep.", "From 1451.0s to 1452.0s, [0.033, 0.048, 0.768, 0.739]->[0.04, 0.046, 0.773, 0.759]: lie/sleep."]}]}, {"video": "B1MAUxpKaV8.mkv", "clip": [1437, 1492], "conversations": [{"from": "human", "timestamps": 
1467.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.177, 0.306, 0.79, 0.943]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1492.0, "value": "Describe the actions of the person at bbox [0.207, 0.052, 0.717, 0.933] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.207, 0.052, 0.717, 0.933]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1485.0s to 1491.0s, [0.249, 0.024, 0.993, 1.0]->[0.225, 0.102, 0.776, 0.937]->[0.247, 0.099, 0.784, 0.941]->[0.25, 0.094, 0.789, 0.939]->[0.212, 0.098, 0.794, 0.923]->[0.23, 0.088, 0.799, 0.937]->[0.183, 0.052, 0.809, 0.938]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "B1MAUxpKaV8.mkv", "clip": [1483, 1531], "conversations": [{"from": "human", "timestamps": 1513.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.05, 0.191, 0.32, 0.997]: listen to (a person), stand, watch (a person); [0.236, 0.107, 0.485, 0.981]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.381, 0.18, 0.617, 0.94]: listen to (a person), stand, watch (a person); [0.576, 0.055, 0.926, 0.958]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1531.0, "value": "For the person at bbox [0.288, 0.09, 0.454, 0.922], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.288, 0.09, 0.454, 0.922]: stand, listen to (a person), watch (a person).", "At 1530.0s, [0.31, 0.126, 0.489, 0.851]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "B1MAUxpKaV8.mkv", "clip": [1610, 1654], "conversations": [{"from": "human", "timestamps": 1640.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.056, 0.041, 0.747, 0.955]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1654.0, "value": "What actions is the person in bbox [0.411, 0.034, 0.999, 0.938] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.411, 0.034, 0.999, 0.938]: sit, listen to (a person), watch (a person).", "From 1651.0s to 1653.0s, [0.393, 0.042, 1.0, 0.997]->[0.387, 0.064, 1.0, 0.963]->[0.346, 0.053, 0.999, 0.925]: sit, listen to (a person), watch (a person)."]}]}, {"video": "B1MAUxpKaV8.mkv", "clip": [1684, 1732], "conversations": [{"from": "human", "timestamps": 1714.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.029, 0.139, 0.414, 0.945]: listen to (a person), stand; [0.31, 0.045, 0.542, 0.893]: listen to (a person), stand; [0.653, 0.097, 0.982, 0.959]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1732.0, "value": "What actions did the person at bbox [0.393, 0.024, 1.0, 0.96] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.393, 0.024, 1.0, 0.96]: stand, talk to (e.g., self, a person, a group).", "At 1731.0s, [0.319, 0.035, 1.0, 0.98]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1730.0s, [0.149, 0.03, 1.0, 0.965]: sit, talk to (e.g., self, a 
person, a group)."]}]}, {"video": "BCiuXAuCKAU.mp4", "clip": [893, 1013], "conversations": [{"from": "human", "timestamps": 923.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.236, 0.303, 0.998]: listen to (a person), stand, watch (a person); [0.558, 0.087, 0.913, 0.955]: carry/hold (an object), stand, listen to (a person)."}, {"from": "human", "timestamps": 1013.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.22, 0.156, 1.0]?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.22, 0.156, 1.0]: stand, carry/hold (an object), listen (e.g., to music).", "At 1012.0s, [0.0, 0.206, 0.176, 1.0]: stand, carry/hold (an object), listen (e.g., to music).", "At 1011.0s, [0.0, 0.202, 0.17, 1.0]: stand."]}]}, {"video": "BCiuXAuCKAU.mp4", "clip": [938, 1016], "conversations": [{"from": "human", "timestamps": 968.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.496, 0.305, 0.619, 0.506]: carry/hold (an object), sit, ride (e.g., a bike, a car, a horse)."}, {"from": "human", "timestamps": 1016.0, "value": "What actions is the person in bbox [0.054, 0.01, 0.859, 0.977] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.054, 0.01, 0.859, 0.977]: walk, carry/hold (an object), watch (a person).", "At 1015.0s, [0.141, 0.019, 0.997, 0.988]: walk, carry/hold (an object)."]}]}, {"video": "BCiuXAuCKAU.mp4", "clip": [1018, 1065], "conversations": [{"from": "human", "timestamps": 1048.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.407, 0.162, 0.719, 0.854]: stand."}, {"from": "human", "timestamps": 1065.0, "value": "What actions is the person in bbox [0.0, 0.0, 0.376, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.0, 0.376, 1.0]: walk.", "At 1064.0s, [0.006, 0.0, 0.453, 1.0]: stand, touch (an object).", "From 1062.0s to 1063.0s, [0.012, 0.072, 0.452, 0.977]->[0.0, 0.001, 0.475, 0.977]: stand."]}]}, {"video": "BCiuXAuCKAU.mp4", "clip": [999, 1111], "conversations": [{"from": "human", "timestamps": 1029.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.281, 0.12, 0.483, 0.856]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1111.0, "value": "For the person at bbox [0.203, 0.329, 0.648, 0.986], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.203, 0.329, 0.648, 0.986]: sit.", "From 1109.0s to 1110.0s, [0.011, 0.011, 0.614, 0.986]->[0.041, 0.005, 0.542, 0.98]: stand, carry/hold (an object).", "At 1108.0s, [0.032, 0.013, 0.495, 1.0]: stand.", "From 1106.0s to 1107.0s, [0.052, 0.013, 0.421, 0.998]->[0.098, 0.004, 0.514, 0.999]: walk.", "At 1105.0s, [0.128, 0.019, 0.435, 1.0]: walk, carry/hold (an object).", "At 1104.0s, [0.095, 0.08, 0.353, 0.991]: walk."]}]}, {"video": "BCiuXAuCKAU.mp4", "clip": [1156, 1251], "conversations": [{"from": "human", "timestamps": 1186.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.246, 0.114, 0.99, 0.998]: stand, talk to (e.g., self, a person, a 
group)."}, {"from": "human", "timestamps": 1251.0, "value": "For the person at bbox [0.0, 0.024, 0.557, 0.998], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.024, 0.557, 0.998]: walk, talk to (e.g., self, a person, a group).", "At 1250.0s, [0.0, 0.0, 0.676, 1.0]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1249.0s, [0.02, 0.0, 0.618, 0.992]: stand, talk to (e.g., self, a person, a group).", "At 1248.0s, [0.013, 0.0, 0.64, 0.998]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1247.0s, [0.055, 0.0, 0.674, 1.0]: stand, listen to (a person), watch (a person).", "At 1246.0s, [0.028, 0.0, 0.661, 0.989]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "From 1244.0s to 1245.0s, [0.063, 0.0, 0.701, 0.989]->[0.029, 0.007, 0.701, 1.0]: stand, listen to (a person), watch (a person).", "At 1243.0s, [0.031, 0.01, 0.714, 0.99]: stand, listen to (a person).", "From 1241.0s to 1242.0s, [0.032, 0.01, 0.639, 0.983]->[0.03, 0.001, 0.659, 0.985]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1240.0s, [0.02, 0.0, 0.701, 0.966]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1239.0s, [0.005, 0.022, 0.617, 0.966]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1238.0s, [0.0, 0.005, 0.513, 0.978]: stand, watch (a person)."]}]}, {"video": "BCiuXAuCKAU.mp4", "clip": [1176, 1220], "conversations": [{"from": "human", "timestamps": 1206.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.182, 0.0, 0.964, 0.995]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1220.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.123, 0.017, 0.923, 0.984]?"}, {"from": "gpt", "value": ["In the current frame, [0.123, 0.017, 0.923, 0.984]: walk.", "At 1219.0s, [0.012, 0.0, 0.838, 0.997]: walk.", "From 1216.0s to 1218.0s, [0.001, 0.016, 0.713, 0.995]->[0.013, 0.007, 0.859, 1.0]->[0.028, 0.013, 0.852, 0.999]: stand."]}]}, {"video": "BCiuXAuCKAU.mp4", "clip": [1397, 1431], "conversations": [{"from": "human", "timestamps": 1427.0, "value": "What actions did the person at bbox [0.472, 0.057, 0.979, 0.831] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.472, 0.057, 0.979, 0.831]: carry/hold (an object).", "At 1426.0s, [0.489, 0.056, 0.975, 0.839]: sit, carry/hold (an object), eat.", "At 1425.0s, [0.472, 0.072, 0.976, 0.866]: sit, carry/hold (an object), eat, watch (a person)."]}, {"from": "human", "timestamps": 1431.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.28, 0.032, 0.57, 0.995]: stand."}]}, {"video": "BCiuXAuCKAU.mp4", "clip": [1450, 1482], "conversations": [{"from": "human", "timestamps": 1480.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.05, 0.375, 0.995]: listen to (a person), sit; [0.179, 0.018, 0.972, 0.982]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1482.0, "value": "What actions is the person in bbox [0.177, 0.014, 0.995, 0.982] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.177, 0.014, 0.995, 0.982]: sit, 
talk to (e.g., self, a person, a group), watch (a person).", "From 1480.0s to 1481.0s, [0.179, 0.018, 0.972, 0.982]->[0.155, 0.029, 0.954, 0.985]: sit, talk to (e.g., self, a person, a group).", "From 1478.0s to 1479.0s, [0.234, 0.014, 0.959, 0.983]->[0.241, 0.04, 0.961, 0.986]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "BCiuXAuCKAU.mp4", "clip": [1502, 1595], "conversations": [{"from": "human", "timestamps": 1532.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.135, 0.01, 1.0, 0.974]: stand."}, {"from": "human", "timestamps": 1595.0, "value": "What actions did the person at bbox [0.14, 0.163, 0.761, 0.972] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.14, 0.163, 0.761, 0.972]: sit, talk to (e.g., self, a person, a group).", "At 1594.0s, [0.033, 0.078, 0.741, 0.982]: sit, talk to (e.g., self, a person, a group).", "From 1592.0s to 1593.0s, [0.069, 0.137, 0.732, 0.98]->[0.098, 0.164, 0.726, 0.983]: sit, listen to (a person), watch (a person).", "At 1591.0s, [0.194, 0.167, 0.732, 0.985]: sit, talk to (e.g., self, a person, a group).", "From 1589.0s to 1590.0s, [0.046, 0.069, 0.723, 0.959]->[0.124, 0.138, 0.714, 0.989]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "BCiuXAuCKAU.mp4", "clip": [1529, 1598], "conversations": [{"from": "human", "timestamps": 1559.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.326, 0.02, 0.985, 0.984]: watch (a person), stand."}, {"from": "human", "timestamps": 1598.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.039, 0.183, 0.677, 0.966]?"}, {"from": "gpt", "value": ["At Now, [0.039, 0.183, 0.677, 0.966]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1597.0s, [0.064, 0.139, 0.67, 0.96]: sit, talk to (e.g., self, a person, a group).", "At 1596.0s, [0.076, 0.148, 0.693, 0.961]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1594.0s to 1595.0s, [0.033, 0.078, 0.741, 0.982]->[0.14, 0.163, 0.761, 0.972]: sit, talk to (e.g., self, a person, a group).", "From 1592.0s to 1593.0s, [0.069, 0.137, 0.732, 0.98]->[0.098, 0.164, 0.726, 0.983]: sit, listen to (a person), watch (a person).", "At 1591.0s, [0.194, 0.167, 0.732, 0.985]: sit, talk to (e.g., self, a person, a group).", "From 1589.0s to 1590.0s, [0.046, 0.069, 0.723, 0.959]->[0.124, 0.138, 0.714, 0.989]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "BCiuXAuCKAU.mp4", "clip": [1748, 1785], "conversations": [{"from": "human", "timestamps": 1778.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.522, 0.121, 1.0, 0.975]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1785.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.534, 0.138, 1.0, 0.984]?"}, {"from": "gpt", "value": ["In the current frame, [0.534, 0.138, 1.0, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1784.0s, [0.534, 0.14, 1.0, 0.978]: stand, watch (a person)."]}]}, {"video": "BY3sZmvUp-0.mp4", "clip": [903, 942], "conversations": [{"from": "human", "timestamps": 933.0, "value": "Where is each person positioned, and what actions are they performing in the current 
frame?"}, {"from": "gpt", "value": "At now, [0.019, 0.014, 1.0, 0.998]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 942.0, "value": "What actions did the person at bbox [0.026, 0.49, 0.257, 0.971] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.026, 0.49, 0.257, 0.971]: sit, carry/hold (an object), listen to (a person).", "At 941.0s, [0.097, 0.261, 0.609, 0.977]: stand, carry/hold (an object), open (e.g., a window, a car door), listen to (a person).", "At 940.0s, [0.171, 0.262, 0.612, 0.987]: stand, open (e.g., a window, a car door), listen to (a person).", "From 938.0s to 939.0s, [0.139, 0.123, 0.876, 0.978]->[0.365, 0.165, 0.692, 0.994]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 937.0s, [0.0, 0.081, 0.463, 0.996]: stand, carry/hold (an object), listen to (a person)."]}]}, {"video": "BY3sZmvUp-0.mp4", "clip": [910, 965], "conversations": [{"from": "human", "timestamps": 940.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.171, 0.262, 0.612, 0.987]: listen to (a person), stand, open (e.g., a window, a car door); [0.698, 0.0, 1.0, 1.0]: carry/hold (an object), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 965.0, "value": "What actions did the person at bbox [0.28, 0.234, 0.382, 0.656] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.28, 0.234, 0.382, 0.656]: stand, carry/hold (an object), listen to (a person).", "At 964.0s, [0.271, 0.215, 0.378, 0.621]: walk, carry/hold (an object).", "At 963.0s, [0.273, 0.186, 0.38, 0.623]: walk, carry/hold (an object), listen to (a person).", "At 962.0s, [0.259, 0.176, 0.371, 0.622]: stand, carry/hold (an object).", "At 961.0s, [0.272, 0.153, 0.393, 0.638]: walk, carry/hold (an object), listen to (a person).", "At 960.0s, [0.255, 0.133, 0.375, 0.631]: walk, carry/hold (an object)."]}]}, {"video": "BY3sZmvUp-0.mp4", "clip": [964, 1017], "conversations": [{"from": "human", "timestamps": 994.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.001, 0.014, 0.527, 0.987]: listen to (a person), stand; [0.412, 0.202, 0.863, 0.777]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1017.0, "value": "Describe the actions of the person at bbox [0.041, 0.0, 1.0, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.041, 0.0, 1.0, 1.0]: sit, talk to (e.g., self, a person, a group).", "From 1015.0s to 1016.0s, [0.058, 0.01, 1.0, 0.992]->[0.032, 0.017, 1.0, 0.997]: sit, talk to (e.g., self, a person, a group).", "From 1013.0s to 1014.0s, [0.073, 0.012, 0.996, 1.0]->[0.394, 0.246, 0.835, 0.791]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1012.0s, [0.089, 0.0, 0.974, 0.99]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "BY3sZmvUp-0.mp4", "clip": [991, 1044], "conversations": [{"from": "human", "timestamps": 1021.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.059, 0.009, 0.971, 0.98]: sit."}, {"from": "human", "timestamps": 1044.0, "value": "Describe the actions of the person at bbox [0.508, 0.245, 0.839, 0.5] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.508, 0.245, 0.839, 0.5]: sit, 
talk to (e.g., self, a person, a group), watch (a person).", "At 1043.0s, [0.647, 0.256, 0.826, 0.494]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "BY3sZmvUp-0.mp4", "clip": [1050, 1101], "conversations": [{"from": "human", "timestamps": 1080.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.043, 0.992, 0.941]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1101.0, "value": "What actions did the person at bbox [0.775, 0.009, 1.0, 0.991] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.775, 0.009, 1.0, 0.991]: stand, listen to (a person), watch (a person).", "At 1100.0s, [0.778, 0.003, 1.0, 0.985]: stand, listen to (a person), watch (a person).", "At 1099.0s, [0.785, 0.02, 1.0, 0.997]: stand, listen to (a person).", "From 1097.0s to 1098.0s, [0.594, 0.305, 0.864, 0.858]->[0.604, 0.302, 0.872, 0.856]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1096.0s, [0.589, 0.301, 0.86, 0.865]: stand, talk to (e.g., self, a person, a group).", "From 1094.0s to 1095.0s, [0.592, 0.306, 0.864, 0.866]->[0.589, 0.308, 0.857, 0.874]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1093.0s, [0.6, 0.308, 0.866, 0.859]: stand, talk to (e.g., self, a person, a group).", "From 1091.0s to 1092.0s, [0.603, 0.309, 0.864, 0.86]->[0.599, 0.308, 0.866, 0.861]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1090.0s, [0.605, 0.306, 0.869, 0.859]: stand, talk to (e.g., self, a person, a group).", "From 1088.0s to 1089.0s, [0.609, 0.302, 0.884, 0.861]->[0.605, 0.309, 0.877, 0.861]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1087.0s, [0.629, 0.296, 0.902, 0.862]: stand, talk to (e.g., self, a person, a group).", "At 1086.0s, [0.61, 0.298, 0.879, 0.866]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1085.0s, [0.603, 0.3, 0.872, 0.877]: stand, listen to (a person), watch (a person)."]}]}, {"video": "BY3sZmvUp-0.mp4", "clip": [1059, 1105], "conversations": [{"from": "human", "timestamps": 1089.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.0, 0.67, 0.985]: listen to (a person), sit, watch (a person); [0.605, 0.309, 0.877, 0.861]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1105.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.095, 0.051, 0.968, 0.883]?"}, {"from": "gpt", "value": ["In the current frame, [0.095, 0.051, 0.968, 0.883]: sit, talk to (e.g., self, a person, a group).", "From 1103.0s to 1104.0s, [0.169, 0.431, 0.531, 0.788]->[0.054, 0.067, 0.999, 0.866]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1102.0s, [0.185, 0.426, 0.559, 0.789]: sit, talk to (e.g., self, a person, a group).", "From 1100.0s to 1101.0s, [0.149, 0.424, 0.542, 0.788]->[0.189, 0.422, 0.541, 0.783]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1099.0s, [0.133, 0.433, 0.539, 0.791]: sit, talk to (e.g., self, a person, a group).", "From 1097.0s to 1098.0s, [0.0, 0.0, 0.642, 0.974]->[0.0, 0.0, 0.651, 0.978]: sit, listen to (a person), watch (a person).", "At 1096.0s, [0.0, 0.0, 0.631, 0.965]: sit, listen to (a person).", "From 1094.0s to 1095.0s, [0.0, 0.0, 0.651, 0.952]->[0.0, 0.0, 0.636, 
0.971]: sit, listen to (a person), watch (a person).", "At 1093.0s, [0.0, 0.0, 0.652, 0.942]: sit, listen to (a person).", "From 1091.0s to 1092.0s, [0.0, 0.0, 0.621, 0.964]->[0.0, 0.0, 0.643, 0.961]: sit, listen to (a person), watch (a person).", "At 1090.0s, [0.0, 0.0, 0.641, 0.949]: sit, listen to (a person).", "At 1089.0s, [0.0, 0.0, 0.67, 0.985]: sit, listen to (a person), watch (a person)."]}]}, {"video": "BY3sZmvUp-0.mp4", "clip": [1163, 1195], "conversations": [{"from": "human", "timestamps": 1193.0, "value": "Describe the actions of the person at bbox [0.187, 0.0, 1.0, 0.989] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.187, 0.0, 1.0, 0.989]: stand, talk to (e.g., self, a person, a group).", "At 1192.0s, [0.175, 0.003, 1.0, 1.0]: sit, talk to (e.g., self, a person, a group).", "From 1189.0s to 1191.0s, [0.141, 0.007, 1.0, 1.0]->[0.239, 0.0, 1.0, 1.0]->[0.235, 0.005, 1.0, 0.998]: stand, talk to (e.g., self, a person, a group).", "At 1188.0s, [0.045, 0.0, 0.987, 0.997]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1195.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.568, 0.278, 0.869, 0.915]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "BY3sZmvUp-0.mp4", "clip": [1146, 1226], "conversations": [{"from": "human", "timestamps": 1176.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.576, 0.285, 0.867, 0.885]: carry/hold (an object), stand, read."}, {"from": "human", "timestamps": 1226.0, "value": "What actions did the person at bbox [0.157, 0.015, 1.0, 0.971] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.157, 0.015, 1.0, 0.971]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1225.0s, [0.162, 0.0, 1.0, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1223.0s to 1224.0s, [0.673, 0.0, 0.992, 0.998]->[0.707, 0.003, 0.996, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1222.0s, [0.67, 0.01, 0.986, 0.963]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "BY3sZmvUp-0.mp4", "clip": [1193, 1234], "conversations": [{"from": "human", "timestamps": 1223.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.111, 0.364, 0.549, 0.742]: listen to (a person), sit, watch (a person); [0.673, 0.0, 0.992, 0.998]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1234.0, "value": "Describe the actions of the person at bbox [0.006, 0.071, 0.932, 0.987] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.006, 0.071, 0.932, 0.987]: sit, talk to (e.g., self, a person, a group).", "At 1233.0s, [0.008, 0.024, 0.986, 0.995]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "BY3sZmvUp-0.mp4", "clip": [1241, 1303], "conversations": [{"from": "human", "timestamps": 1271.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.141, 0.024, 0.949, 0.981]?"}, {"from": "gpt", "value": ["In the current frame, [0.141, 0.024, 0.949, 0.981]: stand, talk to (e.g., self, a person, a group).", 
"At 1270.0s, [0.111, 0.034, 0.926, 0.974]: talk to (e.g., self, a person, a group).", "From 1268.0s to 1269.0s, [0.2, 0.005, 0.982, 0.979]->[0.169, 0.0, 0.974, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1303.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.659, 0.292, 1.0, 0.987]: sit."}]}, {"video": "BY3sZmvUp-0.mp4", "clip": [1350, 1416], "conversations": [{"from": "human", "timestamps": 1380.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.26, 0.006, 0.997, 0.996]: carry/hold (an object), write, bend/bow (at the waist)."}, {"from": "human", "timestamps": 1416.0, "value": "For the person at bbox [0.234, 0.039, 0.964, 0.965], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.234, 0.039, 0.964, 0.965]: stand, carry/hold (an object), watch (a person).", "At 1415.0s, [0.51, 0.234, 0.807, 0.985]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1414.0s, [0.516, 0.26, 0.762, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1412.0s to 1413.0s, [0.413, 0.323, 0.675, 1.0]->[0.41, 0.283, 0.683, 0.996]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "BY3sZmvUp-0.mp4", "clip": [1422, 1455], "conversations": [{"from": "human", "timestamps": 1452.0, "value": "What actions did the person at bbox [0.0, 0.083, 0.698, 0.995] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.083, 0.698, 0.995]: stand, watch (a person).", "At 1451.0s, [0.0, 0.015, 0.673, 0.988]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1455.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.151, 0.051, 0.979, 0.98]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "BY3sZmvUp-0.mp4", "clip": [1472, 1609], "conversations": [{"from": "human", "timestamps": 1502.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.113, 0.0, 0.731, 0.981]: carry/hold (an object), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1609.0, "value": "What actions is the person in bbox [0.121, 0.117, 0.579, 0.973] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.121, 0.117, 0.579, 0.973]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1608.0s, [0.115, 0.129, 0.577, 1.0]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1607.0s, [0.074, 0.064, 0.527, 1.0]: walk, carry/hold (an object)."]}]}, {"video": "BY3sZmvUp-0.mp4", "clip": [1554, 1591], "conversations": [{"from": "human", "timestamps": 1584.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.098, 0.095, 0.989, 0.999]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1591.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.254, 0.012, 
0.984, 0.991]?"}, {"from": "gpt", "value": ["In the current frame, [0.254, 0.012, 0.984, 0.991]: stand, carry/hold (an object).", "From 1589.0s to 1590.0s, [0.103, 0.0, 0.973, 0.983]->[0.147, 0.011, 0.978, 0.994]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1588.0s, [0.073, 0.0, 0.976, 0.986]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1587.0s, [0.0, 0.0, 0.967, 0.999]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1584.0s to 1586.0s, [0.098, 0.095, 0.989, 0.999]->[0.046, 0.013, 1.0, 1.0]->[0.0, 0.003, 0.895, 0.976]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1583.0s, [0.05, 0.011, 0.938, 0.981]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "BY3sZmvUp-0.mp4", "clip": [1606, 1658], "conversations": [{"from": "human", "timestamps": 1636.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.009, 0.015, 0.754, 0.976]: carry/hold (an object), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1658.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.005, 0.053, 0.96, 0.976]?"}, {"from": "gpt", "value": ["At Now, [0.005, 0.053, 0.96, 0.976]: sit, talk to (e.g., self, a person, a group).", "At 1657.0s, [0.048, 0.066, 0.984, 0.975]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1656.0s, [0.027, 0.062, 0.982, 0.983]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "BY3sZmvUp-0.mp4", "clip": [1674, 1727], "conversations": [{"from": "human", "timestamps": 1704.0, "value": "Describe the actions of the person at bbox [0.023, 0.0, 0.937, 0.987] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.023, 0.0, 0.937, 0.987]: sit, talk to (e.g., self, a person, a group).", "At 1703.0s, [0.009, 0.008, 0.797, 0.998]: sit.", "At 1702.0s, [0.073, 0.381, 0.275, 0.745]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1727.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.119, 0.086, 0.971, 0.985]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "BY3sZmvUp-0.mp4", "clip": [1735, 1782], "conversations": [{"from": "human", "timestamps": 1765.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.211, 0.011, 0.993, 0.989]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1782.0, "value": "What actions did the person at bbox [0.034, 0.038, 0.998, 0.995] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.034, 0.038, 0.998, 0.995]: sit, talk to (e.g., self, a person, a group).", "At 1781.0s, [0.031, 0.02, 0.983, 0.987]: sit, talk to (e.g., self, a person, a group).", "At 1780.0s, [0.155, 0.029, 1.0, 0.979]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "C25wkwAMB-w.mkv", "clip": [922, 954], "conversations": [{"from": "human", "timestamps": 952.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, 
{"from": "gpt", "value": "At now, [0.0, 0.031, 0.635, 0.975]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 954.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.388, 0.029, 0.986, 0.979]?"}, {"from": "gpt", "value": ["In the current frame, [0.388, 0.029, 0.986, 0.979]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 953.0s, [0.46, 0.004, 0.977, 0.97]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "C25wkwAMB-w.mkv", "clip": [999, 1068], "conversations": [{"from": "human", "timestamps": 1029.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.056, 0.095, 0.399, 0.946]: carry/hold (an object), stand, listen to (a person); [0.643, 0.138, 0.929, 0.985]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1068.0, "value": "What actions is the person in bbox [0.579, 0.078, 0.975, 0.985] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.579, 0.078, 0.975, 0.985]: walk, carry/hold (an object).", "At 1067.0s, [0.214, 0.138, 0.811, 0.993]: walk.", "At 1066.0s, [0.035, 0.071, 0.665, 0.971]: walk, watch (a person).", "From 1063.0s to 1065.0s, [0.018, 0.019, 0.747, 0.977]->[0.0, 0.0, 0.79, 0.983]->[0.022, 0.0, 0.754, 0.978]: stand, talk to (e.g., self, a person, a group).", "From 1060.0s to 1062.0s, [0.211, 0.067, 0.745, 0.977]->[0.022, 0.016, 0.758, 0.979]->[0.03, 0.012, 0.756, 0.981]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "C25wkwAMB-w.mkv", "clip": [1068, 1127], "conversations": [{"from": "human", "timestamps": 1098.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.48, 0.52, 0.692, 0.991]: bend/bow (at the waist); [0.482, 0.344, 0.99, 0.995]: bend/bow (at the waist)."}, {"from": "human", "timestamps": 1127.0, "value": "What actions is the person in bbox [0.278, 0.005, 0.995, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.278, 0.005, 0.995, 1.0]: sit, talk to (e.g., self, a person, a group).", "At 1126.0s, [0.292, 0.0, 0.995, 0.998]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "C25wkwAMB-w.mkv", "clip": [1117, 1196], "conversations": [{"from": "human", "timestamps": 1147.0, "value": "Describe the actions of the person at bbox [0.003, 0.0, 0.794, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.003, 0.0, 0.794, 1.0]: stand, listen to (a person), watch (a person).", "At 1146.0s, [0.027, 0.0, 0.763, 0.987]: sit, watch (a person).", "At 1145.0s, [0.011, 0.0, 0.781, 1.0]: sit."]}, {"from": "human", "timestamps": 1196.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.191, 0.0, 0.86, 0.997]: watch (a person), sit."}]}, {"video": "C25wkwAMB-w.mkv", "clip": [1159, 1257], "conversations": [{"from": "human", "timestamps": 1189.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.231, 0.072, 0.998, 1.0]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", 
"timestamps": 1257.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.36, 0.05, 0.749, 0.983]?"}, {"from": "gpt", "value": ["In the current frame, [0.36, 0.05, 0.749, 0.983]: stand, listen to (a person), watch (a person).", "At 1256.0s, [0.368, 0.061, 0.751, 0.996]: stand, listen to (a person).", "At 1255.0s, [0.36, 0.044, 0.755, 0.982]: stand, listen to (a person), watch (a person)."]}]}, {"video": "C25wkwAMB-w.mkv", "clip": [1257, 1311], "conversations": [{"from": "human", "timestamps": 1287.0, "value": "What actions is the person in bbox [0.377, 0.0, 1.0, 0.992] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.377, 0.0, 1.0, 0.992]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1286.0s, [0.335, 0.0, 0.988, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1283.0s to 1285.0s, [0.352, 0.019, 0.741, 0.982]->[0.345, 0.026, 0.739, 0.986]->[0.347, 0.025, 0.733, 0.979]: stand, listen to (a person), watch (a person).", "At 1282.0s, [0.342, 0.027, 0.738, 0.977]: stand, listen to (a person).", "At 1281.0s, [0.336, 0.02, 0.727, 0.987]: stand, listen to (a person), watch (a person).", "At 1280.0s, [0.172, 0.002, 0.835, 0.996]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1279.0s, [0.256, 0.0, 0.906, 0.99]: stand, talk to (e.g., self, a person, a group).", "At 1278.0s, [0.265, 0.012, 0.892, 0.999]: sit, talk to (e.g., self, a person, a group).", "At 1277.0s, [0.262, 0.002, 0.899, 0.98]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1276.0s, [0.272, 0.0, 0.891, 0.979]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1311.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.059, 0.173, 0.365, 0.884]: listen to (a person), sit; [0.344, 0.144, 0.52, 0.652]: listen to (a person), sit, watch (a person); [0.513, 0.196, 0.66, 0.598]: listen to (a person), sit, watch (a person); [0.644, 0.18, 0.791, 0.583]: listen to (a person), sit, watch (a person); [0.77, 0.178, 0.925, 0.61]: sit, talk to (e.g., self, a person, a group); [0.891, 0.118, 1.0, 0.644]: carry/hold (an object), give/serve (an object) to (a person), crouch/kneel, listen to (a person)."}]}, {"video": "C25wkwAMB-w.mkv", "clip": [1247, 1353], "conversations": [{"from": "human", "timestamps": 1277.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.262, 0.002, 0.899, 0.98]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1353.0, "value": "For the person at bbox [0.427, 0.198, 0.989, 0.986], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.427, 0.198, 0.989, 0.986]: sit.", "At 1352.0s, [0.432, 0.175, 0.906, 0.981]: sit, talk to (e.g., self, a person, a group).", "From 1348.0s to 1351.0s, [0.511, 0.069, 1.0, 0.976]->[0.552, 0.013, 0.985, 0.98]->[0.537, 0.052, 0.957, 0.978]->[0.533, 0.082, 0.956, 0.98]: sit.", "From 1346.0s to 1347.0s, [0.335, 0.068, 0.915, 0.975]->[0.399, 0.113, 0.917, 1.0]: stand.", "At 1345.0s, [0.363, 0.077, 0.915, 0.98]: stand, carry/hold (an object).", "At 1344.0s, [0.603, 0.079, 0.788, 0.829]: stand, carry/hold (an object), fishing.", "At 1343.0s, [0.611, 0.075, 0.79, 0.833]: stand, fishing.", "At 1342.0s, [0.641, 0.077, 0.823, 0.804]: stand, 
carry/hold (an object), fishing.", "At 1341.0s, [0.714, 0.079, 0.903, 0.752]: stand, fishing.", "At 1340.0s, [0.809, 0.072, 0.993, 0.854]: stand, carry/hold (an object), fishing."]}]}, {"video": "C25wkwAMB-w.mkv", "clip": [1389, 1443], "conversations": [{"from": "human", "timestamps": 1419.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.205, 0.257, 0.413, 0.885]: carry/hold (an object), run/jog, listen to (a person); [0.394, 0.15, 0.627, 0.787]: carry/hold (an object), run/jog, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1443.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.29, 0.163, 0.568, 0.854]?"}, {"from": "gpt", "value": ["In the current frame, [0.29, 0.163, 0.568, 0.854]: sit.", "At 1442.0s, [0.299, 0.119, 0.604, 0.881]: sit, listen to (a person), talk to (e.g., self, a person, a group)."]}]}, {"video": "C25wkwAMB-w.mkv", "clip": [1492, 1531], "conversations": [{"from": "human", "timestamps": 1522.0, "value": "What actions is the person in bbox [0.174, 0.008, 1.0, 0.981] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.174, 0.008, 1.0, 0.981]: sit, talk to (e.g., self, a person, a group).", "At 1521.0s, [0.183, 0.008, 1.0, 0.991]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1520.0s, [0.22, 0.032, 0.994, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1519.0s, [0.192, 0.036, 1.0, 0.987]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1531.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.191, 0.006, 0.998, 0.982]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "C25wkwAMB-w.mkv", "clip": [1472, 1590], "conversations": [{"from": "human", "timestamps": 1502.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.027, 0.013, 0.728, 0.981]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1590.0, "value": "Describe the actions of the person at bbox [0.0, 0.01, 0.734, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.01, 0.734, 1.0]: sit.", "At 1589.0s, [0.012, 0.03, 0.7, 0.982]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "C25wkwAMB-w.mkv", "clip": [1565, 1613], "conversations": [{"from": "human", "timestamps": 1595.0, "value": "What actions did the person at bbox [0.0, 0.03, 0.671, 0.976] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.03, 0.671, 0.976]: sit.", "From 1592.0s to 1594.0s, [0.005, 0.035, 0.674, 0.987]->[0.0, 0.045, 0.682, 0.991]->[0.0, 0.008, 0.661, 0.988]: sit."]}, {"from": "human", "timestamps": 1613.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.093, 0.046, 0.626, 1.0]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.509, 0.049, 0.997, 0.984]: listen to (a person), sit, watch (a person)."}]}, {"video": "C3qk4yAMANk.mkv", "clip": [876, 958], "conversations": [{"from": "human", "timestamps": 906.0, "value": "Where is each person positioned, and what actions are they 
performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.093, 0.437, 0.878]: listen to (a person), sit, eat, watch (a person); [0.456, 0.096, 0.8, 0.817]: sit, watch (a person), carry/hold (an object), talk to (e.g., self, a person, a group), smoke; [0.691, 0.063, 1.0, 0.953]: listen to (a person), sit, touch (an object), watch (a person)."}, {"from": "human", "timestamps": 958.0, "value": "Describe the actions of the person at bbox [0.275, 0.059, 0.407, 0.779] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.275, 0.059, 0.407, 0.779]: stand, close (e.g., a door, a box), watch (a person).", "At 957.0s, [0.234, 0.105, 0.355, 0.749]: stand, open (e.g., a window, a car door)."]}]}, {"video": "C3qk4yAMANk.mkv", "clip": [901, 1016], "conversations": [{"from": "human", "timestamps": 931.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.45, 0.143, 0.752, 0.984]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1016.0, "value": "For the person at bbox [0.357, 0.256, 0.548, 0.745], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.357, 0.256, 0.548, 0.745]: bend/bow (at the waist), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1014.0s to 1015.0s, [0.385, 0.134, 0.515, 0.75]->[0.361, 0.129, 0.495, 0.745]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "C3qk4yAMANk.mkv", "clip": [949, 1010], "conversations": [{"from": "human", "timestamps": 979.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.237, 0.183, 1.0, 0.989]: lie/sleep, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1010.0, "value": "What actions is the person in bbox [0.036, 0.018, 0.688, 0.98] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.036, 0.018, 0.688, 0.98]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1009.0s, [0.032, 0.0, 0.698, 0.964]: bend/bow (at the waist), touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "C3qk4yAMANk.mkv", "clip": [1010, 1075], "conversations": [{"from": "human", "timestamps": 1040.0, "value": "What actions did the person at bbox [0.397, 0.155, 0.926, 0.987] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.397, 0.155, 0.926, 0.987]: stand, watch (a person).", "At 1039.0s, [0.405, 0.173, 0.89, 0.979]: stand, watch (a person).", "From 1037.0s to 1038.0s, [0.368, 0.19, 0.782, 0.987]->[0.407, 0.155, 0.852, 0.982]: stand, listen to (a person), watch (a person).", "At 1036.0s, [0.376, 0.205, 0.732, 0.985]: stand, listen to (a person).", "At 1035.0s, [0.404, 0.228, 0.742, 0.988]: walk, smoke.", "From 1033.0s to 1034.0s, [0.376, 0.23, 0.567, 0.98]->[0.465, 0.243, 0.753, 0.984]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1032.0s, [0.395, 0.225, 0.546, 0.979]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1031.0s, [0.405, 0.214, 0.557, 0.978]: stand, listen to (a person), watch (a person).", "From 1028.0s to 1030.0s, [0.39, 0.181, 0.555, 0.966]->[0.408, 0.182, 0.576, 0.987]->[0.419, 0.186, 0.573, 0.972]: stand, 
listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1027.0s, [0.393, 0.169, 0.56, 0.971]: stand, close (e.g., a door, a box), listen to (a person)."]}, {"from": "human", "timestamps": 1075.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.415, 0.246, 0.619, 0.607]: listen to (a person), sit, watch (a person)."}]}, {"video": "C3qk4yAMANk.mkv", "clip": [1080, 1116], "conversations": [{"from": "human", "timestamps": 1110.0, "value": "What actions is the person in bbox [0.099, 0.031, 0.833, 0.982] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.099, 0.031, 0.833, 0.982]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1107.0s to 1109.0s, [0.111, 0.06, 0.826, 0.973]->[0.111, 0.056, 0.835, 0.989]->[0.098, 0.05, 0.816, 0.975]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1116.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.066, 0.012, 0.908, 0.997]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "C3qk4yAMANk.mkv", "clip": [1122, 1218], "conversations": [{"from": "human", "timestamps": 1152.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.116, 0.041, 0.838, 1.0]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1218.0, "value": "What actions is the person in bbox [0.095, 0.031, 0.943, 0.987] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.095, 0.031, 0.943, 0.987]: sit, answer phone, ride (e.g., a bike, a car, a horse).", "From 1216.0s to 1217.0s, [0.0, 0.007, 0.929, 0.985]->[0.106, 0.007, 0.923, 0.956]: sit, answer phone, ride (e.g., a bike, a car, a horse)."]}]}, {"video": "C3qk4yAMANk.mkv", "clip": [1249, 1307], "conversations": [{"from": "human", "timestamps": 1279.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.117, 0.039, 0.774, 0.994]: watch (a person), sit, touch (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1307.0, "value": "What actions did the person at bbox [0.291, 0.059, 0.986, 0.983] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.291, 0.059, 0.986, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1306.0s, [0.363, 0.059, 1.0, 0.987]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1305.0s, [0.308, 0.072, 0.997, 0.984]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "C3qk4yAMANk.mkv", "clip": [1398, 1488], "conversations": [{"from": "human", "timestamps": 1428.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.079, 0.04, 0.775, 0.978]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.657, 0.0, 0.997, 0.896]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1488.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.552, 0.351, 0.646, 0.801]?"}, {"from": "gpt", "value": ["At 
Now, [0.552, 0.351, 0.646, 0.801]: walk.", "At 1487.0s, [0.471, 0.357, 0.578, 0.831]: walk."]}]}, {"video": "C3qk4yAMANk.mkv", "clip": [1421, 1485], "conversations": [{"from": "human", "timestamps": 1451.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.008, 0.021, 0.781, 0.983]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.623, 0.016, 1.0, 0.985]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1485.0, "value": "What actions is the person in bbox [0.128, 0.056, 0.727, 0.981] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.128, 0.056, 0.727, 0.981]: stand.", "From 1483.0s to 1484.0s, [0.111, 0.006, 0.729, 0.978]->[0.155, 0.051, 0.708, 0.998]: stand, carry/hold (an object).", "From 1478.0s to 1482.0s, [0.128, 0.084, 0.502, 0.958]->[0.118, 0.043, 0.54, 0.944]->[0.125, 0.023, 0.596, 1.0]->[0.149, 0.099, 0.601, 0.983]->[0.112, 0.034, 0.63, 0.989]: walk, carry/hold (an object).", "At 1477.0s, [0.074, 0.064, 0.36, 0.951]: walk, carry/hold (an object), listen to (a person)."]}]}, {"video": "C3qk4yAMANk.mkv", "clip": [1502, 1555], "conversations": [{"from": "human", "timestamps": 1532.0, "value": "What actions did the person at bbox [0.179, 0.239, 0.514, 0.969] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.179, 0.239, 0.514, 0.969]: bend/bow (at the waist), answer phone, put down.", "At 1531.0s, [0.325, 0.207, 0.597, 0.934]: bend/bow (at the waist), answer phone, carry/hold (an object), open (e.g., a window, a car door).", "At 1530.0s, [0.211, 0.077, 0.495, 0.94]: stand, answer phone, carry/hold (an object), open (e.g., a window, a car door)."]}, {"from": "human", "timestamps": 1555.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.483, 0.107, 0.703, 0.959]: carry/hold (an object), stand, answer phone."}]}, {"video": "C3qk4yAMANk.mkv", "clip": [1516, 1608], "conversations": [{"from": "human", "timestamps": 1546.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.331, 0.173, 0.555, 0.952]: answer phone, walk."}, {"from": "human", "timestamps": 1608.0, "value": "For the person at bbox [0.458, 0.0, 0.885, 0.978], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.458, 0.0, 0.885, 0.978]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1606.0s to 1607.0s, [0.424, 0.044, 0.9, 0.964]->[0.42, 0.0, 0.874, 0.978]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1605.0s, [0.358, 0.027, 0.887, 0.978]: stand, listen to (a person), watch (a person).", "From 1603.0s to 1604.0s, [0.552, 0.268, 0.657, 0.895]->[0.472, 0.034, 0.883, 0.972]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1602.0s, [0.562, 0.269, 0.66, 0.901]: stand, watch (a person).", "At 1601.0s, [0.547, 0.267, 0.657, 0.898]: stand, play with kids, watch (a person).", "From 1599.0s to 1600.0s, [0.543, 0.265, 0.657, 0.896]->[0.531, 0.278, 0.649, 0.901]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "C3qk4yAMANk.mkv", "clip": [1537, 1573], "conversations": [{"from": "human", "timestamps": 1567.0, "value": "Can you tell me the current and 
past actions of the person located in bbox [0.435, 0.018, 0.744, 0.986]?"}, {"from": "gpt", "value": ["In the current frame, [0.435, 0.018, 0.744, 0.986]: stand, answer phone.", "At 1566.0s, [0.462, 0.0, 0.744, 0.966]: stand, answer phone.", "At 1565.0s, [0.434, 0.091, 0.764, 0.987]: bend/bow (at the waist), answer phone.", "At 1564.0s, [0.428, 0.034, 0.755, 1.0]: stand, answer phone.", "At 1563.0s, [0.442, 0.049, 0.758, 0.984]: stand, answer phone, carry/hold (an object).", "At 1562.0s, [0.436, 0.027, 0.752, 0.97]: stand, answer phone.", "From 1559.0s to 1561.0s, [0.423, 0.193, 0.748, 0.979]->[0.398, 0.273, 0.759, 0.978]->[0.414, 0.163, 0.76, 0.987]: bend/bow (at the waist), answer phone.", "From 1557.0s to 1558.0s, [0.441, 0.279, 0.752, 0.96]->[0.417, 0.285, 0.753, 0.964]: bend/bow (at the waist), answer phone, brush teeth.", "At 1556.0s, [0.462, 0.203, 0.739, 0.976]: bend/bow (at the waist), answer phone, carry/hold (an object).", "At 1555.0s, [0.483, 0.107, 0.703, 0.959]: stand, answer phone, carry/hold (an object).", "At 1554.0s, [0.458, 0.153, 0.718, 0.964]: stand, answer phone, touch (an object).", "At 1553.0s, [0.535, 0.123, 0.705, 0.955]: stand, answer phone.", "At 1552.0s, [0.526, 0.081, 0.725, 1.0]: walk, answer phone.", "At 1551.0s, [0.484, 0.101, 0.726, 0.931]: stand, answer phone, touch (an object).", "At 1550.0s, [0.569, 0.115, 0.859, 0.921]: walk, answer phone, open (e.g., a window, a car door)."]}, {"from": "human", "timestamps": 1573.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.455, 0.894, 0.995]: lie/sleep."}]}, {"video": "C3qk4yAMANk.mkv", "clip": [1588, 1644], "conversations": [{"from": "human", "timestamps": 1618.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.177, 0.067, 0.709, 0.965]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.336, 0.193, 0.911, 0.995]: watch (a person), crouch/kneel; [0.671, 0.089, 0.913, 0.614]: watch (a person), listen to (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1644.0, "value": "Describe the actions of the person at bbox [0.183, 0.006, 0.799, 0.985] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.183, 0.006, 0.799, 0.985]: stand, carry/hold (an object), watch (a person).", "At 1643.0s, [0.083, 0.0, 0.887, 0.973]: stand, carry/hold (an object)."]}]}, {"video": "C3qk4yAMANk.mkv", "clip": [1589, 1691], "conversations": [{"from": "human", "timestamps": 1619.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.155, 0.082, 0.602, 0.989]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.433, 0.029, 0.61, 0.689]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.433, 0.355, 0.884, 0.986]: watch (a person), talk to (e.g., self, a person, a group), bend/bow (at the waist); [0.682, 0.095, 0.868, 0.631]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1691.0, "value": "For the person at bbox [0.219, 0.034, 0.64, 0.984], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.219, 0.034, 0.64, 0.984]: stand, answer phone.", "From 1689.0s to 1690.0s, [0.226, 0.013, 0.615, 1.0]->[0.222, 0.026, 0.658, 
0.993]: stand, answer phone.", "At 1688.0s, [0.255, 0.032, 0.767, 0.985]: stand, carry/hold (an object).", "From 1686.0s to 1687.0s, [0.539, 0.242, 0.678, 0.892]->[0.508, 0.25, 0.634, 0.903]: stand, text on/look at a cellphone.", "At 1685.0s, [0.534, 0.245, 0.689, 0.896]: stand, carry/hold (an object).", "At 1684.0s, [0.54, 0.24, 0.694, 0.897]: stand.", "From 1680.0s to 1683.0s, [0.526, 0.246, 0.664, 0.896]->[0.506, 0.25, 0.654, 0.894]->[0.529, 0.236, 0.697, 0.908]->[0.526, 0.24, 0.69, 0.903]: stand, watch (a person).", "At 1679.0s, [0.527, 0.278, 0.648, 0.899]: stand, play with kids, watch (a person).", "At 1678.0s, [0.536, 0.269, 0.649, 0.895]: stand, watch (a person).", "At 1677.0s, [0.536, 0.265, 0.666, 0.896]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1676.0s, [0.538, 0.262, 0.666, 0.895]: stand, watch (a person).", "At 1675.0s, [0.518, 0.262, 0.654, 0.9]: stand, listen to (a person), watch (a person).", "At 1674.0s, [0.535, 0.276, 0.639, 0.91]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1673.0s, [0.507, 0.279, 0.636, 0.906]: stand, watch (a person)."]}]}, {"video": "C3qk4yAMANk.mkv", "clip": [1669, 1711], "conversations": [{"from": "human", "timestamps": 1699.0, "value": "Describe the actions of the person at bbox [0.205, 0.041, 0.678, 0.991] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.205, 0.041, 0.678, 0.991]: stand, answer phone.", "From 1696.0s to 1698.0s, [0.185, 0.04, 0.639, 0.992]->[0.2, 0.017, 0.666, 0.995]->[0.203, 0.034, 0.688, 0.981]: stand, answer phone.", "At 1695.0s, [0.201, 0.062, 0.728, 0.988]: stand, answer phone, talk to (e.g., self, a person, a group).", "From 1689.0s to 1694.0s, [0.226, 0.013, 0.615, 1.0]->[0.222, 0.026, 0.658, 0.993]->[0.219, 0.034, 0.64, 0.984]->[0.222, 0.049, 0.733, 0.984]->[0.292, 0.078, 0.747, 1.0]->[0.151, 0.08, 0.601, 0.992]: stand, answer phone.", "At 1688.0s, [0.255, 0.032, 0.767, 0.985]: stand, carry/hold (an object).", "From 1686.0s to 1687.0s, [0.539, 0.242, 0.678, 0.892]->[0.508, 0.25, 0.634, 0.903]: stand, text on/look at a cellphone.", "At 1685.0s, [0.534, 0.245, 0.689, 0.896]: stand, carry/hold (an object).", "At 1684.0s, [0.54, 0.24, 0.694, 0.897]: stand.", "From 1680.0s to 1683.0s, [0.526, 0.246, 0.664, 0.896]->[0.506, 0.25, 0.654, 0.894]->[0.529, 0.236, 0.697, 0.908]->[0.526, 0.24, 0.69, 0.903]: stand, watch (a person).", "At 1679.0s, [0.527, 0.278, 0.648, 0.899]: stand, play with kids, watch (a person).", "At 1678.0s, [0.536, 0.269, 0.649, 0.895]: stand, watch (a person).", "At 1677.0s, [0.536, 0.265, 0.666, 0.896]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1676.0s, [0.538, 0.262, 0.666, 0.895]: stand, watch (a person).", "At 1675.0s, [0.518, 0.262, 0.654, 0.9]: stand, listen to (a person), watch (a person).", "At 1674.0s, [0.535, 0.276, 0.639, 0.91]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1673.0s, [0.507, 0.279, 0.636, 0.906]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1711.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.0, 0.509, 0.987]: stand, answer phone."}]}, {"video": "CG98XdYsgrA.mkv", "clip": [889, 930], "conversations": [{"from": "human", "timestamps": 919.0, "value": "For the person at bbox [0.0, 0.013, 1.0, 0.989], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", 
"value": ["In the current frame, [0.0, 0.013, 1.0, 0.989]: stand, talk to (e.g., self, a person, a group).", "From 908.0s to 918.0s, [0.0, 0.0, 1.0, 0.962]->[0.0, 0.0, 1.0, 0.984]->[0.002, 0.0, 0.798, 0.987]->[0.0, 0.0, 0.781, 0.982]->[0.0, 0.009, 1.0, 0.96]->[0.0, 0.004, 0.986, 0.986]->[0.0, 0.019, 1.0, 0.968]->[0.0, 0.014, 1.0, 0.965]->[0.0, 0.003, 0.99, 0.98]->[0.0, 0.025, 1.0, 0.973]->[0.002, 0.023, 1.0, 0.968]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 930.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.016, 0.016, 1.0, 0.996]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "CG98XdYsgrA.mkv", "clip": [1028, 1070], "conversations": [{"from": "human", "timestamps": 1058.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.554, 0.149, 1.0, 1.0]?"}, {"from": "gpt", "value": ["In the current frame, [0.554, 0.149, 1.0, 1.0]: stand, listen to (a person), watch (a person).", "From 1055.0s to 1057.0s, [0.546, 0.151, 1.0, 1.0]->[0.545, 0.146, 1.0, 1.0]->[0.555, 0.145, 1.0, 1.0]: stand, listen to (a person), watch (a person).", "At 1054.0s, [0.565, 0.158, 1.0, 1.0]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1070.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.031, 0.012, 0.745, 0.977]: listen to (a person), stand, watch (a person); [0.547, 0.006, 1.0, 0.935]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "CG98XdYsgrA.mkv", "clip": [1138, 1169], "conversations": [{"from": "human", "timestamps": 1168.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.03, 0.444, 1.0]: listen to (a person), stand, watch (a person); [0.464, 0.076, 0.88, 0.988]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1169.0, "value": "For the person at bbox [0.398, 0.065, 0.844, 0.992], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.398, 0.065, 0.844, 0.992]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1168.0s, [0.464, 0.076, 0.88, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1167.0s, [0.374, 0.124, 0.812, 1.0]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1166.0s, [0.636, 0.114, 0.891, 0.96]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1164.0s to 1165.0s, [0.618, 0.186, 0.878, 0.982]->[0.555, 0.135, 0.859, 0.97]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1163.0s, [0.762, 0.277, 0.926, 0.971]: walk."]}]}, {"video": "CG98XdYsgrA.mkv", "clip": [1252, 1309], "conversations": [{"from": "human", "timestamps": 1282.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.0, 0.076, 0.968, 0.959]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1309.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.221, 0.012, 0.748, 1.0]?"}, {"from": "gpt", "value": ["At Now, [0.221, 0.012, 0.748, 1.0]: walk.", "From 1307.0s to 1308.0s, [0.033, 0.042, 0.752, 0.998]->[0.222, 0.033, 0.827, 1.0]: 
walk.", "At 1306.0s, [0.094, 0.038, 0.791, 1.0]: stand."]}]}, {"video": "CG98XdYsgrA.mkv", "clip": [1243, 1348], "conversations": [{"from": "human", "timestamps": 1273.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.256, 0.064, 0.487, 0.955]: watch (a person), stand."}, {"from": "human", "timestamps": 1348.0, "value": "For the person at bbox [0.203, 0.604, 0.261, 0.781], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.203, 0.604, 0.261, 0.781]: stand, carry/hold (an object).", "At 1347.0s, [0.216, 0.607, 0.261, 0.768]: stand, carry/hold (an object)."]}]}, {"video": "CG98XdYsgrA.mkv", "clip": [1309, 1349], "conversations": [{"from": "human", "timestamps": 1339.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.106, 0.011, 0.487, 0.977]: walk."}, {"from": "human", "timestamps": 1349.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.206, 0.647, 0.27, 0.764]?"}, {"from": "gpt", "value": ["In the current frame, [0.206, 0.647, 0.27, 0.764]: sit.", "From 1347.0s to 1348.0s, [0.216, 0.607, 0.261, 0.768]->[0.203, 0.604, 0.261, 0.781]: stand, carry/hold (an object)."]}]}, {"video": "CG98XdYsgrA.mkv", "clip": [1521, 1604], "conversations": [{"from": "human", "timestamps": 1551.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.215, 0.143, 0.501, 0.9]: shovel, stand."}, {"from": "human", "timestamps": 1604.0, "value": "For the person at bbox [0.096, 0.0, 0.978, 0.999], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.096, 0.0, 0.978, 0.999]: stand.", "At 1603.0s, [0.079, 0.0, 0.962, 1.0]: stand, watch (a person)."]}]}, {"video": "CG98XdYsgrA.mkv", "clip": [1567, 1603], "conversations": [{"from": "human", "timestamps": 1597.0, "value": "What actions is the person in bbox [0.066, 0.05, 0.33, 0.987] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.066, 0.05, 0.33, 0.987]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1595.0s to 1596.0s, [0.105, 0.064, 0.289, 0.977]->[0.068, 0.082, 0.289, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1594.0s, [0.114, 0.036, 0.29, 0.997]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1603.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.079, 0.0, 0.962, 1.0]: watch (a person), stand."}]}, {"video": "CG98XdYsgrA.mkv", "clip": [1713, 1783], "conversations": [{"from": "human", "timestamps": 1743.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.02, 0.0, 0.673, 0.871]: watch (a person), stand, touch (an object)."}, {"from": "human", "timestamps": 1783.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.422, 0.107, 0.688, 0.467]?"}, {"from": "gpt", "value": ["In the current frame, [0.422, 0.107, 0.688, 0.467]: sit, touch (an object), listen to (a person), watch (a person).", "At 1782.0s, [0.416, 0.103, 
0.687, 0.466]: sit, touch (an object), watch (a person)."]}]}, {"video": "CrlfWnsS7ac.mkv", "clip": [915, 961], "conversations": [{"from": "human", "timestamps": 945.0, "value": "What actions is the person in bbox [0.506, 0.036, 0.911, 0.983] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.506, 0.036, 0.911, 0.983]: stand, listen to (a person), watch (a person).", "At 944.0s, [0.498, 0.032, 0.897, 0.987]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 943.0s, [0.526, 0.046, 0.896, 0.981]: stand, talk to (e.g., self, a person, a group).", "From 941.0s to 942.0s, [0.546, 0.047, 0.903, 0.975]->[0.539, 0.044, 0.905, 0.98]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 940.0s, [0.552, 0.045, 0.909, 0.977]: stand, listen to (a person).", "At 939.0s, [0.537, 0.04, 0.902, 0.979]: stand, watch (a person).", "At 938.0s, [0.566, 0.043, 0.92, 0.994]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 937.0s, [0.539, 0.035, 0.898, 0.987]: stand, talk to (e.g., self, a person, a group).", "From 931.0s to 936.0s, [0.546, 0.028, 0.906, 0.976]->[0.534, 0.039, 0.906, 0.977]->[0.539, 0.034, 0.918, 0.98]->[0.556, 0.036, 0.92, 0.989]->[0.542, 0.032, 0.906, 0.982]->[0.536, 0.037, 0.907, 0.981]: stand, listen to (a person), watch (a person).", "From 929.0s to 930.0s, [0.617, 0.06, 0.971, 0.968]->[0.584, 0.016, 0.929, 0.97]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 928.0s, [0.636, 0.073, 0.997, 0.97]: walk, listen to (a person).", "At 927.0s, [0.668, 0.05, 0.901, 0.811]: walk, touch (an object), listen to (a person)."]}, {"from": "human", "timestamps": 961.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.029, 0.1, 0.572, 0.995]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.555, 0.0, 1.0, 0.983]: listen to (a person), stand, watch (a person)."}]}, {"video": "CrlfWnsS7ac.mkv", "clip": [902, 1014], "conversations": [{"from": "human", "timestamps": 932.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.155, 0.052, 0.534, 0.984]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.534, 0.039, 0.906, 0.977]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1014.0, "value": "For the person at bbox [0.559, 0.007, 1.0, 0.992], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.559, 0.007, 1.0, 0.992]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1007.0s to 1013.0s, [0.507, 0.049, 0.879, 0.964]->[0.487, 0.077, 0.811, 0.979]->[0.45, 0.102, 0.813, 0.971]->[0.515, 0.051, 0.838, 0.967]->[0.539, 0.086, 0.863, 0.985]->[0.505, 0.016, 0.846, 0.974]->[0.542, 0.0, 1.0, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1006.0s, [0.602, 0.0, 1.0, 1.0]: stand, listen to (a person), watch (a person).", "At 1005.0s, [0.592, 0.0, 1.0, 1.0]: stand, listen to (a person).", "At 1004.0s, [0.586, 0.003, 1.0, 0.994]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1003.0s, [0.597, 0.0, 1.0, 0.993]: stand, listen to (a person), watch (a person).", "At 1002.0s, [0.501, 0.107, 0.848, 0.976]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "CrlfWnsS7ac.mkv", 
"clip": [938, 971], "conversations": [{"from": "human", "timestamps": 968.0, "value": "For the person at bbox [0.555, 0.046, 0.933, 0.96], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.555, 0.046, 0.933, 0.96]: stand, listen to (a person), watch (a person).", "From 965.0s to 967.0s, [0.371, 0.1, 0.97, 0.982]->[0.326, 0.102, 0.975, 0.985]->[0.358, 0.112, 0.97, 0.987]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 971.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.115, 0.097, 0.394, 0.974]: watch (a person), stand, hand shake; [0.378, 0.568, 0.473, 0.842]: walk; [0.481, 0.058, 0.939, 0.982]: watch (a person), stand, hand shake, talk to (e.g., self, a person, a group)."}]}, {"video": "CrlfWnsS7ac.mkv", "clip": [1046, 1079], "conversations": [{"from": "human", "timestamps": 1076.0, "value": "What actions did the person at bbox [0.538, 0.008, 1.0, 0.965] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.538, 0.008, 1.0, 0.965]: stand, listen to (a person), watch (a person).", "From 1073.0s to 1075.0s, [0.589, 0.032, 1.0, 0.991]->[0.549, 0.02, 1.0, 0.983]->[0.529, 0.016, 1.0, 0.973]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1079.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.03, 0.136, 0.941]: listen to (a person), stand, watch (a person); [0.388, 0.071, 0.997, 0.99]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "CrlfWnsS7ac.mkv", "clip": [1045, 1133], "conversations": [{"from": "human", "timestamps": 1075.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.01, 0.141, 0.587, 0.983]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.529, 0.016, 1.0, 0.973]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1133.0, "value": "What actions did the person at bbox [0.0, 0.021, 0.402, 0.968] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.021, 0.402, 0.968]: walk, listen to (a person).", "From 1129.0s to 1132.0s, [0.002, 0.094, 0.372, 0.989]->[0.0, 0.066, 0.367, 0.988]->[0.0, 0.0, 0.271, 0.978]->[0.001, 0.075, 0.403, 0.98]: walk, listen to (a person).", "At 1128.0s, [0.025, 0.122, 0.425, 0.983]: walk, talk to (e.g., self, a person, a group).", "From 1125.0s to 1127.0s, [0.041, 0.086, 0.5, 0.976]->[0.007, 0.066, 0.426, 0.987]->[0.0, 0.096, 0.397, 0.979]: walk, listen to (a person).", "At 1124.0s, [0.027, 0.104, 0.462, 1.0]: walk, carry/hold (an object), listen to (a person).", "At 1123.0s, [0.036, 0.109, 0.453, 0.973]: walk, listen to (a person).", "From 1121.0s to 1122.0s, [0.259, 0.176, 0.589, 0.984]->[0.228, 0.16, 0.573, 0.993]: walk, listen to (a person), talk to (e.g., self, a person, a group).", "At 1120.0s, [0.293, 0.164, 0.603, 1.0]: walk, listen to (a person).", "At 1119.0s, [0.344, 0.171, 0.662, 0.988]: walk, listen to (a person), watch (a person).", "From 1113.0s to 1118.0s, [0.307, 0.183, 0.618, 0.998]->[0.334, 0.187, 0.642, 0.999]->[0.295, 0.191, 0.611, 0.988]->[0.309, 0.191, 0.627, 1.0]->[0.343, 0.191, 0.65, 0.991]->[0.328, 0.165, 0.679, 0.986]: walk, listen to (a person).", "At 1112.0s, [0.286, 0.169, 0.603, 0.994]: walk, carry/hold (an object), listen to (a person).", "At 1111.0s, [0.0, 0.041, 
0.344, 0.99]: walk, listen to (a person), watch (a person).", "At 1110.0s, [0.011, 0.066, 0.466, 1.0]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1109.0s, [0.0, 0.016, 0.431, 1.0]: walk, listen to (a person), watch (a person).", "At 1108.0s, [0.099, 0.048, 0.508, 0.989]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1107.0s, [0.038, 0.024, 0.533, 0.982]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1106.0s, [0.056, 0.064, 0.466, 0.971]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1102.0s to 1105.0s, [0.012, 0.087, 0.434, 0.996]->[0.057, 0.101, 0.506, 0.985]->[0.0, 0.074, 0.393, 0.988]->[0.006, 0.066, 0.418, 0.986]: walk, carry/hold (an object), listen to (a person), watch (a person).", "From 1099.0s to 1101.0s, [0.008, 0.081, 0.38, 0.983]->[0.0, 0.061, 0.358, 0.999]->[0.06, 0.079, 0.45, 0.994]: walk, carry/hold (an object), listen to (a person).", "From 1092.0s to 1098.0s, [0.059, 0.053, 0.415, 1.0]->[0.055, 0.08, 0.443, 0.985]->[0.062, 0.082, 0.445, 0.992]->[0.117, 0.115, 0.476, 0.991]->[0.055, 0.108, 0.432, 0.988]->[0.044, 0.118, 0.398, 0.991]->[0.0, 0.09, 0.372, 0.982]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1091.0s, [0.121, 0.076, 0.45, 0.991]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1090.0s, [0.118, 0.101, 0.421, 0.962]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1089.0s, [0.088, 0.116, 0.427, 0.989]: walk, carry/hold (an object), listen to (a person), watch (a person).", "From 1086.0s to 1088.0s, [0.236, 0.159, 0.455, 1.0]->[0.2, 0.123, 0.451, 0.996]->[0.173, 0.131, 0.458, 1.0]: walk, carry/hold (an object), listen to (a person).", "At 1085.0s, [0.273, 0.163, 0.47, 0.99]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1084.0s, [0.368, 0.186, 0.553, 0.978]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1082.0s to 1083.0s, [0.0, 0.165, 0.622, 0.984]->[0.0, 0.152, 0.602, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1077.0s to 1081.0s, [0.0, 0.065, 0.142, 0.931]->[0.0, 0.034, 0.139, 0.957]->[0.0, 0.03, 0.136, 0.941]->[0.035, 0.152, 0.583, 0.981]->[0.02, 0.137, 0.595, 0.98]: stand, listen to (a person), watch (a person).", "At 1076.0s, [0.014, 0.156, 0.587, 0.985]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1074.0s to 1075.0s, [0.018, 0.161, 0.595, 0.985]->[0.01, 0.141, 0.587, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1073.0s, [0.02, 0.176, 0.599, 0.98]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1070.0s to 1072.0s, [0.094, 0.089, 0.368, 0.991]->[0.096, 0.087, 0.349, 0.989]->[0.171, 0.097, 0.454, 0.982]: stand, listen to (a person), watch (a person).", "From 1068.0s to 1069.0s, [0.105, 0.066, 0.361, 0.96]->[0.09, 0.11, 0.333, 0.978]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1067.0s, [0.084, 0.086, 0.341, 0.996]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1064.0s to 1066.0s, [0.008, 0.037, 0.254, 1.0]->[0.078, 0.076, 0.365, 1.0]->[0.081, 0.1, 0.339, 0.99]: stand, listen to (a person), watch (a person)."]}]}, 
{"video": "CrlfWnsS7ac.mkv", "clip": [1113, 1170], "conversations": [{"from": "human", "timestamps": 1143.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.005, 0.211, 0.42, 0.973]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.538, 0.019, 1.0, 0.958]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1170.0, "value": "For the person at bbox [0.373, 0.178, 0.545, 0.997], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.373, 0.178, 0.545, 0.997]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1169.0s, [0.227, 0.195, 0.476, 0.977]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1168.0s, [0.255, 0.218, 0.521, 0.994]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 1165.0s to 1167.0s, [0.597, 0.158, 0.781, 0.985]->[0.662, 0.147, 0.874, 0.977]->[0.589, 0.205, 0.869, 0.995]: walk.", "At 1164.0s, [0.543, 0.177, 0.718, 0.988]: walk, carry/hold (an object).", "At 1163.0s, [0.408, 0.16, 0.563, 0.999]: walk."]}]}, {"video": "CrlfWnsS7ac.mkv", "clip": [1198, 1237], "conversations": [{"from": "human", "timestamps": 1228.0, "value": "What actions did the person at bbox [0.182, 0.275, 0.506, 0.995] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.182, 0.275, 0.506, 0.995]: stand, talk to (e.g., self, a person, a group).", "At 1227.0s, [0.179, 0.269, 0.512, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1226.0s, [0.176, 0.284, 0.506, 0.993]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1237.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.035, 0.587, 0.253, 0.991]: carry/hold (an object), listen to (a person), walk; [0.623, 0.104, 1.0, 0.972]: watch (a person), talk to (e.g., self, a person, a group), walk."}]}, {"video": "CrlfWnsS7ac.mkv", "clip": [1276, 1312], "conversations": [{"from": "human", "timestamps": 1306.0, "value": "Describe the actions of the person at bbox [0.54, 0.05, 0.886, 0.99] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.54, 0.05, 0.886, 0.99]: stand, carry/hold (an object), listen to (a person).", "From 1304.0s to 1305.0s, [0.534, 0.061, 0.878, 0.965]->[0.535, 0.051, 0.823, 0.986]: stand, carry/hold (an object), listen to (a person).", "At 1303.0s, [0.606, 0.077, 1.0, 0.982]: stand, pull (an object), listen to (a person).", "From 1301.0s to 1302.0s, [0.219, 0.099, 0.393, 0.777]->[0.341, 0.07, 0.651, 0.956]: walk, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1312.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.247, 0.05, 0.76, 1.0]: stand."}]}, {"video": "CrlfWnsS7ac.mkv", "clip": [1255, 1351], "conversations": [{"from": "human", "timestamps": 1285.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.082, 0.355, 0.554, 0.995]: carry/hold (an object), sit, watch (a person), listen to (a person); [0.664, 0.052, 0.913, 0.994]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1351.0, "value": 
"Describe the actions of the person at bbox [0.695, 0.215, 1.0, 0.997] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.695, 0.215, 1.0, 0.997]: stand, listen to (a person), watch (a person).", "From 1349.0s to 1350.0s, [0.677, 0.178, 0.955, 0.988]->[0.705, 0.21, 1.0, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1348.0s, [0.358, 0.11, 0.834, 1.0]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1347.0s, [0.041, 0.09, 0.418, 0.975]: walk.", "At 1346.0s, [0.019, 0.05, 0.376, 0.989]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1345.0s, [0.115, 0.183, 0.455, 0.982]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1341.0s to 1344.0s, [0.492, 0.131, 0.739, 0.815]->[0.469, 0.103, 0.728, 0.747]->[0.347, 0.118, 0.624, 0.994]->[0.221, 0.094, 0.657, 0.996]: walk, carry/hold (an object).", "At 1340.0s, [0.336, 0.127, 0.583, 0.715]: carry/hold (an object), get up."]}]}, {"video": "CrlfWnsS7ac.mkv", "clip": [1291, 1344], "conversations": [{"from": "human", "timestamps": 1321.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.288, 0.117, 0.514, 0.995]: carry/hold (an object), talk to (e.g., self, a person, a group), walk; [0.381, 0.146, 0.734, 0.988]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1344.0, "value": "What actions is the person in bbox [0.221, 0.094, 0.657, 0.996] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.221, 0.094, 0.657, 0.996]: walk, carry/hold (an object).", "From 1341.0s to 1343.0s, [0.492, 0.131, 0.739, 0.815]->[0.469, 0.103, 0.728, 0.747]->[0.347, 0.118, 0.624, 0.994]: walk, carry/hold (an object).", "At 1340.0s, [0.336, 0.127, 0.583, 0.715]: carry/hold (an object), get up."]}]}, {"video": "CrlfWnsS7ac.mkv", "clip": [1376, 1422], "conversations": [{"from": "human", "timestamps": 1406.0, "value": "What actions did the person at bbox [0.659, 0.062, 0.913, 0.995] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.659, 0.062, 0.913, 0.995]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1405.0s, [0.628, 0.056, 0.904, 0.994]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1399.0s to 1404.0s, [0.52, 0.122, 0.805, 0.989]->[0.613, 0.053, 0.913, 0.985]->[0.63, 0.089, 0.935, 1.0]->[0.633, 0.093, 0.945, 1.0]->[0.638, 0.086, 0.939, 1.0]->[0.635, 0.082, 0.94, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1397.0s to 1398.0s, [0.733, 0.048, 1.0, 0.974]->[0.583, 0.065, 0.885, 0.985]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1396.0s, [0.792, 0.0, 0.999, 0.974]: walk."]}, {"from": "human", "timestamps": 1422.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.092, 0.041, 0.461, 0.961]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.461, 0.126, 0.804, 0.947]: carry/hold (an object), stand, listen to (a person)."}]}, {"video": "CrlfWnsS7ac.mkv", "clip": [1360, 1461], "conversations": [{"from": "human", "timestamps": 1390.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.11, 0.105, 0.64, 0.981]: carry/hold (an object), stand, watch (a person); [0.401, 0.04, 0.985, 0.961]: listen to (a person), 
stand, watch (a person)."}, {"from": "human", "timestamps": 1461.0, "value": "What actions is the person in bbox [0.001, 0.016, 0.435, 0.993] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.001, 0.016, 0.435, 0.993]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1456.0s to 1460.0s, [0.031, 0.026, 0.426, 0.978]->[0.0, 0.049, 0.43, 0.978]->[0.0, 0.05, 0.425, 0.986]->[0.021, 0.053, 0.403, 0.979]->[0.0, 0.042, 0.428, 0.981]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1455.0s, [0.0, 0.037, 0.45, 0.988]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1454.0s, [0.0, 0.044, 0.438, 0.988]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1452.0s to 1453.0s, [0.0, 0.0, 0.342, 0.989]->[0.0, 0.009, 0.361, 0.986]: stand, listen to (a person), watch (a person).", "At 1451.0s, [0.0, 0.003, 0.344, 0.991]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1450.0s, [0.0, 0.0, 0.332, 0.987]: stand, listen to (a person), watch (a person).", "At 1449.0s, [0.0, 0.015, 0.356, 0.989]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1448.0s, [0.0, 0.002, 0.358, 0.979]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1447.0s, [0.0, 0.0, 0.371, 0.993]: stand, talk to (e.g., self, a person, a group).", "From 1445.0s to 1446.0s, [0.0, 0.056, 0.412, 0.975]->[0.0, 0.046, 0.401, 0.985]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1443.0s to 1444.0s, [0.01, 0.032, 0.415, 0.978]->[0.009, 0.033, 0.403, 0.98]: stand, drink, watch (a person).", "From 1439.0s to 1442.0s, [0.037, 0.102, 0.473, 0.968]->[0.109, 0.059, 0.494, 1.0]->[0.065, 0.035, 0.447, 0.981]->[0.054, 0.072, 0.46, 0.987]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1438.0s, [0.001, 0.035, 0.418, 0.987]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1437.0s, [0.001, 0.049, 0.44, 0.975]: stand, listen to (a person), take (an object) from (a person), talk to (e.g., self, a person, a group).", "From 1435.0s to 1436.0s, [0.006, 0.0, 0.398, 0.988]->[0.002, 0.025, 0.45, 0.969]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1434.0s, [0.003, 0.033, 0.453, 0.98]: stand, talk to (e.g., self, a person, a group).", "At 1433.0s, [0.001, 0.014, 0.41, 0.971]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1430.0s to 1432.0s, [0.032, 0.044, 0.431, 0.978]->[0.01, 0.043, 0.397, 0.971]->[0.0, 0.019, 0.394, 0.965]: stand, talk to (e.g., self, a person, a group).", "At 1429.0s, [0.043, 0.06, 0.438, 0.967]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1428.0s, [0.043, 0.077, 0.455, 0.97]: stand, talk to (e.g., self, a person, a group).", "From 1426.0s to 1427.0s, [0.081, 0.018, 0.439, 0.966]->[0.053, 0.019, 0.442, 0.965]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1425.0s, [0.085, 0.057, 0.476, 0.956]: stand, talk to (e.g., self, a person, a group).", "At 1424.0s, [0.082, 0.009, 0.455, 0.978]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1423.0s, [0.082, 0.008, 0.469, 0.983]: stand, talk to (e.g., self, a person, a group).", "From 1421.0s to 1422.0s, [0.102, 0.029, 
0.463, 0.966]->[0.092, 0.041, 0.461, 0.961]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1420.0s, [0.111, 0.028, 0.451, 0.992]: stand, talk to (e.g., self, a person, a group).", "At 1419.0s, [0.093, 0.01, 0.446, 0.98]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1417.0s to 1418.0s, [0.13, 0.022, 0.482, 0.976]->[0.099, 0.008, 0.467, 0.979]: stand, talk to (e.g., self, a person, a group).", "From 1415.0s to 1416.0s, [0.172, 0.083, 0.508, 0.966]->[0.219, 0.067, 0.477, 0.972]: walk, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "CrlfWnsS7ac.mkv", "clip": [1422, 1497], "conversations": [{"from": "human", "timestamps": 1452.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.0, 0.342, 0.989]: listen to (a person), stand, watch (a person); [0.58, 0.213, 0.962, 0.999]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1497.0, "value": "What actions is the person in bbox [0.0, 0.045, 0.326, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.045, 0.326, 1.0]: sit, listen to (a person), watch (a person).", "At 1496.0s, [0.0, 0.029, 0.342, 1.0]: sit, listen to (a person), watch (a person)."]}]}, {"video": "CrlfWnsS7ac.mkv", "clip": [1479, 1515], "conversations": [{"from": "human", "timestamps": 1509.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.167, 0.071, 0.927, 0.976]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1515.0, "value": "What actions did the person at bbox [0.166, 0.039, 0.651, 0.969] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.166, 0.039, 0.651, 0.969]: sit, talk to (e.g., self, a person, a group).", "From 1513.0s to 1514.0s, [0.161, 0.046, 0.651, 0.981]->[0.16, 0.033, 0.662, 0.977]: sit, talk to (e.g., self, a person, a group).", "From 1511.0s to 1512.0s, [0.169, 0.044, 0.656, 0.97]->[0.157, 0.034, 0.662, 0.976]: sit."]}]}, {"video": "CrlfWnsS7ac.mkv", "clip": [1496, 1614], "conversations": [{"from": "human", "timestamps": 1526.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.196, 0.019, 0.41, 0.848]: carry/hold (an object), watch (a person), talk to (e.g., self, a person, a group), walk; [0.613, 0.379, 0.915, 0.993]: listen to (a person), sit."}, {"from": "human", "timestamps": 1614.0, "value": "What actions did the person at bbox [0.315, 0.102, 0.88, 0.988] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.315, 0.102, 0.88, 0.988]: sit, talk to (e.g., self, a person, a group).", "From 1612.0s to 1613.0s, [0.349, 0.084, 0.865, 0.988]->[0.324, 0.086, 0.931, 0.985]: sit, talk to (e.g., self, a person, a group).", "At 1611.0s, [0.337, 0.09, 0.911, 0.991]: sit, listen to (a person)."]}]}, {"video": "CrlfWnsS7ac.mkv", "clip": [1602, 1656], "conversations": [{"from": "human", "timestamps": 1632.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.342, 0.233, 0.391, 0.468]?"}, {"from": "gpt", "value": ["At Now, [0.342, 0.233, 0.391, 0.468]: stand.", "At 1631.0s, [0.391, 0.226, 0.442, 0.489]: stand.", "From 1629.0s to 1630.0s, [0.42, 0.239, 0.483, 
0.499]->[0.387, 0.235, 0.434, 0.493]: stand, throw, watch (a person).", "At 1628.0s, [0.431, 0.238, 0.491, 0.51]: stand, catch (an object), watch (a person).", "From 1626.0s to 1627.0s, [0.419, 0.235, 0.476, 0.517]->[0.42, 0.237, 0.484, 0.507]: stand, watch (a person).", "At 1625.0s, [0.428, 0.228, 0.478, 0.519]: stand, throw, watch (a person).", "At 1624.0s, [0.403, 0.226, 0.461, 0.507]: stand, catch (an object).", "At 1623.0s, [0.26, 0.065, 0.826, 0.98]: sit, drink.", "From 1618.0s to 1622.0s, [0.338, 0.068, 0.925, 0.987]->[0.348, 0.072, 0.888, 0.987]->[0.325, 0.078, 0.934, 0.987]->[0.313, 0.09, 0.896, 0.983]->[0.289, 0.084, 0.819, 0.982]: sit.", "At 1617.0s, [0.321, 0.123, 0.834, 0.982]: sit, watch (a person)."]}, {"from": "human", "timestamps": 1656.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.284, 0.355, 0.369, 0.829]: stand; [0.606, 0.25, 0.968, 0.979]: walk."}]}, {"video": "CrlfWnsS7ac.mkv", "clip": [1628, 1695], "conversations": [{"from": "human", "timestamps": 1658.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.347, 0.334, 0.425, 0.82]: read, stand; [0.446, 0.289, 0.517, 0.697]: walk; [0.577, 0.396, 0.612, 0.586]: walk; [0.607, 0.366, 0.648, 0.583]: walk; [0.618, 0.329, 0.813, 0.983]: walk."}, {"from": "human", "timestamps": 1695.0, "value": "Describe the actions of the person at bbox [0.0, 0.123, 0.283, 0.989] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.0, 0.123, 0.283, 0.989]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1693.0s to 1694.0s, [0.004, 0.077, 0.273, 0.971]->[0.0, 0.102, 0.312, 0.999]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "CrlfWnsS7ac.mkv", "clip": [1673, 1708], "conversations": [{"from": "human", "timestamps": 1703.0, "value": "What actions is the person in bbox [0.479, 0.0, 0.799, 0.976] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.479, 0.0, 0.799, 0.976]: stand, listen to (a person), watch (a person).", "From 1701.0s to 1702.0s, [0.49, 0.0, 0.76, 0.975]->[0.476, 0.0, 0.783, 0.969]: stand, listen to (a person), watch (a person).", "At 1700.0s, [0.495, 0.0, 0.768, 0.978]: stand, watch (a person).", "From 1697.0s to 1699.0s, [0.495, 0.025, 0.795, 0.967]->[0.494, 0.0, 0.784, 1.0]->[0.502, 0.001, 0.767, 0.976]: stand, listen to (a person), watch (a person).", "At 1696.0s, [0.536, 0.044, 0.829, 0.995]: stand, watch (a person).", "At 1695.0s, [0.531, 0.032, 0.824, 0.987]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1694.0s, [0.538, 0.034, 0.842, 0.991]: stand, watch (a person).", "At 1693.0s, [0.441, 0.0, 0.733, 0.967]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1692.0s, [0.463, 0.025, 0.774, 0.978]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1708.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.145, 0.27, 0.95]: listen to (a person), stand, watch (a person); [0.154, 0.152, 0.64, 0.989]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.474, 0.0, 0.789, 0.975]: listen to (a person), stand, watch (a person); [0.719, 0.268, 1.0, 0.967]: listen to (a person), stand, watch (a person)."}]}, {"video": 
"D-BJTU6NxZ8.mkv", "clip": [1006, 1118], "conversations": [{"from": "human", "timestamps": 1036.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.399, 0.181, 0.557, 0.842]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1118.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.428, 0.281, 0.716, 0.758]?"}, {"from": "gpt", "value": ["In the current frame, [0.428, 0.281, 0.716, 0.758]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1117.0s, [0.439, 0.317, 0.735, 0.838]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "D-BJTU6NxZ8.mkv", "clip": [1154, 1187], "conversations": [{"from": "human", "timestamps": 1184.0, "value": "What actions is the person in bbox [0.068, 0.051, 1.0, 0.974] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.068, 0.051, 1.0, 0.974]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1183.0s, [0.073, 0.035, 0.708, 0.959]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1182.0s, [0.119, 0.056, 1.0, 0.957]: stand, talk to (e.g., self, a person, a group).", "At 1181.0s, [0.119, 0.048, 1.0, 0.955]: stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1187.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.151, 0.414, 0.229, 0.564]: carry/hold (an object), sit, watch (a person), listen to (a person); [0.506, 0.311, 0.643, 0.686]: listen to (a person), stand; [0.79, 0.376, 0.94, 0.72]: carry/hold (an object), sit, watch (a person), listen to (a person)."}]}, {"video": "D-BJTU6NxZ8.mkv", "clip": [1132, 1258], "conversations": [{"from": "human", "timestamps": 1162.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.124, 0.1, 0.947, 0.947]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1258.0, "value": "What actions did the person at bbox [0.016, 0.12, 0.921, 0.991] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.016, 0.12, 0.921, 0.991]: stand.", "At 1257.0s, [0.068, 0.155, 0.825, 0.964]: stand, talk to (e.g., self, a person, a group).", "At 1256.0s, [0.051, 0.11, 0.83, 0.966]: sit, talk to (e.g., self, a person, a group).", "At 1255.0s, [0.117, 0.108, 0.785, 0.969]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "D-BJTU6NxZ8.mkv", "clip": [1171, 1237], "conversations": [{"from": "human", "timestamps": 1201.0, "value": "What actions is the person in bbox [0.063, 0.363, 0.189, 0.619] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.063, 0.363, 0.189, 0.619]: sit, listen to (a person).", "At 1200.0s, [0.064, 0.412, 0.214, 0.621]: sit, watch (a person).", "At 1199.0s, [0.064, 0.404, 0.225, 0.626]: sit, listen to (a person), watch (a person).", "At 1198.0s, [0.065, 0.396, 0.212, 0.622]: sit, watch (a person).", "At 1197.0s, [0.065, 0.37, 0.203, 0.625]: sit, listen to (a person), watch (a person).", "At 1196.0s, [0.064, 0.401, 0.214, 0.62]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1237.0, "value": "In the current frame, list each person's location and 
actions."}, {"from": "gpt", "value": "In the current frame, [0.061, 0.407, 0.194, 0.62]: listen to (a person), sit; [0.494, 0.342, 0.604, 0.642]: carry/hold (an object), sit, talk to (e.g., self, a person, a group); [0.764, 0.373, 0.925, 0.722]: carry/hold (an object), sit, listen to (a person)."}]}, {"video": "D-BJTU6NxZ8.mkv", "clip": [1274, 1305], "conversations": [{"from": "human", "timestamps": 1304.0, "value": "Describe the actions of the person at bbox [0.057, 0.044, 0.759, 0.981] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.057, 0.044, 0.759, 0.981]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1303.0s, [0.089, 0.058, 0.783, 0.994]: talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1305.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.082, 0.037, 0.764, 0.965]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "D-BJTU6NxZ8.mkv", "clip": [1252, 1356], "conversations": [{"from": "human", "timestamps": 1282.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.096, 0.06, 1.0, 0.979]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1356.0, "value": "Describe the actions of the person at bbox [0.052, 0.116, 0.628, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.052, 0.116, 0.628, 1.0]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1355.0s, [0.008, 0.272, 0.374, 0.956]: stand, listen to (a person), watch (a person)."]}]}, {"video": "D-BJTU6NxZ8.mkv", "clip": [1400, 1432], "conversations": [{"from": "human", "timestamps": 1430.0, "value": "Describe the actions of the person at bbox [0.022, 0.076, 0.887, 0.973] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.022, 0.076, 0.887, 0.973]: sit.", "At 1429.0s, [0.214, 0.047, 0.934, 1.0]: sit, talk to (e.g., self, a person, a group).", "At 1428.0s, [0.033, 0.097, 0.901, 0.953]: stand, talk to (e.g., self, a person, a group).", "At 1427.0s, [0.098, 0.115, 0.967, 0.962]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1432.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.096, 0.072, 0.75, 0.975]: sit."}]}, {"video": "D-BJTU6NxZ8.mkv", "clip": [1405, 1470], "conversations": [{"from": "human", "timestamps": 1435.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.067, 0.064, 0.926, 0.996]: listen to (a person), sit."}, {"from": "human", "timestamps": 1470.0, "value": "Describe the actions of the person at bbox [0.126, 0.079, 0.995, 0.999] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.126, 0.079, 0.995, 0.999]: sit, talk to (e.g., self, a person, a group).", "At 1469.0s, [0.14, 0.056, 0.923, 0.975]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "D8Vhxbho1fY.mp4", "clip": [1010, 1127], "conversations": [{"from": "human", "timestamps": 1040.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the 
current frame, [0.005, 0.307, 0.091, 0.872]: carry/hold (an object), stand, watch (a person); [0.162, 0.152, 0.559, 0.859]: watch (a person), talk to (e.g., self, a person, a group), walk; [0.526, 0.227, 0.979, 0.938]: listen to (a person), stand, touch (an object), watch (a person)."}, {"from": "human", "timestamps": 1127.0, "value": "Describe the actions of the person at bbox [0.168, 0.047, 0.857, 0.948] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.168, 0.047, 0.857, 0.948]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1126.0s, [0.174, 0.047, 0.859, 0.969]: stand, carry/hold (an object).", "At 1125.0s, [0.262, 0.016, 0.772, 0.971]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1124.0s, [0.182, 0.0, 0.933, 0.981]: stand.", "At 1123.0s, [0.176, 0.0, 0.96, 0.989]: stand, watch (a person)."]}]}, {"video": "D8Vhxbho1fY.mp4", "clip": [1069, 1125], "conversations": [{"from": "human", "timestamps": 1099.0, "value": "What actions is the person in bbox [0.077, 0.066, 0.602, 0.957] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.077, 0.066, 0.602, 0.957]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1098.0s, [0.055, 0.071, 0.626, 0.966]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1125.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.262, 0.016, 0.772, 0.971]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "D8Vhxbho1fY.mp4", "clip": [1206, 1244], "conversations": [{"from": "human", "timestamps": 1236.0, "value": "For the person at bbox [0.591, 0.111, 0.868, 0.978], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.591, 0.111, 0.868, 0.978]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1235.0s, [0.586, 0.124, 0.881, 0.981]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1234.0s, [0.585, 0.118, 0.899, 0.968]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1244.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.01, 0.037, 0.472, 0.96]: carry/hold (an object), watch (a person), talk to (e.g., self, a person, a group), walk."}]}, {"video": "D8Vhxbho1fY.mp4", "clip": [1407, 1490], "conversations": [{"from": "human", "timestamps": 1437.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.004, 0.366, 0.215, 0.975]: listen to (a person), stand, watch (a person); [0.241, 0.01, 0.904, 0.967]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1490.0, "value": "What actions is the person in bbox [0.229, 0.002, 0.901, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.229, 0.002, 0.901, 1.0]: stand.", "From 1488.0s to 1489.0s, [0.284, 0.0, 0.839, 0.968]->[0.156, 0.006, 0.802, 0.997]: stand."]}]}, {"video": "D8Vhxbho1fY.mp4", "clip": [1423, 1478], "conversations": [{"from": "human", 
"timestamps": 1453.0, "value": "What actions did the person at bbox [0.344, 0.325, 0.753, 0.922] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.344, 0.325, 0.753, 0.922]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1451.0s to 1452.0s, [0.429, 0.32, 0.707, 0.844]->[0.403, 0.311, 0.759, 0.852]: stand, talk to (e.g., self, a person, a group).", "At 1450.0s, [0.426, 0.375, 0.698, 0.937]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1478.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.057, 0.099, 0.61, 0.978]: listen to (a person), stand; [0.294, 0.041, 0.806, 0.983]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.887, 0.109, 0.996, 0.976]: walk."}]}, {"video": "D8Vhxbho1fY.mp4", "clip": [1505, 1539], "conversations": [{"from": "human", "timestamps": 1535.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.118, 0.071, 0.649, 0.968]?"}, {"from": "gpt", "value": ["At Now, [0.118, 0.071, 0.649, 0.968]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1534.0s, [0.185, 0.086, 0.687, 0.914]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1539.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.307, 0.048, 0.784, 0.971]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "D8Vhxbho1fY.mp4", "clip": [1592, 1665], "conversations": [{"from": "human", "timestamps": 1622.0, "value": "Describe the actions of the person at bbox [0.163, 0.052, 0.868, 0.972] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.163, 0.052, 0.868, 0.972]: bend/bow (at the waist), dress/put on clothing, talk to (e.g., self, a person, a group), watch (a person).", "At 1621.0s, [0.254, 0.154, 0.862, 0.949]: bend/bow (at the waist), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1665.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.363, 0.065, 0.645, 0.975]: carry/hold (an object), walk."}]}, {"video": "Db19rWN5BGo.mkv", "clip": [893, 971], "conversations": [{"from": "human", "timestamps": 923.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.725, 0.362, 0.99, 0.989]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 971.0, "value": "What actions is the person in bbox [0.007, 0.029, 0.565, 0.975] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.007, 0.029, 0.565, 0.975]: sit.", "At 970.0s, [0.0, 0.032, 0.626, 0.993]: sit, listen to (a person).", "From 968.0s to 969.0s, [0.007, 0.0, 0.551, 0.97]->[0.009, 0.032, 0.626, 0.976]: sit, listen to (a person), watch (a person)."]}]}, {"video": "Db19rWN5BGo.mkv", "clip": [955, 1017], "conversations": [{"from": "human", "timestamps": 985.0, "value": "What actions is the person in bbox [0.0, 0.014, 0.531, 0.982] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.014, 0.531, 0.982]: sit, watch 
(a person).", "From 982.0s to 984.0s, [0.0, 0.012, 0.569, 0.972]->[0.001, 0.104, 0.393, 0.99]->[0.013, 0.003, 0.649, 0.98]: sit, listen to (a person), watch (a person).", "At 981.0s, [0.005, 0.012, 0.588, 0.977]: sit."]}, {"from": "human", "timestamps": 1017.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.049, 0.068, 0.69, 0.99]: listen to (a person), sit."}]}, {"video": "Db19rWN5BGo.mkv", "clip": [995, 1116], "conversations": [{"from": "human", "timestamps": 1025.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.371, 0.105, 0.919, 0.985]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1116.0, "value": "Describe the actions of the person at bbox [0.257, 0.106, 0.465, 0.968] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.257, 0.106, 0.465, 0.968]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1115.0s, [0.251, 0.11, 0.466, 0.943]: sit, carry/hold (an object), smoke, listen to (a person), watch (a person).", "At 1114.0s, [0.251, 0.164, 0.474, 0.96]: sit, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "Db19rWN5BGo.mkv", "clip": [1056, 1138], "conversations": [{"from": "human", "timestamps": 1086.0, "value": "For the person at bbox [0.287, 0.067, 0.975, 0.997], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.287, 0.067, 0.975, 0.997]: sit, talk to (e.g., self, a person, a group).", "At 1085.0s, [0.305, 0.061, 0.972, 0.993]: sit, talk to (e.g., self, a person, a group).", "At 1084.0s, [0.24, 0.082, 0.975, 0.974]: sit.", "From 1082.0s to 1083.0s, [0.367, 0.115, 0.993, 0.995]->[0.248, 0.059, 0.973, 0.973]: sit, carry/hold (an object).", "At 1081.0s, [0.431, 0.062, 0.987, 0.998]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1080.0s, [0.325, 0.04, 0.995, 0.991]: sit.", "At 1079.0s, [0.435, 0.066, 0.96, 0.952]: sit, watch (a person).", "At 1078.0s, [0.289, 0.223, 0.522, 0.945]: sit, carry/hold (an object).", "At 1076.0s, [0.297, 0.231, 0.514, 0.95]: sit, listen to (a person), watch (a person).", "At 1075.0s, [0.287, 0.239, 0.522, 0.967]: sit."]}, {"from": "human", "timestamps": 1138.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.003, 0.014, 0.699, 0.988]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.465, 0.144, 0.978, 0.972]: listen to (a person), sit."}]}, {"video": "Db19rWN5BGo.mkv", "clip": [1131, 1178], "conversations": [{"from": "human", "timestamps": 1161.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.251, 0.316, 0.42, 0.846]: carry/hold (an object), listen to (a person), walk; [0.281, 0.06, 0.679, 0.997]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1178.0, "value": "What actions is the person in bbox [0.45, 0.09, 0.652, 0.979] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.45, 0.09, 0.652, 0.979]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1175.0s to 1177.0s, [0.436, 0.114, 0.622, 0.984]->[0.449, 0.095, 0.64, 
0.976]->[0.457, 0.099, 0.649, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1174.0s, [0.493, 0.124, 0.696, 0.989]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1173.0s, [0.538, 0.152, 0.696, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Db19rWN5BGo.mkv", "clip": [1149, 1257], "conversations": [{"from": "human", "timestamps": 1179.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.307, 0.125, 0.478, 0.977]: listen to (a person), stand, watch (a person); [0.447, 0.078, 0.661, 0.985]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1257.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.358, 0.259, 0.44, 0.658]?"}, {"from": "gpt", "value": ["In the current frame, [0.358, 0.259, 0.44, 0.658]: stand, listen to (a person), watch (a person).", "At 1256.0s, [0.337, 0.259, 0.418, 0.603]: stand, listen to (a person), watch (a person)."]}]}, {"video": "Db19rWN5BGo.mkv", "clip": [1200, 1245], "conversations": [{"from": "human", "timestamps": 1230.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.38, 0.108, 0.576, 0.994]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1245.0, "value": "Describe the actions of the person at bbox [0.392, 0.153, 0.573, 0.634] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.392, 0.153, 0.573, 0.634]: stand, listen to (a person), watch (a person).", "At 1244.0s, [0.431, 0.147, 0.574, 0.625]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1243.0s, [0.523, 0.044, 1.0, 0.987]: stand."]}]}, {"video": "Db19rWN5BGo.mkv", "clip": [1231, 1287], "conversations": [{"from": "human", "timestamps": 1261.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.01, 0.042, 0.601, 0.99]: listen to (a person), stand; [0.393, 0.1, 0.995, 0.979]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1287.0, "value": "For the person at bbox [0.05, 0.114, 0.752, 0.979], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.05, 0.114, 0.752, 0.979]: stand, talk to (e.g., self, a person, a group).", "At 1286.0s, [0.39, 0.187, 0.718, 0.995]: stand.", "At 1285.0s, [0.49, 0.228, 0.855, 0.996]: stand, watch (a person)."]}]}, {"video": "Db19rWN5BGo.mkv", "clip": [1271, 1330], "conversations": [{"from": "human", "timestamps": 1301.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.443, 0.387, 0.495, 0.623]: carry/hold (an object); [0.544, 0.378, 0.603, 0.616]: dance."}, {"from": "human", "timestamps": 1330.0, "value": "Describe the actions of the person at bbox [0.574, 0.265, 0.822, 0.898] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.574, 0.265, 0.822, 0.898]: sit, carry/hold (an object).", "At 1329.0s, [0.453, 0.257, 0.695, 0.877]: sit, carry/hold (an object)."]}]}, {"video": "Db19rWN5BGo.mkv", "clip": [1328, 1376], "conversations": [{"from": "human", "timestamps": 1358.0, "value": 
"What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.246, 0.082, 0.545, 0.992]: listen to (a person), stand, watch (a person); [0.49, 0.017, 0.82, 1.0]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1376.0, "value": "For the person at bbox [0.486, 0.032, 0.812, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.486, 0.032, 0.812, 1.0]: stand, watch (a person).", "At 1375.0s, [0.436, 0.05, 0.814, 1.0]: stand, watch (a person).", "From 1373.0s to 1374.0s, [0.482, 0.05, 0.81, 1.0]->[0.492, 0.05, 0.811, 1.0]: stand, listen to (a person), watch (a person).", "At 1372.0s, [0.279, 0.064, 0.928, 0.991]: stand, talk to (e.g., self, a person, a group).", "At 1371.0s, [0.233, 0.078, 0.833, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1370.0s, [0.432, 0.028, 0.899, 0.988]: stand, listen to (a person), watch (a person).", "From 1364.0s to 1369.0s, [0.392, 0.057, 0.907, 0.993]->[0.417, 0.034, 0.899, 0.992]->[0.426, 0.029, 0.895, 1.0]->[0.455, 0.021, 0.895, 0.999]->[0.43, 0.027, 0.896, 1.0]->[0.411, 0.037, 0.888, 0.995]: stand, listen to (a person)."]}]}, {"video": "Db19rWN5BGo.mkv", "clip": [1359, 1406], "conversations": [{"from": "human", "timestamps": 1389.0, "value": "Describe the actions of the person at bbox [0.173, 0.057, 0.526, 0.94] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.173, 0.057, 0.526, 0.94]: dance.", "At 1388.0s, [0.221, 0.007, 0.424, 0.99]: dance."]}, {"from": "human", "timestamps": 1406.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.319, 0.127, 0.543, 0.912]: watch (a person), walk; [0.488, 0.303, 0.659, 0.905]: watch (a person), sit, sing to (e.g., self, a person, a group)."}]}, {"video": "Db19rWN5BGo.mkv", "clip": [1352, 1445], "conversations": [{"from": "human", "timestamps": 1382.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.439, 0.375, 0.477, 0.578]: carry/hold (an object), dance; [0.525, 0.383, 0.57, 0.581]: carry/hold (an object), dance."}, {"from": "human", "timestamps": 1445.0, "value": "What actions did the person at bbox [0.186, 0.039, 0.436, 0.648] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.186, 0.039, 0.436, 0.648]: bend/bow (at the waist), carry/hold (an object).", "At 1444.0s, [0.075, 0.19, 0.324, 0.749]: bend/bow (at the waist), carry/hold (an object), give/serve (an object) to (a person), talk to (e.g., self, a person, a group).", "At 1443.0s, [0.123, 0.216, 0.353, 0.799]: bend/bow (at the waist), carry/hold (an object)."]}]}, {"video": "Db19rWN5BGo.mkv", "clip": [1563, 1605], "conversations": [{"from": "human", "timestamps": 1593.0, "value": "What actions is the person in bbox [0.563, 0.117, 0.899, 0.994] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.563, 0.117, 0.899, 0.994]: run/jog.", "From 1591.0s to 1592.0s, [0.534, 0.229, 0.75, 0.986]->[0.361, 0.223, 0.755, 0.988]: run/jog."]}, {"from": "human", "timestamps": 1605.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In 
the current frame, [0.368, 0.261, 0.635, 0.989]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.503, 0.018, 1.0, 0.982]: listen to (a person), stand, watch (a person)."}]}, {"video": "Db19rWN5BGo.mkv", "clip": [1606, 1697], "conversations": [{"from": "human", "timestamps": 1636.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.694, 0.273, 0.904, 0.866]: watch (a person), stand."}, {"from": "human", "timestamps": 1697.0, "value": "For the person at bbox [0.665, 0.194, 0.833, 0.898], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.665, 0.194, 0.833, 0.898]: stand, listen to (a person), watch (a person).", "At 1696.0s, [0.666, 0.192, 0.839, 0.907]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Db19rWN5BGo.mkv", "clip": [1654, 1693], "conversations": [{"from": "human", "timestamps": 1684.0, "value": "Describe the actions of the person at bbox [0.413, 0.006, 0.996, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.413, 0.006, 0.996, 1.0]: stand.", "From 1681.0s to 1683.0s, [0.325, 0.0, 0.99, 0.987]->[0.273, 0.012, 0.991, 1.0]->[0.363, 0.004, 0.971, 1.0]: stand."]}, {"from": "human", "timestamps": 1693.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.229, 0.037, 0.85, 0.974]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "E2jecoyAx1M.mkv", "clip": [886, 955], "conversations": [{"from": "human", "timestamps": 916.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.249, 0.005, 0.912, 0.986]: read, stand, listen to (a person)."}, {"from": "human", "timestamps": 955.0, "value": "What actions did the person at bbox [0.702, 0.0, 1.0, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.702, 0.0, 1.0, 1.0]: stand, listen to (a person), watch (a person).", "At 954.0s, [0.717, 0.0, 0.997, 0.993]: stand, watch (a person)."]}]}, {"video": "E2jecoyAx1M.mkv", "clip": [889, 997], "conversations": [{"from": "human", "timestamps": 919.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.002, 0.465, 0.978]: listen to (a person), stand, watch (a person); [0.092, 0.004, 0.841, 0.93]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 997.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.004, 0.225, 0.96]?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.004, 0.225, 0.96]: stand, listen to (a person), watch (a person).", "At 996.0s, [0.0, 0.014, 0.224, 0.994]: stand, listen to (a person), watch (a person)."]}]}, {"video": "E2jecoyAx1M.mkv", "clip": [934, 970], "conversations": [{"from": "human", "timestamps": 964.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.058, 0.0, 0.842, 0.975]: listen to (a person), stand, talk to (e.g., self, a person, a group); [0.669, 0.003, 1.0, 0.98]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 970.0, "value": "For the person at bbox [0.0, 
0.011, 0.716, 0.993], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.011, 0.716, 0.993]: stand, watch (a person).", "From 968.0s to 969.0s, [0.0, 0.01, 0.734, 0.965]->[0.0, 0.0, 0.689, 0.987]: stand, watch (a person)."]}]}, {"video": "E2jecoyAx1M.mkv", "clip": [1035, 1069], "conversations": [{"from": "human", "timestamps": 1065.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.296, 0.474, 0.37, 0.658]?"}, {"from": "gpt", "value": ["At Now, [0.296, 0.474, 0.37, 0.658]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1063.0s to 1064.0s, [0.3, 0.502, 0.358, 0.648]->[0.296, 0.472, 0.368, 0.647]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1061.0s to 1062.0s, [0.263, 0.56, 0.322, 0.662]->[0.288, 0.525, 0.352, 0.652]: walk, carry/hold (an object), listen to (a person)."]}, {"from": "human", "timestamps": 1069.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.236, 0.472, 0.308, 0.659]: carry/hold (an object), stand, listen to (a person); [0.29, 0.488, 0.362, 0.687]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "E2jecoyAx1M.mkv", "clip": [1132, 1191], "conversations": [{"from": "human", "timestamps": 1162.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.09, 0.0, 0.747, 0.981]: listen to (a person), stand."}, {"from": "human", "timestamps": 1191.0, "value": "What actions is the person in bbox [0.106, 0.027, 0.56, 0.995] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.106, 0.027, 0.56, 0.995]: stand, listen to (a person).", "From 1184.0s to 1190.0s, [0.073, 0.026, 0.61, 0.965]->[0.052, 0.019, 0.512, 0.972]->[0.064, 0.016, 0.687, 0.98]->[0.067, 0.03, 0.696, 0.963]->[0.081, 0.011, 0.635, 0.974]->[0.1, 0.04, 0.653, 0.973]->[0.092, 0.022, 0.7, 0.97]: stand, listen to (a person).", "From 1180.0s to 1183.0s, [0.143, 0.183, 0.404, 0.985]->[0.13, 0.178, 0.396, 0.979]->[0.081, 0.004, 0.735, 0.959]->[0.078, 0.011, 0.62, 0.969]: stand, carry/hold (an object), listen to (a person).", "From 1178.0s to 1179.0s, [0.148, 0.163, 0.411, 0.988]->[0.145, 0.163, 0.416, 0.997]: stand, carry/hold (an object), watch (a person).", "From 1176.0s to 1177.0s, [0.283, 0.186, 0.437, 0.986]->[0.291, 0.188, 0.438, 0.971]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1172.0s to 1175.0s, [0.279, 0.219, 0.418, 0.99]->[0.282, 0.197, 0.411, 0.973]->[0.28, 0.179, 0.422, 0.977]->[0.271, 0.17, 0.427, 0.98]: stand, carry/hold (an object), listen to (a person).", "At 1171.0s, [0.27, 0.209, 0.41, 0.984]: stand, carry/hold (an object).", "At 1170.0s, [0.062, 0.012, 0.685, 1.0]: stand, carry/hold (an object), listen to (a person)."]}]}, {"video": "E2jecoyAx1M.mkv", "clip": [1144, 1226], "conversations": [{"from": "human", "timestamps": 1174.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.28, 0.179, 0.422, 0.977]: carry/hold (an object), stand, listen to (a person); [0.542, 0.352, 0.825, 0.946]: watch (a person), touch (an object), get up, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1226.0, "value": "Can you tell me the 
current and past actions of the person located in bbox [0.02, 0.0, 0.767, 1.0]?"}, {"from": "gpt", "value": ["In the current frame, [0.02, 0.0, 0.767, 1.0]: bend/bow (at the waist), listen to (a person), watch (a person).", "At 1225.0s, [0.016, 0.0, 0.744, 0.998]: bend/bow (at the waist), listen to (a person), watch (a person)."]}]}, {"video": "E2jecoyAx1M.mkv", "clip": [1207, 1248], "conversations": [{"from": "human", "timestamps": 1237.0, "value": "What actions did the person at bbox [0.005, 0.0, 0.686, 0.999] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.005, 0.0, 0.686, 0.999]: bend/bow (at the waist), watch (a person).", "At 1236.0s, [0.007, 0.0, 0.732, 1.0]: bend/bow (at the waist), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1248.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.022, 0.0, 0.917, 0.989]: listen to (a person), sit."}]}, {"video": "E2jecoyAx1M.mkv", "clip": [1244, 1295], "conversations": [{"from": "human", "timestamps": 1274.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.006, 0.36, 0.582, 1.0]: walk."}, {"from": "human", "timestamps": 1295.0, "value": "Describe the actions of the person at bbox [0.282, 0.329, 0.443, 0.984] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.282, 0.329, 0.443, 0.984]: carry/hold (an object), fight/hit (a person), martial art.", "At 1294.0s, [0.234, 0.385, 0.429, 0.978]: bend/bow (at the waist), carry/hold (an object), fight/hit (a person)."]}]}, {"video": "E2jecoyAx1M.mkv", "clip": [1233, 1373], "conversations": [{"from": "human", "timestamps": 1263.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.0, 0.913, 0.988]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1373.0, "value": "What actions is the person in bbox [0.39, 0.044, 0.851, 0.998] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.39, 0.044, 0.851, 0.998]: sit.", "From 1370.0s to 1372.0s, [0.146, 0.056, 0.805, 0.985]->[0.179, 0.046, 0.852, 0.985]->[0.402, 0.058, 0.878, 0.988]: sit, talk to (e.g., self, a person, a group).", "From 1368.0s to 1369.0s, [0.012, 0.032, 0.785, 0.97]->[0.206, 0.044, 0.726, 0.974]: sit.", "From 1366.0s to 1367.0s, [0.364, 0.289, 0.626, 0.996]->[0.358, 0.492, 0.67, 1.0]: bend/bow (at the waist), listen to (a person).", "At 1365.0s, [0.393, 0.228, 0.57, 0.982]: stand, listen to (a person).", "From 1357.0s to 1364.0s, [0.425, 0.339, 0.556, 0.966]->[0.452, 0.337, 0.576, 0.97]->[0.445, 0.325, 0.573, 0.899]->[0.472, 0.305, 0.595, 0.979]->[0.442, 0.28, 0.59, 0.962]->[0.45, 0.258, 0.603, 0.985]->[0.411, 0.24, 0.572, 0.994]->[0.406, 0.204, 0.606, 0.856]: walk, listen to (a person).", "At 1356.0s, [0.426, 0.366, 0.536, 0.932]: walk, listen to (a person), talk to (e.g., self, a person, a group).", "From 1354.0s to 1355.0s, [0.404, 0.382, 0.514, 0.882]->[0.398, 0.362, 0.516, 0.911]: walk, listen to (a person), watch (a person)."]}]}, {"video": "E2jecoyAx1M.mkv", "clip": [1314, 1372], "conversations": [{"from": "human", "timestamps": 1344.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.338, 0.281, 0.942, 0.981]: 
carry/hold (an object), stand, fight/hit (a person)."}, {"from": "human", "timestamps": 1372.0, "value": "What actions did the person at bbox [0.402, 0.058, 0.878, 0.988] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.402, 0.058, 0.878, 0.988]: sit, talk to (e.g., self, a person, a group).", "From 1370.0s to 1371.0s, [0.146, 0.056, 0.805, 0.985]->[0.179, 0.046, 0.852, 0.985]: sit, talk to (e.g., self, a person, a group).", "From 1368.0s to 1369.0s, [0.012, 0.032, 0.785, 0.97]->[0.206, 0.044, 0.726, 0.974]: sit.", "From 1366.0s to 1367.0s, [0.364, 0.289, 0.626, 0.996]->[0.358, 0.492, 0.67, 1.0]: bend/bow (at the waist), listen to (a person).", "At 1365.0s, [0.393, 0.228, 0.57, 0.982]: stand, listen to (a person).", "From 1357.0s to 1364.0s, [0.425, 0.339, 0.556, 0.966]->[0.452, 0.337, 0.576, 0.97]->[0.445, 0.325, 0.573, 0.899]->[0.472, 0.305, 0.595, 0.979]->[0.442, 0.28, 0.59, 0.962]->[0.45, 0.258, 0.603, 0.985]->[0.411, 0.24, 0.572, 0.994]->[0.406, 0.204, 0.606, 0.856]: walk, listen to (a person).", "At 1356.0s, [0.426, 0.366, 0.536, 0.932]: walk, listen to (a person), talk to (e.g., self, a person, a group).", "From 1354.0s to 1355.0s, [0.404, 0.382, 0.514, 0.882]->[0.398, 0.362, 0.516, 0.911]: walk, listen to (a person), watch (a person)."]}]}, {"video": "E2jecoyAx1M.mkv", "clip": [1375, 1412], "conversations": [{"from": "human", "timestamps": 1405.0, "value": "What actions did the person at bbox [0.406, 0.026, 0.552, 0.324] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.406, 0.026, 0.552, 0.324]: stand, watch (a person).", "At 1404.0s, [0.399, 0.017, 0.517, 0.293]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1412.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.075, 0.054, 0.623, 1.0]: carry/hold (an object), stand, hug (a person); [0.339, 0.104, 0.89, 0.993]: stand, hug (a person)."}]}, {"video": "E2jecoyAx1M.mkv", "clip": [1499, 1535], "conversations": [{"from": "human", "timestamps": 1529.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.632, 0.574, 0.782, 0.79]: sit, play musical instrument."}, {"from": "human", "timestamps": 1535.0, "value": "For the person at bbox [0.107, 0.0, 0.99, 0.972], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.107, 0.0, 0.99, 0.972]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1534.0s, [0.243, 0.003, 0.975, 0.994]: sit, play musical instrument, talk to (e.g., self, a person, a group).", "At 1533.0s, [0.171, 0.37, 0.245, 0.679]: sit, play musical instrument, listen to (a person).", "At 1532.0s, [0.157, 0.385, 0.243, 0.699]: sit, play musical instrument, listen to (a person), watch (a person).", "From 1530.0s to 1531.0s, [0.129, 0.398, 0.232, 0.631]->[0.138, 0.392, 0.244, 0.678]: sit, play musical instrument, listen to (a person).", "From 1524.0s to 1529.0s, [0.41, 0.0, 1.0, 0.999]->[0.328, 0.001, 0.996, 1.0]->[0.621, 0.57, 0.781, 0.813]->[0.634, 0.569, 0.774, 0.799]->[0.648, 0.579, 0.776, 0.791]->[0.632, 0.574, 0.782, 0.79]: sit, play musical instrument.", "At 1523.0s, [0.536, 0.0, 1.0, 1.0]: stand."]}]}, {"video": "E2jecoyAx1M.mkv", "clip": [1525, 1574], "conversations": [{"from": "human", "timestamps": 1555.0, "value": "Where is each person positioned, and what actions are they performing in the current 
frame?"}, {"from": "gpt", "value": "In the current frame, [0.259, 0.352, 0.307, 0.472]: walk; [0.327, 0.355, 0.435, 0.769]: get up; [0.377, 0.316, 0.437, 0.522]: watch (a person), stand; [0.592, 0.25, 0.681, 0.832]: watch (a person), stand."}, {"from": "human", "timestamps": 1574.0, "value": "What actions did the person at bbox [0.346, 0.082, 0.907, 0.983] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.346, 0.082, 0.907, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1573.0s, [0.334, 0.081, 0.935, 0.98]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "E2jecoyAx1M.mkv", "clip": [1548, 1589], "conversations": [{"from": "human", "timestamps": 1578.0, "value": "What actions did the person at bbox [0.051, 0.039, 0.358, 0.992] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.051, 0.039, 0.358, 0.992]: stand, watch (a person).", "At 1577.0s, [0.053, 0.038, 0.363, 0.981]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1589.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.005, 0.02, 0.44, 1.0]: listen to (a person), stand, watch (a person); [0.373, 0.089, 0.923, 0.97]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "E2jecoyAx1M.mkv", "clip": [1650, 1723], "conversations": [{"from": "human", "timestamps": 1680.0, "value": "What actions did the person at bbox [0.097, 0.033, 0.67, 0.997] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.097, 0.033, 0.67, 0.997]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1679.0s, [0.156, 0.029, 0.655, 0.963]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1723.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.05, 0.064, 0.633, 0.989]: listen to (a person), stand, watch (a person)."}]}, {"video": "E2jecoyAx1M.mkv", "clip": [1740, 1774], "conversations": [{"from": "human", "timestamps": 1770.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.025, 0.063, 0.673, 0.978]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.891, 0.023, 1.0, 0.859]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1774.0, "value": "What actions did the person at bbox [0.032, 0.059, 0.608, 0.996] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.032, 0.059, 0.608, 0.996]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1768.0s to 1773.0s, [0.03, 0.056, 0.612, 1.0]->[0.038, 0.051, 0.668, 0.979]->[0.025, 0.063, 0.673, 0.978]->[0.033, 0.064, 0.651, 0.98]->[0.031, 0.054, 0.696, 0.99]->[0.029, 0.065, 0.613, 0.996]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "E7JcKooKVsM.mp4", "clip": [878, 950], "conversations": [{"from": "human", "timestamps": 908.0, "value": "Describe the actions of the person at bbox [0.364, 0.293, 0.62, 0.983] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.364, 0.293, 0.62, 0.983]: stand, listen to (a person), watch (a person).", "From 906.0s to 907.0s, [0.409, 0.305, 0.653, 0.979]->[0.417, 0.302, 0.659, 0.987]: stand, listen to (a person), watch (a person).", "At 905.0s, [0.41, 0.297, 0.628, 
0.977]: stand, listen to (a person).", "At 904.0s, [0.396, 0.29, 0.605, 0.981]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 950.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.302, 0.216, 0.668, 0.779]: touch (an object), talk to (e.g., self, a person, a group)."}]}, {"video": "E7JcKooKVsM.mp4", "clip": [908, 1010], "conversations": [{"from": "human", "timestamps": 938.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.048, 0.214, 0.365, 0.976]: stand, talk to (e.g., self, a person, a group); [0.351, 0.268, 0.62, 0.983]: listen to (a person), stand; [0.585, 0.151, 0.946, 1.0]: listen to (a person), stand."}, {"from": "human", "timestamps": 1010.0, "value": "For the person at bbox [0.0, 0.201, 0.551, 0.985], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.201, 0.551, 0.985]: sit, watch (e.g., TV).", "From 1005.0s to 1009.0s, [0.009, 0.262, 0.507, 0.996]->[0.0, 0.242, 0.522, 1.0]->[0.005, 0.241, 0.496, 0.992]->[0.0, 0.215, 0.535, 0.99]->[0.0, 0.22, 0.524, 0.987]: stand, watch (e.g., TV).", "From 1003.0s to 1004.0s, [0.0, 0.229, 0.533, 0.989]->[0.0, 0.241, 0.518, 1.0]: stand.", "At 1002.0s, [0.0, 0.223, 0.555, 0.991]: sit, stand, watch (e.g., TV).", "At 1001.0s, [0.0, 0.218, 0.5, 0.998]: stand, watch (e.g., TV)."]}]}, {"video": "E7JcKooKVsM.mp4", "clip": [937, 1011], "conversations": [{"from": "human", "timestamps": 967.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.049, 0.188, 0.365, 0.978]: stand; [0.348, 0.257, 0.597, 0.99]: stand; [0.611, 0.14, 0.956, 0.994]: stand."}, {"from": "human", "timestamps": 1011.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.219, 0.539, 0.99]?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.219, 0.539, 0.99]: sit, watch (e.g., TV), watch (a person).", "At 1010.0s, [0.0, 0.201, 0.551, 0.985]: sit, watch (e.g., TV).", "From 1005.0s to 1009.0s, [0.009, 0.262, 0.507, 0.996]->[0.0, 0.242, 0.522, 1.0]->[0.005, 0.241, 0.496, 0.992]->[0.0, 0.215, 0.535, 0.99]->[0.0, 0.22, 0.524, 0.987]: stand, watch (e.g., TV).", "From 1003.0s to 1004.0s, [0.0, 0.229, 0.533, 0.989]->[0.0, 0.241, 0.518, 1.0]: stand.", "At 1002.0s, [0.0, 0.223, 0.555, 0.991]: sit, stand, watch (e.g., TV).", "At 1001.0s, [0.0, 0.218, 0.5, 0.998]: stand, watch (e.g., TV)."]}]}, {"video": "E7JcKooKVsM.mp4", "clip": [1126, 1164], "conversations": [{"from": "human", "timestamps": 1156.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.002, 0.073, 0.862, 0.981]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1164.0, "value": "For the person at bbox [0.106, 0.1, 1.0, 0.983], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.106, 0.1, 1.0, 0.983]: stand, talk to (e.g., self, a person, a group).", "From 1162.0s to 1163.0s, [0.133, 0.078, 1.0, 0.992]->[0.145, 0.09, 0.981, 0.977]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "E7JcKooKVsM.mp4", "clip": [1151, 1231], "conversations": [{"from": "human", "timestamps": 1181.0, "value": "For the current frame, show each individual's position and actions."}, 
{"from": "gpt", "value": "In the current frame, [0.074, 0.228, 0.351, 0.996]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.333, 0.279, 0.588, 0.992]: listen to (a person), stand, watch (a person); [0.621, 0.178, 0.951, 0.991]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1231.0, "value": "Describe the actions of the person at bbox [0.406, 0.295, 0.673, 0.988] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.406, 0.295, 0.673, 0.988]: stand, listen to (a person), watch (a person).", "From 1229.0s to 1230.0s, [0.4, 0.294, 0.673, 0.994]->[0.4, 0.292, 0.671, 0.998]: stand, listen to (a person), watch (a person).", "At 1228.0s, [0.398, 0.297, 0.658, 0.997]: stand, listen to (a person).", "From 1226.0s to 1227.0s, [0.401, 0.294, 0.66, 0.995]->[0.403, 0.3, 0.672, 0.992]: stand, listen to (a person), watch (a person).", "At 1225.0s, [0.39, 0.285, 0.655, 0.993]: stand, listen to (a person)."]}]}, {"video": "E7JcKooKVsM.mp4", "clip": [1206, 1238], "conversations": [{"from": "human", "timestamps": 1236.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.123, 0.063, 1.0, 0.971]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1238.0, "value": "What actions did the person at bbox [0.001, 0.048, 0.846, 0.993] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.001, 0.048, 0.846, 0.993]: stand, talk to (e.g., self, a person, a group).", "At 1237.0s, [0.0, 0.047, 0.84, 0.99]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "E7JcKooKVsM.mp4", "clip": [1232, 1290], "conversations": [{"from": "human", "timestamps": 1262.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.473, 0.0, 1.0, 0.984]?"}, {"from": "gpt", "value": ["In the current frame, [0.473, 0.0, 1.0, 0.984]: stand, listen to (a person), watch (a person).", "From 1257.0s to 1261.0s, [0.522, 0.08, 0.997, 0.979]->[0.521, 0.063, 1.0, 0.973]->[0.518, 0.043, 1.0, 0.979]->[0.511, 0.003, 1.0, 0.985]->[0.502, 0.0, 1.0, 0.976]: stand, listen to (a person), watch (a person).", "At 1256.0s, [0.518, 0.079, 1.0, 0.974]: stand."]}, {"from": "human", "timestamps": 1290.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.038, 0.315, 0.388, 0.802]: ride (e.g., a bike, a car, a horse), sit, watch (a person), listen to (a person); [0.538, 0.32, 0.918, 0.78]: sit, drive (e.g., a car, a truck), talk to (e.g., self, a person, a group)."}]}, {"video": "E7JcKooKVsM.mp4", "clip": [1241, 1337], "conversations": [{"from": "human", "timestamps": 1271.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.007, 0.124, 0.602, 0.973]: stand, talk to (e.g., self, a person, a group); [0.463, 0.0, 1.0, 0.983]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1337.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.37, 0.221, 0.614, 0.982]?"}, {"from": "gpt", "value": ["At Now, [0.37, 0.221, 0.614, 0.982]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1333.0s to 1336.0s, [0.376, 0.224, 0.646, 
0.987]->[0.376, 0.228, 0.659, 0.984]->[0.349, 0.228, 0.603, 0.977]->[0.368, 0.217, 0.602, 0.972]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1330.0s to 1332.0s, [0.397, 0.251, 0.654, 0.993]->[0.388, 0.234, 0.64, 0.996]->[0.379, 0.226, 0.642, 0.994]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1328.0s to 1329.0s, [0.433, 0.304, 0.616, 0.85]->[0.44, 0.299, 0.639, 0.952]: stand, carry/hold (an object), hand shake, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "E7JcKooKVsM.mp4", "clip": [1299, 1345], "conversations": [{"from": "human", "timestamps": 1329.0, "value": "For the person at bbox [0.44, 0.299, 0.639, 0.952], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.44, 0.299, 0.639, 0.952]: stand, carry/hold (an object), hand shake, talk to (e.g., self, a person, a group), watch (a person).", "At 1328.0s, [0.433, 0.304, 0.616, 0.85]: stand, carry/hold (an object), hand shake, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1345.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.789, 0.214, 0.885, 0.844]: walk."}]}, {"video": "E7JcKooKVsM.mp4", "clip": [1387, 1418], "conversations": [{"from": "human", "timestamps": 1417.0, "value": "Describe the actions of the person at bbox [0.494, 0.08, 0.762, 0.96] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.494, 0.08, 0.762, 0.96]: stand.", "From 1412.0s to 1416.0s, [0.552, 0.085, 0.768, 0.95]->[0.53, 0.077, 0.769, 0.933]->[0.475, 0.073, 0.777, 0.949]->[0.463, 0.058, 0.787, 0.956]->[0.459, 0.064, 0.779, 0.952]: stand.", "At 1411.0s, [0.493, 0.054, 0.847, 0.932]: walk.", "At 1410.0s, [0.579, 0.033, 0.882, 0.988]: walk, talk to (e.g., self, a person, a group).", "From 1408.0s to 1409.0s, [0.498, 0.048, 0.85, 1.0]->[0.458, 0.088, 0.809, 0.992]: walk.", "At 1407.0s, [0.472, 0.089, 0.91, 0.988]: walk, touch (an object).", "From 1405.0s to 1406.0s, [0.402, 0.054, 0.82, 0.989]->[0.491, 0.084, 0.884, 0.984]: stand, touch (an object)."]}, {"from": "human", "timestamps": 1418.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.077, 0.546, 0.485, 0.999]: listen to (a person), sit; [0.562, 0.096, 0.775, 0.974]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "E7JcKooKVsM.mp4", "clip": [1392, 1483], "conversations": [{"from": "human", "timestamps": 1422.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.067, 0.547, 0.478, 0.995]: listen to (a person), sit; [0.546, 0.072, 0.843, 0.99]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1483.0, "value": "What actions did the person at bbox [0.017, 0.131, 0.421, 0.989] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.017, 0.131, 0.421, 0.989]: stand, watch (a person).", "From 1481.0s to 1482.0s, [0.016, 0.135, 0.403, 0.998]->[0.02, 0.132, 0.408, 0.989]: stand, listen to (a person), watch (a person).", "At 1480.0s, [0.0, 0.139, 0.385, 0.99]: stand, listen to (a person).", "At 1479.0s, [0.0, 0.122, 0.432, 0.997]: stand, listen to (a person), watch (a person).", "At 1478.0s, [0.0, 0.114, 0.317, 1.0]: stand, listen 
to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1474.0s to 1477.0s, [0.025, 0.14, 0.584, 0.978]->[0.0, 0.124, 0.309, 0.99]->[0.002, 0.126, 0.449, 0.98]->[0.003, 0.106, 0.348, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1472.0s to 1473.0s, [0.0, 0.142, 0.393, 0.988]->[0.0, 0.147, 0.393, 0.978]: stand.", "At 1471.0s, [0.0, 0.145, 0.379, 0.993]: stand, watch (a person).", "From 1467.0s to 1470.0s, [0.112, 0.172, 0.459, 0.98]->[0.0, 0.165, 0.408, 1.0]->[0.029, 0.176, 0.468, 0.99]->[0.0, 0.147, 0.335, 0.997]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1466.0s, [0.22, 0.178, 0.654, 0.987]: walk, talk to (e.g., self, a person, a group)."]}]}, {"video": "E7JcKooKVsM.mp4", "clip": [1411, 1464], "conversations": [{"from": "human", "timestamps": 1441.0, "value": "What actions is the person in bbox [0.537, 0.056, 0.842, 0.986] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.537, 0.056, 0.842, 0.986]: stand, listen to (a person), watch (a person).", "At 1440.0s, [0.536, 0.072, 0.843, 0.987]: stand, listen to (a person), watch (a person).", "From 1437.0s to 1439.0s, [0.535, 0.067, 0.842, 0.989]->[0.535, 0.07, 0.844, 0.987]->[0.539, 0.062, 0.844, 1.0]: stand, listen to (a person).", "At 1436.0s, [0.531, 0.073, 0.837, 0.98]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1464.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.062, 0.565, 1.0]: listen to (a person), stand, watch (a person); [0.506, 0.171, 0.897, 0.985]: talk to (e.g., self, a person, a group), walk."}]}, {"video": "E7JcKooKVsM.mp4", "clip": [1494, 1555], "conversations": [{"from": "human", "timestamps": 1524.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.25, 0.111, 0.911, 0.97]: carry/hold (an object), sit, talk to (e.g., self, a person, a group); [0.739, 0.165, 0.959, 0.453]: sit."}, {"from": "human", "timestamps": 1555.0, "value": "What actions did the person at bbox [0.0, 0.06, 0.861, 0.979] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.06, 0.861, 0.979]: sit, talk to (e.g., self, a person, a group).", "At 1554.0s, [0.132, 0.059, 0.844, 0.976]: stand, talk to (e.g., self, a person, a group).", "At 1553.0s, [0.005, 0.052, 0.861, 0.991]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "E7JcKooKVsM.mp4", "clip": [1532, 1574], "conversations": [{"from": "human", "timestamps": 1562.0, "value": "What actions did the person at bbox [0.203, 0.069, 1.0, 0.979] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.203, 0.069, 1.0, 0.979]: sit, talk to (e.g., self, a person, a group).", "From 1556.0s to 1561.0s, [0.21, 0.047, 1.0, 0.988]->[0.185, 0.044, 1.0, 0.989]->[0.201, 0.055, 1.0, 0.983]->[0.176, 0.074, 1.0, 0.986]->[0.148, 0.059, 0.997, 0.98]->[0.145, 0.066, 1.0, 0.985]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1574.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.158, 0.062, 1.0, 0.991]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "E7JcKooKVsM.mp4", "clip": [1618, 1665], "conversations": [{"from": "human", "timestamps": 1648.0, "value": "What actions is the person in bbox [0.262, 0.187, 
0.601, 0.744] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.262, 0.187, 0.601, 0.744]: stand, carry/hold (an object), open (e.g., a window, a car door), touch (an object), watch (a person).", "At 1647.0s, [0.431, 0.091, 0.805, 0.874]: walk, open (e.g., a window, a car door)."]}, {"from": "human", "timestamps": 1665.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.196, 0.196, 0.485, 0.979]: watch (a person), talk to (e.g., self, a person, a group), walk; [0.465, 0.113, 0.859, 0.974]: listen to (a person), walk."}]}, {"video": "E7JcKooKVsM.mp4", "clip": [1646, 1695], "conversations": [{"from": "human", "timestamps": 1676.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.083, 0.102, 0.44, 0.984]: listen to (a person), stand, watch (a person); [0.612, 0.213, 0.919, 0.983]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1695.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.392, 0.087, 0.927, 0.994]?"}, {"from": "gpt", "value": ["In the current frame, [0.392, 0.087, 0.927, 0.994]: stand, talk to (e.g., self, a person, a group).", "From 1690.0s to 1694.0s, [0.387, 0.091, 0.935, 0.996]->[0.393, 0.092, 0.928, 0.992]->[0.383, 0.098, 0.918, 0.996]->[0.382, 0.099, 0.925, 0.989]->[0.385, 0.1, 0.918, 0.985]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "E7JcKooKVsM.mp4", "clip": [1671, 1735], "conversations": [{"from": "human", "timestamps": 1701.0, "value": "What actions is the person in bbox [0.17, 0.073, 0.671, 0.991] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.17, 0.073, 0.671, 0.991]: stand.", "At 1700.0s, [0.167, 0.078, 0.668, 0.993]: stand."]}, {"from": "human", "timestamps": 1735.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.282, 0.117, 0.852, 0.985]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "E7JcKooKVsM.mp4", "clip": [1709, 1759], "conversations": [{"from": "human", "timestamps": 1739.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.07, 0.087, 0.915, 0.982]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1759.0, "value": "What actions is the person in bbox [0.0, 0.029, 0.92, 0.98] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.029, 0.92, 0.98]: stand, talk to (e.g., self, a person, a group).", "From 1757.0s to 1758.0s, [0.016, 0.02, 0.924, 1.0]->[0.0, 0.033, 0.934, 0.986]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "Ecivp8t3MdY.mkv", "clip": [1008, 1044], "conversations": [{"from": "human", "timestamps": 1038.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.436, 0.028, 0.984, 1.0]: stand."}, {"from": "human", "timestamps": 1044.0, "value": "Describe the actions of the person at bbox [0.233, 0.056, 0.963, 0.993] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.233, 0.056, 0.963, 0.993]: stand.", "At 1043.0s, [0.271, 0.055, 0.96, 1.0]: stand.", "At 1042.0s, [0.32, 0.019, 0.972, 1.0]: stand, listen to (a 
person).", "From 1040.0s to 1041.0s, [0.255, 0.024, 0.957, 1.0]->[0.427, 0.0, 0.955, 1.0]: stand.", "At 1039.0s, [0.307, 0.026, 0.929, 1.0]: sit.", "From 1034.0s to 1038.0s, [0.313, 0.018, 0.961, 1.0]->[0.349, 0.009, 0.988, 1.0]->[0.411, 0.011, 0.971, 1.0]->[0.368, 0.0, 0.966, 1.0]->[0.436, 0.028, 0.984, 1.0]: stand.", "From 1031.0s to 1032.0s, [0.383, 0.16, 0.828, 0.996]->[0.358, 0.14, 0.853, 0.999]: stand.", "At 1030.0s, [0.373, 0.162, 0.854, 0.987]: sit, stand.", "From 1028.0s to 1029.0s, [0.401, 0.17, 0.855, 1.0]->[0.438, 0.181, 0.785, 1.0]: stand.", "At 1025.0s, [0.462, 0.178, 0.795, 1.0]: sit.", "From 1022.0s to 1023.0s, [0.506, 0.203, 0.815, 0.999]->[0.489, 0.189, 0.805, 0.997]: sit.", "From 1019.0s to 1020.0s, [0.15, 0.186, 0.855, 0.998]->[0.533, 0.192, 0.853, 0.989]: stand.", "At 1018.0s, [0.547, 0.193, 0.861, 1.0]: sit.", "From 1016.0s to 1017.0s, [0.537, 0.191, 0.879, 0.989]->[0.525, 0.201, 0.87, 0.987]: stand, listen to (a person).", "At 1015.0s, [0.547, 0.206, 0.911, 0.989]: sit, listen to (a person).", "At 1014.0s, [0.571, 0.194, 0.902, 0.989]: stand, listen to (a person).", "At 1013.0s, [0.664, 0.176, 0.948, 0.992]: stand.", "At 1012.0s, [0.671, 0.191, 0.972, 0.986]: sit.", "At 1011.0s, [0.572, 0.276, 0.993, 0.882]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1010.0s, [0.577, 0.317, 0.914, 0.861]: sit.", "At 1009.0s, [0.574, 0.288, 0.987, 0.876]: sit, listen to (a person).", "At 1008.0s, [0.554, 0.285, 0.983, 0.899]: sit, touch (an object).", "From 1003.0s to 1007.0s, [0.523, 0.305, 0.872, 0.838]->[0.586, 0.302, 0.961, 0.877]->[0.549, 0.304, 0.946, 0.864]->[0.561, 0.311, 0.877, 0.869]->[0.589, 0.303, 0.984, 0.87]: sit.", "At 1002.0s, [0.591, 0.253, 0.997, 0.918]: sit, listen to (a person), watch (a person)."]}]}, {"video": "Ecivp8t3MdY.mkv", "clip": [1150, 1199], "conversations": [{"from": "human", "timestamps": 1180.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.008, 0.274, 0.345, 0.987]: stand, sing to (e.g., self, a person, a group); [0.324, 0.056, 0.977, 0.984]: watch (a person), stand, sing to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1199.0, "value": "What actions is the person in bbox [0.004, 0.244, 0.1, 0.628] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.004, 0.244, 0.1, 0.628]: stand, watch (a person).", "At 1198.0s, [0.004, 0.249, 0.116, 0.629]: stand."]}]}, {"video": "Ecivp8t3MdY.mkv", "clip": [1117, 1243], "conversations": [{"from": "human", "timestamps": 1147.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.223, 0.054, 0.898, 0.972]: stand."}, {"from": "human", "timestamps": 1243.0, "value": "For the person at bbox [0.255, 0.021, 0.941, 0.971], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.255, 0.021, 0.941, 0.971]: stand.", "From 1241.0s to 1242.0s, [0.218, 0.036, 0.906, 0.98]->[0.058, 0.026, 0.93, 0.969]: stand.", "At 1240.0s, [0.261, 0.019, 0.902, 0.968]: sit, stand.", "At 1239.0s, [0.27, 0.025, 0.923, 0.965]: stand.", "From 1237.0s to 1238.0s, [0.204, 0.008, 0.896, 1.0]->[0.22, 0.028, 0.915, 0.97]: stand, watch (a person)."]}]}, {"video": "Ecivp8t3MdY.mkv", "clip": [1171, 1215], "conversations": [{"from": "human", "timestamps": 1201.0, "value": "In the 
current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.001, 0.238, 0.114, 0.609]: stand; [0.007, 0.297, 0.355, 0.849]: watch (a person), sit; [0.585, 0.239, 0.988, 0.86]: sit; [0.849, 0.191, 0.988, 0.554]: watch (a person), sit."}, {"from": "human", "timestamps": 1215.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.035, 0.155, 0.75, 0.967]?"}, {"from": "gpt", "value": ["In the current frame, [0.035, 0.155, 0.75, 0.967]: stand.", "From 1209.0s to 1214.0s, [0.015, 0.15, 0.713, 0.97]->[0.0, 0.151, 0.597, 0.977]->[0.008, 0.159, 0.61, 0.968]->[0.012, 0.158, 0.666, 0.972]->[0.029, 0.165, 0.67, 0.96]->[0.035, 0.16, 0.706, 0.97]: stand.", "At 1208.0s, [0.007, 0.146, 0.524, 0.977]: stand, watch (a person).", "At 1207.0s, [0.012, 0.186, 0.472, 0.837]: stand, listen to (a person), watch (a person)."]}]}, {"video": "Ecivp8t3MdY.mkv", "clip": [1253, 1291], "conversations": [{"from": "human", "timestamps": 1283.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.259, 0.266, 0.915, 0.984]: stand."}, {"from": "human", "timestamps": 1291.0, "value": "What actions is the person in bbox [0.249, 0.218, 0.825, 0.983] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.249, 0.218, 0.825, 0.983]: stand, talk to (e.g., self, a person, a group).", "At 1290.0s, [0.171, 0.226, 0.861, 0.976]: stand, sing to (e.g., self, a person, a group), talk to (e.g., self, a person, a group).", "At 1289.0s, [0.132, 0.24, 0.815, 0.978]: stand, talk to (e.g., self, a person, a group).", "At 1288.0s, [0.159, 0.224, 0.865, 0.982]: stand.", "At 1287.0s, [0.133, 0.197, 0.848, 0.997]: stand, sing to (e.g., self, a person, a group).", "At 1286.0s, [0.191, 0.262, 0.837, 0.992]: stand, talk to (e.g., self, a person, a group).", "At 1285.0s, [0.233, 0.258, 0.854, 0.981]: stand, sing to (e.g., self, a person, a group)."]}]}, {"video": "Ecivp8t3MdY.mkv", "clip": [1268, 1348], "conversations": [{"from": "human", "timestamps": 1298.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.085, 0.107, 0.865, 0.983]: stand, sing to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1348.0, "value": "Describe the actions of the person at bbox [0.621, 0.204, 0.903, 0.989] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.621, 0.204, 0.903, 0.989]: stand, drink.", "At 1347.0s, [0.675, 0.214, 0.87, 0.997]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Ecivp8t3MdY.mkv", "clip": [1419, 1492], "conversations": [{"from": "human", "timestamps": 1449.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.055, 0.317, 0.219, 0.978]: walk."}, {"from": "human", "timestamps": 1492.0, "value": "For the person at bbox [0.206, 0.366, 0.694, 0.987], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.206, 0.366, 0.694, 0.987]: walk.", "From 1478.0s to 1491.0s, [0.21, 0.129, 0.622, 1.0]->[0.204, 0.181, 0.685, 0.991]->[0.316, 0.198, 0.829, 0.991]->[0.361, 0.1, 0.876, 0.992]->[0.311, 0.128, 0.875, 0.981]->[0.38, 0.205, 0.969, 0.981]->[0.344, 0.164, 1.0, 0.983]->[0.361, 0.251, 0.927, 0.983]->[0.472, 0.259, 0.978, 
0.983]->[0.444, 0.173, 1.0, 0.983]->[0.408, 0.035, 0.993, 0.982]->[0.346, 0.133, 1.0, 1.0]->[0.2, 0.128, 1.0, 0.998]->[0.271, 0.385, 0.926, 0.998]: walk.", "From 1475.0s to 1477.0s, [0.312, 0.284, 0.543, 0.989]->[0.262, 0.312, 0.547, 0.986]->[0.248, 0.292, 0.608, 0.988]: walk, listen to (a person).", "From 1472.0s to 1474.0s, [0.765, 0.453, 0.913, 0.949]->[0.571, 0.367, 0.72, 0.973]->[0.385, 0.285, 0.569, 0.984]: walk."]}]}, {"video": "Ecivp8t3MdY.mkv", "clip": [1534, 1603], "conversations": [{"from": "human", "timestamps": 1564.0, "value": "For the person at bbox [0.595, 0.45, 0.835, 0.953], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.595, 0.45, 0.835, 0.953]: sit, carry/hold (an object).", "At 1563.0s, [0.59, 0.423, 0.82, 0.974]: sit, carry/hold (an object).", "At 1562.0s, [0.579, 0.437, 0.843, 0.976]: sit, drink.", "At 1561.0s, [0.589, 0.428, 0.83, 0.973]: sit, drink, listen to (a person).", "At 1560.0s, [0.59, 0.442, 0.832, 0.976]: sit, carry/hold (an object), drink.", "At 1559.0s, [0.601, 0.446, 0.822, 0.964]: sit, carry/hold (an object)."]}, {"from": "human", "timestamps": 1603.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.074, 0.58, 0.319, 0.941]: ride (e.g., a bike, a car, a horse), sit; [0.738, 0.568, 0.98, 0.999]: ride (e.g., a bike, a car, a horse), stand."}]}, {"video": "Ecivp8t3MdY.mkv", "clip": [1658, 1732], "conversations": [{"from": "human", "timestamps": 1688.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.135, 0.278, 0.829, 0.988]: stand."}, {"from": "human", "timestamps": 1732.0, "value": "Describe the actions of the person at bbox [0.625, 0.499, 0.772, 0.97] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.625, 0.499, 0.772, 0.97]: stand, eat.", "At 1731.0s, [0.616, 0.527, 0.763, 0.97]: stand.", "At 1730.0s, [0.624, 0.487, 0.765, 0.962]: stand, eat, listen to (a person).", "At 1729.0s, [0.608, 0.403, 0.768, 0.967]: stand.", "At 1728.0s, [0.617, 0.459, 0.766, 0.969]: stand, touch (an object), listen to (a person).", "From 1726.0s to 1727.0s, [0.622, 0.522, 0.76, 0.966]->[0.613, 0.454, 0.774, 0.969]: stand."]}]}, {"video": "Ecivp8t3MdY.mkv", "clip": [1744, 1787], "conversations": [{"from": "human", "timestamps": 1774.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.017, 0.649, 0.23, 0.986]: watch (a person), get up; [0.225, 0.595, 0.291, 0.822]: watch (a person), stand; [0.318, 0.567, 0.427, 0.894]: watch (a person), stand; [0.409, 0.596, 0.479, 0.904]: stand; [0.466, 0.564, 0.569, 0.899]: walk; [0.562, 0.572, 0.661, 0.976]: stand; [0.63, 0.486, 0.773, 0.958]: walk; [0.761, 0.465, 0.932, 0.978]: watch (a person), walk."}, {"from": "human", "timestamps": 1787.0, "value": "What actions did the person at bbox [0.549, 0.227, 0.99, 0.994] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.549, 0.227, 0.99, 0.994]: stand, listen to (a person), watch (a person).", "At 1786.0s, [0.597, 0.222, 0.997, 0.996]: stand, listen to (a person), watch (a person)."]}]}, {"video": "F3dPH6Xqf5M.mp4", "clip": [965, 1005], "conversations": [{"from": "human", "timestamps": 995.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.114, 
0.217, 0.261, 0.919]: watch (a person), dance, play musical instrument; [0.186, 0.22, 0.31, 0.817]: carry/hold (an object), dance, watch (a person); [0.265, 0.229, 0.342, 0.457]: carry/hold (an object), stand, watch (a person); [0.444, 0.324, 0.547, 0.745]: dance; [0.699, 0.23, 0.772, 0.812]: watch (a person), stand."}, {"from": "human", "timestamps": 1005.0, "value": "For the person at bbox [0.768, 0.113, 0.891, 0.867], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.768, 0.113, 0.891, 0.867]: stand, listen to (a person), watch (a person).", "At 1004.0s, [0.684, 0.121, 0.893, 0.938]: stand, listen (e.g., to music), watch (a person).", "From 1002.0s to 1003.0s, [0.543, 0.118, 0.801, 0.918]->[0.625, 0.174, 0.829, 0.87]: stand, watch (a person).", "At 1001.0s, [0.465, 0.085, 0.67, 0.597]: stand, listen to (a person), watch (a person).", "At 1000.0s, [0.466, 0.078, 0.61, 0.473]: stand, watch (a person).", "At 999.0s, [0.428, 0.119, 0.507, 0.451]: stand, listen (e.g., to music), watch (a person)."]}]}, {"video": "F3dPH6Xqf5M.mp4", "clip": [1016, 1076], "conversations": [{"from": "human", "timestamps": 1046.0, "value": "What actions is the person in bbox [0.428, 0.028, 0.884, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.428, 0.028, 0.884, 1.0]: carry/hold (an object), dance, play musical instrument, listen to (a person), sing to (e.g., self, a person, a group).", "At 1045.0s, [0.417, 0.003, 0.892, 1.0]: dance, play musical instrument, sing to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1076.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.171, 0.006, 0.9, 0.994]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "F3dPH6Xqf5M.mp4", "clip": [1070, 1120], "conversations": [{"from": "human", "timestamps": 1100.0, "value": "Describe the actions of the person at bbox [0.178, 0.061, 0.878, 0.994] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.178, 0.061, 0.878, 0.994]: sit, talk to (e.g., self, a person, a group).", "From 1094.0s to 1099.0s, [0.169, 0.072, 0.886, 0.988]->[0.156, 0.09, 0.885, 0.979]->[0.161, 0.057, 0.899, 0.996]->[0.15, 0.069, 0.887, 0.978]->[0.15, 0.03, 0.897, 0.979]->[0.155, 0.053, 0.882, 0.994]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1120.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.197, 0.035, 0.911, 0.982]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "F3dPH6Xqf5M.mp4", "clip": [1131, 1185], "conversations": [{"from": "human", "timestamps": 1161.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.155, 0.341, 0.381, 0.789]: watch (a person), sit; [0.422, 0.045, 0.556, 0.703]: walk; [0.664, 0.321, 0.82, 0.773]: sit."}, {"from": "human", "timestamps": 1185.0, "value": "What actions did the person at bbox [0.467, 0.29, 0.6, 0.768] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.467, 0.29, 0.6, 0.768]: sit, listen to (a person), watch (a person).", "From 1182.0s to 1183.0s, [0.483, 0.304, 0.604, 0.77]->[0.467, 0.293, 0.587, 0.761]: sit, listen to (a person).", "From 1174.0s to 1181.0s, [0.49, 0.297, 0.614, 0.776]->[0.483, 0.298, 0.614, 
0.771]->[0.484, 0.298, 0.613, 0.772]->[0.478, 0.296, 0.614, 0.77]->[0.476, 0.29, 0.61, 0.764]->[0.488, 0.293, 0.605, 0.753]->[0.478, 0.286, 0.604, 0.762]->[0.496, 0.287, 0.605, 0.764]: sit, listen to (a person), watch (a person).", "At 1173.0s, [0.487, 0.301, 0.614, 0.778]: sit, listen (e.g., to music), watch (a person)."]}]}, {"video": "F3dPH6Xqf5M.mp4", "clip": [1118, 1232], "conversations": [{"from": "human", "timestamps": 1148.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.116, 0.049, 0.874, 0.96]: sit."}, {"from": "human", "timestamps": 1232.0, "value": "Describe the actions of the person at bbox [0.226, 0.001, 0.825, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.226, 0.001, 0.825, 1.0]: sit.", "At 1231.0s, [0.169, 0.01, 0.75, 1.0]: sit, stand.", "From 1229.0s to 1230.0s, [0.323, 0.087, 0.848, 1.0]->[0.243, 0.061, 0.732, 1.0]: sit.", "At 1228.0s, [0.338, 0.063, 0.78, 1.0]: sit, listen to (a person)."]}]}, {"video": "F3dPH6Xqf5M.mp4", "clip": [1176, 1223], "conversations": [{"from": "human", "timestamps": 1206.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.113, 0.015, 0.481, 0.984]: stand, play musical instrument."}, {"from": "human", "timestamps": 1223.0, "value": "Describe the actions of the person at bbox [0.241, 0.043, 0.771, 0.976] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.241, 0.043, 0.771, 0.976]: stand, play musical instrument, sing to (e.g., self, a person, a group).", "At 1222.0s, [0.168, 0.073, 0.889, 0.972]: stand, carry/hold (an object), play musical instrument, sing to (e.g., self, a person, a group).", "At 1221.0s, [0.175, 0.028, 0.859, 0.991]: stand, play musical instrument."]}]}, {"video": "F3dPH6Xqf5M.mp4", "clip": [1307, 1338], "conversations": [{"from": "human", "timestamps": 1337.0, "value": "What actions did the person at bbox [0.242, 0.007, 0.854, 0.986] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.242, 0.007, 0.854, 0.986]: stand, talk to (e.g., self, a person, a group).", "From 1335.0s to 1336.0s, [0.179, 0.028, 0.882, 0.997]->[0.231, 0.013, 0.847, 0.984]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1338.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.234, 0.009, 0.886, 1.0]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "F3dPH6Xqf5M.mp4", "clip": [1391, 1425], "conversations": [{"from": "human", "timestamps": 1421.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.127, 0.097, 0.845, 0.992]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1425.0, "value": "Describe the actions of the person at bbox [0.193, 0.009, 0.888, 0.995] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.193, 0.009, 0.888, 0.995]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1424.0s, [0.129, 0.001, 0.861, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1422.0s, [0.232, 0.036, 0.871, 0.988]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": 
"F3dPH6Xqf5M.mp4", "clip": [1479, 1533], "conversations": [{"from": "human", "timestamps": 1509.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.108, 0.099, 0.503, 0.998]: carry/hold (an object), sit; [0.5, 0.0, 0.897, 0.986]: carry/hold (an object), sit, listen to (a person)."}, {"from": "human", "timestamps": 1533.0, "value": "Describe the actions of the person at bbox [0.339, 0.019, 0.886, 0.995] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.339, 0.019, 0.886, 0.995]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1532.0s, [0.294, 0.024, 0.884, 0.981]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "F3dPH6Xqf5M.mp4", "clip": [1518, 1574], "conversations": [{"from": "human", "timestamps": 1548.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.124, 0.081, 0.469, 0.978]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group); [0.463, 0.007, 0.896, 0.978]: carry/hold (an object), sit, listen to (a person)."}, {"from": "human", "timestamps": 1574.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.146, 0.14, 0.584, 0.989]?"}, {"from": "gpt", "value": ["At Now, [0.146, 0.14, 0.584, 0.989]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1573.0s, [0.143, 0.14, 0.573, 0.985]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1572.0s, [0.141, 0.128, 0.599, 0.975]: sit, carry/hold (an object), listen to (a person)."]}]}, {"video": "F3dPH6Xqf5M.mp4", "clip": [1640, 1673], "conversations": [{"from": "human", "timestamps": 1670.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.239, 0.002, 0.884, 0.984]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1673.0, "value": "Describe the actions of the person at bbox [0.078, 0.069, 0.851, 0.985] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.078, 0.069, 0.851, 0.985]: sit, talk to (e.g., self, a person, a group).", "From 1671.0s to 1672.0s, [0.174, 0.012, 0.807, 1.0]->[0.055, 0.02, 0.811, 0.998]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "F3dPH6Xqf5M.mp4", "clip": [1716, 1781], "conversations": [{"from": "human", "timestamps": 1746.0, "value": "Describe the actions of the person at bbox [0.49, 0.408, 0.529, 0.573] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.49, 0.408, 0.529, 0.573]: walk, carry/hold (an object), push (an object), talk to (e.g., self, a person, a group).", "At 1745.0s, [0.49, 0.403, 0.529, 0.561]: walk, push (an object), watch (a person).", "From 1742.0s to 1744.0s, [0.51, 0.392, 0.541, 0.52]->[0.501, 0.397, 0.538, 0.525]->[0.497, 0.4, 0.536, 0.549]: walk, carry/hold (an object), push (an object)."]}, {"from": "human", "timestamps": 1781.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.085, 0.047, 0.842, 0.991]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "Feu1_8NazPE.mp4", "clip": [942, 995], "conversations": [{"from": "human", "timestamps": 972.0, "value": "Where is each person positioned, and what actions are they 
performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.006, 0.284, 0.339, 0.885]: listen to (a person), stand, watch (a person); [0.5, 0.14, 0.982, 0.899]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 995.0, "value": "What actions is the person in bbox [0.447, 0.144, 0.85, 0.826] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.447, 0.144, 0.85, 0.826]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 994.0s, [0.492, 0.134, 0.9, 0.809]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 993.0s, [0.552, 0.137, 0.925, 0.812]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 992.0s, [0.572, 0.156, 0.922, 0.82]: stand, talk to (e.g., self, a person, a group).", "At 991.0s, [0.743, 0.244, 0.872, 0.753]: stand, carry/hold (an object), listen to (a person).", "At 990.0s, [0.724, 0.249, 0.863, 0.634]: stand, carry/hold (an object).", "At 989.0s, [0.702, 0.246, 0.848, 0.558]: walk, carry/hold (an object), listen to (a person)."]}]}, {"video": "Feu1_8NazPE.mp4", "clip": [1034, 1121], "conversations": [{"from": "human", "timestamps": 1064.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.088, 0.221, 0.251, 0.865]: carry/hold (an object), listen to (a person), walk; [0.229, 0.211, 0.369, 0.845]: carry/hold (an object), listen to (a person), watch (a person), walk; [0.453, 0.22, 0.686, 0.737]: carry/hold (an object), watch (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1121.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.529, 0.208, 0.81, 0.862]?"}, {"from": "gpt", "value": ["In the current frame, [0.529, 0.208, 0.81, 0.862]: walk, carry/hold (an object), listen to (a person).", "At 1120.0s, [0.478, 0.193, 0.656, 0.874]: stand, carry/hold (an object).", "At 1119.0s, [0.182, 0.133, 0.997, 0.87]: walk, carry/hold (an object)."]}]}, {"video": "Feu1_8NazPE.mp4", "clip": [1143, 1199], "conversations": [{"from": "human", "timestamps": 1173.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.013, 0.125, 0.553, 0.899]: listen to (a person), stand, watch (a person); [0.633, 0.155, 0.986, 0.873]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1199.0, "value": "What actions did the person at bbox [0.647, 0.184, 0.97, 0.879] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.647, 0.184, 0.97, 0.879]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1198.0s, [0.643, 0.174, 0.947, 0.878]: stand."]}]}, {"video": "Feu1_8NazPE.mp4", "clip": [1125, 1235], "conversations": [{"from": "human", "timestamps": 1155.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.326, 0.207, 0.651, 0.889]: carry/hold (an object), sit."}, {"from": "human", "timestamps": 1235.0, "value": "For the person at bbox [0.52, 0.086, 0.979, 0.899], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.52, 0.086, 0.979, 0.899]: stand, watch (a person).", "From 1233.0s to 1234.0s, [0.553, 0.122, 0.999, 0.87]->[0.543, 0.115, 
0.997, 0.885]: stand, watch (a person).", "At 1232.0s, [0.437, 0.113, 1.0, 0.892]: stand, hug (a person), watch (a person).", "From 1230.0s to 1231.0s, [0.414, 0.088, 1.0, 0.912]->[0.46, 0.114, 1.0, 0.894]: stand, watch (a person)."]}]}, {"video": "Feu1_8NazPE.mp4", "clip": [1256, 1305], "conversations": [{"from": "human", "timestamps": 1286.0, "value": "What actions did the person at bbox [0.418, 0.188, 0.663, 0.872] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.418, 0.188, 0.663, 0.872]: walk, enter.", "At 1285.0s, [0.428, 0.24, 0.639, 0.866]: stand, watch (a person).", "At 1284.0s, [0.411, 0.245, 0.615, 0.865]: stand."]}, {"from": "human", "timestamps": 1305.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.39, 0.204, 0.539, 0.88]: walk."}]}, {"video": "Feu1_8NazPE.mp4", "clip": [1423, 1462], "conversations": [{"from": "human", "timestamps": 1453.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.305, 0.237, 0.451, 0.551]: walk."}, {"from": "human", "timestamps": 1462.0, "value": "For the person at bbox [0.538, 0.144, 0.879, 0.877], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.538, 0.144, 0.879, 0.877]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1458.0s to 1461.0s, [0.398, 0.169, 0.474, 0.534]->[0.388, 0.164, 0.466, 0.521]->[0.528, 0.133, 0.843, 0.866]->[0.557, 0.151, 0.877, 0.875]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1457.0s, [0.438, 0.183, 0.517, 0.587]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 1454.0s to 1456.0s, [0.223, 0.187, 0.426, 0.553]->[0.717, 0.203, 0.812, 0.4]->[0.563, 0.196, 0.647, 0.415]: walk, watch (a person).", "At 1453.0s, [0.305, 0.237, 0.451, 0.551]: walk."]}]}, {"video": "Feu1_8NazPE.mp4", "clip": [1496, 1536], "conversations": [{"from": "human", "timestamps": 1526.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.004, 0.124, 0.469, 0.889]: get up; [0.489, 0.181, 0.83, 0.88]: watch (a person), sit."}, {"from": "human", "timestamps": 1536.0, "value": "What actions is the person in bbox [0.3, 0.194, 0.806, 0.869] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.3, 0.194, 0.806, 0.869]: get up.", "At 1535.0s, [0.302, 0.188, 0.756, 0.867]: sit."]}]}, {"video": "Feu1_8NazPE.mp4", "clip": [1527, 1618], "conversations": [{"from": "human", "timestamps": 1557.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.163, 0.175, 0.749, 0.857]: listen to (a person), sit, watch (a person); [0.484, 0.23, 1.0, 0.886]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1618.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.229, 0.169, 0.733, 0.874]?"}, {"from": "gpt", "value": ["In the current frame, [0.229, 0.169, 0.733, 0.874]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1615.0s to 1617.0s, [0.219, 0.153, 0.719, 0.87]->[0.173, 0.161, 0.769, 0.877]->[0.201, 0.162, 0.756, 0.871]: sit, talk to 
(e.g., self, a person, a group), watch (a person).", "At 1614.0s, [0.248, 0.161, 0.797, 0.854]: sit.", "At 1613.0s, [0.193, 0.149, 0.665, 0.868]: sit, talk to (e.g., self, a person, a group).", "From 1611.0s to 1612.0s, [0.234, 0.154, 0.798, 0.872]->[0.205, 0.155, 0.664, 0.858]: sit.", "From 1607.0s to 1610.0s, [0.236, 0.146, 0.711, 0.89]->[0.236, 0.155, 0.777, 0.887]->[0.223, 0.148, 0.804, 0.873]->[0.258, 0.161, 0.792, 0.853]: sit, talk to (e.g., self, a person, a group).", "At 1606.0s, [0.203, 0.158, 0.724, 0.869]: sit.", "At 1605.0s, [0.211, 0.131, 0.661, 0.871]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1604.0s, [0.102, 0.128, 0.67, 0.869]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "Feu1_8NazPE.mp4", "clip": [1539, 1589], "conversations": [{"from": "human", "timestamps": 1569.0, "value": "What actions is the person in bbox [0.53, 0.226, 0.998, 0.858] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.53, 0.226, 0.998, 0.858]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1568.0s, [0.518, 0.235, 0.998, 0.839]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1562.0s to 1567.0s, [0.482, 0.241, 0.997, 0.875]->[0.489, 0.24, 0.995, 0.88]->[0.494, 0.24, 0.999, 0.876]->[0.501, 0.233, 1.0, 0.88]->[0.489, 0.239, 1.0, 0.887]->[0.5, 0.242, 1.0, 0.884]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1559.0s to 1561.0s, [0.475, 0.235, 0.99, 0.872]->[0.49, 0.23, 0.988, 0.871]->[0.491, 0.236, 0.993, 0.879]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1557.0s to 1558.0s, [0.484, 0.23, 1.0, 0.886]->[0.453, 0.242, 0.99, 0.878]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1555.0s to 1556.0s, [0.481, 0.229, 0.987, 0.866]->[0.487, 0.231, 0.988, 0.87]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1554.0s, [0.491, 0.232, 0.998, 0.879]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1553.0s, [0.47, 0.239, 0.984, 0.877]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1552.0s, [0.467, 0.237, 0.983, 0.869]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1551.0s, [0.486, 0.239, 0.978, 0.851]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1549.0s to 1550.0s, [0.492, 0.241, 0.977, 0.869]->[0.464, 0.238, 0.994, 0.841]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1547.0s to 1548.0s, [0.485, 0.221, 0.983, 0.847]->[0.493, 0.244, 0.978, 0.859]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1545.0s to 1546.0s, [0.473, 0.241, 0.978, 0.859]->[0.476, 0.237, 0.983, 0.863]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1544.0s, [0.463, 0.237, 0.97, 0.837]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1543.0s, [0.473, 0.226, 0.946, 0.861]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1589.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.09, 0.149, 0.61, 0.867]: watch (a person), sit, talk to (e.g., self, a person, a 
group)."}]}, {"video": "Feu1_8NazPE.mp4", "clip": [1609, 1674], "conversations": [{"from": "human", "timestamps": 1639.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.046, 0.151, 0.57, 0.873]: listen to (a person), sit, watch (a person); [0.688, 0.244, 1.0, 0.87]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1674.0, "value": "What actions did the person at bbox [0.369, 0.218, 0.891, 0.889] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.369, 0.218, 0.891, 0.889]: sit, carry/hold (an object), kiss (a person).", "From 1670.0s to 1673.0s, [0.21, 0.124, 0.619, 0.873]->[0.251, 0.138, 0.626, 0.881]->[0.234, 0.137, 0.613, 0.875]->[0.228, 0.14, 0.588, 0.874]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1669.0s, [0.255, 0.137, 0.639, 0.888]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1668.0s, [0.245, 0.131, 0.635, 0.882]: sit, listen to (a person), watch (a person).", "At 1667.0s, [0.221, 0.14, 0.635, 0.88]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1665.0s to 1666.0s, [0.216, 0.121, 0.618, 0.875]->[0.19, 0.129, 0.632, 0.864]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Feu1_8NazPE.mp4", "clip": [1634, 1690], "conversations": [{"from": "human", "timestamps": 1664.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.018, 0.117, 0.557, 0.886]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.451, 0.134, 0.985, 0.891]: listen to (a person), sit, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1690.0, "value": "What actions is the person in bbox [0.411, 0.143, 0.558, 0.402] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.411, 0.143, 0.558, 0.402]: sit.", "At 1689.0s, [0.409, 0.146, 0.558, 0.403]: sit."]}]}, {"video": "Feu1_8NazPE.mp4", "clip": [1654, 1728], "conversations": [{"from": "human", "timestamps": 1684.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.383, 0.185, 0.747, 0.865]?"}, {"from": "gpt", "value": ["In the current frame, [0.383, 0.185, 0.747, 0.865]: sit.", "At 1683.0s, [0.397, 0.179, 0.897, 0.878]: sit, talk to (e.g., self, a person, a group).", "At 1682.0s, [0.4, 0.164, 0.909, 0.86]: sit.", "From 1678.0s to 1681.0s, [0.469, 0.173, 0.885, 0.898]->[0.477, 0.207, 0.914, 0.87]->[0.429, 0.225, 0.876, 0.886]->[0.412, 0.207, 0.859, 0.875]: sit, talk to (e.g., self, a person, a group).", "From 1676.0s to 1677.0s, [0.539, 0.251, 0.818, 0.884]->[0.491, 0.227, 0.94, 0.89]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1728.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.494, 0.139, 0.986, 0.872]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "Feu1_8NazPE.mp4", "clip": [1729, 1769], "conversations": [{"from": "human", "timestamps": 1759.0, "value": "Describe the actions of the person at bbox [0.183, 0.142, 0.94, 0.854] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.183, 0.142, 0.94, 0.854]: sit.", "At 
1758.0s, [0.33, 0.139, 0.927, 0.846]: sit, watch (a person).", "At 1757.0s, [0.31, 0.231, 0.714, 0.873]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1756.0s, [0.342, 0.241, 0.727, 0.893]: sit, lie/sleep, watch (a person).", "At 1755.0s, [0.332, 0.247, 0.75, 0.889]: sit, watch (a person).", "At 1754.0s, [0.348, 0.223, 0.756, 0.883]: sit, listen to (a person), watch (a person).", "From 1738.0s to 1753.0s, [0.542, 0.253, 0.807, 0.863]->[0.525, 0.259, 0.825, 0.866]->[0.404, 0.257, 0.808, 0.879]->[0.41, 0.234, 0.831, 0.874]->[0.431, 0.256, 0.82, 0.873]->[0.423, 0.248, 0.833, 0.873]->[0.412, 0.25, 0.859, 0.887]->[0.372, 0.243, 0.822, 0.878]->[0.371, 0.233, 0.828, 0.876]->[0.377, 0.261, 0.874, 0.894]->[0.353, 0.232, 0.803, 0.875]->[0.368, 0.244, 0.85, 0.893]->[0.374, 0.247, 0.84, 0.897]->[0.378, 0.232, 0.801, 0.881]->[0.352, 0.243, 0.829, 0.88]->[0.344, 0.238, 0.787, 0.886]: sit, watch (a person)."]}, {"from": "human", "timestamps": 1769.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.366, 0.19, 0.954, 0.87]: listen to (a person), sit, watch (a person)."}]}, {"video": "G5Yr20A5z_Q.mkv", "clip": [880, 921], "conversations": [{"from": "human", "timestamps": 910.0, "value": "For the person at bbox [0.87, 0.271, 0.971, 0.719], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At now, [0.87, 0.271, 0.971, 0.719]: stand, watch (a person).", "At 909.0s, [0.875, 0.274, 0.97, 0.723]: stand, watch (a person)."]}, {"from": "human", "timestamps": 921.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.162, 0.083, 0.384, 0.909]: stand, open (e.g., a window, a car door); [0.346, 0.225, 0.481, 0.969]: stand; [0.458, 0.294, 0.518, 0.904]: stand; [0.515, 0.158, 0.681, 0.945]: walk; [0.669, 0.239, 0.773, 0.807]: watch (a person), stand; [0.771, 0.227, 0.901, 0.812]: watch (a person), stand; [0.872, 0.227, 0.972, 0.675]: watch (a person), stand."}]}, {"video": "G5Yr20A5z_Q.mkv", "clip": [1001, 1040], "conversations": [{"from": "human", "timestamps": 1031.0, "value": "What actions is the person in bbox [0.368, 0.129, 0.868, 0.97] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.368, 0.129, 0.868, 0.97]: stand, talk to (e.g., self, a person, a group).", "At 1030.0s, [0.367, 0.117, 0.834, 0.98]: sit, stand, talk to (e.g., self, a person, a group).", "At 1029.0s, [0.366, 0.123, 0.862, 0.983]: stand, talk to (e.g., self, a person, a group).", "At 1028.0s, [0.394, 0.127, 0.846, 0.971]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1027.0s, [0.354, 0.136, 0.856, 0.971]: talk to (e.g., self, a person, a group).", "At 1026.0s, [0.216, 0.113, 0.848, 0.979]: sit, talk to (e.g., self, a person, a group).", "At 1025.0s, [0.17, 0.13, 0.846, 0.996]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1040.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.018, 0.026, 0.524, 0.99]: watch (a person), stand; [0.352, 0.123, 0.845, 0.989]: watch (a person), sit, stand."}]}, {"video": "G5Yr20A5z_Q.mkv", "clip": [995, 1133], "conversations": [{"from": "human", "timestamps": 1025.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, 
{"from": "gpt", "value": "At now, [0.17, 0.13, 0.846, 0.996]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1133.0, "value": "What actions is the person in bbox [0.15, 0.1, 0.391, 0.958] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.15, 0.1, 0.391, 0.958]: stand, listen to (a person), watch (a person).", "From 1131.0s to 1132.0s, [0.094, 0.014, 0.399, 0.975]->[0.128, 0.058, 0.409, 0.989]: stand, listen to (a person), watch (a person).", "At 1130.0s, [0.053, 0.0, 0.384, 0.979]: stand, watch (a person).", "At 1129.0s, [0.047, 0.003, 0.391, 0.975]: stand, talk to (e.g., self, a person, a group).", "At 1128.0s, [0.019, 0.0, 0.369, 0.976]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1127.0s, [0.025, 0.0, 0.375, 0.974]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "G5Yr20A5z_Q.mkv", "clip": [1053, 1100], "conversations": [{"from": "human", "timestamps": 1083.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.004, 0.05, 0.237, 0.982]: stand; [0.176, 0.121, 0.515, 0.989]: carry/hold (an object), stand, listen to (a person); [0.21, 0.164, 0.315, 0.482]: listen to (a person), stand, watch (a person); [0.399, 0.189, 0.546, 0.959]: stand; [0.51, 0.193, 0.647, 0.962]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.541, 0.05, 0.882, 0.999]: listen to (a person), stand, watch (a person); [0.872, 0.177, 0.989, 0.928]: stand."}, {"from": "human", "timestamps": 1100.0, "value": "What actions is the person in bbox [0.391, 0.346, 0.454, 0.833] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.391, 0.346, 0.454, 0.833]: stand, open (e.g., a window, a car door).", "At 1099.0s, [0.364, 0.329, 0.458, 0.779]: stand, carry/hold (an object), open (e.g., a window, a car door), listen to (a person).", "At 1098.0s, [0.34, 0.38, 0.441, 0.992]: stand, carry/hold (an object), open (e.g., a window, a car door), watch (a person).", "At 1097.0s, [0.311, 0.438, 0.448, 0.993]: bend/bow (at the waist), carry/hold (an object).", "From 1094.0s to 1096.0s, [0.274, 0.479, 0.426, 0.999]->[0.294, 0.49, 0.446, 0.984]->[0.301, 0.484, 0.45, 1.0]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "G5Yr20A5z_Q.mkv", "clip": [1128, 1169], "conversations": [{"from": "human", "timestamps": 1158.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.0, 0.185, 0.867]: listen to (a person), stand, watch (a person); [0.097, 0.069, 0.569, 0.987]: listen to (a person), stand, watch (a person); [0.442, 0.131, 0.7, 0.978]: listen to (a person), stand, watch (a person); [0.504, 0.088, 0.944, 0.988]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.875, 0.105, 1.0, 0.987]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1169.0, "value": "For the person at bbox [0.11, 0.079, 0.749, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.11, 0.079, 0.749, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1168.0s, [0.238, 0.088, 0.744, 0.986]: stand, carry/hold (an object), talk to 
(e.g., self, a person, a group)."]}]}, {"video": "G5Yr20A5z_Q.mkv", "clip": [1156, 1233], "conversations": [{"from": "human", "timestamps": 1186.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.214, 0.14, 0.811, 1.0]: talk to (e.g., self, a person, a group), bend/bow (at the waist)."}, {"from": "human", "timestamps": 1233.0, "value": "What actions is the person in bbox [0.095, 0.029, 0.446, 0.982] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.095, 0.029, 0.446, 0.982]: stand, listen to (a person), watch (a person).", "From 1226.0s to 1232.0s, [0.266, 0.151, 0.5, 0.949]->[0.281, 0.123, 0.443, 0.713]->[0.23, 0.064, 0.432, 0.693]->[0.123, 0.07, 0.443, 0.981]->[0.111, 0.048, 0.445, 0.969]->[0.107, 0.042, 0.434, 0.982]->[0.089, 0.021, 0.448, 0.981]: stand, listen to (a person), watch (a person)."]}]}, {"video": "G5Yr20A5z_Q.mkv", "clip": [1191, 1238], "conversations": [{"from": "human", "timestamps": 1221.0, "value": "Describe the actions of the person at bbox [0.279, 0.186, 0.4, 0.962] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.279, 0.186, 0.4, 0.962]: stand, listen to (a person), watch (a person).", "At 1220.0s, [0.272, 0.175, 0.414, 0.959]: stand, listen to (a person), watch (a person).", "At 1219.0s, [0.28, 0.166, 0.424, 0.981]: stand, listen to (a person).", "From 1217.0s to 1218.0s, [0.267, 0.203, 0.398, 0.965]->[0.279, 0.181, 0.403, 0.979]: stand, listen to (a person), watch (a person).", "At 1216.0s, [0.279, 0.165, 0.416, 0.957]: stand, listen to (a person)."]}, {"from": "human", "timestamps": 1238.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.188, 0.035, 0.79, 0.998]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "G5Yr20A5z_Q.mkv", "clip": [1257, 1316], "conversations": [{"from": "human", "timestamps": 1287.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.174, 0.164, 0.996]: listen to (a person), stand, watch (a person); [0.095, 0.13, 0.554, 0.98]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.561, 0.087, 0.833, 0.993]: listen to (a person), stand, watch (a person); [0.737, 0.049, 1.0, 0.957]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1316.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.38, 0.159, 0.646, 0.925]?"}, {"from": "gpt", "value": ["In the current frame, [0.38, 0.159, 0.646, 0.925]: stand, watch (e.g., TV).", "At 1315.0s, [0.311, 0.133, 0.66, 0.979]: stand, carry/hold (an object), watch (a person).", "At 1314.0s, [0.298, 0.132, 0.69, 0.978]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1312.0s to 1313.0s, [0.307, 0.121, 0.664, 0.979]->[0.316, 0.129, 0.664, 0.977]: stand, carry/hold (an object), watch (a person).", "At 1311.0s, [0.308, 0.123, 0.683, 0.975]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1309.0s to 1310.0s, [0.308, 0.147, 0.659, 0.975]->[0.294, 0.126, 0.683, 0.974]: stand, carry/hold (an object), watch (a person)."]}]}, {"video": "G5Yr20A5z_Q.mkv", "clip": [1273, 1328], "conversations": [{"from": "human", "timestamps": 1303.0, "value": "In the 
current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.018, 0.161, 0.449, 1.0]: carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.404, 0.179, 0.528, 0.956]: listen to (a person), stand; [0.451, 0.177, 0.696, 0.958]: listen to (a person), stand, watch (a person); [0.617, 0.129, 0.798, 0.863]: listen to (a person), stand; [0.71, 0.065, 0.992, 0.887]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1328.0, "value": "What actions is the person in bbox [0.006, 0.01, 0.146, 0.92] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At now, [0.006, 0.01, 0.146, 0.92]: stand, watch (a person).", "At 1327.0s, [0.005, 0.01, 0.172, 0.784]: stand, listen to (a person).", "At 1326.0s, [0.002, 0.017, 0.167, 0.836]: stand, listen to (a person), watch (a person)."]}]}, {"video": "G5Yr20A5z_Q.mkv", "clip": [1371, 1410], "conversations": [{"from": "human", "timestamps": 1401.0, "value": "For the person at bbox [0.686, 0.354, 0.997, 0.993], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At now, [0.686, 0.354, 0.997, 0.993]: sit, stand, listen to (a person), watch (a person).", "At 1400.0s, [0.667, 0.324, 0.996, 0.993]: sit, listen to (a person), watch (a person).", "From 1398.0s to 1399.0s, [0.439, 0.214, 0.832, 0.99]->[0.44, 0.194, 0.844, 0.991]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1397.0s, [0.431, 0.189, 0.841, 0.986]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1396.0s, [0.429, 0.221, 0.835, 0.989]: sit, talk to (e.g., self, a person, a group).", "From 1393.0s to 1395.0s, [0.548, 0.283, 0.866, 0.992]->[0.424, 0.381, 0.79, 0.996]->[0.415, 0.256, 0.83, 0.987]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1391.0s to 1392.0s, [0.521, 0.245, 0.848, 1.0]->[0.542, 0.233, 0.852, 0.992]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1390.0s, [0.563, 0.211, 0.874, 0.982]: sit, talk to (e.g., self, a person, a group).", "From 1387.0s to 1389.0s, [0.511, 0.028, 0.991, 0.984]->[0.493, 0.024, 0.987, 0.974]->[0.538, 0.236, 0.856, 0.985]: stand, listen to (a person), watch (a person).", "From 1385.0s to 1386.0s, [0.529, 0.253, 0.86, 0.993]->[0.538, 0.213, 0.858, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1384.0s, [0.541, 0.228, 0.864, 0.991]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1382.0s to 1383.0s, [0.605, 0.295, 0.924, 1.0]->[0.548, 0.225, 0.862, 0.989]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1381.0s, [0.654, 0.208, 0.997, 0.993]: walk."]}, {"from": "human", "timestamps": 1410.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.015, 0.049, 0.457, 0.979]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "G5Yr20A5z_Q.mkv", "clip": [1397, 1451], "conversations": [{"from": "human", "timestamps": 1427.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.152, 0.412, 0.486, 0.992]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.507, 0.03, 0.939, 1.0]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1451.0, "value": "What actions did the person at bbox [0.005, 0.102, 
0.327, 0.981] perform now and earlier?"}, {"from": "gpt", "value": ["At now, [0.005, 0.102, 0.327, 0.981]: stand, listen to (a person), watch (a person).", "At 1450.0s, [0.008, 0.108, 0.394, 0.976]: stand, listen to (a person), watch (a person)."]}]}, {"video": "G5Yr20A5z_Q.mkv", "clip": [1419, 1497], "conversations": [{"from": "human", "timestamps": 1449.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.201, 0.082, 0.688, 0.963]: listen to (a person), watch (a person), walk; [0.625, 0.0, 0.969, 0.962]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1497.0, "value": "What actions did the person at bbox [0.265, 0.229, 0.483, 0.627] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.265, 0.229, 0.483, 0.627]: sit, ride (e.g., a bike, a car, a horse).", "From 1492.0s to 1496.0s, [0.243, 0.412, 0.343, 0.894]->[0.286, 0.407, 0.375, 0.69]->[0.309, 0.381, 0.415, 0.656]->[0.3, 0.341, 0.428, 0.619]->[0.272, 0.25, 0.431, 0.597]: sit, ride (e.g., a bike, a car, a horse)."]}]}, {"video": "G5Yr20A5z_Q.mkv", "clip": [1494, 1560], "conversations": [{"from": "human", "timestamps": 1524.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.219, 0.014, 0.937, 0.985]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1560.0, "value": "What actions is the person in bbox [0.221, 0.53, 0.327, 0.803] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.221, 0.53, 0.327, 0.803]: stand, listen to (a person), watch (a person).", "From 1557.0s to 1559.0s, [0.321, 0.455, 0.414, 0.77]->[0.295, 0.468, 0.387, 0.755]->[0.248, 0.513, 0.347, 0.796]: stand, listen to (a person), watch (a person)."]}]}, {"video": "G5Yr20A5z_Q.mkv", "clip": [1680, 1715], "conversations": [{"from": "human", "timestamps": 1710.0, "value": "What actions did the person at bbox [0.152, 0.801, 0.597, 0.979] perform now and earlier?"}, {"from": "gpt", "value": ["At now, [0.152, 0.801, 0.597, 0.979]: lie/sleep.", "From 1708.0s to 1709.0s, [0.145, 0.782, 0.628, 0.97]->[0.151, 0.803, 0.609, 0.976]: lie/sleep."]}, {"from": "human", "timestamps": 1715.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.15, 0.798, 0.618, 0.975]: lie/sleep; [0.503, 0.368, 0.564, 0.499]: run/jog."}]}, {"video": "GBXK_SyfisM.mkv", "clip": [878, 992], "conversations": [{"from": "human", "timestamps": 908.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.056, 0.155, 0.636, 0.813]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 992.0, "value": "For the person at bbox [0.314, 0.352, 0.542, 0.788], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At now, [0.314, 0.352, 0.542, 0.788]: stand, watch (a person).", "From 990.0s to 991.0s, [0.418, 0.475, 0.483, 0.771]->[0.402, 0.413, 0.525, 0.737]: walk, watch (a person).", "At 989.0s, [0.381, 0.456, 0.47, 0.77]: walk."]}]}, {"video": "GBXK_SyfisM.mkv", "clip": [1003, 1070], "conversations": [{"from": "human", "timestamps": 1033.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, 
[0.451, 0.238, 0.834, 0.792]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1070.0, "value": "What actions is the person in bbox [0.013, 0.413, 0.128, 0.774] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.013, 0.413, 0.128, 0.774]: walk, carry/hold (an object), push (an object).", "At 1069.0s, [0.115, 0.389, 0.298, 0.78]: walk, carry/hold (an object), push (an object)."]}]}, {"video": "GBXK_SyfisM.mkv", "clip": [1007, 1134], "conversations": [{"from": "human", "timestamps": 1037.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.397, 0.272, 0.767, 0.796]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1134.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.476, 0.365, 0.611, 0.584]?"}, {"from": "gpt", "value": ["At now, [0.476, 0.365, 0.611, 0.584]: stand, touch (an object).", "From 1132.0s to 1133.0s, [0.477, 0.357, 0.606, 0.586]->[0.472, 0.362, 0.615, 0.582]: stand, touch (an object).", "From 1127.0s to 1131.0s, [0.479, 0.352, 0.61, 0.586]->[0.458, 0.354, 0.581, 0.644]->[0.455, 0.348, 0.584, 0.678]->[0.461, 0.357, 0.583, 0.653]->[0.469, 0.362, 0.601, 0.582]: stand.", "From 1125.0s to 1126.0s, [0.48, 0.367, 0.61, 0.595]->[0.479, 0.359, 0.607, 0.589]: walk.", "From 1123.0s to 1124.0s, [0.546, 0.376, 0.662, 0.651]->[0.53, 0.375, 0.653, 0.621]: stand.", "At 1122.0s, [0.556, 0.38, 0.662, 0.584]: walk."]}]}, {"video": "GBXK_SyfisM.mkv", "clip": [1132, 1185], "conversations": [{"from": "human", "timestamps": 1162.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.215, 0.448, 0.318, 0.774]: listen to (a person), walk; [0.304, 0.52, 0.391, 0.764]: carry/hold (an object), bend/bow (at the waist), listen to (a person); [0.394, 0.522, 0.45, 0.756]: listen to (a person), stand, touch (an object); [0.455, 0.462, 0.557, 0.78]: walk; [0.573, 0.534, 0.606, 0.704]: walk; [0.589, 0.496, 0.684, 0.78]: walk; [0.663, 0.544, 0.738, 0.762]: walk."}, {"from": "human", "timestamps": 1185.0, "value": "Describe the actions of the person at bbox [0.385, 0.455, 0.481, 0.641] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At now, [0.385, 0.455, 0.481, 0.641]: walk, touch (an object), watch (a person).", "At 1184.0s, [0.419, 0.456, 0.481, 0.635]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "GBXK_SyfisM.mkv", "clip": [1261, 1295], "conversations": [{"from": "human", "timestamps": 1291.0, "value": "What actions is the person in bbox [0.079, 0.258, 0.461, 0.781] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At now, [0.079, 0.258, 0.461, 0.781]: stand.", "At 1290.0s, [0.085, 0.263, 0.46, 0.781]: stand, carry/hold (an object).", "At 1289.0s, [0.077, 0.281, 0.586, 0.783]: stand, carry/hold (an object), watch (a person)."]}, {"from": "human", "timestamps": 1295.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.438, 0.218, 0.775, 0.785]: watch (a person), sit."}]}, {"video": "GBXK_SyfisM.mkv", "clip": [1320, 1364], "conversations": [{"from": "human", "timestamps": 1350.0, "value": "Can you tell me the current and 
past actions of the person located in bbox [0.022, 0.388, 0.245, 0.781]?"}, {"from": "gpt", "value": ["At now, [0.022, 0.388, 0.245, 0.781]: stand, watch (a person).", "At 1349.0s, [0.045, 0.488, 0.221, 0.788]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1364.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.433, 0.227, 0.88, 0.769]: watch (a person), stand."}]}, {"video": "GBXK_SyfisM.mkv", "clip": [1364, 1400], "conversations": [{"from": "human", "timestamps": 1394.0, "value": "Describe the actions of the person at bbox [0.57, 0.314, 0.885, 0.772] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At now, [0.57, 0.314, 0.885, 0.772]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1393.0s, [0.564, 0.308, 0.885, 0.772]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1392.0s, [0.561, 0.309, 0.879, 0.773]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1391.0s, [0.56, 0.308, 0.882, 0.778]: stand, listen to (a person), watch (a person).", "From 1389.0s to 1390.0s, [0.435, 0.218, 0.884, 0.772]->[0.463, 0.23, 0.882, 0.77]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1400.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.097, 0.259, 0.386, 0.784]: listen to (a person), stand, watch (a person); [0.558, 0.33, 0.889, 0.776]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "GBXK_SyfisM.mkv", "clip": [1349, 1439], "conversations": [{"from": "human", "timestamps": 1379.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.595, 0.282, 0.958, 0.767]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1439.0, "value": "What actions did the person at bbox [0.327, 0.426, 0.429, 0.782] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.327, 0.426, 0.429, 0.782]: walk.", "At 1438.0s, [0.477, 0.365, 0.64, 0.756]: walk.", "From 1436.0s to 1437.0s, [0.537, 0.367, 0.675, 0.776]->[0.535, 0.358, 0.674, 0.782]: stand, watch (a person)."]}]}, {"video": "GBXK_SyfisM.mkv", "clip": [1560, 1601], "conversations": [{"from": "human", "timestamps": 1590.0, "value": "What actions is the person in bbox [0.275, 0.307, 0.738, 0.818] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At now, [0.275, 0.307, 0.738, 0.818]: stand, talk to (e.g., self, a person, a group).", "From 1585.0s to 1589.0s, [0.36, 0.31, 0.745, 0.811]->[0.363, 0.32, 0.749, 0.805]->[0.361, 0.31, 0.746, 0.81]->[0.361, 0.312, 0.741, 0.797]->[0.28, 0.318, 0.763, 0.815]: stand, talk to (e.g., self, a person, a group).", "At 1584.0s, [0.31, 0.272, 0.755, 0.804]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1583.0s, [0.364, 0.321, 0.768, 0.808]: get up, watch (a person)."]}, {"from": "human", "timestamps": 1601.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.525, 0.245, 0.691, 0.588]: stand."}]}, {"video": "GBXK_SyfisM.mkv", "clip": [1593, 1627], "conversations": [{"from": "human", "timestamps": 1623.0, "value": "For the person at bbox [0.494, 0.331, 
0.632, 0.785], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.494, 0.331, 0.632, 0.785]: walk.", "At 1622.0s, [0.448, 0.343, 0.599, 0.78]: walk.", "At 1620.0s, [0.272, 0.38, 0.406, 0.788]: walk.", "From 1615.0s to 1619.0s, [0.292, 0.297, 0.521, 0.779]->[0.287, 0.321, 0.48, 0.781]->[0.297, 0.302, 0.483, 0.783]->[0.298, 0.292, 0.482, 0.785]->[0.271, 0.318, 0.456, 0.783]: stand.", "From 1611.0s to 1614.0s, [0.559, 0.303, 0.647, 0.699]->[0.426, 0.305, 0.574, 0.789]->[0.373, 0.292, 0.537, 0.794]->[0.333, 0.283, 0.516, 0.793]: walk."]}, {"from": "human", "timestamps": 1627.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.464, 0.341, 0.595, 0.781]: stand."}]}, {"video": "GBXK_SyfisM.mkv", "clip": [1714, 1784], "conversations": [{"from": "human", "timestamps": 1744.0, "value": "Describe the actions of the person at bbox [0.562, 0.417, 0.946, 0.785] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At now, [0.562, 0.417, 0.946, 0.785]: sit, carry/hold (an object), touch (an object), listen to (a person).", "At 1743.0s, [0.545, 0.427, 0.921, 0.774]: sit, carry/hold (an object).", "At 1742.0s, [0.555, 0.369, 0.923, 0.777]: sit, carry/hold (an object), listen to (a person).", "At 1741.0s, [0.726, 0.333, 0.967, 0.766]: sit, touch (an object)."]}, {"from": "human", "timestamps": 1784.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.076, 0.348, 0.197, 0.736]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.18, 0.4, 0.333, 0.786]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.359, 0.382, 0.48, 0.783]: listen to (a person), stand, watch (a person); [0.441, 0.381, 0.575, 0.772]: talk to (e.g., self, a person, a group), walk; [0.64, 0.398, 0.778, 0.77]: carry/hold (an object), listen to (a person), watch (a person), walk; [0.765, 0.428, 0.857, 0.781]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.913, 0.381, 0.996, 0.785]: carry/hold (an object), stand, watch (a person), listen to (a person)."}]}, {"video": "Gfdg_GcaNe8.mkv", "clip": [903, 962], "conversations": [{"from": "human", "timestamps": 933.0, "value": "For the person at bbox [0.192, 0.005, 0.688, 0.995], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.192, 0.005, 0.688, 0.995]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 932.0s, [0.18, 0.01, 0.706, 0.995]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 962.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.128, 0.004, 0.517, 1.0]: stand, talk to (e.g., self, a person, a group); [0.379, 0.0, 0.869, 0.985]: listen to (a person), stand."}]}, {"video": "Gfdg_GcaNe8.mkv", "clip": [1053, 1137], "conversations": [{"from": "human", "timestamps": 1083.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.461, 0.045, 0.874, 0.976]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1137.0, "value": "Describe the actions of the 
person at bbox [0.247, 0.011, 0.85, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At now, [0.247, 0.011, 0.85, 1.0]: stand, watch (a person).", "At 1136.0s, [0.357, 0.25, 0.848, 1.0]: stand."]}]}, {"video": "Gfdg_GcaNe8.mkv", "clip": [1292, 1360], "conversations": [{"from": "human", "timestamps": 1322.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.57, 0.018, 0.874, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1360.0, "value": "What actions did the person at bbox [0.331, 0.373, 0.587, 0.998] perform now and earlier?"}, {"from": "gpt", "value": ["At now, [0.331, 0.373, 0.587, 0.998]: stand, talk to (e.g., self, a person, a group).", "From 1358.0s to 1359.0s, [0.294, 0.387, 0.532, 0.993]->[0.307, 0.371, 0.557, 0.983]: stand, talk to (e.g., self, a person, a group).", "At 1357.0s, [0.278, 0.413, 0.517, 0.996]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Gfdg_GcaNe8.mkv", "clip": [1435, 1480], "conversations": [{"from": "human", "timestamps": 1465.0, "value": "What actions did the person at bbox [0.525, 0.723, 0.588, 0.884] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.525, 0.723, 0.588, 0.884]: stand.", "At 1464.0s, [0.376, 0.555, 0.467, 0.921]: stand."]}, {"from": "human", "timestamps": 1480.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.166, 0.065, 0.535, 0.977]: carry/hold (an object), crouch/kneel, talk to (e.g., self, a person, a group)."}]}, {"video": "Gfdg_GcaNe8.mkv", "clip": [1493, 1550], "conversations": [{"from": "human", "timestamps": 1523.0, "value": "For the person at bbox [0.645, 0.508, 0.814, 0.989], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At now, [0.645, 0.508, 0.814, 0.989]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1521.0s to 1522.0s, [0.727, 0.439, 0.819, 0.982]->[0.694, 0.495, 0.82, 0.993]: run/jog."]}, {"from": "human", "timestamps": 1550.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.346, 0.038, 0.792, 0.965]: watch (a person), stand."}]}, {"video": "Gfdg_GcaNe8.mkv", "clip": [1656, 1708], "conversations": [{"from": "human", "timestamps": 1686.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.122, 0.269, 0.203, 0.989]: watch (a person), stand; [0.301, 0.174, 0.557, 0.985]: stand; [0.54, 0.318, 0.672, 0.993]: carry/hold (an object), stand, watch (a person); [0.635, 0.356, 0.736, 0.977]: carry/hold (an object), stand; [0.804, 0.273, 0.877, 0.985]: stand."}, {"from": "human", "timestamps": 1708.0, "value": "What actions did the person at bbox [0.51, 0.198, 0.65, 0.898] perform now and earlier?"}, {"from": "gpt", "value": ["At now, [0.51, 0.198, 0.65, 0.898]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1707.0s, [0.505, 0.265, 0.65, 0.907]: walk, watch (a person).", "At 1706.0s, [0.503, 0.195, 0.63, 0.842]: stand, watch (a person).", "At 1705.0s, [0.289, 0.373, 0.511, 0.979]: get up, watch (a person).", "From 1703.0s to 1704.0s, [0.294, 0.523, 0.554, 0.976]->[0.265, 0.503, 0.524, 0.969]: sit.", "From 1698.0s to 1702.0s, [0.254, 0.493, 
0.557, 0.987]->[0.252, 0.498, 0.54, 0.999]->[0.254, 0.507, 0.543, 0.993]->[0.26, 0.527, 0.542, 0.995]->[0.281, 0.523, 0.551, 0.991]: sit, talk to (e.g., self, a person, a group).", "At 1697.0s, [0.259, 0.478, 0.559, 0.999]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1696.0s, [0.26, 0.479, 0.573, 0.977]: sit, talk to (e.g., self, a person, a group).", "From 1694.0s to 1695.0s, [0.245, 0.475, 0.613, 0.986]->[0.256, 0.474, 0.585, 0.988]: sit.", "At 1693.0s, [0.245, 0.497, 0.625, 0.995]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Gfdg_GcaNe8.mkv", "clip": [1738, 1795], "conversations": [{"from": "human", "timestamps": 1768.0, "value": "What actions is the person in bbox [0.404, 0.232, 0.533, 0.97] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.404, 0.232, 0.533, 0.97]: stand, listen to (a person).", "From 1765.0s to 1767.0s, [0.398, 0.225, 0.54, 0.96]->[0.401, 0.229, 0.535, 0.895]->[0.406, 0.229, 0.536, 0.966]: stand, listen to (a person).", "From 1761.0s to 1764.0s, [0.404, 0.215, 0.533, 0.838]->[0.409, 0.223, 0.534, 0.874]->[0.415, 0.231, 0.539, 0.954]->[0.406, 0.231, 0.545, 0.961]: stand, carry/hold (an object), listen to (a person)."]}, {"from": "human", "timestamps": 1795.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.335, 0.009, 0.763, 0.989]: stand, hug (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "GozLjpMNADg.mkv", "clip": [882, 993], "conversations": [{"from": "human", "timestamps": 912.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.07, 0.23, 0.288, 0.988]: stand; [0.21, 0.251, 0.368, 0.941]: carry/hold (an object), stand, watch (a person); [0.328, 0.151, 0.478, 0.981]: carry/hold (an object), stand, watch (a person); [0.494, 0.139, 0.855, 0.983]: watch (a person), stand; [0.691, 0.143, 0.78, 0.377]: carry/hold (an object), stand, watch (a person); [0.845, 0.126, 0.967, 0.95]: carry/hold (an object), stand, watch (a person); [0.939, 0.234, 1.0, 0.682]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 993.0, "value": "What actions is the person in bbox [0.676, 0.18, 1.0, 0.872] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.676, 0.18, 1.0, 0.872]: sit.", "At 992.0s, [0.67, 0.199, 0.971, 0.862]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 991.0s, [0.621, 0.2, 0.948, 0.897]: sit, touch (an object).", "At 990.0s, [0.477, 0.195, 0.945, 0.918]: sit, touch (an object), watch (a person).", "At 989.0s, [0.478, 0.176, 0.958, 0.907]: sit, touch (an object), listen to (a person), watch (a person)."]}]}, {"video": "GozLjpMNADg.mkv", "clip": [961, 1015], "conversations": [{"from": "human", "timestamps": 991.0, "value": "What actions did the person at bbox [0.621, 0.2, 0.948, 0.897] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.621, 0.2, 0.948, 0.897]: sit, touch (an object).", "At 990.0s, [0.477, 0.195, 0.945, 0.918]: sit, touch (an object), watch (a person).", "At 989.0s, [0.478, 0.176, 0.958, 0.907]: sit, touch (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1015.0, "value": "Where is each person positioned, and what actions are they performing in the 
current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.465, 0.344, 1.0]: listen to (a person), sit; [0.297, 0.496, 0.543, 0.989]: listen to (a person), sit, watch (a person); [0.405, 0.007, 0.608, 0.862]: carry/hold (an object), bend/bow (at the waist), listen to (a person); [0.532, 0.115, 0.996, 1.0]: listen to (a person), sit, watch (a person); [0.788, 0.007, 1.0, 0.484]: listen to (a person), stand, watch (a person)."}]}, {"video": "GozLjpMNADg.mkv", "clip": [1046, 1091], "conversations": [{"from": "human", "timestamps": 1076.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.017, 0.33, 0.332, 0.987]: listen to (a person), stand, watch (a person); [0.187, 0.065, 0.77, 1.0]: watch (a person), stand; [0.605, 0.016, 0.993, 1.0]: watch (a person), stand."}, {"from": "human", "timestamps": 1091.0, "value": "What actions did the person at bbox [0.0, 0.173, 0.216, 0.979] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.173, 0.216, 0.979]: stand, carry/hold (an object), watch (a person).", "At 1090.0s, [0.0, 0.158, 0.205, 0.987]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "GozLjpMNADg.mkv", "clip": [1125, 1221], "conversations": [{"from": "human", "timestamps": 1155.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.16, 0.006, 0.834, 1.0]: sit, touch (an object)."}, {"from": "human", "timestamps": 1221.0, "value": "Describe the actions of the person at bbox [0.338, 0.006, 1.0, 0.98] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.338, 0.006, 1.0, 0.98]: stand, answer phone.", "From 1209.0s to 1220.0s, [0.011, 0.0, 0.846, 0.94]->[0.009, 0.0, 0.836, 0.88]->[0.016, 0.017, 0.661, 0.889]->[0.0, 0.0, 0.629, 0.897]->[0.003, 0.0, 0.589, 0.961]->[0.003, 0.045, 0.648, 0.961]->[0.062, 0.048, 0.912, 1.0]->[0.0, 0.023, 0.823, 0.954]->[0.0, 0.024, 0.807, 1.0]->[0.0, 0.003, 0.905, 1.0]->[0.124, 0.009, 0.924, 0.995]->[0.196, 0.0, 0.923, 0.975]: stand, answer phone."]}]}, {"video": "GozLjpMNADg.mkv", "clip": [1244, 1292], "conversations": [{"from": "human", "timestamps": 1274.0, "value": "For the person at bbox [0.123, 0.018, 1.0, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.123, 0.018, 1.0, 1.0]: sit, eat.", "At 1273.0s, [0.144, 0.017, 0.988, 1.0]: sit, eat, watch (a person).", "At 1272.0s, [0.035, 0.007, 0.864, 0.97]: sit, eat."]}, {"from": "human", "timestamps": 1292.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.151, 0.01, 0.937, 1.0]: watch (a person), sit, eat, talk to (e.g., self, a person, a group)."}]}, {"video": "GozLjpMNADg.mkv", "clip": [1497, 1535], "conversations": [{"from": "human", "timestamps": 1527.0, "value": "What actions is the person in bbox [0.363, 0.296, 0.881, 0.98] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.363, 0.296, 0.881, 0.98]: bend/bow (at the waist), carry/hold (an object).", "At 1526.0s, [0.368, 0.199, 0.79, 0.991]: run/jog."]}, {"from": "human", "timestamps": 1535.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.193, 0.19, 0.66, 1.0]: carry/hold (an 
object), stand, sail boat, talk to (e.g., self, a person, a group); [0.435, 0.121, 0.721, 0.689]: listen to (a person), stand, sail boat; [0.691, 0.005, 0.939, 0.61]: carry/hold (an object), stand, sail boat, listen to (a person)."}]}, {"video": "GozLjpMNADg.mkv", "clip": [1619, 1725], "conversations": [{"from": "human", "timestamps": 1649.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.0, 0.116, 0.11, 0.961]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.143, 0.046, 0.246, 0.803]: watch (a person), listen to (a person), grab (a person), carry/hold (an object), stand; [0.223, 0.089, 0.394, 0.821]: listen to (a person), stand; [0.412, 0.084, 0.505, 0.645]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.513, 0.043, 0.609, 0.682]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.556, 0.05, 0.697, 0.914]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.674, 0.0, 0.815, 0.928]: listen to (a person), stand, watch (a person); [0.79, 0.0, 0.982, 0.987]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1725.0, "value": "What actions did the person at bbox [0.192, 0.0, 0.937, 0.988] perform now and earlier?"}, {"from": "gpt", "value": ["At now, [0.192, 0.0, 0.937, 0.988]: stand, carry/hold (an object), turn (e.g., a screwdriver).", "At 1724.0s, [0.243, 0.013, 0.956, 0.988]: stand."]}]}, {"video": "GozLjpMNADg.mkv", "clip": [1649, 1719], "conversations": [{"from": "human", "timestamps": 1679.0, "value": "What actions is the person in bbox [0.028, 0.017, 0.389, 0.993] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At now, [0.028, 0.017, 0.389, 0.993]: stand, listen to (a person), watch (a person).", "At 1678.0s, [0.0, 0.0, 0.31, 0.995]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1719.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.336, 0.226, 0.547, 0.99]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.459, 0.125, 0.806, 0.99]: carry/hold (an object), listen to (a person), watch (a person), walk; [0.784, 0.227, 0.998, 0.976]: carry/hold (an object), stand, watch (a person), listen to (a person)."}]}, {"video": "Gt61_Yekkgc.mp4", "clip": [977, 1008], "conversations": [{"from": "human", "timestamps": 1007.0, "value": "For the person at bbox [0.243, 0.072, 0.988, 0.953], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.243, 0.072, 0.988, 0.953]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1006.0s, [0.295, 0.062, 1.0, 0.967]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1008.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.297, 0.056, 1.0, 0.946]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "Gt61_Yekkgc.mp4", "clip": [1058, 1093], "conversations": [{"from": "human", "timestamps": 1088.0, "value": "What actions did the person at bbox [0.444, 0.275, 0.708, 0.918] perform now and earlier?"}, {"from": "gpt", "value": ["At now, [0.444, 0.275, 0.708, 0.918]: sit, listen to (a person), watch (a person).", "From 1083.0s to 1087.0s, 
[0.328, 0.107, 0.739, 0.922]->[0.433, 0.273, 0.694, 0.907]->[0.428, 0.279, 0.677, 0.907]->[0.459, 0.282, 0.69, 0.917]->[0.465, 0.274, 0.705, 0.913]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1093.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.129, 0.385, 0.43, 0.903]: listen to (a person), sit, smoke; [0.294, 0.141, 0.622, 0.903]: listen to (a person), sit, watch (a person); [0.342, 0.118, 0.991, 0.951]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "Gt61_Yekkgc.mp4", "clip": [1199, 1252], "conversations": [{"from": "human", "timestamps": 1229.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.145, 0.144, 0.584, 0.904]: carry/hold (an object), fight/hit (a person), jump/leap; [0.572, 0.43, 0.793, 0.911]: watch (a person), stand, fight/hit (a person), walk."}, {"from": "human", "timestamps": 1252.0, "value": "What actions did the person at bbox [0.673, 0.13, 1.0, 0.916] perform now and earlier?"}, {"from": "gpt", "value": ["At now, [0.673, 0.13, 1.0, 0.916]: stand, watch (a person).", "At 1251.0s, [0.657, 0.156, 0.998, 0.908]: stand, listen to (a person), watch (a person).", "From 1247.0s to 1250.0s, [0.624, 0.099, 0.999, 0.933]->[0.611, 0.064, 0.997, 0.933]->[0.586, 0.109, 0.991, 0.955]->[0.637, 0.116, 1.0, 0.943]: stand, watch (a person)."]}]}, {"video": "Gt61_Yekkgc.mp4", "clip": [1242, 1310], "conversations": [{"from": "human", "timestamps": 1272.0, "value": "Describe the actions of the person at bbox [0.223, 0.155, 0.655, 0.914] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.223, 0.155, 0.655, 0.914]: stand, carry/hold (an object), watch (a person).", "From 1270.0s to 1271.0s, [0.087, 0.117, 0.533, 0.908]->[0.101, 0.115, 0.517, 0.9]: stand, carry/hold (an object), watch (a person).", "At 1269.0s, [0.066, 0.151, 0.524, 0.91]: sit.", "At 1268.0s, [0.069, 0.137, 0.516, 0.924]: stand."]}, {"from": "human", "timestamps": 1310.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.246, 0.2, 0.528, 0.916]: walk; [0.618, 0.233, 0.996, 0.914]: walk."}]}, {"video": "Gt61_Yekkgc.mp4", "clip": [1296, 1368], "conversations": [{"from": "human", "timestamps": 1326.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.206, 0.098, 0.585, 0.905]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1368.0, "value": "For the person at bbox [0.033, 0.309, 0.553, 0.92], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.033, 0.309, 0.553, 0.92]: stand.", "At 1367.0s, [0.02, 0.273, 0.575, 0.912]: stand, listen to (a person)."]}]}, {"video": "Gt61_Yekkgc.mp4", "clip": [1351, 1405], "conversations": [{"from": "human", "timestamps": 1381.0, "value": "What actions did the person at bbox [0.017, 0.256, 0.535, 0.927] perform now and earlier?"}, {"from": "gpt", "value": ["At now, [0.017, 0.256, 0.535, 0.927]: stand.", "At 1379.0s, [0.017, 0.261, 0.542, 0.925]: stand.", "From 1375.0s to 1377.0s, [0.093, 0.492, 0.487, 0.911]->[0.018, 0.35, 0.566, 0.944]->[0.018, 0.246, 0.518, 0.934]: stand, talk to (e.g., self, a person, a group).", "From 1370.0s to 
1374.0s, [0.072, 0.431, 0.542, 0.92]->[0.096, 0.389, 0.518, 0.911]->[0.036, 0.345, 0.547, 0.899]->[0.029, 0.348, 0.547, 0.92]->[0.088, 0.412, 0.515, 0.914]: stand.", "At 1369.0s, [0.066, 0.399, 0.557, 0.915]: stand, talk to (e.g., self, a person, a group).", "At 1368.0s, [0.033, 0.309, 0.553, 0.92]: stand.", "At 1367.0s, [0.02, 0.273, 0.575, 0.912]: stand, listen to (a person)."]}, {"from": "human", "timestamps": 1405.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.455, 0.179, 1.0, 0.917]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "Gt61_Yekkgc.mp4", "clip": [1408, 1479], "conversations": [{"from": "human", "timestamps": 1438.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.008, 0.153, 0.691, 0.926]: watch (a person), stand."}, {"from": "human", "timestamps": 1479.0, "value": "Describe the actions of the person at bbox [0.482, 0.127, 0.97, 0.89] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At now, [0.482, 0.127, 0.97, 0.89]: sit.", "From 1477.0s to 1478.0s, [0.493, 0.102, 0.978, 0.887]->[0.449, 0.231, 0.941, 0.908]: sit.", "At 1476.0s, [0.489, 0.104, 0.985, 0.882]: stand."]}]}, {"video": "Gt61_Yekkgc.mp4", "clip": [1452, 1492], "conversations": [{"from": "human", "timestamps": 1482.0, "value": "Describe the actions of the person at bbox [0.41, 0.112, 0.954, 0.895] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At now, [0.41, 0.112, 0.954, 0.895]: sit.", "From 1480.0s to 1481.0s, [0.515, 0.116, 0.969, 0.877]->[0.495, 0.114, 0.971, 0.905]: stand.", "From 1477.0s to 1479.0s, [0.493, 0.102, 0.978, 0.887]->[0.449, 0.231, 0.941, 0.908]->[0.482, 0.127, 0.97, 0.89]: sit.", "At 1476.0s, [0.489, 0.104, 0.985, 0.882]: stand."]}, {"from": "human", "timestamps": 1492.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.15, 0.092, 0.884, 0.9]: sit, text on/look at a cellphone."}]}, {"video": "Gt61_Yekkgc.mp4", "clip": [1518, 1616], "conversations": [{"from": "human", "timestamps": 1548.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.787, 0.555, 0.814, 0.69]: walk."}, {"from": "human", "timestamps": 1616.0, "value": "For the person at bbox [0.278, 0.086, 0.939, 0.914], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.278, 0.086, 0.939, 0.914]: stand, talk to (e.g., self, a person, a group).", "At 1615.0s, [0.33, 0.101, 0.909, 0.917]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "HJzgJ9ZjvJk.mkv", "clip": [898, 940], "conversations": [{"from": "human", "timestamps": 928.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.016, 0.0, 0.721, 1.0]: watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 940.0, "value": "What actions did the person at bbox [0.394, 0.0, 0.965, 0.989] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.394, 0.0, 0.965, 0.989]: stand, watch (a person).", "At 939.0s, [0.561, 0.248, 1.0, 1.0]: sit, watch (e.g., TV).", "At 938.0s, [0.607, 0.224, 1.0, 0.986]: sit, stand, watch (e.g., TV).", "From 936.0s to 
937.0s, [0.643, 0.193, 1.0, 0.997]->[0.658, 0.204, 1.0, 0.984]: stand, watch (e.g., TV), listen to (a person)."]}]}, {"video": "HJzgJ9ZjvJk.mkv", "clip": [1242, 1273], "conversations": [{"from": "human", "timestamps": 1272.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.028, 0.287, 0.44, 0.837]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.571, 0.216, 1.0, 0.893]: carry/hold (an object), sit, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1273.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.031, 0.286, 0.443, 0.792]?"}, {"from": "gpt", "value": ["At now, [0.031, 0.286, 0.443, 0.792]: sit, listen to (a person), watch (a person).", "From 1270.0s to 1272.0s, [0.013, 0.277, 0.429, 0.779]->[0.032, 0.284, 0.444, 0.819]->[0.028, 0.287, 0.44, 0.837]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1269.0s, [0.016, 0.281, 0.436, 0.815]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "HJzgJ9ZjvJk.mkv", "clip": [1450, 1484], "conversations": [{"from": "human", "timestamps": 1480.0, "value": "What actions did the person at bbox [0.019, 0.06, 0.756, 0.993] perform now and earlier?"}, {"from": "gpt", "value": ["At now, [0.019, 0.06, 0.756, 0.993]: stand, carry/hold (an object), throw, hand wave.", "From 1478.0s to 1479.0s, [0.343, 0.183, 0.92, 0.994]->[0.098, 0.176, 0.894, 0.978]: stand, carry/hold (an object)."]}, {"from": "human", "timestamps": 1484.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.116, 0.175, 0.941, 0.987]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "HJzgJ9ZjvJk.mkv", "clip": [1549, 1599], "conversations": [{"from": "human", "timestamps": 1579.0, "value": "For the person at bbox [0.529, 0.102, 0.964, 0.857], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.529, 0.102, 0.964, 0.857]: sit, watch (a person).", "At 1578.0s, [0.53, 0.092, 0.964, 0.858]: sit, watch (a person).", "At 1577.0s, [0.533, 0.103, 0.954, 0.861]: sit.", "From 1573.0s to 1575.0s, [0.531, 0.11, 0.958, 0.9]->[0.535, 0.113, 0.958, 0.879]->[0.547, 0.107, 0.959, 0.889]: sit.", "At 1572.0s, [0.53, 0.115, 0.954, 0.883]: stand, watch (a person).", "From 1569.0s to 1571.0s, [0.501, 0.115, 0.958, 0.927]->[0.537, 0.115, 0.967, 0.879]->[0.541, 0.1, 0.948, 0.881]: sit, watch (a person).", "At 1568.0s, [0.398, 0.055, 0.971, 1.0]: sit, touch (an object), watch (a person)."]}, {"from": "human", "timestamps": 1599.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.393, 0.149, 0.51, 0.861]: listen to (a person), stand, watch (a person); [0.575, 0.214, 0.713, 0.865]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "HJzgJ9ZjvJk.mkv", "clip": [1595, 1663], "conversations": [{"from": "human", "timestamps": 1625.0, "value": "What actions did the person at bbox [0.423, 0.086, 1.0, 0.971] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.423, 0.086, 1.0, 0.971]: sit.", "At 1624.0s, [0.402, 0.081, 0.996, 0.978]: sit, listen to (a person).", "At 1623.0s, [0.421, 0.058, 0.998, 0.977]: sit, listen to (a person), watch (a person).", "At 1622.0s, [0.421, 0.056, 1.0, 
0.987]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1663.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.255, 0.17, 0.45, 1.0]: walk; [0.371, 0.248, 0.707, 0.986]: carry/hold (an object), walk."}]}, {"video": "HJzgJ9ZjvJk.mkv", "clip": [1744, 1775], "conversations": [{"from": "human", "timestamps": 1774.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.646, 0.416, 0.934, 0.94]?"}, {"from": "gpt", "value": ["At Now, [0.646, 0.416, 0.934, 0.94]: dance, watch (a person).", "At 1773.0s, [0.719, 0.615, 0.961, 0.975]: dance."]}, {"from": "human", "timestamps": 1775.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.166, 0.202, 0.443, 0.936]: walk; [0.6, 0.267, 0.825, 0.716]: dance; [0.644, 0.519, 0.942, 0.981]: dance; [0.839, 0.156, 0.986, 0.509]: watch (a person), sit."}]}, {"video": "HKjR70GCRPE.mp4", "clip": [913, 949], "conversations": [{"from": "human", "timestamps": 943.0, "value": "Describe the actions of the person at bbox [0.326, 0.121, 0.998, 0.978] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.326, 0.121, 0.998, 0.978]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 942.0s, [0.173, 0.148, 0.943, 0.994]: bend/bow (at the waist), stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 941.0s, [0.193, 0.168, 0.976, 0.957]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 940.0s, [0.238, 0.174, 0.957, 0.922]: bend/bow (at the waist), stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 939.0s, [0.366, 0.229, 0.998, 0.982]: bend/bow (at the waist), carry/hold (an object), take (an object) from (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 938.0s, [0.339, 0.202, 0.99, 0.982]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person).", "At 937.0s, [0.376, 0.062, 0.975, 0.978]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 936.0s, [0.369, 0.182, 0.957, 0.976]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 935.0s, [0.198, 0.079, 0.912, 0.969]: stand, touch (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 949.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.064, 0.109, 0.487, 0.972]: carry/hold (an object), stand, watch (a person); [0.455, 0.214, 0.984, 0.982]: listen to (a person), talk to (e.g., self, a person, a group), watch (a person), bend/bow (at the waist)."}]}, {"video": "HKjR70GCRPE.mp4", "clip": [946, 993], "conversations": [{"from": "human", "timestamps": 976.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.077, 0.348, 0.477, 0.997]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.433, 0.055, 0.998, 0.986]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 993.0, "value": "What actions is the person in bbox [0.216, 0.335, 0.37, 0.573] doing in the current frame, and 
what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.216, 0.335, 0.37, 0.573]: sit, carry/hold (an object).", "At 992.0s, [0.171, 0.354, 0.384, 0.56]: sit, listen to (a person)."]}]}, {"video": "HKjR70GCRPE.mp4", "clip": [1041, 1072], "conversations": [{"from": "human", "timestamps": 1071.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.021, 0.023, 0.939, 0.99]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1072.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.01, 0.09, 0.945, 0.98]?"}, {"from": "gpt", "value": ["At Now, [0.01, 0.09, 0.945, 0.98]: sit, talk to (e.g., self, a person, a group).", "From 1069.0s to 1071.0s, [0.0, 0.171, 0.948, 0.99]->[0.012, 0.084, 0.911, 0.983]->[0.021, 0.023, 0.939, 0.99]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1065.0s to 1068.0s, [0.006, 0.052, 0.996, 0.983]->[0.0, 0.082, 0.928, 0.993]->[0.0, 0.09, 0.929, 0.988]->[0.0, 0.086, 0.934, 0.99]: sit, talk to (e.g., self, a person, a group).", "At 1064.0s, [0.0, 0.106, 0.991, 0.973]: sit.", "At 1063.0s, [0.62, 0.47, 0.971, 0.974]: sit, carry/hold (an object).", "At 1062.0s, [0.597, 0.471, 0.991, 1.0]: stand, carry/hold (an object).", "From 1059.0s to 1061.0s, [0.103, 0.122, 0.663, 0.981]->[0.01, 0.159, 0.719, 0.965]->[0.109, 0.233, 0.924, 0.966]: sit, carry/hold (an object).", "From 1057.0s to 1058.0s, [0.047, 0.087, 0.786, 0.94]->[0.015, 0.11, 0.686, 0.977]: sit, carry/hold (an object), read."]}]}, {"video": "HKjR70GCRPE.mp4", "clip": [1290, 1367], "conversations": [{"from": "human", "timestamps": 1320.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.289, 0.272, 0.831, 0.996]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1367.0, "value": "What actions did the person at bbox [0.077, 0.116, 0.62, 0.989] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.077, 0.116, 0.62, 0.989]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person).", "At 1366.0s, [0.095, 0.186, 0.708, 0.97]: sit, talk to (e.g., self, a person, a group).", "At 1365.0s, [0.131, 0.114, 0.679, 0.992]: sit, listen to (a person)."]}]}, {"video": "HKjR70GCRPE.mp4", "clip": [1361, 1395], "conversations": [{"from": "human", "timestamps": 1391.0, "value": "What actions is the person in bbox [0.592, 0.221, 0.769, 0.781] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.592, 0.221, 0.769, 0.781]: stand.", "At 1390.0s, [0.581, 0.207, 0.841, 0.978]: stand, listen to (a person).", "From 1387.0s to 1389.0s, [0.54, 0.203, 0.773, 0.977]->[0.563, 0.22, 0.785, 0.967]->[0.553, 0.215, 0.799, 0.98]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1386.0s, [0.543, 0.19, 0.786, 0.981]: stand, talk to (e.g., self, a person, a group).", "At 1385.0s, [0.514, 0.168, 0.725, 0.976]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1384.0s, [0.469, 0.266, 0.747, 0.986]: get up, talk to (e.g., self, a person, a group), watch (a person).", "At 1383.0s, [0.308, 0.087, 0.998, 0.994]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1382.0s, [0.273, 0.086, 0.979, 0.987]: sit, talk to (e.g., self, a person, a 
group)."]}, {"from": "human", "timestamps": 1395.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.597, 0.24, 0.838, 0.912]: carry/hold (an object), stand, watch (a person), listen to (a person)."}]}, {"video": "HKjR70GCRPE.mp4", "clip": [1388, 1494], "conversations": [{"from": "human", "timestamps": 1418.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.033, 0.003, 0.263, 0.98]: carry/hold (an object), stand, watch (a person); [0.215, 0.116, 0.438, 0.944]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.771, 0.252, 0.93, 0.968]: carry/hold (an object), stand, watch (a person)."}, {"from": "human", "timestamps": 1494.0, "value": "For the person at bbox [0.465, 0.07, 0.92, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.465, 0.07, 0.92, 1.0]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1493.0s, [0.466, 0.069, 0.958, 1.0]: stand, touch (an object), listen to (a person), watch (a person).", "From 1490.0s to 1492.0s, [0.565, 0.069, 0.962, 1.0]->[0.558, 0.075, 0.938, 0.98]->[0.486, 0.079, 0.949, 1.0]: stand, listen to (a person), watch (a person).", "From 1488.0s to 1489.0s, [0.523, 0.05, 0.937, 0.991]->[0.508, 0.044, 0.905, 1.0]: stand, hand shake, listen to (a person), watch (a person)."]}]}, {"video": "HKjR70GCRPE.mp4", "clip": [1483, 1552], "conversations": [{"from": "human", "timestamps": 1513.0, "value": "Describe the actions of the person at bbox [0.369, 0.085, 0.941, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.369, 0.085, 0.941, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1509.0s to 1512.0s, [0.365, 0.074, 0.933, 0.988]->[0.369, 0.068, 0.945, 1.0]->[0.311, 0.081, 0.949, 0.992]->[0.379, 0.098, 0.944, 0.987]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1552.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.176, 0.285, 0.351, 0.721]: sit, touch (an object); [0.292, 0.203, 0.719, 0.885]: sit, touch (an object), answer phone; [0.685, 0.37, 0.886, 0.773]: sit, touch (an object)."}]}, {"video": "HKjR70GCRPE.mp4", "clip": [1611, 1666], "conversations": [{"from": "human", "timestamps": 1641.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.192, 0.083, 0.956, 0.979]: stand."}, {"from": "human", "timestamps": 1666.0, "value": "Describe the actions of the person at bbox [0.076, 0.109, 0.467, 0.974] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.076, 0.109, 0.467, 0.974]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1663.0s to 1665.0s, [0.055, 0.03, 0.459, 0.986]->[0.07, 0.068, 0.449, 0.984]->[0.105, 0.093, 0.523, 0.976]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1662.0s, [0.0, 0.08, 0.495, 1.0]: stand, carry/hold (an object), listen to (a person).", "At 1661.0s, [0.089, 0.088, 0.425, 0.997]: stand, lift/pick up, listen to (a person).", "At 1660.0s, [0.063, 0.155, 0.414, 0.981]: walk, listen to (a person)."]}]}, {"video": "HKjR70GCRPE.mp4", "clip": [1625, 1712], 
"conversations": [{"from": "human", "timestamps": 1655.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.019, 0.453, 0.107, 0.786]: sit; [0.094, 0.34, 0.2, 0.79]: watch (a person), stand; [0.21, 0.248, 0.47, 0.986]: carry/hold (an object), stand, listen to (a person); [0.576, 0.208, 0.947, 0.744]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1712.0, "value": "What actions did the person at bbox [0.154, 0.111, 0.987, 0.982] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.154, 0.111, 0.987, 0.982]: stand, listen to (a person), watch (a person).", "From 1709.0s to 1711.0s, [0.138, 0.105, 0.973, 0.983]->[0.17, 0.119, 0.986, 0.976]->[0.183, 0.112, 0.963, 0.973]: stand, listen to (a person), watch (a person).", "At 1708.0s, [0.178, 0.121, 0.97, 0.976]: stand, listen to (a person)."]}]}, {"video": "HKjR70GCRPE.mp4", "clip": [1686, 1725], "conversations": [{"from": "human", "timestamps": 1716.0, "value": "Describe the actions of the person at bbox [0.098, 0.229, 0.341, 0.694] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.098, 0.229, 0.341, 0.694]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1715.0s, [0.0, 0.23, 0.264, 0.732]: walk, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1725.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.066, 0.401, 0.165, 0.574]: sit; [0.097, 0.241, 0.502, 0.709]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.45, 0.272, 0.521, 0.63]: watch (a person), stand; [0.644, 0.173, 0.977, 0.972]: carry/hold (an object), stand, listen to (a person); [0.661, 0.205, 0.735, 0.525]: stand."}]}, {"video": "HKjR70GCRPE.mp4", "clip": [1720, 1787], "conversations": [{"from": "human", "timestamps": 1750.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.01, 0.086, 0.725, 1.0]: watch (a person), stand, eat, talk to (e.g., self, a person, a group); [0.792, 0.378, 0.998, 1.0]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1787.0, "value": "Describe the actions of the person at bbox [0.806, 0.241, 0.926, 0.655] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.806, 0.241, 0.926, 0.655]: walk.", "From 1783.0s to 1786.0s, [0.21, 0.285, 0.396, 0.899]->[0.156, 0.294, 0.302, 0.899]->[0.074, 0.304, 0.214, 0.888]->[0.021, 0.309, 0.126, 0.793]: walk."]}]}, {"video": "HTYT2vF-j_w.mkv", "clip": [886, 997], "conversations": [{"from": "human", "timestamps": 916.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.0, 0.558, 0.996]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 997.0, "value": "For the person at bbox [0.062, 0.099, 0.516, 0.988], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.062, 0.099, 0.516, 0.988]: stand, listen to (a person).", "From 994.0s to 996.0s, [0.058, 0.123, 0.57, 0.976]->[0.011, 0.076, 0.496, 0.993]->[0.069, 0.043, 0.508, 0.996]: stand.", "At 993.0s, 
[0.111, 0.081, 0.647, 0.993]: walk, talk to (e.g., self, a person, a group).", "At 992.0s, [0.041, 0.009, 0.639, 0.978]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 991.0s, [0.415, 0.046, 0.966, 0.976]: bend/bow (at the waist)."]}]}, {"video": "HTYT2vF-j_w.mkv", "clip": [1004, 1056], "conversations": [{"from": "human", "timestamps": 1034.0, "value": "What actions did the person at bbox [0.19, 0.014, 0.744, 0.985] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.19, 0.014, 0.744, 0.985]: stand, touch (an object).", "At 1033.0s, [0.243, 0.026, 0.773, 0.981]: stand, touch (an object).", "At 1032.0s, [0.12, 0.043, 0.609, 0.967]: walk, touch (an object).", "At 1031.0s, [0.117, 0.026, 0.687, 0.971]: walk."]}, {"from": "human", "timestamps": 1056.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.051, 0.059, 0.997, 0.927]: read, sit."}]}, {"video": "HTYT2vF-j_w.mkv", "clip": [1127, 1165], "conversations": [{"from": "human", "timestamps": 1157.0, "value": "What actions did the person at bbox [0.613, 0.423, 0.842, 0.994] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.613, 0.423, 0.842, 0.994]: walk, talk to (e.g., self, a person, a group).", "At 1156.0s, [0.608, 0.245, 0.75, 0.795]: walk, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1165.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.062, 0.064, 0.603, 0.759]: watch (a person), sit, touch (an object), talk to (e.g., self, a person, a group)."}]}, {"video": "HTYT2vF-j_w.mkv", "clip": [1169, 1215], "conversations": [{"from": "human", "timestamps": 1199.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.199, 0.0, 0.996, 0.996]: talk to (e.g., self, a person, a group), touch (an object), bend/bow (at the waist)."}, {"from": "human", "timestamps": 1215.0, "value": "What actions did the person at bbox [0.323, 0.094, 0.905, 0.965] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.323, 0.094, 0.905, 0.965]: stand, carry/hold (an object), put down.", "At 1214.0s, [0.361, 0.0, 0.983, 0.977]: stand, carry/hold (an object), watch (a person).", "At 1213.0s, [0.105, 0.031, 0.999, 0.981]: bend/bow (at the waist), carry/hold (an object), touch (an object), listen to (a person), watch (a person)."]}]}, {"video": "HTYT2vF-j_w.mkv", "clip": [1193, 1244], "conversations": [{"from": "human", "timestamps": 1223.0, "value": "For the person at bbox [0.138, 0.06, 0.991, 0.996], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.138, 0.06, 0.991, 0.996]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1222.0s, [0.032, 0.124, 0.996, 0.994]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1244.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.034, 0.0, 0.883, 0.98]: stand, talk to (e.g., self, a person, a group); [0.637, 0.135, 0.985, 0.993]: listen to (a person), stand, watch (a person), drink."}]}, {"video": "HTYT2vF-j_w.mkv", "clip": [1470, 1507], "conversations": [{"from": "human", "timestamps": 1500.0, "value": "Describe the 
locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.17, 0.029, 0.962, 0.998]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1507.0, "value": "What actions is the person in bbox [0.046, 0.0, 0.894, 0.995] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.046, 0.0, 0.894, 0.995]: carry/hold (an object).", "At 1506.0s, [0.016, 0.027, 0.591, 0.978]: sit.", "At 1505.0s, [0.045, 0.02, 0.899, 0.966]: sit, watch (a person)."]}]}, {"video": "HTYT2vF-j_w.mkv", "clip": [1628, 1736], "conversations": [{"from": "human", "timestamps": 1658.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.034, 0.068, 0.59, 0.978]: get up."}, {"from": "human", "timestamps": 1736.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.304, 0.124, 0.759, 0.976]?"}, {"from": "gpt", "value": ["In the current frame, [0.304, 0.124, 0.759, 0.976]: walk.", "From 1733.0s to 1735.0s, [0.08, 0.0, 0.557, 0.976]->[0.041, 0.115, 0.728, 0.971]->[0.606, 0.026, 0.99, 0.996]: walk, talk to (e.g., self, a person, a group).", "At 1732.0s, [0.261, 0.043, 0.839, 1.0]: walk."]}]}, {"video": "HTYT2vF-j_w.mkv", "clip": [1663, 1717], "conversations": [{"from": "human", "timestamps": 1693.0, "value": "For the person at bbox [0.0, 0.023, 0.788, 0.987], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.023, 0.788, 0.987]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1692.0s, [0.002, 0.019, 0.698, 1.0]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1691.0s, [0.04, 0.015, 0.784, 0.981]: stand, touch (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1717.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.01, 0.026, 0.585, 0.979]: stand, touch (an object)."}]}, {"video": "HV0H6oc4Kvs.mkv", "clip": [873, 931], "conversations": [{"from": "human", "timestamps": 903.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.303, 0.105, 0.464, 0.697]: watch (a person), talk to (e.g., self, a person, a group), grab (a person), bend/bow (at the waist); [0.344, 0.203, 0.792, 0.948]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 931.0, "value": "Describe the actions of the person at bbox [0.294, 0.159, 0.44, 0.549] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.294, 0.159, 0.44, 0.549]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 930.0s, [0.37, 0.144, 0.497, 0.553]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 929.0s, [0.366, 0.148, 0.498, 0.552]: stand, listen to (a person), watch (a person).", "At 928.0s, [0.389, 0.147, 0.501, 0.545]: stand, listen to (a person).", "At 927.0s, [0.375, 0.155, 0.486, 0.553]: stand, listen to (a person), watch (a person).", "At 926.0s, [0.372, 0.29, 0.517, 0.54]: bend/bow (at the waist), listen to (a person), watch (a person)."]}]}, {"video": "HV0H6oc4Kvs.mkv", "clip": [917, 982], "conversations": [{"from": "human", 
"timestamps": 947.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.413, 0.251, 0.577, 0.995]: listen to (a person), walk; [0.56, 0.194, 0.767, 0.989]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 982.0, "value": "What actions did the person at bbox [0.056, 0.33, 0.506, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.056, 0.33, 0.506, 1.0]: sit, touch (an object), listen to (a person).", "At 981.0s, [0.101, 0.264, 0.54, 0.994]: sit, listen to (a person).", "At 980.0s, [0.075, 0.158, 0.604, 0.995]: sit, touch (an object), listen to (a person), watch (a person)."]}]}, {"video": "HV0H6oc4Kvs.mkv", "clip": [933, 966], "conversations": [{"from": "human", "timestamps": 963.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.235, 0.231, 0.512, 0.987]: listen to (a person), walk; [0.453, 0.215, 0.709, 1.0]: watch (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 966.0, "value": "Describe the actions of the person at bbox [0.463, 0.187, 0.697, 0.987] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.463, 0.187, 0.697, 0.987]: walk, listen to (a person), watch (a person).", "From 961.0s to 965.0s, [0.514, 0.193, 0.76, 0.995]->[0.444, 0.189, 0.657, 0.99]->[0.453, 0.215, 0.709, 1.0]->[0.429, 0.177, 0.715, 0.998]->[0.479, 0.173, 0.711, 0.989]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 960.0s, [0.518, 0.167, 0.768, 0.986]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 958.0s to 959.0s, [0.485, 0.144, 0.705, 1.0]->[0.501, 0.153, 0.758, 0.979]: walk, listen to (a person), watch (a person).", "At 957.0s, [0.49, 0.193, 0.71, 0.991]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 956.0s, [0.504, 0.185, 0.746, 0.984]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 955.0s, [0.493, 0.201, 0.717, 0.995]: walk, listen to (a person).", "At 954.0s, [0.516, 0.248, 0.714, 0.992]: walk, listen to (a person), watch (a person).", "From 951.0s to 953.0s, [0.489, 0.204, 0.707, 0.999]->[0.562, 0.313, 0.792, 1.0]->[0.549, 0.281, 0.762, 0.995]: walk, listen to (a person).", "From 946.0s to 950.0s, [0.588, 0.186, 0.761, 0.988]->[0.56, 0.194, 0.767, 0.989]->[0.574, 0.175, 0.76, 0.991]->[0.508, 0.217, 0.804, 1.0]->[0.497, 0.211, 0.714, 0.986]: walk, talk to (e.g., self, a person, a group).", "At 945.0s, [0.596, 0.2, 0.752, 0.986]: walk, listen to (a person).", "At 944.0s, [0.602, 0.201, 0.75, 0.969]: walk, listen to (a person), watch (a person).", "From 939.0s to 943.0s, [0.478, 0.211, 0.634, 0.801]->[0.506, 0.149, 0.651, 0.812]->[0.492, 0.149, 0.658, 0.862]->[0.527, 0.165, 0.668, 0.913]->[0.583, 0.219, 0.723, 0.95]: walk, listen to (a person).", "From 936.0s to 938.0s, [0.462, 0.229, 0.57, 0.744]->[0.48, 0.19, 0.595, 0.77]->[0.46, 0.187, 0.581, 0.789]: walk, talk to (e.g., self, a person, a group).", "From 934.0s to 935.0s, [0.224, 0.235, 0.333, 0.755]->[0.323, 0.23, 0.433, 0.727]: walk, listen to (a person).", "At 933.0s, [0.232, 0.206, 0.364, 0.787]: walk, listen to (a person), watch (a person).", "At 932.0s, [0.258, 0.17, 0.385, 0.693]: walk, listen to (a person).", "At 931.0s, [0.294, 0.159, 0.44, 0.549]: walk, listen to (a person), talk to (e.g., self, a person, a 
group), watch (a person).", "At 930.0s, [0.37, 0.144, 0.497, 0.553]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 929.0s, [0.366, 0.148, 0.498, 0.552]: stand, listen to (a person), watch (a person).", "At 928.0s, [0.389, 0.147, 0.501, 0.545]: stand, listen to (a person).", "At 927.0s, [0.375, 0.155, 0.486, 0.553]: stand, listen to (a person), watch (a person).", "At 926.0s, [0.372, 0.29, 0.517, 0.54]: bend/bow (at the waist), listen to (a person), watch (a person)."]}]}, {"video": "HV0H6oc4Kvs.mkv", "clip": [994, 1025], "conversations": [{"from": "human", "timestamps": 1024.0, "value": "For the person at bbox [0.097, 0.182, 0.393, 0.928], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.097, 0.182, 0.393, 0.928]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1023.0s, [0.1, 0.188, 0.43, 0.935]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1022.0s, [0.156, 0.205, 0.417, 0.919]: stand, listen to (a person), watch (a person).", "At 1021.0s, [0.137, 0.202, 0.418, 0.922]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1020.0s, [0.143, 0.192, 0.411, 0.896]: stand, talk to (e.g., self, a person, a group).", "At 1019.0s, [0.142, 0.185, 0.413, 0.927]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1018.0s, [0.131, 0.187, 0.411, 0.921]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1017.0s, [0.093, 0.172, 0.406, 0.936]: stand, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "At 1016.0s, [0.102, 0.179, 0.426, 0.949]: stand, touch (an object), listen to (a person), watch (a person).", "At 1015.0s, [0.093, 0.171, 0.419, 0.946]: stand, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "At 1014.0s, [0.096, 0.178, 0.422, 0.95]: stand, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1012.0s to 1013.0s, [0.142, 0.208, 0.475, 0.924]->[0.138, 0.212, 0.436, 0.919]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1011.0s, [0.102, 0.17, 0.459, 0.931]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1008.0s to 1010.0s, [0.122, 0.19, 0.449, 0.927]->[0.136, 0.163, 0.413, 0.876]->[0.092, 0.156, 0.433, 0.917]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1007.0s, [0.116, 0.207, 0.415, 0.985]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1005.0s to 1006.0s, [0.087, 0.143, 0.419, 0.97]->[0.058, 0.164, 0.38, 0.984]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1003.0s to 1004.0s, [0.13, 0.148, 0.477, 0.985]->[0.121, 0.149, 0.474, 0.974]: stand, touch (an object), listen to (a person), watch (a person).", "At 1002.0s, [0.115, 0.157, 0.35, 0.983]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1001.0s, [0.109, 0.18, 0.445, 0.963]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1000.0s, [0.097, 0.16, 0.461, 0.979]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 999.0s, [0.067, 0.152, 0.361, 0.99]: stand, touch (an object), talk to (e.g., self, a person, a 
group).", "At 998.0s, [0.067, 0.1, 0.343, 0.96]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 997.0s, [0.097, 0.112, 0.43, 0.93]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 996.0s, [0.077, 0.089, 0.379, 0.826]: stand, talk to (e.g., self, a person, a group).", "From 994.0s to 995.0s, [0.06, 0.094, 0.425, 0.945]->[0.074, 0.082, 0.461, 0.907]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 993.0s, [0.053, 0.076, 0.419, 0.937]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 988.0s to 992.0s, [0.326, 0.077, 0.56, 0.811]->[0.138, 0.278, 0.4, 0.975]->[0.019, 0.238, 0.286, 0.999]->[0.06, 0.157, 0.41, 0.974]->[0.096, 0.077, 0.41, 0.948]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 987.0s, [0.346, 0.072, 0.566, 0.802]: stand, carry/hold (an object), touch (an object), listen to (a person)."]}, {"from": "human", "timestamps": 1025.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.089, 0.178, 0.397, 0.935]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.517, 0.394, 0.864, 0.97]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "HV0H6oc4Kvs.mkv", "clip": [1002, 1131], "conversations": [{"from": "human", "timestamps": 1032.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.03, 0.033, 0.847, 0.991]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1131.0, "value": "What actions is the person in bbox [0.157, 0.049, 0.76, 0.967] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.157, 0.049, 0.76, 0.967]: stand, talk to (e.g., self, a person, a group).", "From 1129.0s to 1130.0s, [0.148, 0.043, 0.738, 0.977]->[0.151, 0.054, 0.732, 0.975]: stand, talk to (e.g., self, a person, a group).", "At 1128.0s, [0.153, 0.041, 0.741, 0.977]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1127.0s, [0.148, 0.035, 0.757, 0.979]: stand, listen to (a person), talk to (e.g., self, a person, a group)."]}]}, {"video": "HV0H6oc4Kvs.mkv", "clip": [1067, 1116], "conversations": [{"from": "human", "timestamps": 1097.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.145, 0.193, 0.492, 0.972]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.468, 0.087, 0.907, 0.972]: watch (a person), stand, smoke, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1116.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.28, 0.083, 0.915, 0.989]?"}, {"from": "gpt", "value": ["In the current frame, [0.28, 0.083, 0.915, 0.989]: stand, carry/hold (an object), smoke, talk to (e.g., self, a person, a group), watch (a person).", "At 1115.0s, [0.234, 0.052, 0.948, 0.966]: stand, dress/put on clothing, smoke, talk to (e.g., self, a person, a group)."]}]}, {"video": "HV0H6oc4Kvs.mkv", "clip": [1114, 1176], "conversations": [{"from": "human", "timestamps": 1144.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.329, 0.091, 
0.971, 0.98]: carry/hold (an object), stand, listen to (a person)."}, {"from": "human", "timestamps": 1176.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.529, 0.112, 0.975, 0.982]?"}, {"from": "gpt", "value": ["In the current frame, [0.529, 0.112, 0.975, 0.982]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1175.0s, [0.561, 0.14, 0.98, 0.986]: stand, carry/hold (an object), listen to (a person).", "At 1174.0s, [0.536, 0.107, 0.981, 1.0]: stand.", "At 1173.0s, [0.525, 0.131, 0.981, 0.981]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1171.0s to 1172.0s, [0.536, 0.114, 0.973, 0.97]->[0.514, 0.101, 0.972, 0.979]: stand, listen to (a person), watch (a person).", "From 1168.0s to 1170.0s, [0.565, 0.128, 0.981, 0.973]->[0.533, 0.086, 0.978, 0.977]->[0.534, 0.104, 0.98, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1167.0s, [0.553, 0.105, 0.981, 0.976]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1166.0s, [0.604, 0.191, 0.911, 0.995]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1165.0s, [0.711, 0.112, 0.975, 0.983]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1164.0s, [0.453, 0.026, 0.977, 0.995]: walk, watch (a person).", "At 1163.0s, [0.15, 0.0, 0.966, 0.956]: walk."]}]}, {"video": "HV0H6oc4Kvs.mkv", "clip": [1254, 1286], "conversations": [{"from": "human", "timestamps": 1284.0, "value": "Describe the actions of the person at bbox [0.239, 0.164, 0.678, 0.988] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.239, 0.164, 0.678, 0.988]: stand, answer phone, carry/hold (an object).", "At 1283.0s, [0.243, 0.143, 0.665, 0.989]: stand, answer phone, carry/hold (an object).", "From 1281.0s to 1282.0s, [0.193, 0.119, 0.641, 0.991]->[0.255, 0.135, 0.65, 0.996]: stand, answer phone.", "From 1276.0s to 1280.0s, [0.181, 0.074, 0.608, 0.985]->[0.205, 0.094, 0.602, 0.987]->[0.222, 0.108, 0.608, 0.985]->[0.184, 0.121, 0.609, 0.976]->[0.238, 0.127, 0.634, 0.995]: stand, answer phone, carry/hold (an object).", "At 1275.0s, [0.115, 0.094, 0.715, 0.972]: sit, stand, answer phone.", "At 1274.0s, [0.124, 0.075, 0.725, 0.978]: stand, answer phone."]}, {"from": "human", "timestamps": 1286.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.169, 0.143, 0.709, 0.992]: carry/hold (an object), sit, answer phone."}]}, {"video": "HV0H6oc4Kvs.mkv", "clip": [1258, 1360], "conversations": [{"from": "human", "timestamps": 1288.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.212, 0.122, 0.725, 0.992]: carry/hold (an object), stand, answer phone."}, {"from": "human", "timestamps": 1360.0, "value": "Describe the actions of the person at bbox [0.723, 0.282, 0.901, 0.982] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.723, 0.282, 0.901, 0.982]: walk, carry/hold (an object), watch (a person).", "At 1359.0s, [0.738, 0.287, 0.953, 0.978]: walk, carry/hold (an object), watch (a person).", "At 1358.0s, [0.673, 0.269, 0.947, 0.985]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1357.0s, [0.644, 0.279, 0.916, 0.979]: stand, carry/hold (an object), listen to (a person), take (an object) from (a person), watch (a person).", "At 
1356.0s, [0.69, 0.295, 0.922, 0.994]: stand, listen to (a person).", "At 1355.0s, [0.646, 0.319, 0.942, 0.977]: stand, carry/hold (an object), give/serve (an object) to (a person), listen to (a person), watch (a person).", "From 1353.0s to 1354.0s, [0.748, 0.263, 0.976, 1.0]->[0.714, 0.303, 0.957, 0.997]: stand, carry/hold (an object), watch (a person)."]}]}, {"video": "HV0H6oc4Kvs.mkv", "clip": [1315, 1347], "conversations": [{"from": "human", "timestamps": 1345.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.268, 0.047, 0.907, 0.984]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1347.0, "value": "What actions is the person in bbox [0.241, 0.058, 0.896, 0.99] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.241, 0.058, 0.896, 0.99]: stand, talk to (e.g., self, a person, a group).", "From 1344.0s to 1346.0s, [0.292, 0.061, 0.921, 0.985]->[0.268, 0.047, 0.907, 0.984]->[0.276, 0.05, 0.883, 0.985]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "HV0H6oc4Kvs.mkv", "clip": [1350, 1399], "conversations": [{"from": "human", "timestamps": 1380.0, "value": "What actions did the person at bbox [0.117, 0.098, 0.724, 0.989] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.117, 0.098, 0.724, 0.989]: stand, talk to (e.g., self, a person, a group).", "From 1377.0s to 1379.0s, [0.229, 0.077, 0.786, 0.979]->[0.136, 0.078, 0.765, 0.994]->[0.215, 0.089, 0.699, 0.982]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1399.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.07, 0.085, 0.713, 0.997]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "HV0H6oc4Kvs.mkv", "clip": [1373, 1482], "conversations": [{"from": "human", "timestamps": 1403.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.301, 0.093, 0.769, 0.984]: listen to (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1482.0, "value": "What actions is the person in bbox [0.442, 0.183, 0.713, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.442, 0.183, 0.713, 1.0]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1481.0s, [0.384, 0.042, 0.707, 0.987]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1480.0s, [0.448, 0.109, 0.773, 0.99]: sit, smoke, talk to (e.g., self, a person, a group), watch (a person).", "At 1479.0s, [0.49, 0.085, 0.814, 1.0]: sit, smoke, talk to (e.g., self, a person, a group).", "At 1478.0s, [0.501, 0.114, 0.855, 1.0]: sit, smoke, listen to (a person).", "At 1477.0s, [0.484, 0.132, 0.935, 0.997]: sit, listen to (a person).", "At 1476.0s, [0.516, 0.091, 0.981, 0.991]: sit, listen to (a person), watch (a person).", "At 1475.0s, [0.508, 0.066, 0.976, 0.994]: sit, hug (a person), listen to (a person), watch (a person).", "At 1474.0s, [0.491, 0.076, 0.981, 0.989]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1473.0s, [0.552, 0.002, 0.977, 0.99]: sit, listen to (a person), watch (a person).", "At 1472.0s, [0.62, 0.002, 0.978, 0.993]: stand, listen to (a person), 
watch (a person).", "At 1471.0s, [0.526, 0.0, 0.946, 0.949]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1470.0s, [0.458, 0.005, 0.98, 0.995]: sit, stand, grab (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1469.0s, [0.463, 0.03, 0.981, 0.96]: sit, hug (a person), kiss (a person), watch (a person).", "At 1468.0s, [0.523, 0.03, 0.981, 0.98]: sit, kiss (a person), listen to (a person).", "At 1467.0s, [0.44, 0.01, 0.978, 0.995]: stand, kiss (a person)."]}]}, {"video": "HV0H6oc4Kvs.mkv", "clip": [1416, 1473], "conversations": [{"from": "human", "timestamps": 1446.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.254, 0.102, 0.559, 0.983]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.544, 0.264, 0.781, 0.995]: watch (a person), listen to (a person), carry/hold (an object), stand, take (an object) from (a person)."}, {"from": "human", "timestamps": 1473.0, "value": "What actions is the person in bbox [0.036, 0.0, 0.78, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.036, 0.0, 0.78, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1472.0s, [0.075, 0.005, 0.799, 0.994]: sit, talk to (e.g., self, a person, a group).", "At 1471.0s, [0.038, 0.0, 0.696, 0.98]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "HV0H6oc4Kvs.mkv", "clip": [1469, 1540], "conversations": [{"from": "human", "timestamps": 1499.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.022, 0.058, 0.536, 0.971]: sit, talk to (e.g., self, a person, a group); [0.219, 0.064, 0.826, 0.995]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1540.0, "value": "What actions did the person at bbox [0.566, 0.082, 0.962, 0.988] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.566, 0.082, 0.962, 0.988]: stand.", "From 1538.0s to 1539.0s, [0.565, 0.041, 0.981, 0.994]->[0.6, 0.107, 0.981, 0.988]: stand, watch (a person)."]}]}, {"video": "HV0H6oc4Kvs.mkv", "clip": [1590, 1722], "conversations": [{"from": "human", "timestamps": 1620.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.065, 0.072, 0.544, 0.985]: carry/hold (an object), stand, answer phone; [0.692, 0.568, 0.87, 0.995]: sit, drink."}, {"from": "human", "timestamps": 1722.0, "value": "What actions did the person at bbox [0.042, 0.138, 0.396, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.042, 0.138, 0.396, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1717.0s to 1721.0s, [0.07, 0.14, 0.35, 1.0]->[0.056, 0.148, 0.362, 0.986]->[0.056, 0.149, 0.346, 1.0]->[0.053, 0.148, 0.351, 0.991]->[0.043, 0.16, 0.349, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1715.0s to 1716.0s, [0.019, 0.151, 0.331, 0.995]->[0.022, 0.105, 0.334, 0.979]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "HV0H6oc4Kvs.mkv", "clip": [1730, 1797], "conversations": [{"from": "human", "timestamps": 1760.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.413, 0.084, 
0.966, 0.969]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1797.0, "value": "Describe the actions of the person at bbox [0.466, 0.052, 0.981, 0.981] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.466, 0.052, 0.981, 0.981]: stand, talk to (e.g., self, a person, a group).", "At 1796.0s, [0.481, 0.032, 0.981, 0.984]: stand, talk to (e.g., self, a person, a group).", "At 1795.0s, [0.403, 0.021, 0.98, 0.983]: stand, listen to (a person), talk to (e.g., self, a person, a group)."]}]}, {"video": "HVAmkvLrthQ.mkv", "clip": [885, 928], "conversations": [{"from": "human", "timestamps": 915.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.0, 0.333, 0.075, 0.793]: carry/hold (an object), stand; [0.055, 0.507, 0.371, 0.777]: watch (a person), stand; [0.362, 0.219, 0.784, 0.797]: watch (a person), stand."}, {"from": "human", "timestamps": 928.0, "value": "What actions is the person in bbox [0.686, 0.392, 0.733, 0.649] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.686, 0.392, 0.733, 0.649]: stand, listen to (a person), watch (a person).", "At 927.0s, [0.679, 0.392, 0.735, 0.649]: stand, carry/hold (an object), watch (a person).", "At 926.0s, [0.68, 0.395, 0.737, 0.65]: stand, watch (a person)."]}]}, {"video": "HVAmkvLrthQ.mkv", "clip": [1000, 1031], "conversations": [{"from": "human", "timestamps": 1030.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.282, 0.235, 0.558, 0.821]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.559, 0.229, 0.854, 0.804]: carry/hold (an object), stand, listen to (a person)."}, {"from": "human", "timestamps": 1031.0, "value": "For the person at bbox [0.284, 0.237, 0.56, 0.824], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.284, 0.237, 0.56, 0.824]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1030.0s, [0.282, 0.235, 0.558, 0.821]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1029.0s, [0.276, 0.222, 0.56, 0.791]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "HVAmkvLrthQ.mkv", "clip": [1090, 1134], "conversations": [{"from": "human", "timestamps": 1120.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.253, 0.229, 0.824, 0.783]: listen to (a person), sit."}, {"from": "human", "timestamps": 1134.0, "value": "What actions did the person at bbox [0.574, 0.277, 0.852, 0.786] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.574, 0.277, 0.852, 0.786]: walk, carry/hold (an object).", "At 1133.0s, [0.456, 0.234, 0.838, 0.799]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "HVAmkvLrthQ.mkv", "clip": [1122, 1156], "conversations": [{"from": "human", "timestamps": 1152.0, "value": "What actions did the person at bbox [0.001, 0.237, 0.265, 0.76] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.001, 0.237, 0.265, 0.76]: stand, listen to (a person), watch (a person).", "From 1144.0s to 1151.0s, [0.032, 0.295, 0.353, 0.79]->[0.044, 0.252, 0.367, 0.79]->[0.003, 0.237, 0.226, 0.767]->[0.005, 0.237, 0.249, 
0.774]->[0.01, 0.229, 0.225, 0.773]->[0.003, 0.239, 0.245, 0.776]->[0.064, 0.238, 0.372, 0.798]->[0.044, 0.235, 0.375, 0.796]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1156.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.004, 0.215, 0.367, 0.793]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.4, 0.208, 0.754, 0.792]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.716, 0.209, 0.998, 0.774]: listen to (a person), stand, watch (a person)."}]}, {"video": "HVAmkvLrthQ.mkv", "clip": [1212, 1251], "conversations": [{"from": "human", "timestamps": 1242.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.443, 0.28, 0.612, 0.78]: carry/hold (an object), watch (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1251.0, "value": "For the person at bbox [0.524, 0.282, 0.742, 0.79], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.524, 0.282, 0.742, 0.79]: stand, listen to (a person).", "From 1247.0s to 1250.0s, [0.497, 0.283, 0.723, 0.793]->[0.505, 0.279, 0.721, 0.794]->[0.512, 0.267, 0.717, 0.79]->[0.522, 0.276, 0.73, 0.785]: stand, listen to (a person), watch (a person)."]}]}, {"video": "HVAmkvLrthQ.mkv", "clip": [1233, 1374], "conversations": [{"from": "human", "timestamps": 1263.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.306, 0.239, 0.538, 0.788]: listen to (a person), stand, watch (a person); [0.617, 0.223, 0.889, 0.779]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1374.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.008, 0.203, 0.743, 0.794]?"}, {"from": "gpt", "value": ["In the current frame, [0.008, 0.203, 0.743, 0.794]: stand.", "From 1372.0s to 1373.0s, [0.0, 0.204, 0.703, 0.782]->[0.013, 0.205, 0.739, 0.792]: stand, touch (an object).", "At 1371.0s, [0.0, 0.197, 0.75, 0.783]: stand.", "From 1368.0s to 1370.0s, [0.499, 0.214, 0.722, 0.79]->[0.492, 0.221, 0.728, 0.786]->[0.493, 0.239, 0.73, 0.786]: stand, carry/hold (an object)."]}]}, {"video": "HVAmkvLrthQ.mkv", "clip": [1325, 1368], "conversations": [{"from": "human", "timestamps": 1355.0, "value": "For the person at bbox [0.259, 0.206, 0.712, 0.77], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.259, 0.206, 0.712, 0.77]: stand, talk to (e.g., self, a person, a group).", "From 1352.0s to 1354.0s, [0.244, 0.214, 0.746, 0.782]->[0.223, 0.224, 0.767, 0.869]->[0.233, 0.229, 0.75, 0.861]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1368.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.499, 0.214, 0.722, 0.79]: carry/hold (an object), stand."}]}, {"video": "HVAmkvLrthQ.mkv", "clip": [1378, 1420], "conversations": [{"from": "human", "timestamps": 1408.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.444, 0.277, 0.705, 0.779]: carry/hold (an object), talk to (e.g., self, a person, a group), walk; 
[0.633, 0.339, 0.718, 0.527]: listen to (a person), stand, watch (a person); [0.655, 0.222, 0.884, 0.764]: carry/hold (an object), listen to (a person), watch (a person), walk."}, {"from": "human", "timestamps": 1420.0, "value": "Describe the actions of the person at bbox [0.29, 0.224, 0.874, 0.788] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.29, 0.224, 0.874, 0.788]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1418.0s to 1419.0s, [0.272, 0.225, 0.878, 0.788]->[0.29, 0.245, 0.88, 0.778]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "HVAmkvLrthQ.mkv", "clip": [1384, 1443], "conversations": [{"from": "human", "timestamps": 1414.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.286, 0.265, 0.891, 0.778]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1443.0, "value": "Describe the actions of the person at bbox [0.625, 0.218, 0.991, 0.781] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.625, 0.218, 0.991, 0.781]: stand, carry/hold (an object), give/serve (an object) to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1442.0s, [0.546, 0.208, 0.977, 0.788]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "HVAmkvLrthQ.mkv", "clip": [1427, 1481], "conversations": [{"from": "human", "timestamps": 1457.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.203, 0.263, 0.301, 0.719]: carry/hold (an object), stand, listen to (a person); [0.213, 0.411, 0.535, 0.792]: listen to (a person), sit, talk to (e.g., self, a person, a group); [0.445, 0.205, 0.77, 0.791]: carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.646, 0.248, 0.809, 0.647]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1481.0, "value": "For the person at bbox [0.118, 0.456, 0.356, 0.799], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.118, 0.456, 0.356, 0.799]: sit, listen to (a person), watch (a person).", "From 1479.0s to 1480.0s, [0.119, 0.456, 0.356, 0.794]->[0.116, 0.457, 0.362, 0.793]: sit, listen to (a person), watch (a person).", "From 1477.0s to 1478.0s, [0.222, 0.23, 0.674, 0.779]->[0.214, 0.227, 0.671, 0.768]: sit, listen to (a person)."]}]}, {"video": "HVAmkvLrthQ.mkv", "clip": [1473, 1515], "conversations": [{"from": "human", "timestamps": 1503.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.35, 0.473, 0.487, 0.749]: carry/hold (an object), get up."}, {"from": "human", "timestamps": 1515.0, "value": "Describe the actions of the person at bbox [0.149, 0.227, 0.276, 0.762] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.149, 0.227, 0.276, 0.762]: stand, watch (a person).", "At 1514.0s, [0.241, 0.245, 0.562, 0.793]: stand, carry/hold (an object).", "At 1513.0s, [0.259, 0.249, 0.585, 0.792]: stand, touch (an object)."]}]}, {"video": "HVAmkvLrthQ.mkv", "clip": [1475, 1571], "conversations": [{"from": "human", "timestamps": 1505.0, "value": "For the current frame, show each individual's position and actions."}, 
{"from": "gpt", "value": "In the current frame, [0.274, 0.272, 0.414, 0.765]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1571.0, "value": "For the person at bbox [0.019, 0.272, 0.388, 0.796], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.019, 0.272, 0.388, 0.796]: stand, listen to (a person).", "From 1568.0s to 1570.0s, [0.0, 0.276, 0.349, 0.797]->[0.009, 0.269, 0.375, 0.792]->[0.011, 0.275, 0.378, 0.798]: stand, listen to (a person), watch (a person)."]}]}, {"video": "HVAmkvLrthQ.mkv", "clip": [1566, 1605], "conversations": [{"from": "human", "timestamps": 1596.0, "value": "What actions is the person in bbox [0.135, 0.354, 0.56, 0.786] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.135, 0.354, 0.56, 0.786]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1595.0s, [0.131, 0.361, 0.559, 0.79]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1594.0s, [0.153, 0.359, 0.565, 0.785]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1593.0s, [0.261, 0.309, 0.581, 0.793]: stand, listen to (a person), watch (a person).", "At 1592.0s, [0.284, 0.322, 0.576, 0.79]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1591.0s, [0.287, 0.379, 0.578, 0.786]: bend/bow (at the waist).", "From 1589.0s to 1590.0s, [0.279, 0.451, 0.58, 0.784]->[0.29, 0.421, 0.586, 0.781]: bend/bow (at the waist), kiss (a person).", "At 1588.0s, [0.104, 0.416, 0.544, 0.785]: bend/bow (at the waist).", "At 1587.0s, [0.104, 0.358, 0.504, 0.781]: stand, watch (a person).", "From 1572.0s to 1586.0s, [0.175, 0.325, 0.489, 0.787]->[0.174, 0.324, 0.505, 0.783]->[0.159, 0.323, 0.519, 0.786]->[0.359, 0.313, 0.472, 0.764]->[0.358, 0.312, 0.474, 0.761]->[0.358, 0.313, 0.474, 0.771]->[0.357, 0.31, 0.476, 0.766]->[0.086, 0.285, 0.387, 0.776]->[0.078, 0.281, 0.398, 0.785]->[0.069, 0.29, 0.379, 0.781]->[0.084, 0.287, 0.404, 0.785]->[0.08, 0.293, 0.383, 0.786]->[0.068, 0.285, 0.379, 0.78]->[0.069, 0.278, 0.383, 0.786]->[0.138, 0.346, 0.53, 0.784]: stand, listen to (a person), watch (a person).", "At 1571.0s, [0.019, 0.272, 0.388, 0.796]: stand, listen to (a person).", "From 1568.0s to 1570.0s, [0.0, 0.276, 0.349, 0.797]->[0.009, 0.269, 0.375, 0.792]->[0.011, 0.275, 0.378, 0.798]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1605.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.113, 0.354, 0.555, 0.788]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.563, 0.212, 0.995, 0.779]: listen to (a person), stand, watch (a person)."}]}, {"video": "HVAmkvLrthQ.mkv", "clip": [1609, 1649], "conversations": [{"from": "human", "timestamps": 1639.0, "value": "What actions did the person at bbox [0.76, 0.303, 1.0, 0.787] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.76, 0.303, 1.0, 0.787]: stand, carry/hold (an object), watch (a person).", "From 1634.0s to 1638.0s, [0.756, 0.302, 0.997, 0.786]->[0.757, 0.309, 0.994, 0.78]->[0.76, 0.303, 0.999, 0.777]->[0.762, 0.304, 0.997, 0.779]->[0.763, 0.305, 1.0, 0.783]: stand, carry/hold (an object), watch (a person).", "At 1633.0s, [0.768, 0.292, 0.996, 0.778]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1630.0s to 
1632.0s, [0.743, 0.3, 0.996, 0.779]->[0.744, 0.303, 0.993, 0.776]->[0.753, 0.294, 0.995, 0.782]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1649.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.243, 0.232, 0.888, 0.783]: watch (a person), walk."}]}, {"video": "HVAmkvLrthQ.mkv", "clip": [1610, 1686], "conversations": [{"from": "human", "timestamps": 1640.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.003, 0.237, 0.227, 0.782]: watch (a person), stand; [0.734, 0.312, 0.998, 0.787]: carry/hold (an object), stand, watch (a person)."}, {"from": "human", "timestamps": 1686.0, "value": "Describe the actions of the person at bbox [0.212, 0.296, 0.484, 0.786] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.212, 0.296, 0.484, 0.786]: walk, carry/hold (an object).", "At 1685.0s, [0.299, 0.274, 0.641, 0.772]: walk, carry/hold (an object).", "At 1684.0s, [0.335, 0.249, 0.686, 0.773]: walk, carry/hold (an object), watch (a person).", "From 1680.0s to 1683.0s, [0.286, 0.307, 0.446, 0.772]->[0.279, 0.3, 0.447, 0.776]->[0.283, 0.302, 0.444, 0.772]->[0.284, 0.307, 0.445, 0.777]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1678.0s to 1679.0s, [0.292, 0.324, 0.415, 0.697]->[0.257, 0.337, 0.434, 0.738]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "HVAmkvLrthQ.mkv", "clip": [1689, 1720], "conversations": [{"from": "human", "timestamps": 1719.0, "value": "What actions did the person at bbox [0.137, 0.208, 0.581, 0.787] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.137, 0.208, 0.581, 0.787]: stand, talk to (e.g., self, a person, a group).", "At 1718.0s, [0.148, 0.215, 0.524, 0.781]: stand, talk to (e.g., self, a person, a group).", "From 1716.0s to 1717.0s, [0.135, 0.207, 0.545, 0.789]->[0.145, 0.207, 0.543, 0.795]: stand.", "From 1711.0s to 1715.0s, [0.144, 0.212, 0.562, 0.783]->[0.143, 0.217, 0.559, 0.792]->[0.131, 0.214, 0.551, 0.794]->[0.146, 0.209, 0.565, 0.788]->[0.133, 0.213, 0.55, 0.786]: stand, talk to (e.g., self, a person, a group).", "From 1708.0s to 1710.0s, [0.3, 0.23, 0.418, 0.732]->[0.276, 0.234, 0.399, 0.738]->[0.284, 0.235, 0.386, 0.741]: stand, carry/hold (an object).", "At 1707.0s, [0.297, 0.228, 0.414, 0.729]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1706.0s, [0.287, 0.228, 0.408, 0.728]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1705.0s, [0.21, 0.216, 0.655, 0.801]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1720.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.134, 0.22, 0.59, 0.787]: stand; [0.553, 0.303, 0.69, 0.771]: stand, touch (an object), talk to (e.g., self, a person, a group)."}]}, {"video": "Hi8QeP_VPu0.mkv", "clip": [912, 965], "conversations": [{"from": "human", "timestamps": 942.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.003, 0.056, 0.646, 0.995]: ride (e.g., a bike, a car, a horse), sit, talk to (e.g., self, a person, a group); 
[0.586, 0.11, 0.988, 0.974]: ride (e.g., a bike, a car, a horse), sit, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 965.0, "value": "What actions did the person at bbox [0.347, 0.419, 0.478, 0.77] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.347, 0.419, 0.478, 0.77]: sit, listen to (a person), watch (a person).", "At 964.0s, [0.35, 0.417, 0.477, 0.724]: sit, listen to (a person), watch (a person).", "At 963.0s, [0.341, 0.42, 0.5, 0.957]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 962.0s, [0.34, 0.414, 0.523, 0.98]: sit, listen to (a person), watch (a person).", "At 961.0s, [0.346, 0.419, 0.526, 0.968]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 960.0s, [0.348, 0.421, 0.526, 0.959]: sit, listen to (a person), watch (a person)."]}]}, {"video": "Hi8QeP_VPu0.mkv", "clip": [977, 1018], "conversations": [{"from": "human", "timestamps": 1007.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.285, 0.254, 0.744, 0.999]: watch (a person), stand; [0.587, 0.0, 1.0, 0.993]: watch (a person), stand."}, {"from": "human", "timestamps": 1018.0, "value": "For the person at bbox [0.068, 0.065, 0.701, 0.987], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.068, 0.065, 0.701, 0.987]: stand, talk to (e.g., self, a person, a group).", "From 1012.0s to 1017.0s, [0.116, 0.084, 0.776, 0.984]->[0.139, 0.102, 0.719, 0.985]->[0.093, 0.096, 0.729, 0.985]->[0.066, 0.079, 0.717, 0.989]->[0.109, 0.07, 0.78, 0.98]->[0.114, 0.12, 0.796, 0.973]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "Hi8QeP_VPu0.mkv", "clip": [992, 1075], "conversations": [{"from": "human", "timestamps": 1022.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.294, 0.082, 0.815, 0.994]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1075.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.262, 0.279, 0.483, 0.989]?"}, {"from": "gpt", "value": ["In the current frame, [0.262, 0.279, 0.483, 0.989]: stand, carry/hold (an object), listen to (a person).", "At 1074.0s, [0.258, 0.277, 0.467, 1.0]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1065.0s to 1073.0s, [0.239, 0.287, 0.433, 0.986]->[0.232, 0.303, 0.466, 0.977]->[0.234, 0.295, 0.466, 0.975]->[0.227, 0.286, 0.401, 0.983]->[0.212, 0.283, 0.443, 0.98]->[0.215, 0.27, 0.473, 0.988]->[0.252, 0.282, 0.479, 0.975]->[0.254, 0.268, 0.471, 0.988]->[0.255, 0.29, 0.48, 0.972]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1064.0s, [0.241, 0.296, 0.482, 0.99]: stand, carry/hold (an object), listen to (a person).", "At 1063.0s, [0.226, 0.293, 0.467, 0.991]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1062.0s, [0.26, 0.3, 0.444, 0.969]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1061.0s, [0.234, 0.295, 0.44, 0.975]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1060.0s, [0.237, 0.288, 0.471, 0.974]: stand, carry/hold (an object), listen to (a person).", "At 1059.0s, [0.222, 0.284, 0.442, 0.975]: stand, carry/hold (an object), put down, listen to (a person).", 
"At 1058.0s, [0.191, 0.212, 0.46, 0.981]: walk, carry/hold (an object), watch (a person).", "At 1057.0s, [0.339, 0.188, 0.636, 0.986]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1055.0s to 1056.0s, [0.376, 0.291, 0.607, 0.988]->[0.272, 0.227, 0.622, 0.98]: walk, carry/hold (an object)."]}]}, {"video": "Hi8QeP_VPu0.mkv", "clip": [1023, 1121], "conversations": [{"from": "human", "timestamps": 1053.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.3, 0.016, 0.992, 0.971]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1121.0, "value": "What actions did the person at bbox [0.183, 0.081, 0.434, 0.931] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.183, 0.081, 0.434, 0.931]: stand, carry/hold (an object), listen to (a person).", "At 1120.0s, [0.192, 0.09, 0.431, 0.811]: stand, carry/hold (an object), listen to (a person).", "At 1119.0s, [0.144, 0.102, 0.657, 0.976]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1118.0s, [0.18, 0.145, 0.639, 0.883]: bend/bow (at the waist), carry/hold (an object), lift/pick up, listen to (a person)."]}]}, {"video": "Hi8QeP_VPu0.mkv", "clip": [1183, 1258], "conversations": [{"from": "human", "timestamps": 1213.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.574, 0.09, 0.836, 0.976]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1258.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.314, 0.195, 0.618, 0.935]?"}, {"from": "gpt", "value": ["At Now, [0.314, 0.195, 0.618, 0.935]: sit, eat.", "From 1256.0s to 1257.0s, [0.307, 0.277, 0.604, 0.97]->[0.318, 0.298, 0.603, 0.978]: sit, carry/hold (an object).", "At 1255.0s, [0.325, 0.275, 0.606, 0.96]: sit, carry/hold (an object), put down.", "From 1253.0s to 1254.0s, [0.317, 0.219, 0.684, 0.96]->[0.321, 0.257, 0.6, 0.951]: sit, carry/hold (an object).", "At 1252.0s, [0.304, 0.263, 0.764, 0.965]: sit, carry/hold (an object), open (e.g., a window, a car door).", "At 1251.0s, [0.307, 0.231, 0.694, 0.978]: sit, open (e.g., a window, a car door).", "At 1250.0s, [0.309, 0.24, 0.711, 0.95]: sit, open (e.g., a window, a car door), touch (an object).", "At 1249.0s, [0.242, 0.254, 0.668, 0.951]: sit, put down.", "At 1248.0s, [0.223, 0.178, 0.634, 0.98]: sit, carry/hold (an object).", "From 1246.0s to 1247.0s, [0.168, 0.261, 0.29, 0.991]->[0.0, 0.324, 0.093, 0.979]: walk, watch (a person).", "At 1245.0s, [0.373, 0.35, 0.508, 0.972]: walk.", "At 1244.0s, [0.443, 0.317, 0.525, 0.932]: stand, watch (a person)."]}]}, {"video": "Hi8QeP_VPu0.mkv", "clip": [1246, 1354], "conversations": [{"from": "human", "timestamps": 1276.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.459, 0.089, 0.582, 0.844]: watch (a person), listen to (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.65, 0.092, 0.758, 0.83]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1354.0, "value": "What actions is the person in bbox [0.121, 0.118, 0.454, 0.945] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, 
[0.121, 0.118, 0.454, 0.945]: sit, carry/hold (an object), listen to (a person).", "From 1352.0s to 1353.0s, [0.078, 0.065, 0.434, 0.982]->[0.135, 0.177, 0.446, 0.942]: carry/hold (an object), crouch/kneel, listen to (a person)."]}]}, {"video": "Hi8QeP_VPu0.mkv", "clip": [1390, 1428], "conversations": [{"from": "human", "timestamps": 1420.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.349, 0.031, 0.994, 0.992]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1428.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.611, 0.425, 0.809, 0.988]?"}, {"from": "gpt", "value": ["In the current frame, [0.611, 0.425, 0.809, 0.988]: sit, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1425.0s to 1427.0s, [0.677, 0.546, 0.923, 0.989]->[0.594, 0.462, 0.862, 0.992]->[0.629, 0.431, 0.845, 0.993]: sit, listen to (a person), watch (a person)."]}]}, {"video": "Hi8QeP_VPu0.mkv", "clip": [1358, 1462], "conversations": [{"from": "human", "timestamps": 1388.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.004, 0.02, 0.534, 1.0]: listen to (a person), stand, watch (a person); [0.459, 0.149, 0.852, 0.989]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1462.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.508, 0.05, 0.993, 0.995]?"}, {"from": "gpt", "value": ["At Now, [0.508, 0.05, 0.993, 0.995]: sit, ride (e.g., a bike, a car, a horse), listen to (a person), watch (a person).", "From 1460.0s to 1461.0s, [0.424, 0.087, 0.896, 0.968]->[0.458, 0.075, 0.947, 0.977]: sit, ride (e.g., a bike, a car, a horse), listen to (a person)."]}]}, {"video": "Hi8QeP_VPu0.mkv", "clip": [1423, 1464], "conversations": [{"from": "human", "timestamps": 1453.0, "value": "Describe the actions of the person at bbox [0.328, 0.217, 0.477, 0.909] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.328, 0.217, 0.477, 0.909]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1452.0s, [0.336, 0.202, 0.481, 0.917]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1448.0s to 1451.0s, [0.345, 0.215, 0.484, 0.897]->[0.328, 0.214, 0.482, 0.937]->[0.33, 0.217, 0.487, 0.935]->[0.34, 0.207, 0.492, 0.924]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1446.0s to 1447.0s, [0.309, 0.243, 0.46, 1.0]->[0.348, 0.221, 0.537, 0.978]: walk, carry/hold (an object), listen to (a person).", "At 1445.0s, [0.296, 0.231, 0.426, 0.983]: walk, carry/hold (an object), grab (a person), listen to (a person), talk to (e.g., self, a person, a group).", "At 1444.0s, [0.274, 0.23, 0.428, 0.969]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1443.0s, [0.269, 0.226, 0.438, 0.958]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1464.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.074, 0.059, 0.566, 0.98]: ride (e.g., a bike, a car, a horse), sit, touch (an object), watch (a 
person), talk to (e.g., self, a person, a group); [0.396, 0.076, 0.96, 0.969]: ride (e.g., a bike, a car, a horse), sit, watch (a person), listen to (a person)."}]}, {"video": "Hi8QeP_VPu0.mkv", "clip": [1506, 1605], "conversations": [{"from": "human", "timestamps": 1536.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.072, 0.089, 0.486, 0.958]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.318, 0.061, 0.807, 0.929]: carry/hold (an object), sit, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1605.0, "value": "For the person at bbox [0.441, 0.363, 0.512, 0.766], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.441, 0.363, 0.512, 0.766]: walk, carry/hold (an object).", "At 1604.0s, [0.444, 0.365, 0.499, 0.721]: walk, carry/hold (an object).", "From 1602.0s to 1603.0s, [0.436, 0.363, 0.5, 0.73]->[0.432, 0.352, 0.492, 0.721]: walk, open (e.g., a window, a car door)."]}]}, {"video": "Hi8QeP_VPu0.mkv", "clip": [1547, 1613], "conversations": [{"from": "human", "timestamps": 1577.0, "value": "What actions is the person in bbox [0.46, 0.349, 0.591, 0.922] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.46, 0.349, 0.591, 0.922]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1575.0s to 1576.0s, [0.478, 0.334, 0.606, 0.968]->[0.487, 0.328, 0.583, 0.874]: walk, carry/hold (an object), grab (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1574.0s, [0.428, 0.348, 0.548, 0.94]: stand, carry/hold (an object), grab (a person), talk to (e.g., self, a person, a group).", "At 1573.0s, [0.414, 0.335, 0.535, 0.822]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1570.0s to 1572.0s, [0.426, 0.337, 0.545, 0.698]->[0.424, 0.338, 0.545, 0.861]->[0.423, 0.326, 0.538, 0.863]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1569.0s, [0.398, 0.351, 0.537, 0.974]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1568.0s, [0.383, 0.357, 0.515, 0.946]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1566.0s to 1567.0s, [0.354, 0.383, 0.483, 0.857]->[0.377, 0.359, 0.503, 0.817]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1565.0s, [0.363, 0.152, 0.835, 0.97]: sit, carry/hold (an object), eat, listen to (a person)."]}, {"from": "human", "timestamps": 1613.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.355, 0.357, 0.423, 0.657]: carry/hold (an object), stand, listen to (a person); [0.463, 0.348, 0.559, 0.808]: carry/hold (an object), talk to (e.g., self, a person, a group), walk."}]}, {"video": "Hi8QeP_VPu0.mkv", "clip": [1596, 1647], "conversations": [{"from": "human", "timestamps": 1626.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.216, 0.157, 0.489, 0.984]: listen to (a person), stand; [0.533, 0.067, 0.976, 0.981]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 
1647.0, "value": "Describe the actions of the person at bbox [0.15, 0.105, 0.742, 0.972] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.15, 0.105, 0.742, 0.972]: walk, carry/hold (an object).", "From 1645.0s to 1646.0s, [0.262, 0.148, 0.724, 0.963]->[0.179, 0.126, 0.64, 0.973]: stand, close (e.g., a door, a box).", "At 1644.0s, [0.42, 0.12, 0.897, 0.97]: stand, close (e.g., a door, a box), talk to (e.g., self, a person, a group).", "At 1643.0s, [0.459, 0.111, 0.886, 0.974]: stand, close (e.g., a door, a box).", "At 1642.0s, [0.458, 0.154, 0.685, 0.995]: stand, carry/hold (an object)."]}]}, {"video": "Hi8QeP_VPu0.mkv", "clip": [1620, 1710], "conversations": [{"from": "human", "timestamps": 1650.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.214, 0.16, 0.621, 0.978]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1710.0, "value": "What actions did the person at bbox [0.417, 0.027, 0.969, 0.985] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.417, 0.027, 0.969, 0.985]: stand, talk to (e.g., self, a person, a group).", "From 1707.0s to 1709.0s, [0.473, 0.006, 0.996, 0.973]->[0.451, 0.009, 0.954, 0.971]->[0.527, 0.0, 0.981, 0.972]: stand, talk to (e.g., self, a person, a group).", "From 1703.0s to 1706.0s, [0.358, 0.0, 0.969, 0.992]->[0.544, 0.029, 0.994, 0.968]->[0.404, 0.033, 1.0, 0.988]->[0.434, 0.012, 1.0, 1.0]: stand.", "At 1702.0s, [0.236, 0.064, 0.882, 0.991]: walk, touch (an object).", "At 1701.0s, [0.283, 0.002, 0.869, 0.985]: walk.", "At 1700.0s, [0.014, 0.013, 0.695, 1.0]: stand."]}]}, {"video": "Hi8QeP_VPu0.mkv", "clip": [1653, 1703], "conversations": [{"from": "human", "timestamps": 1683.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.367, 0.03, 0.659, 0.978]: walk."}, {"from": "human", "timestamps": 1703.0, "value": "Describe the actions of the person at bbox [0.358, 0.0, 0.969, 0.992] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.358, 0.0, 0.969, 0.992]: stand.", "At 1702.0s, [0.236, 0.064, 0.882, 0.991]: walk, touch (an object).", "At 1701.0s, [0.283, 0.002, 0.869, 0.985]: walk.", "At 1700.0s, [0.014, 0.013, 0.695, 1.0]: stand."]}]}, {"video": "Hi8QeP_VPu0.mkv", "clip": [1731, 1770], "conversations": [{"from": "human", "timestamps": 1761.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.0, 0.239, 0.529, 0.987]: stand; [0.511, 0.091, 0.875, 0.977]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1770.0, "value": "What actions did the person at bbox [0.702, 0.092, 0.941, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.702, 0.092, 0.941, 1.0]: stand, drink.", "From 1768.0s to 1769.0s, [0.63, 0.106, 0.877, 0.985]->[0.634, 0.105, 0.907, 0.986]: stand, carry/hold (an object), watch (a person).", "At 1767.0s, [0.609, 0.136, 0.836, 0.989]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1766.0s, [0.522, 0.142, 0.755, 0.986]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1763.0s to 1765.0s, [0.519, 0.113, 0.785, 0.985]->[0.59, 0.136, 0.859, 0.976]->[0.604, 0.127, 0.866, 0.991]: stand, carry/hold (an object), watch (a person).", "At 
1762.0s, [0.478, 0.063, 0.778, 0.969]: stand, carry/hold (an object).", "From 1760.0s to 1761.0s, [0.488, 0.074, 0.843, 0.975]->[0.511, 0.091, 0.875, 0.977]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1759.0s, [0.51, 0.06, 0.836, 0.971]: stand, talk to (e.g., self, a person, a group).", "At 1758.0s, [0.524, 0.049, 0.858, 0.971]: stand.", "At 1757.0s, [0.51, 0.044, 0.868, 0.98]: stand, talk to (e.g., self, a person, a group).", "From 1753.0s to 1756.0s, [0.523, 0.087, 0.836, 0.989]->[0.547, 0.069, 0.858, 0.98]->[0.536, 0.049, 0.857, 0.975]->[0.516, 0.049, 0.859, 0.976]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1752.0s, [0.513, 0.093, 0.837, 0.981]: stand, talk to (e.g., self, a person, a group).", "At 1751.0s, [0.517, 0.092, 0.832, 0.977]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1747.0s to 1750.0s, [0.536, 0.016, 1.0, 0.966]->[0.505, 0.016, 0.992, 0.986]->[0.518, 0.003, 1.0, 0.968]->[0.574, 0.016, 1.0, 0.971]: stand, watch (a person).", "From 1745.0s to 1746.0s, [0.534, 0.013, 1.0, 0.969]->[0.589, 0.02, 1.0, 0.971]: stand, listen to (a person), watch (a person)."]}]}, {"video": "HymKCzQJbB8.mkv", "clip": [883, 938], "conversations": [{"from": "human", "timestamps": 913.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.006, 0.004, 0.67, 0.995]?"}, {"from": "gpt", "value": ["At Now, [0.006, 0.004, 0.67, 0.995]: stand, answer phone.", "At 912.0s, [0.028, 0.0, 0.623, 0.989]: stand, answer phone."]}, {"from": "human", "timestamps": 938.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.288, 0.134, 0.381, 0.796]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.468, 0.421, 0.667, 0.938]: sit."}]}, {"video": "HymKCzQJbB8.mkv", "clip": [1028, 1062], "conversations": [{"from": "human", "timestamps": 1058.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.015, 0.038, 0.466, 1.0]: listen to (a person), sit, watch (a person); [0.446, 0.086, 0.857, 0.955]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.731, 0.014, 1.0, 0.967]: sit."}, {"from": "human", "timestamps": 1062.0, "value": "For the person at bbox [0.133, 0.083, 0.761, 0.982], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.133, 0.083, 0.761, 0.982]: stand, watch (a person), martial art.", "At 1061.0s, [0.126, 0.022, 0.561, 0.973]: watch (a person), martial art."]}]}, {"video": "HymKCzQJbB8.mkv", "clip": [1168, 1214], "conversations": [{"from": "human", "timestamps": 1198.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.593, 0.196, 0.767, 0.992]: watch (a person), walk; [0.864, 0.387, 0.961, 0.98]: walk."}, {"from": "human", "timestamps": 1214.0, "value": "What actions is the person in bbox [0.654, 0.302, 1.0, 0.983] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.654, 0.302, 1.0, 0.983]: stand, listen to (a person), watch (a person).", "At 1213.0s, [0.655, 0.283, 1.0, 0.996]: stand, talk to (e.g., self, a person, a group).", "From 1210.0s to 1212.0s, [0.624, 0.31, 1.0, 0.987]->[0.593, 0.304, 1.0, 0.99]->[0.592, 0.289, 1.0, 0.991]: stand, talk to (e.g., self, a 
person, a group), watch (a person).", "At 1209.0s, [0.651, 0.313, 0.995, 0.996]: stand, listen to (a person), watch (a person).", "At 1208.0s, [0.643, 0.315, 1.0, 0.995]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1206.0s to 1207.0s, [0.622, 0.334, 0.992, 0.989]->[0.626, 0.326, 0.997, 0.998]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1205.0s, [0.626, 0.409, 0.965, 0.988]: stand, listen to (a person), watch (a person).", "From 1203.0s to 1204.0s, [0.532, 0.125, 0.825, 0.987]->[0.529, 0.175, 0.819, 0.991]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1202.0s, [0.463, 0.121, 0.816, 0.99]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 1197.0s to 1201.0s, [0.639, 0.238, 0.768, 0.956]->[0.593, 0.196, 0.767, 0.992]->[0.563, 0.301, 0.752, 0.943]->[0.579, 0.243, 0.803, 0.986]->[0.489, 0.203, 0.773, 0.996]: walk, watch (a person)."]}]}, {"video": "HymKCzQJbB8.mkv", "clip": [1285, 1342], "conversations": [{"from": "human", "timestamps": 1315.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.455, 0.154, 0.902, 0.98]: eat, walk."}, {"from": "human", "timestamps": 1342.0, "value": "What actions did the person at bbox [0.326, 0.009, 1.0, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.326, 0.009, 1.0, 1.0]: stand.", "From 1339.0s to 1341.0s, [0.124, 0.131, 0.925, 0.982]->[0.214, 0.122, 0.779, 0.97]->[0.203, 0.044, 0.827, 0.973]: stand."]}]}, {"video": "HymKCzQJbB8.mkv", "clip": [1321, 1370], "conversations": [{"from": "human", "timestamps": 1351.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.243, 0.195, 0.443, 0.825]: stand, eat; [0.398, 0.394, 0.588, 0.991]: listen to (a person), stand, watch (a person); [0.74, 0.206, 0.949, 0.973]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1370.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.641, 0.45, 0.776, 0.985]?"}, {"from": "gpt", "value": ["In the current frame, [0.641, 0.45, 0.776, 0.985]: stand, listen to (a person), watch (a person).", "At 1369.0s, [0.637, 0.45, 0.777, 0.991]: stand, eat, listen to (a person), watch (a person).", "At 1368.0s, [0.612, 0.448, 0.755, 0.986]: stand, eat, watch (a person).", "At 1367.0s, [0.583, 0.412, 0.722, 0.983]: stand, eat, listen to (a person), watch (a person).", "From 1365.0s to 1366.0s, [0.562, 0.373, 0.715, 0.991]->[0.566, 0.369, 0.717, 0.989]: stand, watch (a person)."]}]}, {"video": "HymKCzQJbB8.mkv", "clip": [1477, 1527], "conversations": [{"from": "human", "timestamps": 1507.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.133, 0.017, 0.708, 0.997]: drink, walk; [0.635, 0.231, 0.735, 0.577]: stand; [0.662, 0.319, 0.922, 0.997]: stand; [0.865, 0.289, 0.994, 0.998]: stand."}, {"from": "human", "timestamps": 1527.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.856, 0.0, 1.0, 0.985]?"}, {"from": "gpt", "value": ["In the current frame, [0.856, 0.0, 1.0, 0.985]: stand, listen to (a person), watch (a person).", "At 1526.0s, [0.839, 0.006, 0.998, 0.96]: stand, listen to (a person), watch (a person)."]}]}, {"video": "HymKCzQJbB8.mkv", "clip": [1731, 1776], "conversations": [{"from": "human", "timestamps": 1761.0, 
"value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.073, 0.006, 0.862, 0.975]: carry/hold (an object), stand, listen to (a person)."}, {"from": "human", "timestamps": 1776.0, "value": "What actions is the person in bbox [0.009, 0.113, 0.602, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.009, 0.113, 0.602, 1.0]: stand, watch (a person).", "At 1775.0s, [0.029, 0.118, 0.742, 0.985]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "I8j6Xq2B5ys.mp4", "clip": [936, 979], "conversations": [{"from": "human", "timestamps": 966.0, "value": "What actions did the person at bbox [0.005, 0.05, 0.8, 0.975] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.005, 0.05, 0.8, 0.975]: stand.", "At 965.0s, [0.003, 0.011, 0.534, 0.983]: stand.", "At 964.0s, [0.027, 0.076, 0.482, 0.963]: stand, watch (a person).", "At 963.0s, [0.007, 0.132, 0.838, 0.973]: stand."]}, {"from": "human", "timestamps": 979.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.003, 0.078, 0.452, 0.973]: stand, talk to (e.g., self, a person, a group); [0.476, 0.124, 1.0, 0.987]: listen to (a person), stand."}]}, {"video": "I8j6Xq2B5ys.mp4", "clip": [1004, 1049], "conversations": [{"from": "human", "timestamps": 1034.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.255, 0.08, 0.762, 0.961]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1049.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.011, 0.002, 0.608, 0.984]?"}, {"from": "gpt", "value": ["In the current frame, [0.011, 0.002, 0.608, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1048.0s, [0.012, 0.028, 0.649, 0.982]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "I8j6Xq2B5ys.mp4", "clip": [1029, 1111], "conversations": [{"from": "human", "timestamps": 1059.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.325, 0.189, 0.717, 0.859]: carry/hold (an object), stand, watch (a person)."}, {"from": "human", "timestamps": 1111.0, "value": "What actions did the person at bbox [0.078, 0.161, 0.8, 0.985] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.078, 0.161, 0.8, 0.985]: stand, carry/hold (an object), watch (a person).", "At 1110.0s, [0.489, 0.258, 0.651, 0.814]: walk, carry/hold (an object)."]}]}, {"video": "I8j6Xq2B5ys.mp4", "clip": [1061, 1136], "conversations": [{"from": "human", "timestamps": 1091.0, "value": "What actions is the person in bbox [0.336, 0.003, 1.0, 0.888] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.336, 0.003, 1.0, 0.888]: stand, talk to (e.g., self, a person, a group).", "At 1090.0s, [0.303, 0.001, 1.0, 0.896]: stand, talk to (e.g., self, a person, a group).", "At 1089.0s, [0.216, 0.0, 0.987, 0.886]: stand."]}, {"from": "human", "timestamps": 1136.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.424, 0.049, 1.0, 0.977]: watch (a 
person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "I8j6Xq2B5ys.mp4", "clip": [1136, 1238], "conversations": [{"from": "human", "timestamps": 1166.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.048, 0.061, 0.785, 0.972]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1238.0, "value": "What actions is the person in bbox [0.528, 0.2, 0.911, 0.978] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.528, 0.2, 0.911, 0.978]: walk.", "From 1236.0s to 1237.0s, [0.337, 0.198, 0.78, 0.961]->[0.383, 0.217, 0.806, 0.965]: stand, talk to (e.g., self, a person, a group).", "At 1235.0s, [0.314, 0.179, 0.886, 0.957]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "I8j6Xq2B5ys.mp4", "clip": [1267, 1312], "conversations": [{"from": "human", "timestamps": 1297.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.0, 0.818, 0.927]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1312.0, "value": "What actions did the person at bbox [0.017, 0.487, 0.244, 0.959] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.017, 0.487, 0.244, 0.959]: stand, listen to (a person), watch (a person).", "From 1310.0s to 1311.0s, [0.424, 0.335, 0.531, 0.499]->[0.231, 0.387, 0.432, 0.594]: stand, listen to (a person), watch (a person)."]}]}, {"video": "I8j6Xq2B5ys.mp4", "clip": [1329, 1362], "conversations": [{"from": "human", "timestamps": 1359.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.041, 0.275, 0.21, 0.608]: listen to (a person), sit, watch (a person); [0.131, 0.259, 0.281, 0.545]: listen to (a person), sit, watch (a person); [0.515, 0.183, 0.574, 0.383]: stand; [0.742, 0.22, 0.895, 0.566]: listen to (a person), stand, watch (a person); [0.857, 0.218, 0.976, 0.581]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1362.0, "value": "For the person at bbox [0.318, 0.224, 0.696, 0.854], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.318, 0.224, 0.696, 0.854]: sit, carry/hold (an object).", "From 1360.0s to 1361.0s, [0.361, 0.24, 0.623, 0.793]->[0.301, 0.22, 0.641, 0.623]: sit, carry/hold (an object), watch (a person).", "At 1359.0s, [0.742, 0.22, 0.895, 0.566]: stand, listen to (a person), watch (a person)."]}]}, {"video": "I8j6Xq2B5ys.mp4", "clip": [1430, 1482], "conversations": [{"from": "human", "timestamps": 1460.0, "value": "For the person at bbox [0.167, 0.061, 0.814, 0.958], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.167, 0.061, 0.814, 0.958]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1459.0s, [0.166, 0.048, 0.803, 0.977]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1457.0s to 1458.0s, [0.156, 0.044, 0.804, 0.973]->[0.161, 0.059, 0.809, 0.971]: sit, talk to (e.g., self, a person, a group).", "At 1456.0s, [0.163, 0.057, 0.808, 0.972]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1455.0s, [0.167, 0.065, 0.809, 0.975]: sit, listen to (a person)."]}, 
{"from": "human", "timestamps": 1482.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.774, 0.161, 0.997, 0.978]: watch (a person), sit, touch (an object)."}]}, {"video": "I8j6Xq2B5ys.mp4", "clip": [1507, 1596], "conversations": [{"from": "human", "timestamps": 1537.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.089, 0.05, 0.705, 0.98]: carry/hold (an object), stand, watch (a person)."}, {"from": "human", "timestamps": 1596.0, "value": "Describe the actions of the person at bbox [0.284, 0.277, 0.739, 0.727] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.284, 0.277, 0.739, 0.727]: sit, ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group).", "At 1595.0s, [0.392, 0.284, 0.749, 0.763]: sit, ride (e.g., a bike, a car, a horse), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1594.0s, [0.363, 0.244, 0.712, 0.778]: sit, ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group), watch (a person).", "From 1592.0s to 1593.0s, [0.321, 0.262, 0.719, 0.731]->[0.332, 0.296, 0.657, 0.756]: sit, ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group).", "At 1591.0s, [0.15, 0.055, 0.781, 0.818]: stand, ride (e.g., a bike, a car, a horse).", "At 1590.0s, [0.196, 0.038, 0.616, 0.775]: bend/bow (at the waist), ride (e.g., a bike, a car, a horse), watch (a person)."]}]}, {"video": "I8j6Xq2B5ys.mp4", "clip": [1596, 1642], "conversations": [{"from": "human", "timestamps": 1626.0, "value": "What actions is the person in bbox [0.694, 0.224, 0.856, 0.894] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.694, 0.224, 0.856, 0.894]: stand, talk to (e.g., self, a person, a group).", "From 1624.0s to 1625.0s, [0.55, 0.247, 0.701, 0.721]->[0.671, 0.224, 0.847, 0.922]: walk, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1642.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.207, 0.218, 0.385, 0.677]: listen to (a person), stand, watch (a person); [0.556, 0.206, 0.77, 0.961]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.817, 0.232, 0.971, 0.911]: listen to (a person), stand, watch (a person)."}]}, {"video": "IKdBLciu_-A.mp4", "clip": [886, 951], "conversations": [{"from": "human", "timestamps": 916.0, "value": "For the person at bbox [0.085, 0.0, 0.987, 0.972], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.085, 0.0, 0.987, 0.972]: sit, talk to (e.g., self, a person, a group).", "At 915.0s, [0.106, 0.0, 0.984, 0.966]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 914.0s, [0.127, 0.0, 0.999, 0.975]: stand, talk to (e.g., self, a person, a group).", "At 913.0s, [0.185, 0.0, 1.0, 0.979]: sit, stand, talk to (e.g., self, a person, a group).", "From 911.0s to 912.0s, [0.169, 0.0, 0.987, 0.972]->[0.154, 0.0, 0.988, 0.982]: stand, talk to (e.g., self, a person, a group).", "At 910.0s, [0.17, 0.0, 0.978, 0.97]: sit, talk to (e.g., self, a person, a group).", "From 908.0s to 909.0s, [0.162, 0.006, 0.995, 0.98]->[0.158, 0.0, 1.0, 0.965]: stand, talk to (e.g., self, a person, a 
group).", "At 907.0s, [0.246, 0.027, 1.0, 0.978]: sit, stand, talk to (e.g., self, a person, a group).", "From 902.0s to 906.0s, [0.126, 0.003, 0.993, 0.981]->[0.141, 0.0, 0.988, 0.972]->[0.134, 0.02, 0.99, 0.968]->[0.175, 0.014, 0.997, 0.98]->[0.159, 0.0, 1.0, 0.96]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 951.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.127, 0.041, 0.746, 0.99]: carry/hold (an object), sit."}]}, {"video": "IKdBLciu_-A.mp4", "clip": [930, 1015], "conversations": [{"from": "human", "timestamps": 960.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.0, 0.942, 0.987]: sit."}, {"from": "human", "timestamps": 1015.0, "value": "What actions did the person at bbox [0.509, 0.092, 0.912, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.509, 0.092, 0.912, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1013.0s to 1014.0s, [0.525, 0.094, 0.915, 1.0]->[0.514, 0.097, 0.918, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1012.0s, [0.526, 0.1, 0.897, 1.0]: stand, carry/hold (an object), catch (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1011.0s, [0.522, 0.1, 0.898, 1.0]: stand, carry/hold (an object), listen to (a person).", "At 1010.0s, [0.506, 0.092, 0.899, 1.0]: stand, carry/hold (an object), watch (a person).", "From 1008.0s to 1009.0s, [0.574, 0.114, 0.894, 0.991]->[0.51, 0.086, 0.896, 0.987]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1007.0s, [0.546, 0.127, 0.901, 0.996]: stand, carry/hold (an object).", "At 1006.0s, [0.562, 0.091, 0.909, 1.0]: stand, carry/hold (an object), watch (a person).", "At 1005.0s, [0.503, 0.12, 0.909, 0.969]: stand, carry/hold (an object), touch (an object), watch (a person).", "From 1002.0s to 1004.0s, [0.482, 0.081, 0.786, 0.981]->[0.552, 0.187, 0.835, 1.0]->[0.563, 0.077, 0.864, 0.991]: walk, carry/hold (an object).", "At 1001.0s, [0.492, 0.017, 0.763, 0.977]: walk, carry/hold (an object), listen to (a person).", "From 998.0s to 1000.0s, [0.47, 0.036, 0.799, 0.98]->[0.46, 0.049, 0.785, 0.986]->[0.457, 0.087, 0.762, 0.977]: walk, carry/hold (an object).", "At 997.0s, [0.485, 0.155, 0.823, 0.987]: walk.", "At 996.0s, [0.421, 0.208, 0.83, 1.0]: walk, touch (an object).", "At 995.0s, [0.312, 0.187, 0.667, 0.992]: walk."]}]}, {"video": "IKdBLciu_-A.mp4", "clip": [936, 979], "conversations": [{"from": "human", "timestamps": 966.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.032, 0.088, 0.758, 0.995]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 979.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.172, 0.106, 0.633, 0.975]?"}, {"from": "gpt", "value": ["In the current frame, [0.172, 0.106, 0.633, 0.975]: stand, cut.", "At 978.0s, [0.169, 0.092, 0.635, 0.969]: stand, carry/hold (an object), cut.", "At 977.0s, [0.203, 0.075, 0.662, 0.957]: stand, carry/hold (an object), cut, talk to (e.g., self, a person, a group).", "At 976.0s, [0.178, 0.045, 0.654, 0.968]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 975.0s, 
[0.071, 0.096, 0.654, 0.968]: stand, carry/hold (an object).", "At 974.0s, [0.026, 0.126, 0.544, 0.985]: bend/bow (at the waist), carry/hold (an object), touch (an object), listen to (a person)."]}]}, {"video": "IKdBLciu_-A.mp4", "clip": [1029, 1077], "conversations": [{"from": "human", "timestamps": 1059.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.319, 0.179, 0.981]: walk; [0.065, 0.134, 0.628, 0.991]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.598, 0.285, 0.997, 0.979]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1077.0, "value": "What actions did the person at bbox [0.584, 0.268, 1.0, 0.992] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.584, 0.268, 1.0, 0.992]: stand, talk to (e.g., self, a person, a group).", "From 1074.0s to 1076.0s, [0.591, 0.252, 1.0, 0.989]->[0.598, 0.259, 1.0, 0.994]->[0.578, 0.264, 0.999, 0.987]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "IKdBLciu_-A.mp4", "clip": [1063, 1097], "conversations": [{"from": "human", "timestamps": 1093.0, "value": "Describe the actions of the person at bbox [0.015, 0.142, 0.592, 0.983] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.015, 0.142, 0.592, 0.983]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1092.0s, [0.004, 0.132, 0.592, 0.979]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1091.0s, [0.051, 0.116, 0.639, 0.996]: walk, carry/hold (an object), listen to (a person), watch (a person).", "From 1088.0s to 1090.0s, [0.026, 0.11, 0.597, 0.991]->[0.026, 0.097, 0.599, 1.0]->[0.042, 0.115, 0.528, 1.0]: walk, carry/hold (an object), listen to (a person).", "At 1087.0s, [0.104, 0.06, 0.715, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1085.0s to 1086.0s, [0.033, 0.1, 0.666, 0.985]->[0.067, 0.082, 0.644, 0.985]: walk, carry/hold (an object), listen to (a person), watch (a person).", "From 1078.0s to 1084.0s, [0.014, 0.138, 0.575, 1.0]->[0.05, 0.085, 0.634, 0.98]->[0.023, 0.122, 0.65, 0.997]->[0.0, 0.085, 0.574, 0.99]->[0.044, 0.125, 0.627, 1.0]->[0.065, 0.069, 0.637, 0.997]->[0.052, 0.119, 0.578, 0.988]: walk, carry/hold (an object), listen to (a person).", "From 1071.0s to 1077.0s, [0.099, 0.093, 0.618, 0.984]->[0.069, 0.143, 0.567, 0.979]->[0.053, 0.128, 0.568, 0.979]->[0.064, 0.131, 0.585, 0.99]->[0.06, 0.129, 0.585, 0.986]->[0.073, 0.124, 0.589, 0.986]->[0.068, 0.122, 0.608, 0.994]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1070.0s, [0.042, 0.106, 0.648, 0.986]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1066.0s to 1069.0s, [0.034, 0.147, 0.608, 0.979]->[0.127, 0.109, 0.675, 0.983]->[0.061, 0.096, 0.615, 0.996]->[0.102, 0.108, 0.691, 0.989]: walk, carry/hold (an object), listen to (a person).", "From 1056.0s to 1065.0s, [0.05, 0.107, 0.629, 0.983]->[0.04, 0.106, 0.58, 0.982]->[0.054, 0.136, 0.619, 0.985]->[0.065, 0.134, 0.628, 0.991]->[0.061, 0.143, 0.622, 0.99]->[0.059, 0.15, 0.615, 0.983]->[0.066, 0.145, 0.612, 0.985]->[0.062, 0.152, 0.62, 0.987]->[0.057, 0.148, 0.619, 0.987]->[0.06, 0.17, 0.63, 0.994]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1055.0s, [0.007, 0.04, 0.498, 1.0]: stand, carry/hold (an object), 
listen to (a person).", "From 1053.0s to 1054.0s, [0.0, 0.082, 0.531, 1.0]->[0.017, 0.075, 0.661, 0.997]: walk, carry/hold (an object), listen to (a person).", "From 1049.0s to 1052.0s, [0.054, 0.065, 0.599, 0.993]->[0.078, 0.086, 0.719, 0.987]->[0.009, 0.102, 0.598, 0.99]->[0.07, 0.104, 0.625, 0.992]: walk, carry/hold (an object), listen to (a person), watch (a person).", "From 1045.0s to 1048.0s, [0.054, 0.158, 0.576, 1.0]->[0.112, 0.152, 0.629, 1.0]->[0.031, 0.146, 0.536, 1.0]->[0.138, 0.075, 0.627, 0.993]: walk, carry/hold (an object), listen to (a person).", "From 1037.0s to 1044.0s, [0.101, 0.043, 0.604, 1.0]->[0.029, 0.089, 0.526, 0.99]->[0.151, 0.173, 0.617, 0.998]->[0.133, 0.13, 0.57, 0.989]->[0.109, 0.131, 0.599, 0.989]->[0.137, 0.12, 0.575, 0.99]->[0.021, 0.101, 0.514, 0.992]->[0.119, 0.124, 0.612, 0.984]: walk, carry/hold (an object), listen to (a person), watch (a person).", "From 1035.0s to 1036.0s, [0.197, 0.175, 0.58, 1.0]->[0.117, 0.059, 0.608, 0.99]: walk, carry/hold (an object), listen to (a person)."]}, {"from": "human", "timestamps": 1097.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.014, 0.123, 0.598, 0.986]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.566, 0.256, 1.0, 0.985]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "IKdBLciu_-A.mp4", "clip": [1303, 1361], "conversations": [{"from": "human", "timestamps": 1333.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.269, 0.24, 0.956, 0.984]?"}, {"from": "gpt", "value": ["In the current frame, [0.269, 0.24, 0.956, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1332.0s, [0.38, 0.262, 0.964, 0.987]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1331.0s, [0.276, 0.243, 0.981, 0.991]: stand, grab (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1330.0s, [0.398, 0.247, 0.984, 0.985]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1329.0s, [0.463, 0.263, 0.807, 0.97]: stand.", "At 1328.0s, [0.511, 0.372, 1.0, 0.992]: stand, watch (a person).", "At 1327.0s, [0.071, 0.068, 0.794, 0.979]: crouch/kneel, watch (a person).", "At 1326.0s, [0.072, 0.24, 0.846, 0.961]: crouch/kneel.", "At 1325.0s, [0.028, 0.135, 0.949, 0.984]: crouch/kneel, talk to (e.g., self, a person, a group), watch (a person).", "At 1324.0s, [0.09, 0.059, 0.944, 0.963]: carry/hold (an object), crouch/kneel, talk to (e.g., self, a person, a group), watch (a person).", "At 1323.0s, [0.479, 0.279, 0.85, 0.981]: carry/hold (an object), crouch/kneel, watch (a person).", "At 1322.0s, [0.205, 0.194, 0.95, 0.993]: stand, touch (an object), grab (a person), watch (a person).", "At 1321.0s, [0.561, 0.375, 1.0, 0.991]: stand, carry/hold (an object), dress/put on clothing, give/serve (an object) to (a person), watch (a person).", "From 1318.0s to 1320.0s, [0.575, 0.309, 0.993, 0.982]->[0.623, 0.316, 0.998, 0.99]->[0.441, 0.345, 0.985, 0.984]: stand, carry/hold (an object).", "From 1316.0s to 1317.0s, [0.659, 0.249, 1.0, 0.995]->[0.85, 0.262, 1.0, 0.988]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1315.0s, [0.53, 0.285, 0.986, 0.998]: stand, talk to (e.g., self, a person, a group).", "From 1313.0s to 1314.0s, [0.719, 0.291, 1.0, 0.993]->[0.652, 0.283, 1.0, 0.993]: stand, listen to (a person), watch (a person).", "At 1312.0s, [0.752, 0.283, 1.0, 0.991]: 
stand, watch (a person).", "At 1311.0s, [0.671, 0.271, 0.991, 0.989]: stand, carry/hold (an object), watch (a person).", "At 1310.0s, [0.68, 0.255, 1.0, 0.987]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1361.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.031, 0.166, 0.868, 1.0]: stand, pull (an object)."}]}, {"video": "IKdBLciu_-A.mp4", "clip": [1478, 1526], "conversations": [{"from": "human", "timestamps": 1508.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.214, 0.426, 0.507, 0.811]: sit, write; [0.711, 0.197, 0.995, 0.985]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1526.0, "value": "What actions did the person at bbox [0.213, 0.109, 0.81, 0.986] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.213, 0.109, 0.81, 0.986]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1518.0s to 1525.0s, [0.199, 0.09, 0.801, 0.998]->[0.144, 0.078, 0.745, 0.995]->[0.2, 0.108, 0.779, 0.992]->[0.216, 0.109, 0.794, 0.99]->[0.217, 0.105, 0.822, 0.994]->[0.227, 0.109, 0.818, 0.99]->[0.208, 0.112, 0.803, 0.999]->[0.214, 0.108, 0.811, 0.989]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1516.0s to 1517.0s, [0.214, 0.128, 0.734, 0.994]->[0.198, 0.088, 0.795, 0.991]: stand, carry/hold (an object), close (e.g., a door, a box).", "At 1515.0s, [0.218, 0.336, 0.757, 0.995]: bend/bow (at the waist), stand, carry/hold (an object), close (e.g., a door, a box).", "From 1513.0s to 1514.0s, [0.199, 0.123, 0.776, 0.987]->[0.177, 0.261, 0.766, 0.99]: stand, carry/hold (an object).", "At 1512.0s, [0.287, 0.108, 0.809, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1511.0s, [0.355, 0.085, 0.723, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1510.0s, [0.57, 0.305, 0.705, 0.547]: walk, watch (a person)."]}]}, {"video": "IKdBLciu_-A.mp4", "clip": [1545, 1613], "conversations": [{"from": "human", "timestamps": 1575.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.0, 0.139, 0.204, 0.98]: carry/hold (an object), run/jog; [0.095, 0.214, 0.372, 0.922]: carry/hold (an object), run/jog."}, {"from": "human", "timestamps": 1613.0, "value": "What actions did the person at bbox [0.242, 0.056, 0.931, 0.983] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.242, 0.056, 0.931, 0.983]: bend/bow (at the waist).", "At 1612.0s, [0.192, 0.069, 0.882, 0.95]: bend/bow (at the waist), crouch/kneel."]}]}, {"video": "IKdBLciu_-A.mp4", "clip": [1615, 1652], "conversations": [{"from": "human", "timestamps": 1645.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.37, 0.094, 0.779, 0.99]?"}, {"from": "gpt", "value": ["In the current frame, [0.37, 0.094, 0.779, 0.99]: stand, smoke, talk to (e.g., self, a person, a group).", "At 1644.0s, [0.379, 0.065, 0.751, 0.957]: stand, smoke.", "From 1641.0s to 1643.0s, [0.398, 0.096, 0.767, 0.965]->[0.405, 0.098, 0.772, 0.978]->[0.379, 0.105, 0.767, 0.967]: stand, carry/hold (an object), smoke."]}, {"from": "human", "timestamps": 1652.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, 
[0.324, 0.024, 0.853, 0.985]: carry/hold (an object), walk."}]}, {"video": "IKdBLciu_-A.mp4", "clip": [1624, 1701], "conversations": [{"from": "human", "timestamps": 1654.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.341, 0.133, 0.912, 0.993]: sit, lift/pick up, smoke."}, {"from": "human", "timestamps": 1701.0, "value": "What actions is the person in bbox [0.593, 0.087, 1.0, 0.974] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.593, 0.087, 1.0, 0.974]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1697.0s to 1700.0s, [0.663, 0.102, 0.995, 0.996]->[0.591, 0.104, 1.0, 0.967]->[0.609, 0.088, 0.999, 1.0]->[0.606, 0.086, 0.999, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1696.0s, [0.638, 0.101, 0.998, 0.994]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "Ie35yEssHko.mkv", "clip": [902, 940], "conversations": [{"from": "human", "timestamps": 932.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.141, 0.272, 0.338, 0.967]: carry/hold (an object), stand, watch (a person); [0.275, 0.093, 0.447, 0.962]: listen to (a person), stand, watch (a person); [0.415, 0.083, 0.671, 0.976]: watch (a person), give/serve (an object) to (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 940.0, "value": "What actions is the person in bbox [0.114, 0.119, 0.418, 0.994] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.114, 0.119, 0.418, 0.994]: stand, hug (a person), talk to (e.g., self, a person, a group).", "At 939.0s, [0.002, 0.148, 0.351, 0.987]: walk, hug (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 938.0s, [0.229, 0.073, 0.669, 0.984]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 937.0s, [0.407, 0.065, 0.692, 0.996]: walk, talk to (e.g., self, a person, a group).", "At 936.0s, [0.38, 0.035, 0.653, 0.964]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Ie35yEssHko.mkv", "clip": [935, 977], "conversations": [{"from": "human", "timestamps": 965.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.249, 0.15, 1.0, 0.975]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 977.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.067, 0.17, 0.704, 0.975]?"}, {"from": "gpt", "value": ["At Now, [0.067, 0.17, 0.704, 0.975]: stand, talk to (e.g., self, a person, a group).", "At 976.0s, [0.067, 0.21, 0.683, 0.977]: stand, watch (a person)."]}]}, {"video": "Ie35yEssHko.mkv", "clip": [1053, 1127], "conversations": [{"from": "human", "timestamps": 1083.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.216, 0.098, 0.655, 0.921]: watch (a person), sit, touch (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1127.0, "value": "Describe the actions of the person at bbox [0.33, 0.113, 0.706, 0.865] in the current frame and the previous frames."}, {"from": "gpt", 
"value": ["In the current frame, [0.33, 0.113, 0.706, 0.865]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1125.0s to 1126.0s, [0.379, 0.042, 0.759, 0.851]->[0.329, 0.057, 0.727, 0.908]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1124.0s, [0.404, 0.043, 0.816, 0.783]: stand, touch (an object), watch (a person).", "At 1123.0s, [0.532, 0.048, 0.842, 0.736]: walk, watch (a person).", "At 1122.0s, [0.435, 0.084, 0.632, 0.833]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1121.0s, [0.19, 0.14, 0.419, 0.851]: walk, watch (a person).", "At 1120.0s, [0.175, 0.146, 0.352, 0.87]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1119.0s, [0.015, 0.067, 0.455, 1.0]: walk."]}]}, {"video": "Ie35yEssHko.mkv", "clip": [1134, 1199], "conversations": [{"from": "human", "timestamps": 1164.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.016, 0.315, 0.373, 0.99]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.302, 0.473, 0.448, 0.911]: listen to (a person), sit, watch (a person); [0.722, 0.341, 0.926, 0.711]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1199.0, "value": "For the person at bbox [0.009, 0.047, 0.719, 0.972], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.009, 0.047, 0.719, 0.972]: sit, talk to (e.g., self, a person, a group).", "From 1197.0s to 1198.0s, [0.022, 0.046, 0.714, 0.972]->[0.017, 0.066, 0.735, 0.972]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Ie35yEssHko.mkv", "clip": [1135, 1239], "conversations": [{"from": "human", "timestamps": 1165.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.412, 0.08, 0.871, 0.884]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1239.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.033, 0.078, 0.733, 0.894]?"}, {"from": "gpt", "value": ["In the current frame, [0.033, 0.078, 0.733, 0.894]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1238.0s, [0.247, 0.152, 0.983, 0.917]: sit, watch (a person)."]}]}, {"video": "Ie35yEssHko.mkv", "clip": [1203, 1241], "conversations": [{"from": "human", "timestamps": 1233.0, "value": "What actions is the person in bbox [0.494, 0.17, 0.969, 0.768] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.494, 0.17, 0.969, 0.768]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1232.0s, [0.5, 0.172, 0.958, 0.764]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1227.0s to 1231.0s, [0.543, 0.382, 0.839, 0.825]->[0.513, 0.369, 0.843, 0.813]->[0.54, 0.367, 0.833, 0.801]->[0.541, 0.358, 0.834, 0.803]->[0.535, 0.36, 0.835, 0.802]: sit, listen to (a person), watch (a person).", "From 1223.0s to 1226.0s, [0.497, 0.508, 0.856, 0.956]->[0.47, 0.534, 0.851, 0.973]->[0.465, 0.463, 0.827, 0.913]->[0.496, 0.406, 0.824, 0.853]: sit, watch (a person).", "From 1221.0s to 1222.0s, [0.464, 0.45, 0.851, 0.916]->[0.478, 0.462, 0.857, 0.913]: sit.", "At 1220.0s, [0.493, 0.272, 0.97, 0.885]: sit, talk to (e.g., self, a person, a 
group).", "At 1219.0s, [0.484, 0.272, 0.963, 0.884]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1241.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.002, 0.052, 0.769, 0.978]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.5, 0.096, 0.976, 0.834]: listen to (a person), sit."}]}, {"video": "Ie35yEssHko.mkv", "clip": [1248, 1305], "conversations": [{"from": "human", "timestamps": 1278.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.061, 0.323, 0.973]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.165, 0.023, 0.964, 0.978]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1305.0, "value": "What actions is the person in bbox [0.217, 0.0, 1.0, 0.974] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.217, 0.0, 1.0, 0.974]: sit, talk to (e.g., self, a person, a group).", "At 1304.0s, [0.222, 0.017, 1.0, 0.968]: sit, listen to (a person), watch (a person)."]}]}, {"video": "Ie35yEssHko.mkv", "clip": [1268, 1323], "conversations": [{"from": "human", "timestamps": 1298.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.241, 0.0, 0.991, 0.991]: listen to (a person), sit."}, {"from": "human", "timestamps": 1323.0, "value": "Describe the actions of the person at bbox [0.0, 0.0, 0.907, 0.984] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.0, 0.907, 0.984]: sit, stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1320.0s to 1322.0s, [0.0, 0.003, 0.9, 0.996]->[0.0, 0.0, 0.915, 0.988]->[0.0, 0.0, 0.914, 0.983]: sit, talk to (e.g., self, a person, a group).", "At 1319.0s, [0.0, 0.0, 0.93, 0.989]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1318.0s, [0.0, 0.0, 0.483, 0.978]: sit, listen to (a person), watch (a person)."]}]}, {"video": "Ie35yEssHko.mkv", "clip": [1360, 1419], "conversations": [{"from": "human", "timestamps": 1390.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.239, 0.002, 1.0, 0.976]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1419.0, "value": "Describe the actions of the person at bbox [0.127, 0.029, 0.754, 0.996] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.127, 0.029, 0.754, 0.996]: stand, talk to (e.g., self, a person, a group).", "At 1418.0s, [0.189, 0.082, 0.762, 0.978]: stand, talk to (e.g., self, a person, a group).", "From 1416.0s to 1417.0s, [0.122, 0.041, 0.766, 0.982]->[0.118, 0.073, 0.768, 0.969]: crouch/kneel, talk to (e.g., self, a person, a group), watch (a person).", "At 1415.0s, [0.429, 0.139, 0.696, 0.826]: crouch/kneel, listen to (a person).", "At 1414.0s, [0.46, 0.189, 0.721, 0.865]: crouch/kneel, watch (e.g., TV), listen to (a person).", "At 1413.0s, [0.49, 0.287, 0.756, 0.958]: crouch/kneel, read, listen to (a person), watch (a person).", "From 1411.0s to 1412.0s, [0.63, 0.633, 0.848, 0.994]->[0.534, 0.454, 0.787, 0.986]: crouch/kneel, listen to (a person), watch (a person).", "At 1410.0s, [0.751, 0.808, 
0.885, 0.996]: stand."]}]}, {"video": "Ie35yEssHko.mkv", "clip": [1409, 1451], "conversations": [{"from": "human", "timestamps": 1439.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.006, 0.009, 0.525, 0.983]: carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person), bend/bow (at the waist); [0.347, 0.182, 0.994, 0.981]: listen to (a person), stand, eat."}, {"from": "human", "timestamps": 1451.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.028, 0.046, 0.693, 1.0]?"}, {"from": "gpt", "value": ["At Now, [0.028, 0.046, 0.693, 1.0]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1449.0s to 1450.0s, [0.009, 0.024, 0.646, 0.98]->[0.01, 0.036, 0.618, 0.972]: bend/bow (at the waist), carry/hold (an object), listen to (a person), watch (a person).", "From 1447.0s to 1448.0s, [0.012, 0.02, 0.491, 1.0]->[0.0, 0.0, 0.333, 0.975]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1446.0s, [0.006, 0.0, 0.316, 1.0]: bend/bow (at the waist), carry/hold (an object), give/serve (an object) to (a person), listen to (a person), watch (a person).", "At 1445.0s, [0.0, 0.009, 0.428, 0.995]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1443.0s to 1444.0s, [0.0, 0.016, 0.53, 0.974]->[0.0, 0.0, 0.439, 0.999]: bend/bow (at the waist), watch (a person).", "From 1441.0s to 1442.0s, [0.0, 0.015, 0.494, 0.611]->[0.0, 0.035, 0.407, 0.986]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1440.0s, [0.0, 0.01, 0.49, 0.999]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person).", "From 1436.0s to 1439.0s, [0.026, 0.052, 0.616, 0.98]->[0.0, 0.014, 0.358, 0.968]->[0.003, 0.0, 0.325, 0.964]->[0.006, 0.009, 0.525, 0.983]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1434.0s to 1435.0s, [0.037, 0.006, 0.686, 1.0]->[0.074, 0.001, 0.761, 0.986]: stand, carry/hold (an object), give/serve (an object) to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Ie35yEssHko.mkv", "clip": [1528, 1610], "conversations": [{"from": "human", "timestamps": 1558.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.008, 0.0, 0.463, 0.981]: stand; [0.657, 0.333, 0.918, 0.983]: sit."}, {"from": "human", "timestamps": 1610.0, "value": "What actions did the person at bbox [0.059, 0.022, 0.698, 0.979] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.059, 0.022, 0.698, 0.979]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1607.0s to 1609.0s, [0.067, 0.007, 0.795, 0.976]->[0.119, 0.024, 0.724, 0.99]->[0.071, 0.016, 0.722, 0.98]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Ie35yEssHko.mkv", "clip": [1547, 1592], "conversations": [{"from": "human", "timestamps": 1577.0, "value": "Describe the actions of the person at bbox [0.482, 0.101, 0.984, 0.988] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.482, 0.101, 0.984, 0.988]: stand, carry/hold (an object).", "At 1576.0s, [0.328, 0.085, 0.866, 1.0]: stand, carry/hold (an object)."]}, {"from": "human", "timestamps": 1592.0, 
"value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.182, 0.131, 0.633, 0.983]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.522, 0.095, 0.996, 0.993]: listen to (a person), stand, watch (a person)."}]}, {"video": "Ie35yEssHko.mkv", "clip": [1638, 1716], "conversations": [{"from": "human", "timestamps": 1668.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.015, 0.197, 0.431, 0.863]: listen to (a person), sit, watch (a person); [0.41, 0.16, 0.976, 0.928]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1716.0, "value": "What actions is the person in bbox [0.01, 0.019, 0.856, 0.995] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.01, 0.019, 0.856, 0.995]: sit, carry/hold (an object), listen to (a person).", "At 1715.0s, [0.247, 0.031, 0.888, 0.979]: sit, carry/hold (an object).", "At 1714.0s, [0.187, 0.017, 0.949, 0.988]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "Ie35yEssHko.mkv", "clip": [1680, 1711], "conversations": [{"from": "human", "timestamps": 1710.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.038, 0.004, 0.818, 0.993]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1711.0, "value": "What actions is the person in bbox [0.039, 0.004, 0.817, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.039, 0.004, 0.817, 1.0]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1708.0s to 1710.0s, [0.041, 0.0, 0.799, 1.0]->[0.039, 0.005, 0.825, 1.0]->[0.038, 0.004, 0.818, 0.993]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "IuPC-z-M9u8.mkv", "clip": [923, 993], "conversations": [{"from": "human", "timestamps": 953.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.085, 0.213, 0.44, 0.815]: watch (a person), lie/sleep; [0.394, 0.24, 0.934, 0.935]: watch (a person), lie/sleep."}, {"from": "human", "timestamps": 993.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.053, 0.0, 0.704, 0.988]?"}, {"from": "gpt", "value": ["At Now, [0.053, 0.0, 0.704, 0.988]: walk.", "At 992.0s, [0.053, 0.011, 0.637, 0.999]: stand, watch (a person).", "At 991.0s, [0.005, 0.0, 0.588, 0.967]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "IuPC-z-M9u8.mkv", "clip": [1025, 1073], "conversations": [{"from": "human", "timestamps": 1055.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.088, 0.138, 0.542, 0.986]: ride (e.g., a bike, a car, a horse), sit; [0.584, 0.166, 0.992, 0.98]: ride (e.g., a bike, a car, a horse), sit, listen to (a person)."}, {"from": "human", "timestamps": 1073.0, "value": "What actions is the person in bbox [0.505, 0.024, 1.0, 0.993] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.505, 0.024, 1.0, 0.993]: sit, drive (e.g., a car, a truck).", "At 1072.0s, [0.698, 0.0, 0.993, 1.0]: sit, 
drive (e.g., a car, a truck)."]}]}, {"video": "IuPC-z-M9u8.mkv", "clip": [1134, 1168], "conversations": [{"from": "human", "timestamps": 1164.0, "value": "Describe the actions of the person at bbox [0.095, 0.042, 0.837, 0.962] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.095, 0.042, 0.837, 0.962]: sit, stand, carry/hold (an object).", "From 1162.0s to 1163.0s, [0.066, 0.01, 0.884, 0.971]->[0.078, 0.015, 0.848, 0.966]: sit.", "At 1161.0s, [0.063, 0.009, 0.902, 0.975]: sit, stand.", "At 1160.0s, [0.039, 0.005, 0.923, 0.981]: sit, watch (a person)."]}, {"from": "human", "timestamps": 1168.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.126, 0.083, 0.812, 0.974]: sit."}]}, {"video": "IuPC-z-M9u8.mkv", "clip": [1146, 1230], "conversations": [{"from": "human", "timestamps": 1176.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.115, 0.096, 0.801, 0.99]: carry/hold (an object), sit, touch (an object)."}, {"from": "human", "timestamps": 1230.0, "value": "What actions did the person at bbox [0.0, 0.013, 0.767, 0.972] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.013, 0.767, 0.972]: sit, write.", "From 1228.0s to 1229.0s, [0.0, 0.0, 0.714, 0.982]->[0.0, 0.0, 0.78, 0.983]: stand.", "At 1227.0s, [0.0, 0.0, 0.725, 0.979]: stand, watch (a person).", "At 1226.0s, [0.0, 0.005, 0.737, 0.983]: stand, listen to (a person), watch (a person)."]}]}, {"video": "IuPC-z-M9u8.mkv", "clip": [1185, 1249], "conversations": [{"from": "human", "timestamps": 1215.0, "value": "For the person at bbox [0.265, 0.101, 0.513, 0.801], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.265, 0.101, 0.513, 0.801]: sit, watch (a person).", "At 1214.0s, [0.275, 0.103, 0.514, 0.841]: sit, watch (a person).", "At 1213.0s, [0.28, 0.103, 0.52, 0.828]: sit."]}, {"from": "human", "timestamps": 1249.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.01, 0.771, 0.982]: watch (a person), stand."}]}, {"video": "IuPC-z-M9u8.mkv", "clip": [1251, 1308], "conversations": [{"from": "human", "timestamps": 1281.0, "value": "Describe the actions of the person at bbox [0.01, 0.314, 0.373, 0.895] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.01, 0.314, 0.373, 0.895]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1280.0s, [0.0, 0.304, 0.378, 0.915]: sit, carry/hold (an object), touch (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1308.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.345, 0.016, 0.996, 0.972]: watch (a person), sit."}]}, {"video": "IuPC-z-M9u8.mkv", "clip": [1333, 1377], "conversations": [{"from": "human", "timestamps": 1363.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.238, 0.123, 0.461, 0.861]: sit, touch (an object); [0.569, 0.06, 0.852, 0.807]: sit, touch (an object)."}, {"from": "human", "timestamps": 1377.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.13, 0.005, 
0.884, 0.984]?"}, {"from": "gpt", "value": ["In the current frame, [0.13, 0.005, 0.884, 0.984]: stand, watch (a person).", "At 1376.0s, [0.053, 0.0, 0.778, 0.986]: stand, watch (a person).", "At 1375.0s, [0.012, 0.009, 0.699, 0.981]: sit, stand, listen to (a person), watch (a person)."]}]}, {"video": "IuPC-z-M9u8.mkv", "clip": [1386, 1434], "conversations": [{"from": "human", "timestamps": 1416.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.159, 0.036, 0.844, 0.969]: listen to (a person), stand, watch (a person); [0.556, 0.195, 0.773, 0.684]: listen to (a person), stand."}, {"from": "human", "timestamps": 1434.0, "value": "For the person at bbox [0.388, 0.0, 0.995, 0.982], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.388, 0.0, 0.995, 0.982]: stand, talk to (e.g., self, a person, a group).", "At 1433.0s, [0.443, 0.003, 0.996, 0.989]: stand, talk to (e.g., self, a person, a group).", "At 1432.0s, [0.454, 0.0, 1.0, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "IuPC-z-M9u8.mkv", "clip": [1350, 1469], "conversations": [{"from": "human", "timestamps": 1380.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.001, 0.515, 0.977]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1469.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.479, 0.178, 1.0, 0.978]?"}, {"from": "gpt", "value": ["In the current frame, [0.479, 0.178, 1.0, 0.978]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1468.0s, [0.475, 0.166, 0.998, 0.979]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1467.0s, [0.442, 0.082, 1.0, 0.985]: stand, listen to (a person), watch (a person)."]}]}, {"video": "IuPC-z-M9u8.mkv", "clip": [1509, 1609], "conversations": [{"from": "human", "timestamps": 1539.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.062, 0.028, 0.588, 0.987]: watch (e.g., TV), sit."}, {"from": "human", "timestamps": 1609.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.68, 0.585, 0.924, 0.99]?"}, {"from": "gpt", "value": ["In the current frame, [0.68, 0.585, 0.924, 0.99]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1608.0s, [0.507, 0.473, 0.75, 0.924]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1605.0s to 1607.0s, [0.442, 0.002, 1.0, 0.992]->[0.518, 0.3, 0.817, 0.973]->[0.521, 0.413, 0.736, 0.979]: run/jog, carry/hold (an object), watch (a person).", "At 1604.0s, [0.065, 0.0, 0.999, 0.978]: run/jog, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1603.0s, [0.012, 0.145, 0.613, 0.992]: run/jog, carry/hold (an object)."]}]}, {"video": "IuPC-z-M9u8.mkv", "clip": [1602, 1662], "conversations": [{"from": "human", "timestamps": 1632.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.105, 0.336, 0.209, 0.666]?"}, {"from": "gpt", "value": ["In the current frame, [0.105, 0.336, 0.209, 0.666]: sit, carry/hold (an object).", "At 1631.0s, [0.0, 0.407, 0.112, 0.743]: sit, carry/hold (an object)."]}, {"from": "human", "timestamps": 1662.0, "value": 
"In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.036, 0.0, 0.723, 0.963]: sit."}]}, {"video": "IuPC-z-M9u8.mkv", "clip": [1639, 1698], "conversations": [{"from": "human", "timestamps": 1669.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.006, 0.16, 0.267, 0.689]: carry/hold (an object), sit; [0.189, 0.059, 0.77, 0.918]: carry/hold (an object), talk to (e.g., self, a person, a group); [0.58, 0.011, 1.0, 0.952]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1698.0, "value": "Describe the actions of the person at bbox [0.114, 0.108, 0.671, 0.982] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.114, 0.108, 0.671, 0.982]: sit.", "At 1697.0s, [0.183, 0.097, 0.595, 0.91]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1695.0s to 1696.0s, [0.147, 0.059, 0.748, 0.891]->[0.228, 0.045, 0.724, 0.851]: stand, kiss (a person)."]}]}, {"video": "IuPC-z-M9u8.mkv", "clip": [1670, 1704], "conversations": [{"from": "human", "timestamps": 1700.0, "value": "What actions is the person in bbox [0.47, 0.018, 0.986, 0.913] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.47, 0.018, 0.986, 0.913]: talk to (e.g., self, a person, a group), watch (a person).", "At 1699.0s, [0.47, 0.033, 0.999, 0.971]: stand, watch (a person).", "From 1697.0s to 1698.0s, [0.359, 0.019, 1.0, 0.953]->[0.439, 0.004, 1.0, 0.967]: sit, listen to (a person), watch (a person).", "From 1695.0s to 1696.0s, [0.53, 0.031, 0.996, 0.968]->[0.474, 0.027, 1.0, 0.963]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1704.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.233, 0.285, 0.53, 0.697]: listen to (a person), sit, touch (an object), watch (a person); [0.616, 0.343, 0.8, 0.626]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "J1jDc2rTJlg.mkv", "clip": [914, 945], "conversations": [{"from": "human", "timestamps": 944.0, "value": "What actions is the person in bbox [0.049, 0.008, 0.675, 0.982] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.049, 0.008, 0.675, 0.982]: sit, talk to (e.g., self, a person, a group).", "At 943.0s, [0.059, 0.006, 0.743, 0.975]: sit, talk to (e.g., self, a person, a group).", "From 940.0s to 942.0s, [0.318, 0.027, 0.992, 0.982]->[0.325, 0.01, 0.986, 0.983]->[0.329, 0.033, 0.997, 0.938]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 945.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.039, 0.009, 0.676, 0.983]: read, sit, talk to (e.g., self, a person, a group)."}]}, {"video": "J1jDc2rTJlg.mkv", "clip": [903, 1000], "conversations": [{"from": "human", "timestamps": 933.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.023, 0.151, 0.104, 0.569]: listen to (a person), stand; [0.091, 0.2, 0.202, 0.624]: listen to (a person), stand, watch (a person); [0.289, 0.118, 0.39, 0.669]: listen to (a person), stand, watch (a person); [0.506, 0.374, 0.602, 0.611]: 
carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group); [0.739, 0.149, 0.841, 0.716]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1000.0, "value": "For the person at bbox [0.251, 0.046, 0.988, 0.97], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.251, 0.046, 0.988, 0.97]: stand.", "At 999.0s, [0.265, 0.025, 0.943, 0.98]: stand.", "From 995.0s to 998.0s, [0.21, 0.037, 0.934, 0.927]->[0.164, 0.024, 0.937, 0.939]->[0.194, 0.026, 0.927, 0.951]->[0.149, 0.006, 0.78, 0.986]: stand, talk to (e.g., self, a person, a group).", "From 993.0s to 994.0s, [0.191, 0.034, 0.889, 0.972]->[0.186, 0.049, 1.0, 0.968]: stand.", "From 988.0s to 992.0s, [0.321, 0.08, 0.98, 0.962]->[0.222, 0.074, 0.991, 0.969]->[0.204, 0.021, 0.896, 0.928]->[0.173, 0.026, 0.887, 0.946]->[0.166, 0.028, 0.864, 0.938]: stand, talk to (e.g., self, a person, a group).", "At 987.0s, [0.284, 0.06, 0.927, 0.966]: stand, watch (a person).", "At 986.0s, [0.251, 0.046, 0.974, 0.976]: stand.", "From 983.0s to 985.0s, [0.149, 0.008, 0.738, 0.949]->[0.147, 0.0, 0.772, 0.966]->[0.137, 0.0, 0.779, 0.989]: stand, talk to (e.g., self, a person, a group).", "At 982.0s, [0.151, 0.0, 0.674, 0.939]: talk to (e.g., self, a person, a group), watch (a person).", "From 980.0s to 981.0s, [0.059, 0.0, 0.499, 0.96]->[0.109, 0.0, 0.686, 0.964]: stand, listen to (a person).", "At 979.0s, [0.013, 0.008, 0.47, 0.965]: stand, listen to (a person), watch (a person).", "At 978.0s, [0.023, 0.015, 0.464, 0.967]: stand, listen to (a person).", "At 977.0s, [0.037, 0.006, 0.521, 0.98]: stand.", "At 976.0s, [0.043, 0.004, 0.676, 0.98]: enter."]}]}, {"video": "J1jDc2rTJlg.mkv", "clip": [931, 1009], "conversations": [{"from": "human", "timestamps": 961.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.408, 0.018, 0.934, 0.975]: listen to (a person), stand."}, {"from": "human", "timestamps": 1009.0, "value": "What actions is the person in bbox [0.17, 0.028, 0.979, 0.968] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.17, 0.028, 0.979, 0.968]: stand, talk to (e.g., self, a person, a group).", "From 1003.0s to 1008.0s, [0.248, 0.047, 0.946, 0.971]->[0.314, 0.026, 0.943, 0.96]->[0.335, 0.063, 0.95, 0.96]->[0.275, 0.08, 0.982, 0.981]->[0.362, 0.065, 0.923, 0.966]->[0.188, 0.033, 0.932, 0.972]: stand, talk to (e.g., self, a person, a group).", "From 999.0s to 1002.0s, [0.265, 0.025, 0.943, 0.98]->[0.251, 0.046, 0.988, 0.97]->[0.213, 0.06, 0.968, 0.971]->[0.242, 0.054, 1.0, 0.971]: stand.", "From 995.0s to 998.0s, [0.21, 0.037, 0.934, 0.927]->[0.164, 0.024, 0.937, 0.939]->[0.194, 0.026, 0.927, 0.951]->[0.149, 0.006, 0.78, 0.986]: stand, talk to (e.g., self, a person, a group).", "From 993.0s to 994.0s, [0.191, 0.034, 0.889, 0.972]->[0.186, 0.049, 1.0, 0.968]: stand.", "From 988.0s to 992.0s, [0.321, 0.08, 0.98, 0.962]->[0.222, 0.074, 0.991, 0.969]->[0.204, 0.021, 0.896, 0.928]->[0.173, 0.026, 0.887, 0.946]->[0.166, 0.028, 0.864, 0.938]: stand, talk to (e.g., self, a person, a group).", "At 987.0s, [0.284, 0.06, 0.927, 0.966]: stand, watch (a person).", "At 986.0s, [0.251, 0.046, 0.974, 0.976]: stand.", "From 983.0s to 985.0s, [0.149, 0.008, 0.738, 0.949]->[0.147, 0.0, 0.772, 0.966]->[0.137, 0.0, 0.779, 0.989]: stand, talk to (e.g., self, a person, a group).", "At 982.0s, [0.151, 0.0, 0.674,
0.939]: talk to (e.g., self, a person, a group), watch (a person).", "From 980.0s to 981.0s, [0.059, 0.0, 0.499, 0.96]->[0.109, 0.0, 0.686, 0.964]: stand, listen to (a person).", "At 979.0s, [0.013, 0.008, 0.47, 0.965]: stand, listen to (a person), watch (a person).", "At 978.0s, [0.023, 0.015, 0.464, 0.967]: stand, listen to (a person).", "At 977.0s, [0.037, 0.006, 0.521, 0.98]: stand.", "At 976.0s, [0.043, 0.004, 0.676, 0.98]: enter."]}]}, {"video": "J1jDc2rTJlg.mkv", "clip": [1025, 1075], "conversations": [{"from": "human", "timestamps": 1055.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.61, 0.268, 0.691, 0.851]?"}, {"from": "gpt", "value": ["At Now, [0.61, 0.268, 0.691, 0.851]: stand, listen to (a person).", "From 1050.0s to 1054.0s, [0.598, 0.245, 0.678, 0.788]->[0.602, 0.253, 0.68, 0.803]->[0.604, 0.257, 0.68, 0.812]->[0.608, 0.248, 0.682, 0.828]->[0.602, 0.256, 0.687, 0.835]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1075.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.429, 0.023, 0.988, 1.0]: stand."}]}, {"video": "J1jDc2rTJlg.mkv", "clip": [1130, 1179], "conversations": [{"from": "human", "timestamps": 1160.0, "value": "What actions is the person in bbox [0.299, 0.058, 0.948, 0.973] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.299, 0.058, 0.948, 0.973]: stand.", "From 1157.0s to 1159.0s, [0.385, 0.186, 0.897, 1.0]->[0.323, 0.121, 0.916, 1.0]->[0.286, 0.09, 0.895, 1.0]: stand."]}, {"from": "human", "timestamps": 1179.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.166, 0.156, 0.455, 0.989]: carry/hold (an object), stand, listen to (a person); [0.456, 0.04, 0.974, 0.987]: watch (a person), stand."}]}, {"video": "J1jDc2rTJlg.mkv", "clip": [1206, 1256], "conversations": [{"from": "human", "timestamps": 1236.0, "value": "Describe the actions of the person at bbox [0.55, 0.254, 1.0, 0.986] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.55, 0.254, 1.0, 0.986]: stand, listen to (a person), watch (a person).", "At 1235.0s, [0.083, 0.0, 0.685, 0.975]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1256.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.083, 0.468, 0.149, 0.655]: listen to (a person), stand, watch (a person); [0.184, 0.45, 0.246, 0.749]: listen to (a person), stand, watch (a person); [0.249, 0.437, 0.318, 0.76]: listen to (a person), stand, watch (a person); [0.3, 0.421, 0.345, 0.673]: listen to (a person), stand; [0.345, 0.425, 0.404, 0.767]: listen to (a person), stand, watch (a person); [0.393, 0.431, 0.433, 0.698]: listen to (a person), stand, watch (a person); [0.433, 0.425, 0.49, 0.738]: watch (a person), stand; [0.508, 0.581, 0.717, 0.974]: listen to (a person), sit, watch (a person); [0.746, 0.627, 0.997, 0.997]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "J1jDc2rTJlg.mkv", "clip": [1261, 1331], "conversations": [{"from": "human", "timestamps": 1291.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.167, 0.124, 0.631, 0.967]: talk to (e.g., self, a person,
a group), walk."}, {"from": "human", "timestamps": 1331.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.438, 0.048, 0.932, 0.972]?"}, {"from": "gpt", "value": ["At Now, [0.438, 0.048, 0.932, 0.972]: stand, talk to (e.g., self, a person, a group).", "From 1329.0s to 1330.0s, [0.389, 0.07, 0.923, 0.986]->[0.424, 0.044, 0.957, 0.997]: stand.", "At 1328.0s, [0.517, 0.184, 0.655, 0.977]: stand, listen to (a person), watch (a person).", "At 1327.0s, [0.51, 0.181, 0.642, 0.97]: stand, listen to (a person).", "At 1326.0s, [0.504, 0.178, 0.641, 0.961]: stand, listen to (a person), watch (a person)."]}]}, {"video": "J1jDc2rTJlg.mkv", "clip": [1338, 1378], "conversations": [{"from": "human", "timestamps": 1368.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.022, 0.024, 0.615, 0.983]: listen to (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1378.0, "value": "For the person at bbox [0.676, 0.131, 0.779, 0.499], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.676, 0.131, 0.779, 0.499]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1377.0s, [0.665, 0.131, 0.769, 0.477]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1376.0s, [0.666, 0.125, 0.765, 0.483]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1374.0s to 1375.0s, [0.656, 0.113, 0.749, 0.403]->[0.66, 0.121, 0.753, 0.41]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1371.0s to 1373.0s, [0.267, 0.324, 0.562, 0.981]->[0.26, 0.33, 0.542, 0.981]->[0.264, 0.323, 0.534, 0.958]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "J1jDc2rTJlg.mkv", "clip": [1366, 1411], "conversations": [{"from": "human", "timestamps": 1396.0, "value": "Describe the actions of the person at bbox [0.424, 0.051, 1.0, 0.974] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.424, 0.051, 1.0, 0.974]: sit, talk to (e.g., self, a person, a group).", "At 1395.0s, [0.421, 0.052, 1.0, 0.984]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1411.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.431, 0.008, 0.997, 0.987]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "J1jDc2rTJlg.mkv", "clip": [1374, 1453], "conversations": [{"from": "human", "timestamps": 1404.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.414, 0.056, 1.0, 0.97]: listen to (a person), sit."}, {"from": "human", "timestamps": 1453.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.358, 0.036, 0.988, 0.972]?"}, {"from": "gpt", "value": ["At Now, [0.358, 0.036, 0.988, 0.972]: stand, talk to (e.g., self, a person, a group).", "From 1449.0s to 1452.0s, [0.319, 0.03, 0.978, 0.98]->[0.342, 0.018, 0.97, 0.987]->[0.331, 0.02, 0.982, 0.959]->[0.328, 0.021, 0.997, 0.955]: stand, talk to (e.g., self, a person, a group).", "At 1448.0s, [0.0, 0.0, 0.448, 0.97]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1446.0s to 1447.0s, [0.0, 0.006, 0.402, 0.95]->[0.0, 0.019, 0.457, 0.952]: stand, talk to (e.g., self, a person, a group).", 
"From 1443.0s to 1445.0s, [0.002, 0.023, 0.605, 0.98]->[0.005, 0.072, 0.584, 0.973]->[0.025, 0.091, 0.595, 0.965]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "J1jDc2rTJlg.mkv", "clip": [1472, 1507], "conversations": [{"from": "human", "timestamps": 1502.0, "value": "For the person at bbox [0.24, 0.012, 0.972, 0.981], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.24, 0.012, 0.972, 0.981]: grab (a person), talk to (e.g., self, a person, a group).", "At 1501.0s, [0.008, 0.0, 0.24, 0.556]: hug (a person)."]}, {"from": "human", "timestamps": 1507.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.134, 0.542, 0.277, 0.985]: watch (a person), sit; [0.196, 0.121, 0.538, 1.0]: dance; [0.352, 0.044, 0.853, 1.0]: dance."}]}, {"video": "J1jDc2rTJlg.mkv", "clip": [1502, 1610], "conversations": [{"from": "human", "timestamps": 1532.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.155, 0.037, 0.759, 0.973]: walk."}, {"from": "human", "timestamps": 1610.0, "value": "Describe the actions of the person at bbox [0.027, 0.076, 0.742, 0.987] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.027, 0.076, 0.742, 0.987]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1608.0s to 1609.0s, [0.019, 0.054, 0.717, 0.976]->[0.045, 0.046, 0.753, 0.975]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1607.0s, [0.034, 0.047, 0.745, 0.985]: stand, listen to (a person), watch (a person)."]}]}, {"video": "J1jDc2rTJlg.mkv", "clip": [1590, 1652], "conversations": [{"from": "human", "timestamps": 1620.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.012, 0.53, 0.992]?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.012, 0.53, 0.992]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1619.0s, [0.079, 0.098, 0.617, 0.983]: stand, talk to (e.g., self, a person, a group).", "At 1618.0s, [0.37, 0.347, 0.842, 0.991]: stand."]}, {"from": "human", "timestamps": 1652.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.39, 0.076, 1.0, 0.97]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "J1jDc2rTJlg.mkv", "clip": [1620, 1700], "conversations": [{"from": "human", "timestamps": 1650.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.008, 0.204, 0.246, 0.959]: walk; [0.116, 0.088, 0.692, 0.964]: stand."}, {"from": "human", "timestamps": 1700.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.007, 0.019, 0.377, 1.0]?"}, {"from": "gpt", "value": ["At Now, [0.007, 0.019, 0.377, 1.0]: stand.", "At 1699.0s, [0.0, 0.011, 0.14, 0.972]: stand, listen to (a person)."]}]}, {"video": "J4bt4y9ShTA.mkv", "clip": [889, 948], "conversations": [{"from": "human", "timestamps": 919.0, "value": "For the person at bbox [0.465, 0.103, 0.748, 0.987], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.465, 0.103, 0.748, 0.987]: walk, carry/hold (an object), read, listen to (a person).", "At 918.0s, [0.447, 0.113, 0.735, 1.0]: walk, carry/hold (an 
object), listen to (a person).", "From 916.0s to 917.0s, [0.43, 0.069, 0.819, 0.97]->[0.392, 0.087, 0.67, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 915.0s, [0.474, 0.023, 0.865, 1.0]: walk, carry/hold (an object), listen to (a person).", "From 913.0s to 914.0s, [0.455, 0.047, 0.862, 0.982]->[0.46, 0.047, 0.855, 0.964]: walk, carry/hold (an object), read, listen to (a person).", "From 911.0s to 912.0s, [0.482, 0.009, 0.825, 0.959]->[0.414, 0.022, 0.781, 0.968]: walk, carry/hold (an object), listen to (a person).", "At 910.0s, [0.456, 0.068, 0.695, 1.0]: walk, carry/hold (an object), read, listen to (a person).", "At 909.0s, [0.469, 0.135, 0.7, 0.989]: walk, carry/hold (an object), listen to (a person).", "At 908.0s, [0.523, 0.095, 0.695, 0.981]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 907.0s, [0.535, 0.078, 0.682, 0.559]: walk, carry/hold (an object)."]}, {"from": "human", "timestamps": 948.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.251, 0.012, 0.912, 0.999]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "J4bt4y9ShTA.mkv", "clip": [923, 976], "conversations": [{"from": "human", "timestamps": 953.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.186, 0.035, 0.821, 0.987]: watch (a person), talk to (e.g., self, a person, a group), bend/bow (at the waist); [0.779, 0.0, 0.999, 0.995]: watch (a person), stand."}, {"from": "human", "timestamps": 976.0, "value": "Describe the actions of the person at bbox [0.439, 0.041, 0.864, 0.957] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.439, 0.041, 0.864, 0.957]: bend/bow (at the waist), stand, carry/hold (an object).", "At 975.0s, [0.496, 0.157, 0.838, 0.951]: stand, carry/hold (an object)."]}]}, {"video": "J4bt4y9ShTA.mkv", "clip": [958, 996], "conversations": [{"from": "human", "timestamps": 988.0, "value": "Describe the actions of the person at bbox [0.169, 0.614, 0.329, 0.993] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.169, 0.614, 0.329, 0.993]: stand, watch (a person).", "At 987.0s, [0.194, 0.785, 0.333, 0.993]: stand, watch (a person)."]}, {"from": "human", "timestamps": 996.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.111, 0.0, 0.956, 0.984]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.656, 0.007, 0.999, 0.946]: listen to (a person), stand."}]}, {"video": "J4bt4y9ShTA.mkv", "clip": [1003, 1090], "conversations": [{"from": "human", "timestamps": 1033.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.162, 0.051, 0.704, 0.989]: watch (a person), stand."}, {"from": "human", "timestamps": 1090.0, "value": "Describe the actions of the person at bbox [0.328, 0.165, 0.462, 0.875] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.328, 0.165, 0.462, 0.875]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1089.0s, [0.367, 0.139, 0.553, 0.871]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 
1088.0s, [0.632, 0.032, 0.861, 0.989]: walk, carry/hold (an object)."]}]}, {"video": "J4bt4y9ShTA.mkv", "clip": [1098, 1132], "conversations": [{"from": "human", "timestamps": 1128.0, "value": "For the person at bbox [0.39, 0.203, 0.552, 0.882], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.39, 0.203, 0.552, 0.882]: stand, watch (e.g., TV).", "At 1127.0s, [0.347, 0.176, 0.52, 0.891]: stand, walk.", "At 1126.0s, [0.311, 0.164, 0.515, 0.899]: stand, carry/hold (an object), listen to (a person).", "At 1125.0s, [0.314, 0.149, 0.492, 0.889]: stand, take (an object) from (a person), watch (a person)."]}, {"from": "human", "timestamps": 1132.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.107, 0.37, 0.331, 0.978]: listen to (a person), sit, eat; [0.298, 0.087, 0.541, 0.969]: carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.554, 0.408, 0.771, 0.986]: listen to (a person), sit, watch (a person)."}]}, {"video": "J4bt4y9ShTA.mkv", "clip": [1119, 1170], "conversations": [{"from": "human", "timestamps": 1149.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.4, 0.0, 0.997, 0.984]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1170.0, "value": "Describe the actions of the person at bbox [0.417, 0.005, 0.995, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.417, 0.005, 0.995, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1169.0s, [0.041, 0.009, 0.923, 0.986]: sit, watch (a person).", "From 1167.0s to 1168.0s, [0.533, 0.0, 0.989, 0.972]->[0.039, 0.026, 0.919, 0.98]: sit, listen to (a person), watch (a person).", "At 1166.0s, [0.559, 0.022, 0.983, 0.985]: stand, listen to (a person).", "At 1165.0s, [0.514, 0.031, 0.975, 0.96]: stand, listen to (a person), watch (a person).", "At 1164.0s, [0.506, 0.024, 0.967, 0.967]: stand, listen to (a person)."]}]}, {"video": "J4bt4y9ShTA.mkv", "clip": [1181, 1226], "conversations": [{"from": "human", "timestamps": 1211.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.225, 0.189, 0.395, 0.925]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1226.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.547, 0.184, 0.629, 0.591]?"}, {"from": "gpt", "value": ["At Now, [0.547, 0.184, 0.629, 0.591]: walk, talk to (e.g., self, a person, a group).", "At 1225.0s, [0.535, 0.207, 0.625, 0.595]: walk.", "From 1223.0s to 1224.0s, [0.676, 0.433, 0.739, 0.668]->[0.59, 0.268, 0.662, 0.598]: walk, listen to (a person)."]}]}, {"video": "J4bt4y9ShTA.mkv", "clip": [1238, 1291], "conversations": [{"from": "human", "timestamps": 1268.0, "value": "What actions did the person at bbox [0.42, 0.075, 0.927, 0.995] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.42, 0.075, 0.927, 0.995]: stand, listen to (a person), watch (a person).", "At 1267.0s, [0.321, 0.019, 1.0, 0.98]: stand, listen to (a person).", "At 1266.0s, [0.488, 0.056, 1.0, 0.98]: stand, listen to (a person), watch (a person).", "From 1264.0s to 1265.0s, [0.527, 0.037, 1.0, 0.987]->[0.495, 0.056, 1.0, 1.0]: stand, listen to (a person).", "From 1262.0s to 
1263.0s, [0.485, 0.072, 0.994, 0.983]->[0.364, 0.065, 0.939, 0.993]: stand, listen to (a person), watch (a person).", "At 1261.0s, [0.545, 0.059, 1.0, 1.0]: stand, grab (a person), listen to (a person), watch (a person).", "At 1260.0s, [0.506, 0.024, 0.994, 0.984]: stand, listen to (a person), watch (a person).", "At 1259.0s, [0.514, 0.054, 0.999, 0.979]: stand, listen to (a person).", "At 1258.0s, [0.417, 0.018, 0.993, 0.986]: stand, talk to (e.g., self, a person, a group).", "At 1257.0s, [0.529, 0.072, 0.993, 0.995]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1255.0s to 1256.0s, [0.489, 0.006, 0.961, 0.968]->[0.368, 0.021, 0.977, 0.987]: stand, talk to (e.g., self, a person, a group).", "At 1254.0s, [0.516, 0.01, 0.997, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1253.0s, [0.608, 0.012, 1.0, 0.967]: stand, talk to (e.g., self, a person, a group).", "At 1252.0s, [0.54, 0.025, 1.0, 0.996]: walk, grab (a person), talk to (e.g., self, a person, a group).", "At 1251.0s, [0.46, 0.0, 0.958, 0.968]: walk, talk to (e.g., self, a person, a group).", "From 1247.0s to 1249.0s, [0.635, 0.194, 0.926, 1.0]->[0.602, 0.195, 0.975, 0.989]->[0.67, 0.121, 1.0, 0.99]: walk, talk to (e.g., self, a person, a group).", "At 1246.0s, [0.574, 0.194, 0.896, 0.99]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1245.0s, [0.522, 0.201, 0.832, 1.0]: walk, talk to (e.g., self, a person, a group).", "At 1244.0s, [0.582, 0.248, 0.858, 1.0]: walk, listen to (a person), talk to (e.g., self, a person, a group).", "At 1243.0s, [0.583, 0.208, 1.0, 0.994]: walk, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1291.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.027, 0.346, 0.153, 0.896]: listen to (a person), stand; [0.158, 0.348, 0.297, 0.666]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.301, 0.156, 0.569, 0.841]: listen to (a person), stand, watch (a person); [0.632, 0.16, 0.975, 0.932]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "J4bt4y9ShTA.mkv", "clip": [1287, 1374], "conversations": [{"from": "human", "timestamps": 1317.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.182, 0.156, 0.503, 0.88]: carry/hold (an object), stand, listen to (a person); [0.38, 0.296, 0.624, 0.723]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.544, 0.23, 0.647, 0.7]: carry/hold (an object), stand; [0.622, 0.203, 0.895, 0.98]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1374.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.162, 0.51, 0.227, 0.801]?"}, {"from": "gpt", "value": ["At Now, [0.162, 0.51, 0.227, 0.801]: run/jog, carry/hold (an object).", "From 1372.0s to 1373.0s, [0.141, 0.527, 0.252, 0.988]->[0.138, 0.527, 0.214, 0.869]: run/jog, carry/hold (an object)."]}]}, {"video": "J4bt4y9ShTA.mkv", "clip": [1323, 1360], "conversations": [{"from": "human", "timestamps": 1353.0, "value": "Describe the actions of the person at bbox [0.51, 0.369, 0.797, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.51, 0.369, 0.797, 1.0]: walk, carry/hold (an object), listen to (a person), watch (a person).", 
"At 1352.0s, [0.517, 0.429, 0.752, 0.996]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1351.0s, [0.552, 0.246, 0.878, 0.983]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1350.0s, [0.632, 0.198, 0.901, 0.994]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 1347.0s to 1349.0s, [0.597, 0.282, 0.769, 0.997]->[0.636, 0.13, 0.863, 0.992]->[0.642, 0.059, 0.916, 0.998]: walk, carry/hold (an object), listen to (a person)."]}, {"from": "human", "timestamps": 1360.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.159, 0.19, 0.537, 0.975]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.471, 0.609, 0.578, 0.993]: walk; [0.67, 0.295, 0.956, 0.981]: listen to (a person), stand, watch (a person)."}]}, {"video": "J4bt4y9ShTA.mkv", "clip": [1383, 1425], "conversations": [{"from": "human", "timestamps": 1413.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.429, 0.195, 0.954, 1.0]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1425.0, "value": "What actions did the person at bbox [0.41, 0.109, 0.982, 0.986] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.41, 0.109, 0.982, 0.986]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1424.0s, [0.478, 0.128, 0.974, 0.986]: stand, talk to (e.g., self, a person, a group).", "At 1423.0s, [0.393, 0.104, 0.977, 0.995]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1422.0s, [0.443, 0.092, 0.985, 0.99]: stand, listen to (a person).", "From 1419.0s to 1421.0s, [0.451, 0.141, 0.977, 0.997]->[0.458, 0.131, 0.975, 0.987]->[0.416, 0.117, 0.96, 0.989]: stand, listen to (a person), watch (a person).", "At 1418.0s, [0.473, 0.128, 0.969, 0.986]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "J4bt4y9ShTA.mkv", "clip": [1369, 1489], "conversations": [{"from": "human", "timestamps": 1399.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.135, 0.206, 0.915, 0.987]: carry/hold (an object), stand, pull (an object), listen to (a person)."}, {"from": "human", "timestamps": 1489.0, "value": "For the person at bbox [0.705, 0.104, 0.827, 0.88], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.705, 0.104, 0.827, 0.88]: walk, carry/hold (an object).", "At 1488.0s, [0.565, 0.114, 0.819, 0.89]: stand, carry/hold (an object), touch (an object).", "At 1487.0s, [0.652, 0.084, 0.807, 0.861]: stand, carry/hold (an object)."]}]}, {"video": "J4bt4y9ShTA.mkv", "clip": [1428, 1470], "conversations": [{"from": "human", "timestamps": 1458.0, "value": "For the person at bbox [0.558, 0.387, 0.731, 0.985], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.558, 0.387, 0.731, 0.985]: walk.", "From 1455.0s to 1457.0s, [0.194, 0.316, 0.423, 0.994]->[0.343, 0.286, 0.593, 0.991]->[0.336, 0.277, 0.584, 0.994]: stand, talk to (e.g., self, a person, a group).", "At 1454.0s, [0.536, 0.619, 0.636, 0.974]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1470.0, "value": "Where is each person positioned, and what actions 
are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.056, 0.013, 0.637, 0.992]: stand, answer phone."}]}, {"video": "J4bt4y9ShTA.mkv", "clip": [1594, 1662], "conversations": [{"from": "human", "timestamps": 1624.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.276, 0.024, 0.9, 0.985]: stand."}, {"from": "human", "timestamps": 1662.0, "value": "Describe the actions of the person at bbox [0.476, 0.21, 0.599, 0.88] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.476, 0.21, 0.599, 0.88]: stand, carry/hold (an object).", "At 1661.0s, [0.406, 0.223, 0.537, 0.859]: stand, carry/hold (an object)."]}]}, {"video": "JNb4nWexD0I.mkv", "clip": [897, 956], "conversations": [{"from": "human", "timestamps": 927.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.174, 0.108, 0.77, 0.962]: watch (a person), stand."}, {"from": "human", "timestamps": 956.0, "value": "What actions is the person in bbox [0.514, 0.141, 0.846, 0.96] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.514, 0.141, 0.846, 0.96]: stand, listen to (a person), watch (a person).", "From 950.0s to 955.0s, [0.631, 0.134, 0.787, 0.445]->[0.601, 0.191, 0.867, 0.87]->[0.496, 0.147, 0.942, 0.976]->[0.457, 0.119, 0.909, 0.972]->[0.52, 0.142, 0.912, 0.921]->[0.556, 0.14, 0.903, 0.881]: stand, listen to (a person), watch (a person)."]}]}, {"video": "JNb4nWexD0I.mkv", "clip": [930, 965], "conversations": [{"from": "human", "timestamps": 960.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.257, 0.429, 1.0]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.522, 0.167, 0.805, 0.907]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 965.0, "value": "What actions did the person at bbox [0.007, 0.234, 0.393, 0.993] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.007, 0.234, 0.393, 0.993]: stand, listen to (a person), watch (a person).", "From 962.0s to 964.0s, [0.009, 0.255, 0.404, 1.0]->[0.004, 0.239, 0.401, 0.984]->[0.0, 0.231, 0.389, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 961.0s, [0.006, 0.26, 0.412, 1.0]: stand, listen to (a person), watch (a person).", "At 960.0s, [0.0, 0.257, 0.429, 1.0]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 956.0s to 959.0s, [0.0, 0.273, 0.388, 0.981]->[0.001, 0.281, 0.412, 1.0]->[0.0, 0.251, 0.414, 1.0]->[0.0, 0.224, 0.413, 0.989]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 955.0s, [0.0, 0.264, 0.39, 0.992]: stand.", "From 952.0s to 954.0s, [0.0, 0.282, 0.37, 0.985]->[0.0, 0.255, 0.401, 1.0]->[0.0, 0.264, 0.384, 0.996]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 949.0s to 951.0s, [0.0, 0.369, 0.164, 0.991]->[0.0, 0.418, 0.246, 1.0]->[0.001, 0.34, 0.3, 0.999]: walk, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "JNb4nWexD0I.mkv", "clip": [936, 971], "conversations": [{"from": "human", "timestamps": 966.0, "value": "Where is each person positioned, and what actions are they performing in
the current frame?"}, {"from": "gpt", "value": "At now, [0.201, 0.111, 0.721, 0.971]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 971.0, "value": "What actions is the person in bbox [0.045, 0.571, 0.409, 0.978] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.045, 0.571, 0.409, 0.978]: stand, listen to (a person), watch (a person).", "From 969.0s to 970.0s, [0.082, 0.629, 0.459, 0.994]->[0.048, 0.603, 0.413, 0.989]: stand, listen to (a person), watch (a person).", "At 968.0s, [0.002, 0.229, 0.445, 0.989]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 967.0s, [0.127, 0.024, 0.687, 0.973]: walk.", "At 966.0s, [0.201, 0.111, 0.721, 0.971]: walk, carry/hold (an object).", "At 965.0s, [0.461, 0.135, 0.818, 0.965]: walk.", "At 964.0s, [0.52, 0.165, 0.801, 0.929]: stand, listen to (a person), watch (a person)."]}]}, {"video": "JNb4nWexD0I.mkv", "clip": [1005, 1042], "conversations": [{"from": "human", "timestamps": 1035.0, "value": "What actions did the person at bbox [0.017, 0.229, 0.629, 0.988] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.017, 0.229, 0.629, 0.988]: stand, listen to (a person), watch (a person).", "From 1031.0s to 1034.0s, [0.0, 0.213, 0.585, 0.984]->[0.0, 0.171, 0.608, 0.953]->[0.006, 0.237, 0.612, 0.983]->[0.0, 0.16, 0.615, 0.956]: stand, listen to (a person), watch (a person).", "At 1030.0s, [0.009, 0.151, 0.678, 0.957]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1029.0s, [0.0, 0.169, 0.59, 0.961]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1028.0s, [0.004, 0.16, 0.547, 0.964]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1026.0s to 1027.0s, [0.0, 0.241, 0.548, 0.974]->[0.006, 0.232, 0.531, 0.983]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1025.0s, [0.011, 0.131, 0.679, 0.985]: stand, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "At 1024.0s, [0.0, 0.186, 0.62, 0.998]: stand, touch (an object), listen to (a person), watch (a person).", "At 1023.0s, [0.0, 0.202, 0.583, 0.98]: stand, touch (an object), listen to (a person).", "From 1020.0s to 1022.0s, [0.007, 0.199, 0.571, 0.977]->[0.005, 0.202, 0.598, 0.974]->[0.005, 0.213, 0.653, 0.974]: stand, touch (an object), listen to (a person), watch (a person).", "From 1017.0s to 1019.0s, [0.009, 0.216, 0.614, 0.955]->[0.018, 0.161, 0.712, 0.974]->[0.005, 0.158, 0.608, 0.966]: stand, listen to (a person), watch (a person).", "At 1016.0s, [0.005, 0.189, 0.563, 0.972]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1015.0s, [0.017, 0.179, 0.628, 0.976]: stand, touch (an object), listen to (a person), watch (a person).", "From 1011.0s to 1014.0s, [0.0, 0.213, 0.542, 0.969]->[0.0, 0.217, 0.546, 0.982]->[0.001, 0.206, 0.546, 0.967]->[0.003, 0.186, 0.599, 0.971]: stand, listen to (a person), watch (a person).", "At 1010.0s, [0.0, 0.202, 0.549, 0.962]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1008.0s to 1009.0s, [0.0, 0.244, 0.558, 1.0]->[0.0, 0.243, 0.551, 0.981]: stand, listen to (a person), watch (a person).", "At 1007.0s, [0.0, 0.278, 0.416, 0.976]: stand, touch (an object), listen to (a person), watch (a person).", "From 1004.0s to 1006.0s, [0.01, 0.116, 0.648, 0.976]->[0.017, 0.25, 0.682, 0.973]->[0.01, 0.255, 0.599, 
0.961]: stand.", "At 1003.0s, [0.196, 0.176, 0.754, 0.973]: stand, touch (an object), talk to (e.g., self, a person, a group).", "From 1001.0s to 1002.0s, [0.272, 0.235, 0.529, 0.795]->[0.275, 0.266, 0.514, 0.919]: stand, touch (an object).", "At 1000.0s, [0.285, 0.226, 0.51, 0.93]: stand.", "From 998.0s to 999.0s, [0.452, 0.115, 0.71, 0.563]->[0.338, 0.209, 0.669, 0.708]: stand, carry/hold (an object), watch (a person).", "At 997.0s, [0.17, 0.079, 0.9, 0.909]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 996.0s, [0.352, 0.014, 1.0, 0.971]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 995.0s, [0.378, 0.031, 0.935, 0.951]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1042.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.079, 0.157, 0.865, 0.99]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "JNb4nWexD0I.mkv", "clip": [1069, 1122], "conversations": [{"from": "human", "timestamps": 1099.0, "value": "Describe the actions of the person at bbox [0.063, 0.166, 0.701, 0.914] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.063, 0.166, 0.701, 0.914]: sit.", "At 1098.0s, [0.142, 0.133, 0.897, 0.895]: sit, touch (an object).", "At 1097.0s, [0.208, 0.112, 0.939, 0.914]: sit, carry/hold (an object).", "From 1095.0s to 1096.0s, [0.194, 0.132, 0.964, 0.899]->[0.202, 0.114, 0.968, 0.904]: sit, write."]}, {"from": "human", "timestamps": 1122.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.043, 0.058, 1.0, 0.976]: sit, talk to (e.g., self, a person, a group), stand."}]}, {"video": "JNb4nWexD0I.mkv", "clip": [1148, 1241], "conversations": [{"from": "human", "timestamps": 1178.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.075, 0.118, 0.932, 0.992]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1241.0, "value": "For the person at bbox [0.489, 0.199, 0.995, 0.991], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.489, 0.199, 0.995, 0.991]: stand, listen to (a person), watch (a person).", "From 1232.0s to 1240.0s, [0.465, 0.215, 0.957, 0.982]->[0.455, 0.207, 0.956, 0.985]->[0.456, 0.211, 0.96, 0.986]->[0.469, 0.21, 0.955, 0.994]->[0.457, 0.213, 0.964, 0.996]->[0.462, 0.204, 0.975, 0.989]->[0.467, 0.207, 0.978, 0.987]->[0.466, 0.21, 0.979, 0.988]->[0.48, 0.209, 0.972, 0.995]: stand, listen to (a person), watch (a person).", "At 1231.0s, [0.467, 0.218, 0.97, 0.987]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1230.0s, [0.489, 0.2, 0.97, 0.976]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1229.0s, [0.473, 0.207, 0.964, 0.991]: stand, listen to (a person), watch (a person).", "At 1228.0s, [0.478, 0.199, 0.979, 0.978]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1227.0s, [0.482, 0.204, 0.976, 0.976]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1226.0s, [0.482, 0.211, 0.966, 0.982]: stand, listen to (a person), watch (a person).", "At 
1225.0s, [0.47, 0.2, 0.973, 0.986]: stand, watch (a person).", "At 1224.0s, [0.487, 0.187, 0.979, 0.976]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1220.0s to 1223.0s, [0.156, 0.097, 0.964, 0.965]->[0.463, 0.171, 0.966, 0.984]->[0.471, 0.2, 0.981, 0.988]->[0.482, 0.204, 0.979, 0.989]: stand, listen to (a person), watch (a person).", "At 1219.0s, [0.335, 0.048, 0.92, 0.991]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "JNb4nWexD0I.mkv", "clip": [1258, 1316], "conversations": [{"from": "human", "timestamps": 1288.0, "value": "Describe the actions of the person at bbox [0.552, 0.146, 0.824, 0.926] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.552, 0.146, 0.824, 0.926]: stand, talk to (e.g., self, a person, a group).", "From 1285.0s to 1287.0s, [0.563, 0.137, 0.818, 0.91]->[0.566, 0.143, 0.822, 0.911]->[0.57, 0.157, 0.839, 0.933]: stand, talk to (e.g., self, a person, a group).", "From 1282.0s to 1284.0s, [0.548, 0.126, 0.826, 0.938]->[0.551, 0.145, 0.826, 0.937]->[0.548, 0.154, 0.826, 0.908]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1279.0s to 1281.0s, [0.577, 0.094, 0.875, 0.97]->[0.539, 0.101, 0.857, 0.914]->[0.554, 0.123, 0.82, 0.933]: stand, talk to (e.g., self, a person, a group).", "At 1278.0s, [0.593, 0.143, 0.844, 0.998]: walk, watch (a person).", "From 1275.0s to 1277.0s, [0.532, 0.141, 0.917, 1.0]->[0.558, 0.188, 0.914, 0.985]->[0.558, 0.211, 0.819, 0.995]: walk, talk to (e.g., self, a person, a group).", "At 1274.0s, [0.578, 0.112, 0.958, 0.999]: stand.", "At 1273.0s, [0.616, 0.079, 0.971, 0.997]: stand, talk to (e.g., self, a person, a group).", "At 1272.0s, [0.615, 0.11, 0.969, 0.992]: stand, watch (a person).", "At 1271.0s, [0.59, 0.164, 0.965, 1.0]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person).", "From 1261.0s to 1270.0s, [0.48, 0.203, 0.981, 0.916]->[0.467, 0.192, 0.99, 0.938]->[0.459, 0.198, 0.989, 0.93]->[0.45, 0.221, 1.0, 0.939]->[0.447, 0.202, 0.984, 0.922]->[0.471, 0.211, 0.983, 0.933]->[0.481, 0.199, 0.981, 0.933]->[0.457, 0.206, 0.977, 0.923]->[0.465, 0.201, 0.98, 0.927]->[0.558, 0.242, 0.973, 0.957]: sit, listen to (a person), watch (a person).", "At 1260.0s, [0.464, 0.2, 0.989, 0.93]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1258.0s to 1259.0s, [0.472, 0.212, 0.995, 0.939]->[0.469, 0.215, 0.992, 0.928]: sit, listen to (a person).", "From 1255.0s to 1257.0s, [0.479, 0.199, 0.989, 0.931]->[0.492, 0.199, 0.987, 0.934]->[0.473, 0.201, 0.991, 0.933]: sit, talk to (e.g., self, a person, a group).", "At 1254.0s, [0.471, 0.204, 0.992, 0.922]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1252.0s to 1253.0s, [0.485, 0.196, 0.996, 0.946]->[0.457, 0.208, 0.992, 0.931]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1251.0s, [0.462, 0.193, 0.983, 0.924]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1316.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.154, 0.087, 0.479, 0.928]: stand, talk to (e.g., self, a person, a group); [0.627, 0.584, 0.961, 0.977]: listen to (a person), sit, watch (a person)."}]}, {"video": "JNb4nWexD0I.mkv", "clip": [1279, 1375], "conversations": [{"from": "human", "timestamps": 1309.0, "value": "What actions is each person performing, 
and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.405, 0.165, 0.666, 0.984]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1375.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.235, 0.122, 0.777, 0.972]?"}, {"from": "gpt", "value": ["At now, [0.235, 0.122, 0.777, 0.972]: stand.", "At 1374.0s, [0.277, 0.192, 0.819, 0.975]: sit, stand.", "From 1371.0s to 1373.0s, [0.323, 0.167, 0.929, 0.978]->[0.193, 0.159, 0.897, 1.0]->[0.357, 0.152, 0.823, 0.977]: stand.", "At 1370.0s, [0.371, 0.15, 0.976, 0.977]: stand, touch (an object).", "From 1368.0s to 1369.0s, [0.24, 0.207, 0.922, 0.987]->[0.36, 0.15, 0.998, 0.985]: stand, close (e.g., a door, a box).", "At 1367.0s, [0.281, 0.144, 1.0, 0.982]: stand, close (e.g., a door, a box), watch (a person).", "At 1366.0s, [0.519, 0.297, 0.995, 0.985]: stand, open (e.g., a window, a car door), watch (a person).", "From 1364.0s to 1365.0s, [0.532, 0.33, 0.942, 0.983]->[0.541, 0.343, 0.949, 0.978]: stand, listen to (a person), watch (a person)."]}]}, {"video": "JNb4nWexD0I.mkv", "clip": [1313, 1367], "conversations": [{"from": "human", "timestamps": 1343.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.13, 0.09, 0.43, 0.918]: listen to (a person), stand, watch (a person); [0.66, 0.571, 0.954, 0.961]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1367.0, "value": "What actions did the person at bbox [0.281, 0.144, 1.0, 0.982] perform now and earlier?"}, {"from": "gpt", "value": ["At now, [0.281, 0.144, 1.0, 0.982]: stand, close (e.g., a door, a box), watch (a person).", "At 1366.0s, [0.519, 0.297, 0.995, 0.985]: stand, open (e.g., a window, a car door), watch (a person).", "From 1364.0s to 1365.0s, [0.532, 0.33, 0.942, 0.983]->[0.541, 0.343, 0.949, 0.978]: stand, listen to (a person), watch (a person)."]}]}, {"video": "JNb4nWexD0I.mkv", "clip": [1382, 1416], "conversations": [{"from": "human", "timestamps": 1412.0, "value": "What actions is the person in bbox [0.416, 0.12, 0.994, 0.967] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At now, [0.416, 0.12, 0.994, 0.967]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1410.0s to 1411.0s, [0.385, 0.116, 0.994, 0.968]->[0.346, 0.118, 0.989, 0.949]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1409.0s, [0.373, 0.139, 0.995, 0.969]: sit, stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1408.0s, [0.437, 0.147, 1.0, 0.99]: sit, stand, listen to (a person), watch (a person).", "At 1407.0s, [0.447, 0.138, 1.0, 0.975]: sit, listen to (a person), watch (a person).", "At 1406.0s, [0.428, 0.095, 1.0, 0.966]: stand, touch (an object), listen to (a person), watch (a person).", "At 1405.0s, [0.447, 0.091, 1.0, 0.97]: stand, listen to (a person), watch (a person).", "At 1404.0s, [0.462, 0.096, 0.998, 0.977]: watch (a person).", "At 1403.0s, [0.479, 0.098, 1.0, 0.981]: bend/bow (at the waist), watch (a person).", "At 1402.0s, [0.471, 0.081, 1.0, 0.979]: stand, watch (a person).", "From 1398.0s to 1401.0s, [0.467, 0.098, 0.948, 0.887]->[0.283, 0.082, 0.96, 0.951]->[0.445, 0.095, 0.998, 0.99]->[0.469, 0.063, 1.0, 0.974]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1397.0s, [0.651, 0.149, 0.907, 0.83]: stand, touch (an object), listen to (a person), 
watch (a person).", "At 1396.0s, [0.001, 0.317, 0.095, 0.71]: stand, listen to (a person), watch (a person).", "At 1395.0s, [0.426, 0.282, 0.604, 0.632]: walk, listen to (a person)."]}, {"from": "human", "timestamps": 1416.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.206, 0.404, 0.987]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.401, 0.14, 0.988, 0.98]: listen to (a person), sit, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "JNb4nWexD0I.mkv", "clip": [1377, 1465], "conversations": [{"from": "human", "timestamps": 1407.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.026, 0.086, 0.495, 0.971]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.447, 0.138, 1.0, 0.975]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1465.0, "value": "Describe the actions of the person at bbox [0.0, 0.379, 0.281, 0.993] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.0, 0.379, 0.281, 0.993]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1464.0s, [0.0, 0.354, 0.279, 0.989]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1463.0s, [0.0, 0.376, 0.231, 0.978]: sit, listen to (a person), watch (a person)."]}]}, {"video": "JNb4nWexD0I.mkv", "clip": [1477, 1605], "conversations": [{"from": "human", "timestamps": 1507.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.051, 0.123, 0.628, 0.934]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1605.0, "value": "What actions did the person at bbox [0.0, 0.118, 0.576, 0.981] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.118, 0.576, 0.981]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1604.0s, [0.002, 0.095, 0.582, 0.997]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1600.0s to 1603.0s, [0.0, 0.16, 0.556, 0.981]->[0.002, 0.155, 0.565, 0.979]->[0.0, 0.15, 0.563, 0.989]->[0.0, 0.13, 0.557, 0.991]: stand, carry/hold (an object), listen to (a person).", "At 1599.0s, [0.0, 0.162, 0.553, 0.979]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1597.0s to 1598.0s, [0.0, 0.244, 0.444, 0.964]->[0.0, 0.201, 0.603, 1.0]: stand, carry/hold (an object), listen to (a person).", "At 1596.0s, [0.0, 0.334, 0.385, 0.988]: stand."]}]}, {"video": "JNb4nWexD0I.mkv", "clip": [1544, 1601], "conversations": [{"from": "human", "timestamps": 1574.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.116, 0.267, 0.487, 0.99]: carry/hold (an object), walk; [0.582, 0.116, 0.996, 0.981]: listen to (a person), stand."}, {"from": "human", "timestamps": 1601.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.002, 0.155, 0.565, 0.979]?"}, {"from": "gpt", "value": ["In the current frame, [0.002, 0.155, 0.565, 0.979]: stand, carry/hold (an object), listen to (a person).", "At 1600.0s, [0.0, 0.16, 0.556, 0.981]: stand, carry/hold (an object), listen to (a person).", "At 1599.0s, [0.0, 0.162, 0.553, 0.979]: stand, carry/hold (an object), 
listen to (a person), watch (a person).", "From 1597.0s to 1598.0s, [0.0, 0.244, 0.444, 0.964]->[0.0, 0.201, 0.603, 1.0]: stand, carry/hold (an object), listen to (a person).", "At 1596.0s, [0.0, 0.334, 0.385, 0.988]: stand."]}]}, {"video": "JNb4nWexD0I.mkv", "clip": [1632, 1725], "conversations": [{"from": "human", "timestamps": 1662.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.106, 0.373, 0.461, 0.928]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.519, 0.112, 0.859, 1.0]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1725.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.118, 0.077, 0.898, 1.0]?"}, {"from": "gpt", "value": ["At now, [0.118, 0.077, 0.898, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1724.0s, [0.159, 0.077, 0.894, 0.972]: stand, talk to (e.g., self, a person, a group).", "At 1723.0s, [0.048, 0.074, 0.679, 0.98]: stand.", "From 1721.0s to 1722.0s, [0.009, 0.05, 0.884, 1.0]->[0.023, 0.274, 0.802, 0.986]: walk."]}]}, {"video": "JNb4nWexD0I.mkv", "clip": [1653, 1691], "conversations": [{"from": "human", "timestamps": 1683.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.373, 0.068, 0.811, 0.987]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1691.0, "value": "What actions did the person at bbox [0.424, 0.09, 0.776, 0.932] perform now and earlier?"}, {"from": "gpt", "value": ["At now, [0.424, 0.09, 0.776, 0.932]: stand, talk to (e.g., self, a person, a group).", "From 1685.0s to 1690.0s, [0.433, 0.082, 0.761, 0.967]->[0.38, 0.07, 0.769, 0.95]->[0.427, 0.084, 0.767, 0.939]->[0.435, 0.078, 0.763, 0.956]->[0.428, 0.082, 0.77, 0.959]->[0.426, 0.09, 0.768, 0.918]: stand, talk to (e.g., self, a person, a group).", "At 1684.0s, [0.184, 0.084, 0.681, 0.921]: stand, point to (an object), touch (an object), talk to (e.g., self, a person, a group).", "From 1680.0s to 1683.0s, [0.158, 0.073, 0.89, 0.983]->[0.345, 0.095, 0.816, 0.981]->[0.475, 0.102, 0.876, 0.984]->[0.373, 0.068, 0.811, 0.987]: walk, talk to (e.g., self, a person, a group)."]}]}, {"video": "JNb4nWexD0I.mkv", "clip": [1718, 1763], "conversations": [{"from": "human", "timestamps": 1748.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.008, 0.293, 0.528, 0.985]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.447, 0.06, 1.0, 0.984]: listen to (a person), stand."}, {"from": "human", "timestamps": 1763.0, "value": "What actions did the person at bbox [0.151, 0.043, 0.996, 0.967] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.151, 0.043, 0.996, 0.967]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1762.0s, [0.507, 0.0, 1.0, 0.981]: walk, talk to (e.g., self, a person, a group).", "From 1760.0s to 1761.0s, [0.69, 0.09, 0.996, 0.988]->[0.615, 0.128, 0.993, 0.993]: stand, watch (a person)."]}]}, {"video": "KIy2a-nejxg.mp4", "clip": [1012, 1078], "conversations": [{"from": "human", "timestamps": 1042.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.213, 0.079, 0.617, 0.518]?"}, {"from": "gpt", "value": ["In the current frame, [0.213, 0.079, 0.617, 0.518]: lie/sleep.", 
"At 1040.0s, [0.114, 0.112, 0.58, 0.49]: lie/sleep."]}, {"from": "human", "timestamps": 1078.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.01, 0.003, 0.911, 0.897]: lie/sleep, talk to (e.g., self, a person, a group)."}]}, {"video": "KIy2a-nejxg.mp4", "clip": [1020, 1114], "conversations": [{"from": "human", "timestamps": 1050.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.009, 0.944, 0.937]: lie/sleep, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1114.0, "value": "What actions did the person at bbox [0.585, 0.002, 0.964, 0.795] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.585, 0.002, 0.964, 0.795]: sit, talk to (e.g., self, a person, a group).", "From 1110.0s to 1113.0s, [0.447, 0.27, 0.885, 0.775]->[0.424, 0.158, 0.947, 0.833]->[0.303, 0.136, 0.951, 0.896]->[0.43, 0.103, 0.95, 0.879]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "KIy2a-nejxg.mp4", "clip": [1064, 1131], "conversations": [{"from": "human", "timestamps": 1094.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.077, 0.0, 0.951, 0.889]: watch (a person), lie/sleep."}, {"from": "human", "timestamps": 1131.0, "value": "For the person at bbox [0.26, 0.297, 0.609, 0.872], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.26, 0.297, 0.609, 0.872]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1130.0s, [0.237, 0.295, 0.56, 0.776]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "KIy2a-nejxg.mp4", "clip": [1114, 1167], "conversations": [{"from": "human", "timestamps": 1144.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.05, 0.003, 0.75, 0.976]: lie/sleep, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1167.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.074, 0.114, 0.532, 0.979]?"}, {"from": "gpt", "value": ["In the current frame, [0.074, 0.114, 0.532, 0.979]: stand, carry/hold (an object), listen to (a person).", "From 1164.0s to 1166.0s, [0.117, 0.141, 0.497, 0.975]->[0.1, 0.127, 0.528, 0.977]->[0.095, 0.122, 0.532, 0.966]: stand, carry/hold (an object), listen to (a person).", "At 1163.0s, [0.108, 0.126, 0.519, 0.953]: stand, carry/hold (an object).", "At 1162.0s, [0.178, 0.134, 0.472, 0.931]: stand, carry/hold (an object), watch (e.g., TV).", "At 1161.0s, [0.174, 0.052, 0.652, 0.983]: stand, carry/hold (an object).", "From 1158.0s to 1160.0s, [0.326, 0.134, 0.769, 0.984]->[0.38, 0.085, 0.811, 0.979]->[0.209, 0.081, 0.725, 0.987]: walk.", "At 1157.0s, [0.283, 0.13, 0.64, 0.978]: walk, talk to (e.g., self, a person, a group).", "From 1155.0s to 1156.0s, [0.237, 0.125, 0.573, 0.994]->[0.209, 0.135, 0.537, 0.986]: walk.", "From 1153.0s to 1154.0s, [0.235, 0.327, 0.498, 0.942]->[0.241, 0.164, 0.497, 0.987]: get up.", "At 1152.0s, [0.231, 0.286, 0.518, 0.649]: sit, touch (an object).", "At 1151.0s, [0.233, 0.248, 0.579, 0.851]: sit, carry/hold (an object), watch (a person)."]}]}, {"video": "KIy2a-nejxg.mp4", "clip": [1237, 1277], "conversations": [{"from": "human", 
"timestamps": 1267.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.149, 0.066, 0.544, 0.993]: listen to (a person), stand, smoke, touch (an object); [0.561, 0.322, 0.791, 0.986]: sit, watch (a person), carry/hold (an object), talk to (e.g., self, a person, a group), smoke."}, {"from": "human", "timestamps": 1277.0, "value": "Describe the actions of the person at bbox [0.529, 0.299, 0.768, 0.983] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.529, 0.299, 0.768, 0.983]: sit, smoke, talk to (e.g., self, a person, a group), watch (a person).", "From 1275.0s to 1276.0s, [0.523, 0.297, 0.763, 0.989]->[0.531, 0.293, 0.766, 0.989]: sit, smoke, talk to (e.g., self, a person, a group), watch (a person).", "From 1273.0s to 1274.0s, [0.541, 0.31, 0.778, 0.994]->[0.528, 0.313, 0.774, 0.98]: sit, smoke, listen to (a person).", "At 1272.0s, [0.539, 0.312, 0.769, 0.994]: sit, smoke, listen to (a person), watch (a person).", "At 1271.0s, [0.539, 0.324, 0.785, 0.99]: sit, smoke, listen to (a person).", "At 1270.0s, [0.531, 0.315, 0.771, 0.989]: sit, stand, smoke, listen to (a person).", "At 1269.0s, [0.548, 0.318, 0.784, 0.988]: sit, smoke, touch (an object), listen to (a person).", "At 1268.0s, [0.555, 0.306, 0.79, 0.987]: sit, smoke, talk to (e.g., self, a person, a group), watch (a person).", "At 1267.0s, [0.561, 0.322, 0.791, 0.986]: sit, carry/hold (an object), smoke, talk to (e.g., self, a person, a group), watch (a person).", "From 1265.0s to 1266.0s, [0.559, 0.308, 0.778, 0.976]->[0.563, 0.313, 0.783, 0.986]: sit, smoke, talk to (e.g., self, a person, a group).", "At 1264.0s, [0.562, 0.31, 0.767, 0.962]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "From 1262.0s to 1263.0s, [0.549, 0.281, 0.75, 0.945]->[0.555, 0.299, 0.778, 0.967]: sit, smoke, listen to (a person).", "At 1261.0s, [0.556, 0.279, 0.744, 0.956]: sit, stand, smoke, listen to (a person).", "From 1258.0s to 1260.0s, [0.538, 0.275, 0.73, 0.954]->[0.542, 0.264, 0.729, 0.955]->[0.559, 0.289, 0.744, 0.974]: sit, smoke, listen to (a person).", "At 1257.0s, [0.56, 0.304, 0.725, 0.937]: sit, stand, smoke, listen to (a person).", "At 1256.0s, [0.541, 0.26, 0.709, 0.938]: sit, smoke, listen to (a person).", "From 1254.0s to 1255.0s, [0.53, 0.237, 0.7, 0.885]->[0.521, 0.246, 0.707, 0.923]: sit, carry/hold (an object), smoke, talk to (e.g., self, a person, a group), watch (a person).", "At 1253.0s, [0.528, 0.244, 0.691, 0.889]: sit, carry/hold (an object), smoke, talk to (e.g., self, a person, a group).", "At 1252.0s, [0.546, 0.249, 0.698, 0.866]: sit, smoke, talk to (e.g., self, a person, a group).", "From 1250.0s to 1251.0s, [0.53, 0.235, 0.699, 0.876]->[0.533, 0.228, 0.698, 0.875]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1249.0s, [0.534, 0.223, 0.7, 0.878]: sit, stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1248.0s, [0.537, 0.234, 0.7, 0.882]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1247.0s, [0.533, 0.239, 0.704, 0.877]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1246.0s, [0.541, 0.244, 0.711, 0.878]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "KIy2a-nejxg.mp4", "clip": [1228, 1318], "conversations": [{"from": "human", "timestamps": 1258.0, "value": "Where is each person positioned, and what actions are they performing in the current 
frame?"}, {"from": "gpt", "value": "At now, [0.332, 0.095, 0.528, 0.989]: stand, smoke, touch (an object), talk to (e.g., self, a person, a group); [0.538, 0.275, 0.73, 0.954]: listen to (a person), sit, smoke."}, {"from": "human", "timestamps": 1318.0, "value": "For the person at bbox [0.392, 0.1, 0.788, 0.996], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.392, 0.1, 0.788, 0.996]: bend/bow (at the waist), listen to (a person), watch (a person).", "From 1316.0s to 1317.0s, [0.327, 0.15, 0.82, 0.958]->[0.356, 0.082, 0.8, 0.993]: bend/bow (at the waist), listen to (a person), watch (a person).", "At 1315.0s, [0.347, 0.231, 0.791, 0.954]: bend/bow (at the waist), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1314.0s, [0.487, 0.195, 0.706, 1.0]: stand, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1313.0s, [0.484, 0.183, 0.654, 1.0]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1311.0s to 1312.0s, [0.483, 0.197, 0.635, 0.981]->[0.47, 0.195, 0.657, 0.965]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1309.0s to 1310.0s, [0.531, 0.232, 0.764, 0.984]->[0.532, 0.213, 0.666, 0.926]: walk, carry/hold (an object)."]}]}, {"video": "KIy2a-nejxg.mp4", "clip": [1321, 1370], "conversations": [{"from": "human", "timestamps": 1351.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.035, 0.2, 0.107, 0.719]: listen to (a person), watch (a person), walk; [0.058, 0.223, 0.266, 0.985]: listen to (a person), stand, watch (a person); [0.221, 0.223, 0.296, 0.664]: listen to (a person), watch (a person), walk; [0.286, 0.26, 0.355, 0.586]: listen to (a person), watch (a person), walk; [0.359, 0.233, 0.614, 0.974]: listen to (a person), bend/bow (at the waist); [0.36, 0.232, 0.47, 0.498]: watch (a person), walk; [0.546, 0.26, 0.814, 0.983]: watch (a person), talk to (e.g., self, a person, a group), grab (a person), bend/bow (at the waist)."}, {"from": "human", "timestamps": 1370.0, "value": "What actions did the person at bbox [0.18, 0.148, 0.496, 0.936] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.18, 0.148, 0.496, 0.936]: stand, watch (a person).", "From 1368.0s to 1369.0s, [0.092, 0.159, 0.413, 1.0]->[0.092, 0.176, 0.412, 0.967]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1367.0s, [0.122, 0.186, 0.486, 0.996]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1365.0s to 1366.0s, [0.174, 0.163, 0.385, 0.996]->[0.165, 0.182, 0.446, 0.934]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1364.0s, [0.064, 0.088, 0.365, 0.962]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1363.0s, [0.067, 0.053, 0.418, 0.965]: stand, carry/hold (an object), watch (a person)."]}]}, {"video": "KIy2a-nejxg.mp4", "clip": [1395, 1431], "conversations": [{"from": "human", "timestamps": 1425.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.049, 0.158, 0.251, 0.98]: listen to (a person), stand, watch (a person); [0.173, 0.053, 0.389, 0.965]: listen to (a person), stand, watch (a person); [0.384, 0.134, 0.682, 0.748]: carry/hold (an 
object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.605, 0.168, 0.809, 0.723]: listen to (a person), stand, watch (a person); [0.665, 0.301, 0.961, 0.974]: carry/hold (an object), sit, listen to (a person)."}, {"from": "human", "timestamps": 1431.0, "value": "Describe the actions of the person at bbox [0.735, 0.337, 0.969, 0.961] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.735, 0.337, 0.969, 0.961]: sit, carry/hold (an object), listen to (a person).", "From 1429.0s to 1430.0s, [0.729, 0.371, 0.966, 0.98]->[0.736, 0.324, 0.96, 0.943]: sit, carry/hold (an object), listen to (a person)."]}]}, {"video": "KIy2a-nejxg.mp4", "clip": [1355, 1459], "conversations": [{"from": "human", "timestamps": 1385.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.025, 0.007, 0.208, 0.769]: listen to (a person), stand, watch (a person); [0.104, 0.141, 0.398, 0.643]: listen to (a person), stand, watch (a person); [0.38, 0.117, 0.629, 0.953]: listen to (a person), stand, watch (a person); [0.523, 0.294, 0.869, 0.975]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1459.0, "value": "What actions did the person at bbox [0.11, 0.064, 0.441, 0.974] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.11, 0.064, 0.441, 0.974]: stand, listen to (a person), watch (a person).", "From 1456.0s to 1458.0s, [0.168, 0.111, 0.497, 0.989]->[0.144, 0.124, 0.45, 0.976]->[0.11, 0.088, 0.44, 0.972]: stand, listen to (a person), watch (a person)."]}]}, {"video": "KIy2a-nejxg.mp4", "clip": [1425, 1467], "conversations": [{"from": "human", "timestamps": 1455.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.073, 0.207, 0.23, 0.607]: listen to (a person), stand, watch (a person); [0.192, 0.118, 0.514, 0.963]: listen to (a person), stand, watch (a person); [0.195, 0.082, 0.314, 0.401]: watch (a person), stand; [0.368, 0.174, 0.494, 0.742]: listen to (a person), stand, watch (a person); [0.472, 0.198, 0.72, 0.953]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.584, 0.321, 0.936, 0.991]: listen to (a person), sit, watch (a person), drink."}, {"from": "human", "timestamps": 1467.0, "value": "For the person at bbox [0.441, 0.117, 0.769, 0.995], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At now, [0.441, 0.117, 0.769, 0.995]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1465.0s to 1466.0s, [0.453, 0.119, 0.761, 1.0]->[0.436, 0.124, 0.765, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1460.0s to 1464.0s, [0.424, 0.133, 0.731, 0.989]->[0.439, 0.133, 0.738, 0.991]->[0.444, 0.157, 0.744, 0.995]->[0.439, 0.15, 0.747, 0.978]->[0.446, 0.122, 0.761, 0.992]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1459.0s, [0.437, 0.134, 0.717, 0.996]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1457.0s to 1458.0s, [0.433, 0.17, 0.678, 0.979]->[0.428, 0.158, 0.71, 0.989]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1456.0s, [0.451, 0.184, 0.708, 0.955]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1455.0s, [0.472, 
0.198, 0.72, 0.953]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1454.0s, [0.584, 0.194, 0.739, 0.598]: stand, watch (a person)."]}]}, {"video": "KIy2a-nejxg.mp4", "clip": [1466, 1530], "conversations": [{"from": "human", "timestamps": 1496.0, "value": "What actions is the person in bbox [0.032, 0.136, 0.546, 0.991] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.032, 0.136, 0.546, 0.991]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1495.0s, [0.033, 0.03, 0.6, 0.994]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1494.0s, [0.05, 0.045, 0.576, 0.986]: stand, carry/hold (an object), cook, listen to (a person), watch (a person).", "From 1491.0s to 1493.0s, [0.063, 0.013, 0.667, 0.956]->[0.038, 0.023, 0.524, 0.983]->[0.023, 0.015, 0.622, 0.981]: stand, carry/hold (an object), cook.", "At 1490.0s, [0.129, 0.037, 0.709, 0.99]: stand, carry/hold (an object), cook, listen to (a person).", "From 1488.0s to 1489.0s, [0.351, 0.198, 0.876, 0.997]->[0.185, 0.069, 0.921, 1.0]: stand, carry/hold (an object), cook.", "At 1487.0s, [0.294, 0.213, 0.786, 0.991]: stand."]}, {"from": "human", "timestamps": 1530.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.055, 0.063, 0.491, 0.985]: carry/hold (an object), stand, listen to (a person); [0.276, 0.16, 0.519, 0.715]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "KIy2a-nejxg.mp4", "clip": [1510, 1587], "conversations": [{"from": "human", "timestamps": 1540.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.033, 0.139, 0.623, 1.0]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1587.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.141, 0.44, 0.387, 0.732]?"}, {"from": "gpt", "value": ["In the current frame, [0.141, 0.44, 0.387, 0.732]: sit, carry/hold (an object), listen to (a person).", "At 1586.0s, [0.087, 0.429, 0.361, 0.751]: sit, touch (an object), listen to (a person).", "At 1585.0s, [0.028, 0.409, 0.3, 0.769]: sit, carry/hold (an object), touch (an object), listen to (a person), watch (a person)."]}]}, {"video": "KIy2a-nejxg.mp4", "clip": [1585, 1634], "conversations": [{"from": "human", "timestamps": 1615.0, "value": "What actions did the person at bbox [0.051, 0.406, 0.43, 0.914] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.051, 0.406, 0.43, 0.914]: sit, touch (an object), listen to (a person), watch (a person).", "From 1612.0s to 1614.0s, [0.028, 0.36, 0.364, 0.82]->[0.028, 0.381, 0.366, 0.846]->[0.029, 0.403, 0.376, 0.869]: sit, touch (an object), listen to (a person), watch (a person).", "At 1611.0s, [0.024, 0.35, 0.362, 0.801]: sit, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "At 1610.0s, [0.08, 0.34, 0.413, 0.726]: sit, touch (an object), listen to (a person), watch (a person).", "At 1609.0s, [0.115, 0.312, 0.417, 0.697]: sit, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "At 1608.0s, [0.056, 0.314, 0.366, 0.715]: sit, touch (an object), listen to (a person), watch (a person).", "At 1607.0s, [0.029, 0.313, 0.313, 0.713]: sit, carry/hold (an object), touch (an object), listen 
to (a person), watch (a person).", "From 1603.0s to 1606.0s, [0.17, 0.372, 0.486, 0.784]->[0.037, 0.387, 0.352, 0.804]->[0.036, 0.357, 0.323, 0.731]->[0.032, 0.329, 0.322, 0.743]: sit, touch (an object), listen to (a person), watch (a person).", "At 1602.0s, [0.237, 0.366, 0.553, 0.771]: sit, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "From 1593.0s to 1601.0s, [0.197, 0.457, 0.439, 0.725]->[0.187, 0.437, 0.432, 0.73]->[0.193, 0.392, 0.442, 0.691]->[0.194, 0.35, 0.46, 0.668]->[0.207, 0.326, 0.488, 0.695]->[0.229, 0.296, 0.521, 0.686]->[0.216, 0.308, 0.531, 0.73]->[0.212, 0.358, 0.539, 0.787]->[0.218, 0.373, 0.547, 0.807]: sit, touch (an object), listen to (a person), watch (a person).", "At 1592.0s, [0.187, 0.461, 0.435, 0.742]: sit, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "At 1591.0s, [0.195, 0.454, 0.413, 0.726]: sit, carry/hold (an object), touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1590.0s, [0.197, 0.454, 0.413, 0.728]: sit, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "At 1589.0s, [0.179, 0.456, 0.401, 0.746]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1588.0s, [0.177, 0.443, 0.382, 0.742]: sit, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "At 1587.0s, [0.141, 0.44, 0.387, 0.732]: sit, carry/hold (an object), listen to (a person).", "At 1586.0s, [0.087, 0.429, 0.361, 0.751]: sit, touch (an object), listen to (a person).", "At 1585.0s, [0.028, 0.409, 0.3, 0.769]: sit, carry/hold (an object), touch (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1634.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.024, 0.361, 0.292, 0.953]: listen to (a person), sit, watch (a person); [0.111, 0.347, 0.21, 0.649]: listen to (a person), sit, watch (a person); [0.223, 0.404, 0.362, 0.82]: listen to (a person), sit, watch (a person); [0.426, 0.171, 0.639, 0.646]: stand, touch (an object), talk to (e.g., self, a person, a group); [0.685, 0.38, 0.846, 0.754]: listen to (a person), sit, watch (a person); [0.82, 0.347, 0.954, 0.645]: listen to (a person), sit, watch (a person)."}]}, {"video": "KIy2a-nejxg.mp4", "clip": [1663, 1726], "conversations": [{"from": "human", "timestamps": 1693.0, "value": "For the person at bbox [0.206, 0.094, 0.98, 0.979], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.206, 0.094, 0.98, 0.979]: stand, talk to (e.g., self, a person, a group).", "From 1683.0s to 1692.0s, [0.196, 0.114, 0.968, 0.977]->[0.171, 0.12, 0.993, 0.983]->[0.252, 0.119, 0.962, 0.981]->[0.186, 0.123, 1.0, 0.983]->[0.192, 0.228, 1.0, 0.985]->[0.11, 0.147, 0.974, 0.985]->[0.114, 0.079, 0.983, 0.989]->[0.095, 0.055, 0.973, 0.984]->[0.09, 0.072, 0.987, 0.974]->[0.186, 0.039, 0.987, 0.993]: stand, talk to (e.g., self, a person, a group).", "From 1678.0s to 1682.0s, [0.213, 0.136, 0.98, 0.996]->[0.258, 0.129, 0.977, 1.0]->[0.251, 0.147, 0.978, 0.997]->[0.23, 0.092, 0.983, 0.981]->[0.228, 0.094, 0.968, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1670.0s to 1677.0s, [0.268, 0.172, 0.851, 0.983]->[0.101, 0.168, 0.954, 0.989]->[0.096, 0.124, 0.971, 0.984]->[0.071, 0.146, 0.961, 0.987]->[0.059, 0.146, 0.976, 
0.989]->[0.225, 0.146, 0.973, 0.979]->[0.235, 0.074, 0.919, 0.999]->[0.22, 0.105, 0.953, 0.988]: stand, talk to (e.g., self, a person, a group).", "From 1668.0s to 1669.0s, [0.258, 0.156, 0.792, 0.99]->[0.284, 0.174, 0.829, 0.987]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1667.0s, [0.275, 0.163, 0.781, 0.979]: stand, talk to (e.g., self, a person, a group).", "At 1666.0s, [0.285, 0.149, 0.76, 0.995]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1635.0s to 1665.0s, [0.416, 0.158, 0.641, 0.624]->[0.424, 0.165, 0.643, 0.633]->[0.422, 0.156, 0.644, 0.628]->[0.414, 0.179, 0.729, 0.632]->[0.423, 0.164, 0.643, 0.628]->[0.419, 0.165, 0.641, 0.627]->[0.421, 0.165, 0.636, 0.626]->[0.42, 0.179, 0.638, 0.632]->[0.417, 0.164, 0.633, 0.629]->[0.416, 0.168, 0.646, 0.627]->[0.418, 0.156, 0.643, 0.627]->[0.415, 0.155, 0.639, 0.633]->[0.414, 0.165, 0.637, 0.629]->[0.419, 0.164, 0.642, 0.631]->[0.412, 0.171, 0.631, 0.632]->[0.401, 0.18, 0.648, 0.66]->[0.406, 0.155, 0.653, 0.664]->[0.404, 0.134, 0.661, 0.659]->[0.408, 0.128, 0.669, 0.679]->[0.405, 0.127, 0.671, 0.687]->[0.382, 0.115, 0.669, 0.712]->[0.368, 0.166, 0.668, 0.73]->[0.356, 0.122, 0.666, 0.756]->[0.343, 0.077, 0.641, 0.777]->[0.327, 0.098, 0.658, 0.82]->[0.322, 0.127, 0.663, 0.834]->[0.309, 0.152, 0.77, 0.898]->[0.292, 0.158, 0.738, 0.956]->[0.278, 0.187, 0.719, 0.978]->[0.281, 0.182, 0.748, 0.999]->[0.281, 0.2, 0.723, 0.987]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1633.0s to 1634.0s, [0.422, 0.181, 0.643, 0.633]->[0.426, 0.171, 0.639, 0.646]: stand, touch (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1726.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.034, 0.172, 0.459, 0.675]: sit, touch (an object), talk to (e.g., self, a person, a group); [0.232, 0.065, 0.377, 0.289]: carry/hold (an object), walk; [0.382, 0.046, 0.542, 0.594]: carry/hold (an object), walk; [0.474, 0.197, 0.958, 0.84]: listen to (a person), sit, touch (an object), watch (a person)."}]}, {"video": "KWoSGtglCms.mkv", "clip": [873, 911], "conversations": [{"from": "human", "timestamps": 903.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.267, 0.206, 0.839, 0.853]: sit, smoke, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 911.0, "value": "For the person at bbox [0.29, 0.185, 0.851, 0.871], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At now, [0.29, 0.185, 0.851, 0.871]: sit, talk to (e.g., self, a person, a group).", "From 908.0s to 910.0s, [0.295, 0.177, 0.854, 0.859]->[0.283, 0.176, 0.852, 0.859]->[0.273, 0.178, 0.859, 0.867]: sit, talk to (e.g., self, a person, a group).", "From 906.0s to 907.0s, [0.254, 0.178, 0.852, 0.861]->[0.3, 0.173, 0.862, 0.869]: sit, smoke, talk to (e.g., self, a person, a group).", "At 905.0s, [0.276, 0.192, 0.841, 0.86]: sit, carry/hold (an object), smoke.", "At 904.0s, [0.274, 0.192, 0.85, 0.847]: sit, smoke.", "At 903.0s, [0.267, 0.206, 0.839, 0.853]: sit, smoke, talk to (e.g., self, a person, a group).", "At 902.0s, [0.252, 0.202, 0.87, 0.86]: sit, smoke, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "KWoSGtglCms.mkv", "clip": [875, 995], "conversations": [{"from": "human", "timestamps": 905.0, "value": "Where is each person positioned, and 
what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.276, 0.192, 0.841, 0.86]: carry/hold (an object), sit, smoke."}, {"from": "human", "timestamps": 995.0, "value": "What actions is the person in bbox [0.602, 0.292, 0.816, 0.851] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.602, 0.292, 0.816, 0.851]: walk.", "At 994.0s, [0.587, 0.243, 0.836, 0.865]: walk.", "From 992.0s to 993.0s, [0.153, 0.207, 0.63, 0.847]->[0.17, 0.233, 0.616, 0.853]: stand, touch (an object).", "At 991.0s, [0.122, 0.204, 0.632, 0.852]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 990.0s, [0.101, 0.208, 0.642, 0.851]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 989.0s, [0.098, 0.206, 0.628, 0.857]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 987.0s to 988.0s, [0.099, 0.202, 0.626, 0.856]->[0.1, 0.193, 0.641, 0.868]: stand, listen to (a person), watch (a person).", "From 985.0s to 986.0s, [0.099, 0.207, 0.643, 0.859]->[0.091, 0.206, 0.624, 0.86]: stand, touch (an object), listen to (a person), watch (a person).", "From 983.0s to 984.0s, [0.096, 0.218, 0.62, 0.851]->[0.101, 0.212, 0.623, 0.853]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 981.0s to 982.0s, [0.071, 0.216, 0.607, 0.851]->[0.071, 0.22, 0.605, 0.852]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 979.0s to 980.0s, [0.034, 0.187, 0.328, 0.859]->[0.056, 0.198, 0.454, 0.85]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 978.0s, [0.011, 0.209, 0.131, 0.878]: walk, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "KWoSGtglCms.mkv", "clip": [997, 1039], "conversations": [{"from": "human", "timestamps": 1027.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.021, 0.122, 0.859, 0.879]: sit."}, {"from": "human", "timestamps": 1039.0, "value": "For the person at bbox [0.08, 0.208, 0.292, 0.855], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.08, 0.208, 0.292, 0.855]: walk.", "From 1037.0s to 1038.0s, [0.298, 0.27, 0.387, 0.515]->[0.211, 0.245, 0.357, 0.623]: walk.", "At 1036.0s, [0.275, 0.296, 0.369, 0.522]: bend/bow (at the waist), open (e.g., a window, a car door)."]}]}, {"video": "KWoSGtglCms.mkv", "clip": [1079, 1126], "conversations": [{"from": "human", "timestamps": 1109.0, "value": "For the person at bbox [0.567, 0.19, 0.831, 0.517], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At now, [0.567, 0.19, 0.831, 0.517]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1108.0s, [0.588, 0.183, 0.844, 0.511]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1106.0s to 1107.0s, [0.629, 0.181, 0.874, 0.515]->[0.607, 0.186, 0.853, 0.513]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1105.0s, [0.631, 0.183, 0.877, 0.511]: sit, touch (an object), listen to (a person), watch (a person).", "At 1104.0s, [0.629, 0.188, 0.871, 0.534]: sit, touch (an object), watch (a person).", "At 1103.0s, [0.47, 0.34, 0.838, 0.703]: sit, hand shake, talk to (e.g., self, a person, a group), watch (a person).", "At 
1102.0s, [0.512, 0.348, 0.858, 0.697]: sit, hand shake, watch (a person).", "At 1101.0s, [0.543, 0.418, 0.951, 0.79]: sit, watch (a person)."]}, {"from": "human", "timestamps": 1126.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.038, 0.182, 0.735, 0.879]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "KWoSGtglCms.mkv", "clip": [1110, 1186], "conversations": [{"from": "human", "timestamps": 1140.0, "value": "What actions did the person at bbox [0.015, 0.125, 0.855, 0.88] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.015, 0.125, 0.855, 0.88]: sit, talk to (e.g., self, a person, a group).", "At 1139.0s, [0.012, 0.13, 0.832, 0.891]: sit, stand.", "At 1138.0s, [0.01, 0.132, 0.841, 0.886]: sit, listen to (a person)."]}, {"from": "human", "timestamps": 1186.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.102, 0.277, 0.491, 0.857]: sit, talk to (e.g., self, a person, a group); [0.576, 0.261, 0.802, 0.594]: listen to (a person), sit, watch (a person)."}]}, {"video": "KWoSGtglCms.mkv", "clip": [1124, 1205], "conversations": [{"from": "human", "timestamps": 1154.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.223, 0.111, 0.972, 0.871]: sit."}, {"from": "human", "timestamps": 1205.0, "value": "For the person at bbox [0.232, 0.142, 0.887, 0.85], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At now, [0.232, 0.142, 0.887, 0.85]: sit, talk to (e.g., self, a person, a group).", "At 1204.0s, [0.128, 0.169, 0.873, 0.853]: sit, talk to (e.g., self, a person, a group).", "At 1203.0s, [0.329, 0.182, 0.908, 0.866]: sit, listen to (a person), talk to (e.g., self, a person, a group)."]}]}, {"video": "KWoSGtglCms.mkv", "clip": [1170, 1209], "conversations": [{"from": "human", "timestamps": 1200.0, "value": "Describe the actions of the person at bbox [0.423, 0.256, 0.934, 0.862] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.423, 0.256, 0.934, 0.862]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1197.0s to 1199.0s, [0.568, 0.26, 0.818, 0.602]->[0.542, 0.251, 0.844, 0.614]->[0.498, 0.249, 0.825, 0.76]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1196.0s, [0.574, 0.258, 0.84, 0.61]: sit, talk to (e.g., self, a person, a group).", "At 1195.0s, [0.571, 0.266, 0.863, 0.614]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1192.0s to 1194.0s, [0.568, 0.265, 0.782, 0.603]->[0.574, 0.268, 0.836, 0.597]->[0.57, 0.26, 0.787, 0.606]: sit, listen to (a person), watch (a person).", "From 1189.0s to 1191.0s, [0.58, 0.257, 0.884, 0.605]->[0.573, 0.259, 0.893, 0.619]->[0.569, 0.268, 0.78, 0.598]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1185.0s to 1188.0s, [0.588, 0.252, 0.795, 0.59]->[0.576, 0.261, 0.802, 0.594]->[0.582, 0.263, 0.803, 0.596]->[0.588, 0.253, 0.786, 0.591]: sit, listen to (a person), watch (a person).", "At 1184.0s, [0.582, 0.265, 0.784, 0.586]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1179.0s to 1183.0s, [0.588, 0.263, 0.875, 0.6]->[0.582, 0.261, 0.865, 0.603]->[0.591, 0.262, 0.819, 0.591]->[0.589, 0.256, 0.802, 0.588]->[0.582, 0.262, 0.824, 0.597]: sit, listen to (a 
person), watch (a person).", "At 1178.0s, [0.59, 0.259, 0.815, 0.588]: sit, touch (an object), listen to (a person), watch (a person).", "From 1174.0s to 1177.0s, [0.733, 0.285, 0.984, 0.753]->[0.608, 0.283, 0.865, 0.646]->[0.591, 0.262, 0.822, 0.594]->[0.612, 0.266, 0.899, 0.604]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1209.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.236, 0.142, 0.955, 0.847]: sit."}]}, {"video": "KWoSGtglCms.mkv", "clip": [1380, 1430], "conversations": [{"from": "human", "timestamps": 1410.0, "value": "For the person at bbox [0.006, 0.179, 0.489, 0.879], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.006, 0.179, 0.489, 0.879]: sit.", "At 1409.0s, [0.02, 0.183, 0.485, 0.868]: sit, touch (an object).", "At 1408.0s, [0.035, 0.181, 0.503, 0.867]: sit.", "From 1405.0s to 1407.0s, [0.12, 0.138, 0.62, 0.852]->[0.115, 0.161, 0.623, 0.863]->[0.131, 0.181, 0.644, 0.847]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1430.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.138, 0.366, 0.194, 0.489]: listen to (a person), sit, watch (a person); [0.184, 0.376, 0.25, 0.488]: listen to (a person), sit; [0.337, 0.357, 0.403, 0.473]: listen to (a person), sit, watch (a person); [0.475, 0.354, 0.523, 0.465]: sit."}]}, {"video": "KWoSGtglCms.mkv", "clip": [1471, 1520], "conversations": [{"from": "human", "timestamps": 1501.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.009, 0.236, 0.359, 0.878]: watch (a person), sit; [0.235, 0.308, 0.308, 0.537]: sit; [0.289, 0.236, 0.398, 0.592]: dance; [0.529, 0.38, 0.618, 0.541]: watch (a person), sit; [0.632, 0.376, 0.708, 0.519]: watch (a person), sit."}, {"from": "human", "timestamps": 1520.0, "value": "What actions is the person in bbox [0.571, 0.216, 0.767, 0.771] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.571, 0.216, 0.767, 0.771]: dance.", "At 1519.0s, [0.661, 0.221, 0.813, 0.78]: dance."]}]}, {"video": "KWoSGtglCms.mkv", "clip": [1538, 1607], "conversations": [{"from": "human", "timestamps": 1568.0, "value": "For the person at bbox [0.437, 0.17, 0.967, 0.849], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.437, 0.17, 0.967, 0.849]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1565.0s to 1567.0s, [0.442, 0.132, 0.962, 0.858]->[0.439, 0.14, 0.97, 0.851]->[0.443, 0.175, 0.974, 0.847]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1564.0s, [0.392, 0.171, 0.964, 0.857]: sit, give/serve (an object) to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1563.0s, [0.393, 0.168, 0.949, 0.878]: sit, carry/hold (an object), give/serve (an object) to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1562.0s, [0.367, 0.16, 0.963, 0.857]: sit, carry/hold (an object), give/serve (an object) to (a person), watch (a person).", "At 1561.0s, [0.368, 0.173, 0.964, 0.848]: sit, carry/hold (an object)."]}, {"from": "human", "timestamps": 1607.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", 
"value": "In the current frame, [0.024, 0.172, 0.714, 0.875]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "KWoSGtglCms.mkv", "clip": [1591, 1635], "conversations": [{"from": "human", "timestamps": 1621.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.159, 0.318, 0.355, 0.612]: dance; [0.396, 0.378, 0.528, 0.623]: talk to (e.g., self, a person, a group), bend/bow (at the waist); [0.529, 0.441, 0.636, 0.643]: watch (a person), sit, smoke; [0.664, 0.49, 0.762, 0.683]: watch (a person), sit; [0.751, 0.271, 0.834, 0.659]: watch (a person), stand; [0.836, 0.342, 0.934, 0.681]: watch (a person), stand."}, {"from": "human", "timestamps": 1635.0, "value": "What actions is the person in bbox [0.209, 0.137, 0.83, 0.862] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.209, 0.137, 0.83, 0.862]: sit, stand, talk to (e.g., self, a person, a group).", "At 1634.0s, [0.211, 0.143, 0.826, 0.858]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "KWoSGtglCms.mkv", "clip": [1637, 1703], "conversations": [{"from": "human", "timestamps": 1667.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.281, 0.131, 0.715, 0.899]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1703.0, "value": "Describe the actions of the person at bbox [0.038, 0.141, 0.805, 0.864] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.038, 0.141, 0.805, 0.864]: sit, talk to (e.g., self, a person, a group).", "At 1702.0s, [0.036, 0.149, 0.763, 0.872]: sit, talk to (e.g., self, a person, a group).", "At 1701.0s, [0.034, 0.136, 0.769, 0.863]: sit."]}]}, {"video": "KWoSGtglCms.mkv", "clip": [1719, 1768], "conversations": [{"from": "human", "timestamps": 1749.0, "value": "What actions is the person in bbox [0.704, 0.416, 0.781, 0.839] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.704, 0.416, 0.781, 0.839]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1748.0s, [0.705, 0.417, 0.783, 0.852]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1768.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.062, 0.277, 0.534, 0.861]: sit, talk to (e.g., self, a person, a group), stand."}]}, {"video": "K_SpqDJnlps.mkv", "clip": [897, 952], "conversations": [{"from": "human", "timestamps": 927.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.002, 0.37, 0.979]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.425, 0.089, 0.976, 0.985]: listen to (a person), stand."}, {"from": "human", "timestamps": 952.0, "value": "What actions is the person in bbox [0.0, 0.002, 0.368, 0.998] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.002, 0.368, 0.998]: stand, talk to (e.g., self, a person, a group).", "From 949.0s to 951.0s, [0.0, 0.0, 0.383, 1.0]->[0.0, 0.0, 0.374, 1.0]->[0.0, 0.0, 0.374, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 947.0s to 948.0s, [0.0, 0.008, 0.34, 
0.984]->[0.0, 0.0, 0.382, 0.959]: stand, listen to (a person), watch (a person).", "At 946.0s, [0.007, 0.097, 0.662, 0.994]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 945.0s, [0.002, 0.069, 0.646, 0.996]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 940.0s to 944.0s, [0.01, 0.076, 0.657, 0.991]->[0.001, 0.008, 0.667, 0.99]->[0.0, 0.021, 0.655, 1.0]->[0.0, 0.072, 0.688, 0.995]->[0.0, 0.076, 0.646, 0.993]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 938.0s to 939.0s, [0.0, 0.0, 0.367, 1.0]->[0.0, 0.0, 0.364, 0.952]: stand, listen to (a person), watch (a person).", "From 936.0s to 937.0s, [0.0, 0.001, 0.384, 0.981]->[0.0, 0.0, 0.352, 0.993]: stand, listen to (a person).", "At 935.0s, [0.0, 0.099, 0.686, 0.983]: stand, listen to (a person), watch (a person).", "From 930.0s to 934.0s, [0.001, 0.0, 0.368, 0.998]->[0.0, 0.062, 0.692, 0.988]->[0.0, 0.086, 0.686, 0.992]->[0.0, 0.088, 0.683, 0.987]->[0.0, 0.098, 0.682, 0.975]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 929.0s, [0.0, 0.007, 0.365, 0.987]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 926.0s to 928.0s, [0.0, 0.0, 0.38, 1.0]->[0.0, 0.002, 0.37, 0.979]->[0.0, 0.014, 0.377, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 918.0s to 925.0s, [0.0, 0.0, 0.451, 0.981]->[0.0, 0.0, 0.389, 0.989]->[0.0, 0.0, 0.409, 1.0]->[0.0, 0.0, 0.379, 0.995]->[0.0, 0.007, 0.391, 1.0]->[0.0, 0.011, 0.405, 0.992]->[0.0, 0.002, 0.406, 0.982]->[0.0, 0.001, 0.436, 0.989]: stand, listen to (a person).", "At 917.0s, [0.011, 0.073, 0.642, 0.993]: stand, listen to (a person), watch (a person).", "From 913.0s to 916.0s, [0.017, 0.073, 0.645, 1.0]->[0.017, 0.075, 0.644, 0.986]->[0.006, 0.044, 0.663, 0.994]->[0.021, 0.077, 0.657, 0.995]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 910.0s to 912.0s, [0.011, 0.038, 0.616, 0.986]->[0.009, 0.048, 0.644, 0.995]->[0.006, 0.055, 0.649, 0.993]: stand, talk to (e.g., self, a person, a group).", "From 905.0s to 909.0s, [0.014, 0.061, 0.657, 0.994]->[0.005, 0.059, 0.624, 0.987]->[0.008, 0.057, 0.647, 0.997]->[0.012, 0.065, 0.652, 1.0]->[0.005, 0.082, 0.662, 0.991]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 903.0s to 904.0s, [0.0, 0.049, 0.644, 0.987]->[0.008, 0.048, 0.642, 0.992]: stand, talk to (e.g., self, a person, a group).", "At 902.0s, [0.0, 0.072, 0.626, 0.984]: stand, listen to (a person), watch (a person)."]}]}, {"video": "K_SpqDJnlps.mkv", "clip": [887, 961], "conversations": [{"from": "human", "timestamps": 917.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.011, 0.073, 0.642, 0.993]: listen to (a person), stand, watch (a person); [0.662, 0.002, 1.0, 1.0]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 961.0, "value": "Describe the actions of the person at bbox [0.0, 0.0, 0.389, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At now, [0.0, 0.0, 0.389, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 959.0s to 960.0s, [0.0, 0.004, 0.374, 1.0]->[0.0, 0.0, 0.401, 0.974]: stand, talk to (e.g., self, a person, a group).", "At 958.0s, [0.0, 0.0, 0.35, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 949.0s to 957.0s, [0.0, 0.0, 0.383, 
1.0]->[0.0, 0.0, 0.374, 1.0]->[0.0, 0.0, 0.374, 1.0]->[0.0, 0.002, 0.368, 0.998]->[0.001, 0.0, 0.348, 1.0]->[0.0, 0.0, 0.37, 0.977]->[0.0, 0.0, 0.373, 1.0]->[0.0, 0.0, 0.383, 0.977]->[0.0, 0.0, 0.372, 0.984]: stand, talk to (e.g., self, a person, a group).", "From 947.0s to 948.0s, [0.0, 0.008, 0.34, 0.984]->[0.0, 0.0, 0.382, 0.959]: stand, listen to (a person), watch (a person).", "At 946.0s, [0.007, 0.097, 0.662, 0.994]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 945.0s, [0.002, 0.069, 0.646, 0.996]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 940.0s to 944.0s, [0.01, 0.076, 0.657, 0.991]->[0.001, 0.008, 0.667, 0.99]->[0.0, 0.021, 0.655, 1.0]->[0.0, 0.072, 0.688, 0.995]->[0.0, 0.076, 0.646, 0.993]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 938.0s to 939.0s, [0.0, 0.0, 0.367, 1.0]->[0.0, 0.0, 0.364, 0.952]: stand, listen to (a person), watch (a person).", "From 936.0s to 937.0s, [0.0, 0.001, 0.384, 0.981]->[0.0, 0.0, 0.352, 0.993]: stand, listen to (a person).", "At 935.0s, [0.0, 0.099, 0.686, 0.983]: stand, listen to (a person), watch (a person).", "From 930.0s to 934.0s, [0.001, 0.0, 0.368, 0.998]->[0.0, 0.062, 0.692, 0.988]->[0.0, 0.086, 0.686, 0.992]->[0.0, 0.088, 0.683, 0.987]->[0.0, 0.098, 0.682, 0.975]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 929.0s, [0.0, 0.007, 0.365, 0.987]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 926.0s to 928.0s, [0.0, 0.0, 0.38, 1.0]->[0.0, 0.002, 0.37, 0.979]->[0.0, 0.014, 0.377, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 918.0s to 925.0s, [0.0, 0.0, 0.451, 0.981]->[0.0, 0.0, 0.389, 0.989]->[0.0, 0.0, 0.409, 1.0]->[0.0, 0.0, 0.379, 0.995]->[0.0, 0.007, 0.391, 1.0]->[0.0, 0.011, 0.405, 0.992]->[0.0, 0.002, 0.406, 0.982]->[0.0, 0.001, 0.436, 0.989]: stand, listen to (a person).", "At 917.0s, [0.011, 0.073, 0.642, 0.993]: stand, listen to (a person), watch (a person).", "From 913.0s to 916.0s, [0.017, 0.073, 0.645, 1.0]->[0.017, 0.075, 0.644, 0.986]->[0.006, 0.044, 0.663, 0.994]->[0.021, 0.077, 0.657, 0.995]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 910.0s to 912.0s, [0.011, 0.038, 0.616, 0.986]->[0.009, 0.048, 0.644, 0.995]->[0.006, 0.055, 0.649, 0.993]: stand, talk to (e.g., self, a person, a group).", "From 905.0s to 909.0s, [0.014, 0.061, 0.657, 0.994]->[0.005, 0.059, 0.624, 0.987]->[0.008, 0.057, 0.647, 0.997]->[0.012, 0.065, 0.652, 1.0]->[0.005, 0.082, 0.662, 0.991]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 903.0s to 904.0s, [0.0, 0.049, 0.644, 0.987]->[0.008, 0.048, 0.642, 0.992]: stand, talk to (e.g., self, a person, a group).", "At 902.0s, [0.0, 0.072, 0.626, 0.984]: stand, listen to (a person), watch (a person)."]}]}, {"video": "K_SpqDJnlps.mkv", "clip": [1035, 1071], "conversations": [{"from": "human", "timestamps": 1065.0, "value": "What actions is the person in bbox [0.616, 0.035, 0.861, 0.917] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.616, 0.035, 0.861, 0.917]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1064.0s, [0.623, 0.044, 0.846, 0.922]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1071.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": 
"In the current frame, [0.049, 0.006, 0.524, 1.0]: stand, lift (a person)."}]}, {"video": "K_SpqDJnlps.mkv", "clip": [1021, 1125], "conversations": [{"from": "human", "timestamps": 1051.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.254, 0.0, 1.0, 1.0]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1125.0, "value": "For the person at bbox [0.209, 0.0, 1.0, 0.986], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.209, 0.0, 1.0, 0.986]: stand.", "At 1124.0s, [0.183, 0.0, 1.0, 0.99]: stand, carry/hold (an object).", "At 1123.0s, [0.169, 0.0, 0.957, 0.998]: stand, carry/hold (an object), listen to (a person).", "At 1122.0s, [0.322, 0.0, 0.996, 0.988]: stand, carry/hold (an object).", "At 1121.0s, [0.435, 0.026, 0.998, 0.987]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1120.0s, [0.535, 0.023, 0.988, 0.975]: stand, carry/hold (an object), read."]}]}, {"video": "K_SpqDJnlps.mkv", "clip": [1060, 1119], "conversations": [{"from": "human", "timestamps": 1090.0, "value": "Describe the actions of the person at bbox [0.035, 0.059, 0.656, 0.982] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.035, 0.059, 0.656, 0.982]: stand, listen to (a person).", "At 1089.0s, [0.029, 0.055, 0.664, 0.983]: stand, listen to (a person), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1119.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.36, 0.177, 0.896]: carry/hold (an object), stand; [0.038, 0.129, 0.634, 0.979]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.563, 0.052, 1.0, 1.0]: stand."}]}, {"video": "K_SpqDJnlps.mkv", "clip": [1133, 1189], "conversations": [{"from": "human", "timestamps": 1163.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.12, 0.188, 0.404, 0.989]: listen to (a person), stand, watch (a person); [0.54, 0.078, 0.941, 0.989]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1189.0, "value": "For the person at bbox [0.439, 0.0, 0.712, 0.544], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.439, 0.0, 0.712, 0.544]: stand, play musical instrument, watch (a person).", "At 1188.0s, [0.222, 0.0, 0.405, 0.263]: stand, play musical instrument, watch (a person)."]}]}, {"video": "K_SpqDJnlps.mkv", "clip": [1330, 1368], "conversations": [{"from": "human", "timestamps": 1360.0, "value": "For the person at bbox [0.294, 0.019, 1.0, 0.978], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.294, 0.019, 1.0, 0.978]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1359.0s, [0.302, 0.041, 1.0, 0.978]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1368.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.081, 0.017, 1.0, 0.982]: stand."}]}, {"video": "K_SpqDJnlps.mkv", "clip": [1352, 1401], "conversations": [{"from": "human", "timestamps": 1382.0, "value": "For the 
current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.28, 0.075, 0.97, 0.992]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1401.0, "value": "What actions did the person at bbox [0.05, 0.041, 0.713, 0.983] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.05, 0.041, 0.713, 0.983]: stand, listen to (a person), watch (a person).", "At 1400.0s, [0.065, 0.041, 0.727, 0.986]: stand, listen to (a person), watch (a person)."]}]}, {"video": "K_SpqDJnlps.mkv", "clip": [1390, 1448], "conversations": [{"from": "human", "timestamps": 1420.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.0, 0.844, 0.982]: stand."}, {"from": "human", "timestamps": 1448.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.0, 0.24, 0.928]?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.0, 0.24, 0.928]: stand, listen to (a person), watch (a person).", "At 1447.0s, [0.0, 0.001, 0.248, 0.944]: stand, listen to (a person)."]}]}, {"video": "K_SpqDJnlps.mkv", "clip": [1475, 1549], "conversations": [{"from": "human", "timestamps": 1505.0, "value": "For the person at bbox [0.047, 0.071, 0.497, 0.974], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.047, 0.071, 0.497, 0.974]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1504.0s, [0.02, 0.025, 0.516, 0.997]: sit, carry/hold (an object).", "At 1503.0s, [0.031, 0.034, 0.501, 0.993]: sit, carry/hold (an object), lift (a person), watch (a person)."]}, {"from": "human", "timestamps": 1549.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.028, 0.07, 0.412, 0.954]: sit; [0.409, 0.074, 0.996, 1.0]: listen to (a person), sit."}]}, {"video": "K_SpqDJnlps.mkv", "clip": [1543, 1579], "conversations": [{"from": "human", "timestamps": 1573.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.015, 0.043, 0.827, 0.989]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1579.0, "value": "Describe the actions of the person at bbox [0.206, 0.14, 0.371, 0.605] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.206, 0.14, 0.371, 0.605]: bend/bow (at the waist), carry/hold (an object), watch (a person).", "At 1578.0s, [0.213, 0.145, 0.391, 0.615]: bend/bow (at the waist), carry/hold (an object)."]}]}, {"video": "K_SpqDJnlps.mkv", "clip": [1623, 1668], "conversations": [{"from": "human", "timestamps": 1653.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.184, 0.06, 0.985, 0.978]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1668.0, "value": "What actions is the person in bbox [0.016, 0.058, 0.705, 0.983] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.016, 0.058, 0.705, 0.983]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1666.0s to 1667.0s, [0.014, 0.07, 0.656, 0.987]->[0.015, 0.084, 0.693, 0.975]: sit, talk to (e.g., self, a person, a group).", "At 1665.0s, 
[0.035, 0.041, 0.705, 0.991]: sit, touch (an object).", "From 1663.0s to 1664.0s, [0.023, 0.052, 0.721, 0.98]->[0.017, 0.06, 0.725, 0.98]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "K_SpqDJnlps.mkv", "clip": [1670, 1729], "conversations": [{"from": "human", "timestamps": 1700.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.007, 0.013, 0.696, 0.978]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.802, 0.029, 0.997, 0.881]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1729.0, "value": "What actions is the person in bbox [0.084, 0.07, 0.794, 0.98] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.084, 0.07, 0.794, 0.98]: stand, talk to (e.g., self, a person, a group).", "From 1727.0s to 1728.0s, [0.083, 0.091, 0.824, 0.993]->[0.084, 0.078, 0.801, 0.99]: stand, talk to (e.g., self, a person, a group).", "At 1726.0s, [0.129, 0.101, 0.875, 1.0]: stand."]}]}, {"video": "K_SpqDJnlps.mkv", "clip": [1736, 1795], "conversations": [{"from": "human", "timestamps": 1766.0, "value": "Describe the actions of the person at bbox [0.295, 0.041, 0.986, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.295, 0.041, 0.986, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1765.0s, [0.29, 0.039, 0.976, 1.0]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1795.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.124, 0.622, 0.167, 0.813]: watch (a person), stand; [0.334, 0.632, 0.396, 0.825]: watch (a person), walk; [0.42, 0.594, 0.486, 0.835]: watch (a person), walk; [0.565, 0.581, 0.627, 0.84]: listen to (a person), watch (a person), walk; [0.603, 0.574, 0.705, 0.813]: watch (a person), walk."}]}, {"video": "Kb1fduj-jdY.mp4", "clip": [911, 951], "conversations": [{"from": "human", "timestamps": 941.0, "value": "What actions did the person at bbox [0.686, 0.45, 0.795, 0.988] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.686, 0.45, 0.795, 0.988]: sit, carry/hold (an object), listen to (a person).", "From 937.0s to 940.0s, [0.796, 0.47, 0.876, 0.964]->[0.68, 0.45, 0.788, 0.984]->[0.684, 0.436, 0.793, 0.994]->[0.685, 0.448, 0.799, 0.99]: sit, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 951.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.128, 0.0, 0.423, 0.979]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.266, 0.088, 0.754, 0.986]: carry/hold (an object), listen to (a person), walk."}]}, {"video": "Kb1fduj-jdY.mp4", "clip": [887, 981], "conversations": [{"from": "human", "timestamps": 917.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.124, 0.351, 0.38, 0.995]: talk to (e.g., self, a person, a group), bend/bow (at the waist); [0.634, 0.125, 0.876, 0.885]: watch (a person), dance."}, {"from": "human", "timestamps": 981.0, "value": "What actions did the person at bbox [0.134, 0.021, 0.52, 0.984] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.134, 0.021, 0.52, 0.984]: stand, talk to (e.g., self, a person, a 
group).", "At 980.0s, [0.127, 0.063, 0.598, 0.977]: stand, talk to (e.g., self, a person, a group).", "At 979.0s, [0.367, 0.325, 0.515, 0.813]: sit.", "At 978.0s, [0.357, 0.35, 0.535, 0.816]: sit, listen to (a person).", "At 977.0s, [0.365, 0.335, 0.555, 0.815]: sit, hand clap, watch (a person)."]}]}, {"video": "Kb1fduj-jdY.mp4", "clip": [935, 1007], "conversations": [{"from": "human", "timestamps": 965.0, "value": "What actions is the person in bbox [0.307, 0.206, 0.442, 0.833] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.307, 0.206, 0.442, 0.833]: stand, listen to (a person), watch (a person).", "At 964.0s, [0.31, 0.196, 0.435, 0.858]: stand, listen to (a person), watch (a person).", "At 963.0s, [0.307, 0.177, 0.414, 0.828]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1007.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.193, 0.0, 0.644, 1.0]: stand, talk to (e.g., self, a person, a group); [0.562, 0.187, 0.876, 1.0]: listen to (a person), stand."}]}, {"video": "Kb1fduj-jdY.mp4", "clip": [1025, 1063], "conversations": [{"from": "human", "timestamps": 1055.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.436, 0.058, 0.749, 0.995]?"}, {"from": "gpt", "value": ["In the current frame, [0.436, 0.058, 0.749, 0.995]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1054.0s, [0.429, 0.081, 0.828, 0.985]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1053.0s, [0.468, 0.183, 0.816, 0.995]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1051.0s to 1052.0s, [0.489, 0.0, 0.849, 1.0]->[0.444, 0.0, 0.82, 1.0]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1063.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.298, 0.139, 0.491, 0.949]: carry/hold (an object), stand, talk to (e.g., self, a person, a group), listen to (a person); [0.466, 0.125, 0.634, 0.919]: carry/hold (an object), stand, listen to (a person)."}]}, {"video": "Kb1fduj-jdY.mp4", "clip": [1256, 1361], "conversations": [{"from": "human", "timestamps": 1286.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.139, 0.412, 0.207, 0.975]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.221, 0.48, 0.331, 0.994]: watch (a person), stand; [0.413, 0.803, 0.52, 0.993]: watch (a person), bend/bow (at the waist); [0.416, 0.175, 0.448, 0.407]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.572, 0.321, 0.616, 0.617]: carry/hold (an object), stand, hit (an object), watch (a person); [0.702, 0.447, 0.815, 0.956]: listen to (a person), stand, watch (a person); [0.775, 0.554, 0.876, 0.994]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1361.0, "value": "What actions did the person at bbox [0.363, 0.038, 0.499, 0.906] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.363, 0.038, 0.499, 0.906]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1360.0s, [0.368, 0.036, 0.513, 0.676]: stand, listen to (a person), talk 
to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Kb1fduj-jdY.mp4", "clip": [1297, 1340], "conversations": [{"from": "human", "timestamps": 1327.0, "value": "What actions did the person at bbox [0.237, 0.121, 0.625, 0.987] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.237, 0.121, 0.625, 0.987]: stand, talk to (e.g., self, a person, a group).", "From 1325.0s to 1326.0s, [0.233, 0.137, 0.654, 0.991]->[0.221, 0.12, 0.734, 0.974]: stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1340.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.154, 0.023, 0.595, 0.988]: stand, talk to (e.g., self, a person, a group); [0.205, 0.264, 0.272, 0.62]: listen to (a person), stand, watch (a person); [0.441, 0.051, 0.551, 0.493]: stand; [0.491, 0.107, 0.842, 0.973]: listen to (a person), stand; [0.53, 0.095, 0.627, 0.571]: listen to (a person), stand, watch (a person); [0.734, 0.147, 0.79, 0.482]: stand."}]}, {"video": "Kb1fduj-jdY.mp4", "clip": [1355, 1412], "conversations": [{"from": "human", "timestamps": 1385.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.205, 0.0, 0.465, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.425, 0.068, 0.535, 0.989]: stand; [0.519, 0.099, 0.647, 0.864]: listen to (a person), stand, watch (a person); [0.556, 0.076, 0.798, 0.997]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.762, 0.122, 0.876, 0.981]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1412.0, "value": "What actions is the person in bbox [0.215, 0.071, 0.471, 0.984] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.215, 0.071, 0.471, 0.984]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1411.0s, [0.203, 0.078, 0.468, 0.989]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1410.0s, [0.166, 0.065, 0.454, 1.0]: stand, listen to (a person), watch (a person).", "At 1409.0s, [0.165, 0.082, 0.424, 0.991]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Kb1fduj-jdY.mp4", "clip": [1524, 1561], "conversations": [{"from": "human", "timestamps": 1554.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.297, 0.066, 0.41, 0.893]: listen to (a person), walk; [0.371, 0.086, 0.51, 0.972]: listen to (a person), watch (a person), talk to (e.g., self, a person, a group), walk; [0.52, 0.04, 0.706, 0.844]: listen to (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1561.0, "value": "What actions did the person at bbox [0.458, 0.125, 0.709, 0.969] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.458, 0.125, 0.709, 0.969]: stand, listen to (a person), watch (a person).", "From 1559.0s to 1560.0s, [0.457, 0.097, 0.705, 0.984]->[0.468, 0.119, 0.712, 0.987]: stand, listen to (a person), watch (a person).", "At 1558.0s, [0.468, 0.103, 0.718, 0.965]: stand, listen to (a person).", "At 1557.0s, [0.454, 0.04, 0.651, 0.984]: stand, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1556.0s, 
[0.417, 0.076, 0.604, 0.996]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1555.0s, [0.406, 0.08, 0.558, 0.954]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 1553.0s to 1554.0s, [0.349, 0.1, 0.489, 0.9]->[0.371, 0.086, 0.51, 0.972]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1552.0s, [0.387, 0.109, 0.507, 0.896]: walk, listen to (a person), watch (a person).", "At 1551.0s, [0.379, 0.201, 0.458, 0.909]: walk, listen to (a person)."]}]}, {"video": "Kb1fduj-jdY.mp4", "clip": [1535, 1571], "conversations": [{"from": "human", "timestamps": 1565.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.692, 0.332, 0.876, 0.995]?"}, {"from": "gpt", "value": ["In the current frame, [0.692, 0.332, 0.876, 0.995]: sit, listen to (a person).", "From 1561.0s to 1564.0s, [0.711, 0.303, 0.876, 0.999]->[0.708, 0.339, 0.876, 0.981]->[0.709, 0.346, 0.874, 0.991]->[0.696, 0.33, 0.876, 0.995]: sit, listen to (a person).", "At 1560.0s, [0.666, 0.285, 0.876, 0.998]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1559.0s, [0.675, 0.312, 0.876, 0.978]: sit, talk to (e.g., self, a person, a group).", "At 1558.0s, [0.676, 0.26, 0.873, 0.983]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1557.0s, [0.645, 0.28, 0.839, 0.999]: bend/bow (at the waist), listen to (a person).", "At 1556.0s, [0.599, 0.245, 0.782, 0.985]: listen to (a person).", "At 1555.0s, [0.575, 0.073, 0.754, 0.99]: stand, listen to (a person).", "At 1554.0s, [0.52, 0.04, 0.706, 0.844]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1553.0s, [0.463, 0.024, 0.702, 0.976]: walk, touch (an object), talk to (e.g., self, a person, a group).", "From 1548.0s to 1552.0s, [0.384, 0.278, 0.473, 0.889]->[0.383, 0.27, 0.495, 0.942]->[0.421, 0.231, 0.545, 0.959]->[0.445, 0.165, 0.613, 0.985]->[0.461, 0.12, 0.739, 0.968]: walk, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1571.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.124, 0.014, 0.3, 1.0]: listen to (a person), stand, watch (a person); [0.422, 0.028, 0.671, 0.999]: listen to (a person), stand; [0.674, 0.301, 0.875, 0.999]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "Kb1fduj-jdY.mp4", "clip": [1600, 1649], "conversations": [{"from": "human", "timestamps": 1630.0, "value": "What actions did the person at bbox [0.133, 0.0, 0.38, 0.994] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.133, 0.0, 0.38, 0.994]: stand, listen to (a person), watch (a person).", "From 1622.0s to 1629.0s, [0.124, 0.011, 0.266, 0.696]->[0.124, 0.011, 0.31, 0.846]->[0.124, 0.005, 0.317, 0.989]->[0.124, 0.007, 0.27, 0.818]->[0.124, 0.027, 0.263, 0.711]->[0.124, 0.013, 0.318, 1.0]->[0.13, 0.0, 0.368, 0.983]->[0.133, 0.008, 0.359, 1.0]: stand, listen to (a person), watch (a person).", "At 1621.0s, [0.124, 0.01, 0.255, 0.689]: stand, listen to (a person).", "From 1619.0s to 1620.0s, [0.124, 0.012, 0.262, 0.977]->[0.126, 0.029, 0.272, 0.926]: stand, listen to (a person), watch (a person).", "At 1618.0s, [0.124, 0.0, 0.279, 0.788]: stand, talk to (e.g., self, a person, a group).", "From 1616.0s to 1617.0s, [0.124, 0.019, 0.391, 0.967]->[0.124, 0.004, 0.287, 0.984]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1612.0s to 
1615.0s, [0.125, 0.0, 0.234, 0.752]->[0.124, 0.022, 0.26, 0.949]->[0.124, 0.0, 0.264, 0.961]->[0.124, 0.033, 0.273, 0.934]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1649.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.124, 0.097, 0.224, 0.978]: listen to (a person), stand, watch (a person); [0.39, 0.117, 0.705, 0.981]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.696, 0.043, 0.876, 0.973]: listen to (a person), stand, watch (a person)."}]}, {"video": "Kb1fduj-jdY.mp4", "clip": [1684, 1719], "conversations": [{"from": "human", "timestamps": 1714.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.206, 0.102, 0.673, 0.997]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1719.0, "value": "What actions did the person at bbox [0.349, 0.108, 0.72, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.349, 0.108, 0.72, 1.0]: stand, listen to (a person).", "At 1718.0s, [0.41, 0.102, 0.707, 0.995]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1717.0s, [0.38, 0.099, 0.742, 0.99]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1716.0s, [0.326, 0.133, 0.671, 1.0]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1715.0s, [0.402, 0.12, 0.787, 0.98]: walk, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Kb1fduj-jdY.mp4", "clip": [1732, 1791], "conversations": [{"from": "human", "timestamps": 1762.0, "value": "For the person at bbox [0.328, 0.144, 0.453, 0.81], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.328, 0.144, 0.453, 0.81]: walk.", "From 1759.0s to 1761.0s, [0.206, 0.193, 0.288, 0.763]->[0.226, 0.196, 0.322, 0.775]->[0.273, 0.173, 0.372, 0.745]: walk.", "From 1757.0s to 1758.0s, [0.208, 0.19, 0.261, 0.731]->[0.186, 0.189, 0.288, 0.714]: walk, enter, open (e.g., a window, a car door)."]}, {"from": "human", "timestamps": 1791.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.475, 0.0, 0.653, 0.987]: stand, touch (an object)."}]}, {"video": "Ksd1JQFHYWA.mp4", "clip": [921, 1001], "conversations": [{"from": "human", "timestamps": 951.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.035, 0.269, 0.248, 0.972]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.167, 0.2, 0.61, 1.0]: watch (a person), talk to (e.g., self, a person, a group), touch (an object), bend/bow (at the waist); [0.223, 0.116, 0.347, 0.534]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.53, 0.45, 0.801, 0.986]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1001.0, "value": "Describe the actions of the person at bbox [0.636, 0.007, 0.864, 0.639] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.636, 0.007, 0.864, 0.639]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1000.0s, [0.64, 0.0, 0.852, 0.624]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Ksd1JQFHYWA.mp4", "clip": [953, 997], "conversations": [{"from": "human", "timestamps": 983.0, 
"value": "For the person at bbox [0.027, 0.17, 0.17, 0.971], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.027, 0.17, 0.17, 0.971]: stand, listen to (a person), watch (a person).", "At 982.0s, [0.024, 0.158, 0.163, 0.967]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 997.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.249, 0.037, 0.462, 0.988]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.614, 0.01, 0.857, 0.668]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "Ksd1JQFHYWA.mp4", "clip": [1035, 1123], "conversations": [{"from": "human", "timestamps": 1065.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.519, 0.149, 0.688, 0.742]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.627, 0.106, 0.946, 0.992]: watch (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1123.0, "value": "Describe the actions of the person at bbox [0.607, 0.17, 0.769, 0.895] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.607, 0.17, 0.769, 0.895]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1122.0s, [0.727, 0.176, 0.898, 0.857]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1121.0s, [0.645, 0.15, 0.833, 0.902]: walk, listen to (a person)."]}]}, {"video": "Ksd1JQFHYWA.mp4", "clip": [1078, 1137], "conversations": [{"from": "human", "timestamps": 1108.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.117, 0.173, 0.423, 0.974]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.404, 0.016, 0.726, 0.977]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1137.0, "value": "What actions is the person in bbox [0.344, 0.109, 0.585, 0.985] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.344, 0.109, 0.585, 0.985]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1136.0s, [0.37, 0.155, 0.626, 0.967]: walk, listen to (a person).", "At 1135.0s, [0.357, 0.172, 0.571, 0.975]: stand, listen to (a person), watch (a person).", "At 1134.0s, [0.428, 0.181, 0.58, 0.96]: walk, listen to (a person), watch (a person).", "At 1133.0s, [0.392, 0.184, 0.551, 0.962]: stand, listen to (a person), watch (a person).", "From 1131.0s to 1132.0s, [0.489, 0.213, 0.626, 0.912]->[0.453, 0.217, 0.605, 0.918]: walk, listen to (a person), watch (a person)."]}]}, {"video": "Ksd1JQFHYWA.mp4", "clip": [1116, 1150], "conversations": [{"from": "human", "timestamps": 1146.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.061, 0.217, 0.226, 0.967]: carry/hold (an object), stand, write, listen to (a person); [0.249, 0.148, 0.526, 0.971]: stand, talk to (e.g., self, a person, a group); [0.491, 0.202, 0.901, 0.979]: stand, touch (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1150.0, "value": "Can you tell me the 
current and past actions of the person located in bbox [0.597, 0.227, 0.753, 0.654]?"}, {"from": "gpt", "value": ["In the current frame, [0.597, 0.227, 0.753, 0.654]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1148.0s to 1149.0s, [0.614, 0.21, 0.843, 0.858]->[0.583, 0.196, 0.813, 0.773]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1146.0s to 1147.0s, [0.491, 0.202, 0.901, 0.979]->[0.681, 0.227, 0.908, 0.648]: stand, touch (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "Ksd1JQFHYWA.mp4", "clip": [1160, 1211], "conversations": [{"from": "human", "timestamps": 1190.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.425, 0.072, 0.564, 0.827]: listen to (a person), stand, touch (an object), watch (a person); [0.426, 0.098, 0.682, 0.996]: watch (a person), stand, touch (an object), talk to (e.g., self, a person, a group); [0.689, 0.163, 0.89, 0.967]: carry/hold (an object), stand, listen to (a person)."}, {"from": "human", "timestamps": 1211.0, "value": "What actions is the person in bbox [0.36, 0.026, 0.66, 0.935] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.36, 0.026, 0.66, 0.935]: stand, carry/hold (an object), open (e.g., a window, a car door), listen to (a person), talk to (e.g., self, a person, a group).", "At 1210.0s, [0.292, 0.007, 0.561, 0.985]: stand, open (e.g., a window, a car door), listen to (a person).", "At 1209.0s, [0.326, 0.076, 0.666, 0.994]: walk, listen to (a person)."]}]}, {"video": "Ksd1JQFHYWA.mp4", "clip": [1195, 1235], "conversations": [{"from": "human", "timestamps": 1225.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.689, 0.118, 0.903, 0.974]?"}, {"from": "gpt", "value": ["In the current frame, [0.689, 0.118, 0.903, 0.974]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1222.0s to 1224.0s, [0.687, 0.138, 0.896, 0.997]->[0.705, 0.07, 0.899, 0.975]->[0.723, 0.087, 0.908, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1221.0s, [0.756, 0.138, 0.884, 0.685]: stand, carry/hold (an object), write, listen to (a person).", "At 1220.0s, [0.765, 0.107, 0.889, 0.902]: stand, carry/hold (an object), listen to (a person)."]}, {"from": "human", "timestamps": 1235.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.22, 0.035, 0.458, 0.991]: listen to (a person), watch (a person), walk."}]}, {"video": "Ksd1JQFHYWA.mp4", "clip": [1251, 1292], "conversations": [{"from": "human", "timestamps": 1281.0, "value": "Describe the actions of the person at bbox [0.527, 0.299, 0.706, 0.985] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.527, 0.299, 0.706, 0.985]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1280.0s, [0.589, 0.305, 0.729, 0.995]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1279.0s, [0.499, 0.154, 0.869, 1.0]: walk, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1292.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.178, 0.139, 0.555, 0.978]: carry/hold (an object), stand, talk to (e.g., self, a person, a group), listen to (a person); 
[0.514, 0.168, 0.709, 0.998]: carry/hold (an object), stand, write, listen to (a person); [0.531, 0.171, 0.91, 0.987]: carry/hold (an object), listen to (a person)."}]}, {"video": "Ksd1JQFHYWA.mp4", "clip": [1324, 1358], "conversations": [{"from": "human", "timestamps": 1354.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.136, 0.253, 0.229, 0.96]: carry/hold (an object), stand, listen to (a person); [0.208, 0.201, 0.425, 0.979]: carry/hold (an object), stand, listen to (a person); [0.368, 0.262, 0.442, 0.628]: listen to (a person), stand; [0.457, 0.251, 0.562, 0.852]: listen to (a person), stand, watch (a person); [0.508, 0.186, 0.775, 0.998]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1358.0, "value": "What actions did the person at bbox [0.523, 0.202, 0.746, 0.982] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.523, 0.202, 0.746, 0.982]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1356.0s to 1357.0s, [0.593, 0.191, 0.765, 0.976]->[0.509, 0.185, 0.742, 0.984]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1355.0s, [0.566, 0.226, 0.772, 0.974]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1354.0s, [0.508, 0.186, 0.775, 0.998]: stand, talk to (e.g., self, a person, a group).", "From 1352.0s to 1353.0s, [0.534, 0.209, 0.753, 0.971]->[0.592, 0.187, 0.786, 0.991]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Ksd1JQFHYWA.mp4", "clip": [1377, 1438], "conversations": [{"from": "human", "timestamps": 1407.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.062, 0.127, 0.565, 1.0]: listen to (a person), stand; [0.731, 0.039, 0.976, 1.0]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1438.0, "value": "What actions did the person at bbox [0.29, 0.384, 0.565, 0.976] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.29, 0.384, 0.565, 0.976]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1437.0s, [0.296, 0.393, 0.607, 0.983]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "Ksd1JQFHYWA.mp4", "clip": [1371, 1496], "conversations": [{"from": "human", "timestamps": 1401.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.184, 0.063, 0.417, 0.914]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.375, 0.175, 0.599, 0.979]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.693, 0.02, 0.931, 0.993]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1496.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.049, 0.019, 0.518, 0.995]?"}, {"from": "gpt", "value": ["At Now, [0.049, 0.019, 0.518, 0.995]: stand, talk to (e.g., self, a person, a group).", "From 1494.0s to 1495.0s, [0.183, 0.067, 0.515, 0.983]->[0.105, 0.046, 0.442, 0.998]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1493.0s, [0.168, 0.048, 0.492, 0.976]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 
1489.0s to 1492.0s, [0.282, 0.016, 0.525, 0.985]->[0.254, 0.047, 0.514, 0.99]->[0.211, 0.012, 0.495, 0.981]->[0.144, 0.018, 0.454, 0.981]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1488.0s, [0.298, 0.034, 0.516, 0.961]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 1486.0s to 1487.0s, [0.275, 0.078, 0.515, 0.996]->[0.25, 0.051, 0.461, 0.996]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1485.0s, [0.188, 0.077, 0.414, 0.988]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1484.0s, [0.23, 0.077, 0.429, 0.994]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1483.0s, [0.253, 0.068, 0.444, 0.991]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1482.0s, [0.279, 0.063, 0.466, 0.985]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1481.0s, [0.337, 0.075, 0.558, 0.974]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1480.0s, [0.341, 0.073, 0.554, 0.986]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1479.0s, [0.347, 0.07, 0.54, 0.964]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1478.0s, [0.279, 0.101, 0.645, 0.973]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1477.0s, [0.333, 0.116, 0.606, 0.952]: stand, talk to (e.g., self, a person, a group).", "At 1476.0s, [0.308, 0.094, 0.655, 0.967]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1475.0s, [0.309, 0.102, 0.657, 0.966]: stand, talk to (e.g., self, a person, a group).", "From 1473.0s to 1474.0s, [0.339, 0.102, 0.634, 0.981]->[0.321, 0.106, 0.633, 0.969]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1472.0s, [0.296, 0.087, 0.618, 0.984]: stand, talk to (e.g., self, a person, a group).", "From 1470.0s to 1471.0s, [0.295, 0.103, 0.6, 0.969]->[0.288, 0.094, 0.583, 0.969]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1469.0s, [0.294, 0.096, 0.627, 0.973]: stand, touch (an object), talk to (e.g., self, a person, a group).", "From 1467.0s to 1468.0s, [0.331, 0.092, 0.631, 0.975]->[0.326, 0.108, 0.626, 0.972]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1466.0s, [0.277, 0.062, 0.464, 0.975]: stand, talk to (e.g., self, a person, a group).", "At 1465.0s, [0.283, 0.082, 0.464, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1464.0s, [0.285, 0.067, 0.464, 0.981]: stand, listen to (a person), watch (a person).", "At 1463.0s, [0.281, 0.084, 0.456, 0.984]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1462.0s, [0.299, 0.05, 0.55, 0.966]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1461.0s, [0.242, 0.097, 0.577, 0.968]: stand, touch (an object), watch (e.g., TV), listen to (a person).", "At 1460.0s, [0.21, 0.083, 0.524, 0.978]: stand, touch (an object), listen to (a person).", "At 1459.0s, [0.207, 0.077, 0.45, 0.971]: stand, listen to (a person), watch (a person)."]}]}, {"video": "Ksd1JQFHYWA.mp4", "clip": [1429, 1474], "conversations": [{"from": "human", "timestamps": 1459.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, 
[0.038, 0.07, 0.238, 0.995]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.207, 0.077, 0.45, 0.971]: listen to (a person), stand, watch (a person); [0.726, 0.231, 0.966, 0.969]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1474.0, "value": "What actions is the person in bbox [0.074, 0.072, 0.301, 0.978] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.074, 0.072, 0.301, 0.978]: stand, listen to (a person), watch (a person).", "From 1472.0s to 1473.0s, [0.064, 0.078, 0.293, 0.991]->[0.076, 0.064, 0.292, 0.99]: stand, listen to (a person), watch (a person).", "At 1471.0s, [0.06, 0.077, 0.278, 0.996]: stand, listen to (a person).", "From 1464.0s to 1470.0s, [0.082, 0.073, 0.286, 0.986]->[0.065, 0.077, 0.273, 1.0]->[0.071, 0.059, 0.276, 0.982]->[0.078, 0.063, 0.274, 0.985]->[0.075, 0.056, 0.297, 0.978]->[0.093, 0.071, 0.301, 0.966]->[0.087, 0.073, 0.293, 0.979]: stand, listen to (a person), watch (a person).", "At 1463.0s, [0.055, 0.092, 0.28, 0.999]: stand, listen to (a person).", "From 1461.0s to 1462.0s, [0.038, 0.09, 0.239, 1.0]->[0.063, 0.1, 0.286, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1459.0s to 1460.0s, [0.038, 0.07, 0.238, 0.995]->[0.038, 0.039, 0.217, 0.996]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Ksd1JQFHYWA.mp4", "clip": [1489, 1553], "conversations": [{"from": "human", "timestamps": 1519.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.652, 0.141, 0.737, 0.474]: listen to (a person), stand, watch (a person); [0.659, 0.144, 0.86, 0.966]: listen to (a person), stand."}, {"from": "human", "timestamps": 1553.0, "value": "Describe the actions of the person at bbox [0.472, 0.083, 0.68, 0.757] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.472, 0.083, 0.68, 0.757]: stand, carry/hold (an object), grab (a person), listen to (a person).", "From 1551.0s to 1552.0s, [0.374, 0.094, 0.678, 0.8]->[0.417, 0.132, 0.681, 0.806]: stand, grab (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Ksd1JQFHYWA.mp4", "clip": [1492, 1585], "conversations": [{"from": "human", "timestamps": 1522.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.382, 0.256, 0.528, 0.977]: listen to (a person), walk; [0.459, 0.198, 0.692, 1.0]: stand, talk to (e.g., self, a person, a group); [0.614, 0.155, 0.763, 0.751]: listen to (a person), stand; [0.673, 0.196, 0.89, 0.939]: listen to (a person), stand."}, {"from": "human", "timestamps": 1585.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.528, 0.088, 0.77, 0.995]?"}, {"from": "gpt", "value": ["At Now, [0.528, 0.088, 0.77, 0.995]: walk, listen to (a person), talk to (e.g., self, a person, a group).", "At 1584.0s, [0.507, 0.078, 0.771, 0.984]: walk, listen to (a person), talk to (e.g., self, a person, a group).", "At 1583.0s, [0.541, 0.076, 0.793, 0.994]: walk, grab (a person), listen to (a person).", "From 1581.0s to 1582.0s, [0.488, 0.131, 0.717, 0.999]->[0.471, 0.082, 0.712, 0.996]: walk, listen to (a person), talk to (e.g., self, a person, a group).", "At 1580.0s, [0.491, 0.162, 0.697, 0.98]: walk, grab (a person), listen to (a person).", "At 1579.0s, [0.507, 0.204, 0.674, 
0.984]: walk, listen to (a person), watch (a person).", "From 1577.0s to 1578.0s, [0.558, 0.234, 0.678, 0.992]->[0.542, 0.243, 0.697, 0.991]: walk, grab (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1576.0s, [0.596, 0.223, 0.73, 0.977]: walk, grab (a person), talk to (e.g., self, a person, a group).", "At 1575.0s, [0.716, 0.102, 0.923, 0.989]: stand, carry/hold (an object), grab (a person), talk to (e.g., self, a person, a group).", "At 1574.0s, [0.575, 0.093, 0.914, 0.977]: stand, carry/hold (an object), touch (an object), grab (a person), listen to (a person), talk to (e.g., self, a person, a group).", "At 1573.0s, [0.679, 0.077, 0.912, 0.997]: stand, grab (a person), talk to (e.g., self, a person, a group)."]}]}, {"video": "Ksd1JQFHYWA.mp4", "clip": [1569, 1607], "conversations": [{"from": "human", "timestamps": 1599.0, "value": "Describe the actions of the person at bbox [0.355, 0.593, 0.571, 0.877] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.355, 0.593, 0.571, 0.877]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1597.0s to 1598.0s, [0.363, 0.581, 0.545, 0.887]->[0.352, 0.571, 0.564, 0.88]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1607.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.291, 0.055, 0.661, 0.986]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.317, 0.271, 0.392, 0.429]: stand; [0.678, 0.041, 0.972, 0.974]: carry/hold (an object), stand, watch (a person), listen to (a person)."}]}, {"video": "Ksd1JQFHYWA.mp4", "clip": [1634, 1698], "conversations": [{"from": "human", "timestamps": 1664.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.128, 0.251, 0.383, 0.977]: watch (a person), listen to (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.626, 0.299, 0.726, 0.928]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1698.0, "value": "Describe the actions of the person at bbox [0.642, 0.139, 0.801, 0.541] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.642, 0.139, 0.801, 0.541]: stand, carry/hold (an object), put down.", "At 1697.0s, [0.688, 0.167, 0.841, 0.887]: stand, carry/hold (an object)."]}]}, {"video": "Ksd1JQFHYWA.mp4", "clip": [1742, 1774], "conversations": [{"from": "human", "timestamps": 1772.0, "value": "What actions did the person at bbox [0.568, 0.167, 0.852, 0.995] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.568, 0.167, 0.852, 0.995]: stand, answer phone.", "At 1771.0s, [0.578, 0.167, 0.839, 0.996]: stand, answer phone."]}, {"from": "human", "timestamps": 1774.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.559, 0.113, 0.814, 0.992]: stand, answer phone."}]}, {"video": "LIavUJVrXaI.mkv", "clip": [897, 938], "conversations": [{"from": "human", "timestamps": 927.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.144, 0.217, 0.422, 0.878]: carry/hold (an object), sit, listen to (a person); [0.321, 0.301, 0.425, 0.531]: listen to (a person), sit, eat; [0.547, 0.26, 0.706, 0.596]: 
sit, watch (a person), carry/hold (an object), eat, talk to (e.g., self, a person, a group); [0.654, 0.263, 0.868, 0.723]: listen to (a person), sit, touch (an object)."}, {"from": "human", "timestamps": 938.0, "value": "What actions is the person in bbox [0.594, 0.277, 0.874, 0.758] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.594, 0.277, 0.874, 0.758]: sit, carry/hold (an object), listen to (a person).", "From 936.0s to 937.0s, [0.642, 0.279, 0.868, 0.782]->[0.654, 0.283, 0.874, 0.756]: sit, carry/hold (an object), listen to (a person).", "At 935.0s, [0.611, 0.28, 0.868, 0.761]: sit, listen to (a person)."]}]}, {"video": "LIavUJVrXaI.mkv", "clip": [930, 974], "conversations": [{"from": "human", "timestamps": 960.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.119, 0.219, 0.319, 0.885]: listen to (a person), sit, eat, touch (an object); [0.295, 0.382, 0.442, 0.629]: listen to (a person), sit, eat; [0.488, 0.327, 0.661, 0.56]: sit, watch (a person), carry/hold (an object), eat, talk to (e.g., self, a person, a group); [0.493, 0.292, 0.86, 0.742]: carry/hold (an object), sit, eat, listen to (a person)."}, {"from": "human", "timestamps": 974.0, "value": "What actions is the person in bbox [0.685, 0.289, 0.869, 0.784] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.685, 0.289, 0.869, 0.784]: sit, carry/hold (an object), listen to (a person).", "At 973.0s, [0.686, 0.289, 0.87, 0.761]: sit, carry/hold (an object), eat, listen to (a person).", "From 970.0s to 972.0s, [0.688, 0.296, 0.868, 0.785]->[0.686, 0.298, 0.87, 0.764]->[0.683, 0.288, 0.868, 0.788]: sit, carry/hold (an object), listen to (a person).", "From 968.0s to 969.0s, [0.677, 0.294, 0.868, 0.79]->[0.683, 0.297, 0.868, 0.791]: sit, carry/hold (an object), eat, listen to (a person).", "At 967.0s, [0.631, 0.296, 0.859, 0.786]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 966.0s, [0.525, 0.286, 0.861, 0.773]: sit, carry/hold (an object), eat, listen to (a person), watch (a person).", "At 965.0s, [0.505, 0.289, 0.863, 0.746]: sit, carry/hold (an object), eat, listen to (a person).", "From 963.0s to 964.0s, [0.651, 0.291, 0.861, 0.78]->[0.642, 0.286, 0.859, 0.788]: sit, carry/hold (an object), listen to (a person).", "At 962.0s, [0.657, 0.287, 0.862, 0.782]: sit, listen to (a person).", "At 961.0s, [0.529, 0.288, 0.862, 0.762]: sit, lift/pick up, touch (an object), give/serve (an object) to (a person), listen to (a person), watch (a person)."]}]}, {"video": "LIavUJVrXaI.mkv", "clip": [935, 1007], "conversations": [{"from": "human", "timestamps": 965.0, "value": "What actions did the person at bbox [0.505, 0.289, 0.863, 0.746] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.505, 0.289, 0.863, 0.746]: sit, carry/hold (an object), eat, listen to (a person).", "From 963.0s to 964.0s, [0.651, 0.291, 0.861, 0.78]->[0.642, 0.286, 0.859, 0.788]: sit, carry/hold (an object), listen to (a person).", "At 962.0s, [0.657, 0.287, 0.862, 0.782]: sit, listen to (a person).", "At 961.0s, [0.529, 0.288, 0.862, 0.762]: sit, lift/pick up, touch (an object), give/serve (an object) to (a person), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1007.0, "value": "For the current frame, show each individual's position and actions."}, 
{"from": "gpt", "value": "At now, [0.39, 0.217, 0.644, 0.625]: carry/hold (an object), stand."}]}, {"video": "LIavUJVrXaI.mkv", "clip": [1041, 1111], "conversations": [{"from": "human", "timestamps": 1071.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.244, 0.204, 0.588, 0.966]: listen to (a person), stand, watch (a person); [0.502, 0.06, 0.843, 0.97]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1111.0, "value": "What actions did the person at bbox [0.114, 0.0, 0.861, 0.986] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.114, 0.0, 0.861, 0.986]: sit, talk to (e.g., self, a person, a group).", "At 1110.0s, [0.114, 0.008, 0.846, 0.957]: sit, talk to (e.g., self, a person, a group).", "At 1109.0s, [0.103, 0.046, 0.815, 0.994]: sit, touch (an object), listen to (a person)."]}]}, {"video": "LIavUJVrXaI.mkv", "clip": [1062, 1112], "conversations": [{"from": "human", "timestamps": 1092.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.202, 0.234, 0.671, 0.983]: listen to (a person), sit, talk to (e.g., self, a person, a group); [0.566, 0.0, 0.887, 0.968]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1112.0, "value": "Describe the actions of the person at bbox [0.151, 0.0, 0.88, 0.984] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.151, 0.0, 0.88, 0.984]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1110.0s to 1111.0s, [0.114, 0.008, 0.846, 0.957]->[0.114, 0.0, 0.861, 0.986]: sit, talk to (e.g., self, a person, a group).", "At 1109.0s, [0.103, 0.046, 0.815, 0.994]: sit, touch (an object), listen to (a person)."]}]}, {"video": "LIavUJVrXaI.mkv", "clip": [1292, 1339], "conversations": [{"from": "human", "timestamps": 1322.0, "value": "What actions is the person in bbox [0.502, 0.309, 0.752, 0.972] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.502, 0.309, 0.752, 0.972]: stand, watch (a person).", "From 1317.0s to 1321.0s, [0.508, 0.296, 0.785, 0.964]->[0.501, 0.311, 0.765, 0.951]->[0.506, 0.321, 0.764, 0.966]->[0.52, 0.318, 0.759, 0.975]->[0.512, 0.31, 0.759, 0.98]: stand, watch (a person).", "At 1316.0s, [0.514, 0.289, 0.772, 0.952]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1314.0s to 1315.0s, [0.515, 0.292, 0.689, 0.959]->[0.477, 0.274, 0.75, 0.956]: stand, watch (a person).", "At 1313.0s, [0.444, 0.304, 0.695, 0.946]: stand, open (e.g., a window, a car door), touch (an object), watch (a person).", "From 1310.0s to 1312.0s, [0.535, 0.302, 0.799, 0.975]->[0.507, 0.278, 0.754, 0.955]->[0.431, 0.298, 0.72, 0.943]: stand, open (e.g., a window, a car door), watch (a person).", "At 1309.0s, [0.544, 0.316, 0.805, 0.988]: stand, open (e.g., a window, a car door)."]}, {"from": "human", "timestamps": 1339.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.523, 0.499, 0.798, 0.971]: walk."}]}, {"video": "LIavUJVrXaI.mkv", "clip": [1395, 1430], "conversations": [{"from": "human", "timestamps": 1425.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.354, 0.148, 0.879, 0.966]: stand, talk to (e.g., 
self, a person, a group)."}, {"from": "human", "timestamps": 1430.0, "value": "For the person at bbox [0.124, 0.0, 0.839, 0.971], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.124, 0.0, 0.839, 0.971]: stand, talk to (e.g., self, a person, a group).", "At 1429.0s, [0.13, 0.0, 0.837, 0.979]: stand, talk to (e.g., self, a person, a group).", "From 1427.0s to 1428.0s, [0.278, 0.04, 0.88, 0.969]->[0.17, 0.015, 0.871, 0.964]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1423.0s to 1426.0s, [0.361, 0.138, 0.884, 0.967]->[0.347, 0.128, 0.888, 0.962]->[0.354, 0.148, 0.879, 0.966]->[0.341, 0.103, 0.885, 0.971]: stand, talk to (e.g., self, a person, a group).", "At 1422.0s, [0.351, 0.16, 0.878, 0.979]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1421.0s, [0.361, 0.169, 0.873, 0.975]: stand.", "From 1414.0s to 1420.0s, [0.394, 0.135, 0.746, 0.978]->[0.391, 0.136, 0.79, 0.968]->[0.401, 0.15, 0.826, 0.979]->[0.389, 0.138, 0.83, 1.0]->[0.379, 0.131, 0.832, 0.975]->[0.375, 0.129, 0.864, 0.966]->[0.386, 0.139, 0.867, 0.961]: stand, talk to (e.g., self, a person, a group).", "From 1412.0s to 1413.0s, [0.264, 0.116, 0.644, 0.983]->[0.368, 0.108, 0.708, 0.969]: stand.", "At 1411.0s, [0.238, 0.105, 0.646, 1.0]: stand, listen to (a person)."]}]}, {"video": "LIavUJVrXaI.mkv", "clip": [1518, 1580], "conversations": [{"from": "human", "timestamps": 1548.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.103, 0.017, 0.458, 0.988]: watch (a person), sing to (e.g., self, a person, a group), dance; [0.301, 0.115, 0.891, 1.0]: sing to (e.g., self, a person, a group), dance."}, {"from": "human", "timestamps": 1580.0, "value": "What actions is the person in bbox [0.376, 0.06, 0.879, 0.984] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.376, 0.06, 0.879, 0.984]: dance.", "At 1579.0s, [0.45, 0.002, 0.864, 0.988]: listen (e.g., to music), dance.", "At 1578.0s, [0.225, 0.059, 0.895, 0.969]: dance, listen to (a person)."]}]}, {"video": "LIavUJVrXaI.mkv", "clip": [1551, 1615], "conversations": [{"from": "human", "timestamps": 1581.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.113, 0.253, 0.396, 1.0]: watch (a person), dance; [0.216, 0.068, 0.795, 0.981]: dance."}, {"from": "human", "timestamps": 1615.0, "value": "For the person at bbox [0.106, 0.198, 0.553, 0.987], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.106, 0.198, 0.553, 0.987]: dance.", "At 1614.0s, [0.119, 0.214, 0.329, 0.977]: dance."]}]}, {"video": "LrDT25hmApw.mkv", "clip": [908, 1008], "conversations": [{"from": "human", "timestamps": 938.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.17, 0.372, 0.448, 0.72]: read, sit, touch (an object), talk to (e.g., self, a person, a group); [0.467, 0.176, 0.677, 0.669]: touch (an object), listen to (a person), watch (a person), stand, talk to (e.g., self, a person, a group); [0.733, 0.185, 0.948, 0.871]: listen to (a person), stand."}, {"from": "human", "timestamps": 1008.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.526, 0.264, 0.843, 0.611]?"}, {"from": "gpt", "value": ["At Now, [0.526, 
0.264, 0.843, 0.611]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1006.0s to 1007.0s, [0.456, 0.226, 0.833, 0.687]->[0.512, 0.253, 0.845, 0.644]: sit, touch (an object).", "From 1001.0s to 1005.0s, [0.268, 0.171, 0.952, 0.853]->[0.265, 0.16, 0.926, 0.87]->[0.301, 0.192, 0.861, 0.828]->[0.35, 0.201, 0.851, 0.802]->[0.397, 0.216, 0.831, 0.743]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1000.0s, [0.289, 0.171, 0.947, 0.867]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "LrDT25hmApw.mkv", "clip": [934, 996], "conversations": [{"from": "human", "timestamps": 964.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.112, 0.162, 0.632, 0.825]: read, sit, touch (an object)."}, {"from": "human", "timestamps": 996.0, "value": "Describe the actions of the person at bbox [0.053, 0.126, 0.747, 0.893] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.053, 0.126, 0.747, 0.893]: sit.", "From 994.0s to 995.0s, [0.078, 0.132, 0.774, 0.894]->[0.051, 0.131, 0.8, 0.895]: sit.", "At 993.0s, [0.031, 0.166, 0.792, 0.885]: sit, listen to (a person), watch (a person)."]}]}, {"video": "LrDT25hmApw.mkv", "clip": [1007, 1046], "conversations": [{"from": "human", "timestamps": 1037.0, "value": "What actions is the person in bbox [0.187, 0.16, 0.994, 0.883] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.187, 0.16, 0.994, 0.883]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1035.0s to 1036.0s, [0.19, 0.155, 1.0, 0.878]->[0.159, 0.158, 0.971, 0.863]: sit, touch (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1046.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.05, 0.284, 0.414, 0.846]: sit, talk to (e.g., self, a person, a group); [0.467, 0.197, 0.654, 0.686]: listen to (a person), walk."}]}, {"video": "LrDT25hmApw.mkv", "clip": [996, 1106], "conversations": [{"from": "human", "timestamps": 1026.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.235, 0.144, 0.956, 0.866]: carry/hold (an object), sit, touch (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1106.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.467, 0.157, 0.905, 0.833]?"}, {"from": "gpt", "value": ["In the current frame, [0.467, 0.157, 0.905, 0.833]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1105.0s, [0.07, 0.335, 0.419, 0.721]: sit, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1102.0s to 1104.0s, [0.074, 0.312, 0.422, 0.696]->[0.07, 0.32, 0.42, 0.709]->[0.072, 0.33, 0.427, 0.708]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1100.0s to 1101.0s, [0.062, 0.299, 0.4, 0.693]->[0.068, 0.308, 0.411, 0.689]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1099.0s, [0.052, 0.296, 0.393, 0.684]: sit, read, touch (an object), talk to (e.g., self, a person, a group).", "From 1097.0s to 1098.0s, [0.033, 0.387, 
0.296, 0.779]->[0.05, 0.265, 0.388, 0.68]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "LrDT25hmApw.mkv", "clip": [1078, 1113], "conversations": [{"from": "human", "timestamps": 1108.0, "value": "What actions did the person at bbox [0.466, 0.161, 0.912, 0.84] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.466, 0.161, 0.912, 0.84]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1106.0s to 1107.0s, [0.467, 0.157, 0.905, 0.833]->[0.459, 0.17, 0.913, 0.818]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1105.0s, [0.07, 0.335, 0.419, 0.721]: sit, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1102.0s to 1104.0s, [0.074, 0.312, 0.422, 0.696]->[0.07, 0.32, 0.42, 0.709]->[0.072, 0.33, 0.427, 0.708]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1100.0s to 1101.0s, [0.062, 0.299, 0.4, 0.693]->[0.068, 0.308, 0.411, 0.689]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1099.0s, [0.052, 0.296, 0.393, 0.684]: sit, read, touch (an object), talk to (e.g., self, a person, a group).", "From 1097.0s to 1098.0s, [0.033, 0.387, 0.296, 0.779]->[0.05, 0.265, 0.388, 0.68]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1113.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.069, 0.196, 0.561, 0.752]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group); [0.463, 0.164, 0.942, 0.857]: listen to (a person), sit, touch (an object), watch (a person)."}]}, {"video": "LrDT25hmApw.mkv", "clip": [1154, 1189], "conversations": [{"from": "human", "timestamps": 1184.0, "value": "What actions did the person at bbox [0.522, 0.29, 0.851, 0.88] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.522, 0.29, 0.851, 0.88]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1183.0s, [0.524, 0.282, 0.896, 0.88]: stand, listen to (a person), watch (a person).", "At 1182.0s, [0.462, 0.255, 0.808, 0.902]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1180.0s to 1181.0s, [0.591, 0.209, 0.976, 0.877]->[0.558, 0.24, 0.947, 0.878]: stand, drink, listen to (a person), watch (a person).", "From 1178.0s to 1179.0s, [0.104, 0.203, 0.499, 0.876]->[0.446, 0.202, 0.829, 0.872]: stand, drink, listen to (a person).", "At 1177.0s, [0.081, 0.202, 0.471, 0.863]: stand, carry/hold (an object), drink, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1189.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.036, 0.162, 0.581, 0.921]: stand, talk to (e.g., self, a person, a group); [0.56, 0.294, 0.937, 0.876]: drink, listen to (a person), watch (a person), carry/hold (an object), stand."}]}, {"video": "LrDT25hmApw.mkv", "clip": [1178, 1230], "conversations": [{"from": "human", "timestamps": 1208.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.02, 0.164, 0.517, 0.922]: stand, talk to (e.g., self, a 
person, a group); [0.47, 0.258, 0.864, 0.886]: listen to (a person), stand, watch (a person), drink."}, {"from": "human", "timestamps": 1230.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.218, 0.206, 0.939, 0.888]?"}, {"from": "gpt", "value": ["At Now, [0.218, 0.206, 0.939, 0.888]: carry/hold (an object), lie/sleep.", "From 1227.0s to 1229.0s, [0.193, 0.235, 0.856, 0.874]->[0.259, 0.219, 0.727, 0.683]->[0.26, 0.271, 0.782, 0.7]: carry/hold (an object), lie/sleep, watch (a person)."]}]}, {"video": "LrDT25hmApw.mkv", "clip": [1242, 1295], "conversations": [{"from": "human", "timestamps": 1272.0, "value": "Describe the actions of the person at bbox [0.537, 0.158, 0.938, 0.875] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.537, 0.158, 0.938, 0.875]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1271.0s, [0.58, 0.145, 0.978, 0.866]: stand, watch (a person).", "At 1270.0s, [0.576, 0.17, 0.98, 0.867]: stand, touch (an object), listen to (a person), watch (a person).", "From 1268.0s to 1269.0s, [0.546, 0.227, 0.983, 0.878]->[0.582, 0.183, 0.982, 0.864]: bend/bow (at the waist), watch (a person).", "From 1264.0s to 1267.0s, [0.531, 0.242, 0.983, 0.867]->[0.545, 0.238, 0.983, 0.871]->[0.555, 0.225, 0.979, 0.868]->[0.542, 0.236, 0.99, 0.866]: bend/bow (at the waist), listen to (a person), watch (a person).", "At 1263.0s, [0.535, 0.242, 0.988, 0.865]: bend/bow (at the waist), touch (an object), listen to (a person), watch (a person).", "From 1259.0s to 1262.0s, [0.521, 0.26, 0.988, 0.862]->[0.534, 0.237, 0.98, 0.867]->[0.533, 0.244, 0.981, 0.862]->[0.543, 0.235, 0.978, 0.87]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person).", "At 1258.0s, [0.496, 0.281, 0.978, 0.853]: bend/bow (at the waist), sit, play with kids, talk to (e.g., self, a person, a group), watch (a person).", "At 1257.0s, [0.409, 0.255, 0.944, 0.843]: bend/bow (at the waist), carry/hold (an object), play with kids, talk to (e.g., self, a person, a group).", "From 1253.0s to 1256.0s, [0.336, 0.226, 0.946, 0.85]->[0.368, 0.219, 0.944, 0.843]->[0.517, 0.242, 0.984, 0.869]->[0.535, 0.235, 0.974, 0.867]: bend/bow (at the waist), carry/hold (an object), play with kids, talk to (e.g., self, a person, a group), watch (a person).", "From 1251.0s to 1252.0s, [0.5, 0.254, 0.978, 0.864]->[0.486, 0.248, 0.976, 0.869]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1250.0s, [0.312, 0.246, 0.942, 0.858]: bend/bow (at the waist), carry/hold (an object), play with kids, watch (a person)."]}, {"from": "human", "timestamps": 1295.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.136, 0.171, 0.418, 0.829]: read, stand, talk to (e.g., self, a person, a group); [0.546, 0.157, 0.839, 0.868]: listen to (a person), stand, watch (a person)."}]}, {"video": "LrDT25hmApw.mkv", "clip": [1261, 1345], "conversations": [{"from": "human", "timestamps": 1291.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.139, 0.18, 0.44, 0.821]: carry/hold (an object), stand, read; [0.537, 0.16, 0.839, 0.861]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1345.0, "value": "What actions is the person in bbox [0.094, 0.235, 0.533, 0.884] doing in the current frame, and what were the person previous actions?"}, 
{"from": "gpt", "value": ["At Now, [0.094, 0.235, 0.533, 0.884]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1344.0s, [0.253, 0.243, 0.657, 0.871]: walk, listen to (a person).", "At 1343.0s, [0.293, 0.225, 0.672, 0.879]: walk, carry/hold (an object).", "At 1342.0s, [0.207, 0.213, 0.487, 0.878]: walk, carry/hold (an object), listen to (a person).", "At 1341.0s, [0.167, 0.202, 0.507, 0.872]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1340.0s, [0.172, 0.242, 0.405, 0.863]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1339.0s, [0.158, 0.247, 0.494, 0.865]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1338.0s, [0.101, 0.239, 0.344, 0.874]: stand, carry/hold (an object), watch (a person).", "At 1337.0s, [0.263, 0.24, 0.543, 0.791]: carry/hold (an object), crouch/kneel, kiss (a person), watch (a person)."]}]}, {"video": "LrDT25hmApw.mkv", "clip": [1326, 1361], "conversations": [{"from": "human", "timestamps": 1356.0, "value": "What actions is the person in bbox [0.226, 0.19, 0.473, 0.863] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.226, 0.19, 0.473, 0.863]: walk, carry/hold (an object).", "From 1354.0s to 1355.0s, [0.258, 0.222, 0.499, 0.878]->[0.153, 0.209, 0.369, 0.868]: walk, carry/hold (an object).", "At 1353.0s, [0.267, 0.183, 0.53, 0.866]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1349.0s to 1352.0s, [0.309, 0.193, 0.537, 0.874]->[0.311, 0.196, 0.539, 0.867]->[0.312, 0.188, 0.548, 0.859]->[0.32, 0.184, 0.563, 0.868]: stand, listen to (a person), watch (a person).", "At 1348.0s, [0.239, 0.212, 0.495, 0.867]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1347.0s, [0.117, 0.226, 0.464, 0.871]: walk, carry/hold (an object), listen to (a person).", "At 1346.0s, [0.028, 0.257, 0.337, 0.875]: walk.", "At 1345.0s, [0.094, 0.235, 0.533, 0.884]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1344.0s, [0.253, 0.243, 0.657, 0.871]: walk, listen to (a person).", "At 1343.0s, [0.293, 0.225, 0.672, 0.879]: walk, carry/hold (an object).", "At 1342.0s, [0.207, 0.213, 0.487, 0.878]: walk, carry/hold (an object), listen to (a person).", "At 1341.0s, [0.167, 0.202, 0.507, 0.872]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1340.0s, [0.172, 0.242, 0.405, 0.863]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1339.0s, [0.158, 0.247, 0.494, 0.865]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1338.0s, [0.101, 0.239, 0.344, 0.874]: stand, carry/hold (an object), watch (a person).", "At 1337.0s, [0.263, 0.24, 0.543, 0.791]: carry/hold (an object), crouch/kneel, kiss (a person), watch (a person)."]}, {"from": "human", "timestamps": 1361.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.286, 0.2, 0.454, 0.859]: carry/hold (an object), stand, watch (a person)."}]}, {"video": "LrDT25hmApw.mkv", "clip": [1361, 1414], "conversations": [{"from": "human", "timestamps": 1391.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.195, 0.372, 0.31, 0.782]: carry/hold (an object), walk; [0.446, 
0.381, 0.556, 0.734]: walk; [0.507, 0.394, 0.66, 0.745]: watch (a person), walk; [0.752, 0.357, 0.853, 0.699]: carry/hold (an object), stand, watch (a person); [0.875, 0.344, 0.943, 0.668]: watch (a person), stand."}, {"from": "human", "timestamps": 1414.0, "value": "What actions did the person at bbox [0.465, 0.343, 0.612, 0.719] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.465, 0.343, 0.612, 0.719]: sit, read, touch (an object).", "At 1413.0s, [0.472, 0.334, 0.598, 0.714]: sit, read, touch (an object).", "At 1412.0s, [0.473, 0.332, 0.593, 0.724]: sit, carry/hold (an object).", "At 1411.0s, [0.472, 0.336, 0.603, 0.704]: sit, carry/hold (an object), touch (an object).", "At 1410.0s, [0.488, 0.336, 0.61, 0.725]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1408.0s to 1409.0s, [0.471, 0.348, 0.607, 0.706]->[0.47, 0.338, 0.606, 0.718]: sit, carry/hold (an object), touch (an object).", "At 1407.0s, [0.484, 0.346, 0.604, 0.704]: sit, carry/hold (an object), read, touch (an object).", "From 1405.0s to 1406.0s, [0.485, 0.357, 0.601, 0.682]->[0.485, 0.359, 0.598, 0.691]: sit, touch (an object)."]}]}, {"video": "LrDT25hmApw.mkv", "clip": [1424, 1472], "conversations": [{"from": "human", "timestamps": 1454.0, "value": "Describe the actions of the person at bbox [0.458, 0.175, 0.791, 0.869] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.458, 0.175, 0.791, 0.869]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1452.0s to 1453.0s, [0.44, 0.173, 0.788, 0.876]->[0.451, 0.171, 0.79, 0.866]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1451.0s, [0.442, 0.177, 0.788, 0.874]: stand, carry/hold (an object).", "At 1450.0s, [0.488, 0.179, 0.793, 0.882]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1448.0s to 1449.0s, [0.486, 0.145, 0.779, 0.872]->[0.51, 0.16, 0.806, 0.872]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1447.0s, [0.51, 0.15, 0.802, 0.874]: stand, watch (a person).", "From 1442.0s to 1446.0s, [0.511, 0.163, 0.807, 0.881]->[0.514, 0.152, 0.809, 0.875]->[0.516, 0.16, 0.808, 0.88]->[0.511, 0.15, 0.81, 0.882]->[0.507, 0.165, 0.805, 0.872]: stand, listen to (a person), watch (a person).", "At 1441.0s, [0.511, 0.148, 0.809, 0.875]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1432.0s to 1440.0s, [0.534, 0.148, 0.793, 0.88]->[0.533, 0.151, 0.81, 0.88]->[0.526, 0.148, 0.807, 0.886]->[0.528, 0.144, 0.797, 0.871]->[0.5, 0.159, 0.803, 0.873]->[0.505, 0.153, 0.806, 0.888]->[0.517, 0.15, 0.806, 0.883]->[0.516, 0.14, 0.81, 0.878]->[0.512, 0.147, 0.81, 0.874]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1427.0s to 1431.0s, [0.538, 0.179, 0.794, 0.885]->[0.525, 0.164, 0.802, 0.864]->[0.54, 0.169, 0.798, 0.879]->[0.533, 0.158, 0.796, 0.875]->[0.531, 0.143, 0.802, 0.885]: stand, listen to (a person), watch (a person).", "From 1422.0s to 1426.0s, [0.607, 0.178, 0.877, 0.861]->[0.562, 0.18, 0.827, 0.861]->[0.536, 0.186, 0.791, 0.864]->[0.532, 0.182, 0.791, 0.873]->[0.535, 0.182, 0.791, 0.872]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1420.0s to 1421.0s, [0.646, 0.164, 0.899, 0.885]->[0.67, 0.18, 0.915, 0.876]: stand, listen to (a person), watch (a person).", "At 1419.0s, [0.624, 0.149, 0.969, 0.894]: walk, listen to (a person), watch 
(a person)."]}, {"from": "human", "timestamps": 1472.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.36, 0.28, 0.46, 0.86]: stand, close (e.g., a door, a box); [0.421, 0.297, 0.599, 0.873]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.604, 0.185, 0.907, 0.88]: listen to (a person), stand, watch (a person)."}]}, {"video": "LrDT25hmApw.mkv", "clip": [1473, 1514], "conversations": [{"from": "human", "timestamps": 1503.0, "value": "Describe the actions of the person at bbox [0.437, 0.267, 0.54, 0.712] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.437, 0.267, 0.54, 0.712]: stand, watch (a person).", "From 1494.0s to 1502.0s, [0.224, 0.294, 0.448, 0.884]->[0.435, 0.258, 0.6, 0.808]->[0.432, 0.265, 0.602, 0.796]->[0.432, 0.261, 0.623, 0.8]->[0.436, 0.26, 0.62, 0.803]->[0.439, 0.261, 0.544, 0.811]->[0.436, 0.265, 0.539, 0.792]->[0.434, 0.26, 0.572, 0.794]->[0.432, 0.262, 0.59, 0.812]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1514.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.208, 0.468, 0.478, 0.853]: listen to (a person), sit, touch (an object), write; [0.428, 0.175, 0.68, 0.811]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.461, 0.248, 0.531, 0.356]: watch (a person), stand; [0.646, 0.259, 0.835, 0.846]: watch (a person), stand."}]}, {"video": "LrDT25hmApw.mkv", "clip": [1631, 1677], "conversations": [{"from": "human", "timestamps": 1661.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.215, 0.476, 0.278, 0.788]: stand."}, {"from": "human", "timestamps": 1677.0, "value": "What actions did the person at bbox [0.068, 0.204, 0.519, 0.887] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.068, 0.204, 0.519, 0.887]: stand.", "From 1675.0s to 1676.0s, [0.111, 0.165, 0.635, 0.886]->[0.075, 0.165, 0.567, 0.876]: stand."]}]}, {"video": "LrDT25hmApw.mkv", "clip": [1734, 1765], "conversations": [{"from": "human", "timestamps": 1764.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.009, 0.138, 0.325, 0.906]: listen to (a person), stand, watch (a person); [0.219, 0.152, 0.517, 0.87]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.691, 0.206, 0.975, 0.852]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1765.0, "value": "What actions did the person at bbox [0.677, 0.203, 0.975, 0.872] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.677, 0.203, 0.975, 0.872]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1764.0s, [0.691, 0.206, 0.975, 0.852]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1763.0s, [0.662, 0.195, 0.974, 0.877]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1762.0s, [0.666, 0.217, 0.967, 0.883]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1761.0s, [0.621, 0.224, 0.913, 0.869]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1759.0s 
to 1760.0s, [0.769, 0.244, 0.98, 0.852]->[0.566, 0.226, 0.865, 0.864]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "M6cgEs9JgDo.mkv", "clip": [888, 950], "conversations": [{"from": "human", "timestamps": 918.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.159, 0.025, 0.89, 0.982]?"}, {"from": "gpt", "value": ["At Now, [0.159, 0.025, 0.89, 0.982]: stand, talk to (e.g., self, a person, a group).", "At 917.0s, [0.058, 0.069, 0.969, 0.988]: stand, talk to (e.g., self, a person, a group).", "At 916.0s, [0.14, 0.077, 0.973, 0.982]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 915.0s, [0.086, 0.075, 0.901, 0.983]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 950.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.233, 0.061, 0.914, 0.989]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "M6cgEs9JgDo.mkv", "clip": [907, 991], "conversations": [{"from": "human", "timestamps": 937.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.267, 0.019, 0.987, 0.986]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 991.0, "value": "What actions did the person at bbox [0.0, 0.025, 0.871, 0.98] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.025, 0.871, 0.98]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 990.0s, [0.025, 0.015, 0.804, 0.968]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "M6cgEs9JgDo.mkv", "clip": [970, 1007], "conversations": [{"from": "human", "timestamps": 1000.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.142, 0.079, 0.746, 1.0]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1007.0, "value": "Describe the actions of the person at bbox [0.0, 0.037, 1.0, 0.97] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.037, 1.0, 0.97]: crouch/kneel, talk to (e.g., self, a person, a group), watch (a person).", "From 1005.0s to 1006.0s, [0.006, 0.021, 0.877, 0.95]->[0.0, 0.023, 1.0, 0.995]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1004.0s, [0.025, 0.007, 0.981, 0.974]: sit, crouch/kneel, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1003.0s, [0.007, 0.0, 0.772, 0.974]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "M6cgEs9JgDo.mkv", "clip": [1000, 1065], "conversations": [{"from": "human", "timestamps": 1030.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.369, 0.096, 0.904, 1.0]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1065.0, "value": "What actions did the person at bbox [0.387, 0.092, 0.913, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.387, 0.092, 0.913, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1064.0s, [0.388, 0.096, 
0.917, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1062.0s to 1063.0s, [0.389, 0.087, 0.913, 1.0]->[0.38, 0.095, 0.916, 1.0]: sit, talk to (e.g., self, a person, a group).", "At 1061.0s, [0.409, 0.101, 0.909, 1.0]: sit, watch (a person).", "From 1059.0s to 1060.0s, [0.126, 0.051, 0.683, 0.984]->[0.122, 0.072, 0.683, 0.982]: sit, eat, watch (a person).", "At 1058.0s, [0.115, 0.07, 0.695, 0.989]: sit, eat, listen to (a person), watch (a person)."]}]}, {"video": "M6cgEs9JgDo.mkv", "clip": [1075, 1118], "conversations": [{"from": "human", "timestamps": 1105.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.384, 0.085, 0.913, 1.0]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1118.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.456, 0.236, 0.732, 0.872]?"}, {"from": "gpt", "value": ["In the current frame, [0.456, 0.236, 0.732, 0.872]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1117.0s, [0.443, 0.236, 0.726, 0.865]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1116.0s, [0.444, 0.212, 0.725, 0.887]: sit, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1113.0s to 1115.0s, [0.46, 0.227, 0.746, 0.863]->[0.445, 0.229, 0.726, 0.882]->[0.442, 0.221, 0.726, 0.89]: sit, listen to (a person), watch (a person)."]}]}, {"video": "M6cgEs9JgDo.mkv", "clip": [1152, 1197], "conversations": [{"from": "human", "timestamps": 1182.0, "value": "For the person at bbox [0.133, 0.097, 0.76, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.133, 0.097, 0.76, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1181.0s, [0.149, 0.11, 0.774, 0.98]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1197.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.264, 0.202, 0.408, 0.884]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.39, 0.042, 0.538, 0.891]: listen to (a person), watch (a person), walk."}]}, {"video": "M6cgEs9JgDo.mkv", "clip": [1143, 1202], "conversations": [{"from": "human", "timestamps": 1173.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.314, 0.133, 0.955, 0.98]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1202.0, "value": "What actions is the person in bbox [0.353, 0.225, 0.529, 0.903] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.353, 0.225, 0.529, 0.903]: sit, talk to (e.g., self, a person, a group).", "At 1201.0s, [0.362, 0.246, 0.539, 0.914]: sit, talk to (e.g., self, a person, a group).", "From 1194.0s to 1200.0s, [0.253, 0.165, 0.433, 0.85]->[0.258, 0.124, 0.578, 0.863]->[0.253, 0.185, 0.446, 0.85]->[0.264, 0.202, 0.408, 0.884]->[0.282, 0.25, 0.452, 0.913]->[0.28, 0.253, 0.46, 0.916]->[0.356, 0.245, 0.54, 0.909]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1191.0s to 1193.0s, [0.242, 0.152, 0.427, 0.844]->[0.239, 0.153, 0.432, 0.839]->[0.24, 0.146, 0.432, 0.844]: sit, listen to 
(a person), watch (a person)."]}]}, {"video": "M6cgEs9JgDo.mkv", "clip": [1233, 1278], "conversations": [{"from": "human", "timestamps": 1263.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.261, 0.095, 0.839, 0.999]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1278.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.418, 0.133, 0.989, 0.992]?"}, {"from": "gpt", "value": ["In the current frame, [0.418, 0.133, 0.989, 0.992]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1277.0s, [0.428, 0.121, 0.994, 0.997]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "M6cgEs9JgDo.mkv", "clip": [1409, 1443], "conversations": [{"from": "human", "timestamps": 1439.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.078, 0.0, 0.773, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1443.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.067, 0.029, 0.649, 1.0]?"}, {"from": "gpt", "value": ["In the current frame, [0.067, 0.029, 0.649, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1442.0s, [0.141, 0.0, 0.726, 0.999]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "M6cgEs9JgDo.mkv", "clip": [1493, 1541], "conversations": [{"from": "human", "timestamps": 1523.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.1, 0.097, 0.773, 1.0]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1541.0, "value": "Describe the actions of the person at bbox [0.283, 0.132, 0.764, 0.988] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.283, 0.132, 0.764, 0.988]: sit, watch (a person).", "At 1540.0s, [0.329, 0.085, 0.733, 1.0]: sit, listen to (a person), watch (a person)."]}]}, {"video": "M6cgEs9JgDo.mkv", "clip": [1486, 1601], "conversations": [{"from": "human", "timestamps": 1516.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.174, 0.11, 0.765, 0.987]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1601.0, "value": "Describe the actions of the person at bbox [0.099, 0.039, 0.673, 0.998] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.099, 0.039, 0.673, 0.998]: sit, talk to (e.g., self, a person, a group).", "At 1600.0s, [0.1, 0.073, 0.687, 0.983]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "M6cgEs9JgDo.mkv", "clip": [1649, 1696], "conversations": [{"from": "human", "timestamps": 1679.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.093, 0.056, 0.788, 0.978]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1696.0, "value": "What actions is the person in bbox [0.223, 0.032, 0.915, 0.971] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.223, 0.032, 
0.915, 0.971]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1695.0s, [0.223, 0.034, 0.911, 0.997]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "M6cgEs9JgDo.mkv", "clip": [1763, 1794], "conversations": [{"from": "human", "timestamps": 1793.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.403, 0.187, 0.88, 0.979]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1794.0, "value": "For the person at bbox [0.503, 0.222, 0.932, 0.987], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.503, 0.222, 0.932, 0.987]: stand, talk to (e.g., self, a person, a group).", "From 1788.0s to 1793.0s, [0.504, 0.098, 0.977, 0.988]->[0.493, 0.086, 0.98, 0.982]->[0.469, 0.218, 0.891, 0.986]->[0.293, 0.066, 0.845, 0.967]->[0.437, 0.008, 0.905, 0.988]->[0.403, 0.187, 0.88, 0.979]: stand, talk to (e.g., self, a person, a group).", "At 1787.0s, [0.332, 0.182, 0.766, 0.983]: walk, talk to (e.g., self, a person, a group)."]}]}, {"video": "Ma2hgTmveKQ.mkv", "clip": [924, 958], "conversations": [{"from": "human", "timestamps": 954.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.532, 0.078, 0.844, 0.976]?"}, {"from": "gpt", "value": ["At Now, [0.532, 0.078, 0.844, 0.976]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 952.0s to 953.0s, [0.537, 0.081, 0.877, 0.991]->[0.572, 0.088, 0.89, 0.985]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 951.0s, [0.431, 0.097, 0.699, 0.998]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 950.0s, [0.115, 0.143, 0.224, 0.988]: walk, carry/hold (an object)."]}, {"from": "human", "timestamps": 958.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.194, 0.116, 0.578, 0.979]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.56, 0.102, 0.889, 0.994]: carry/hold (an object), stand, watch (a person), listen to (a person)."}]}, {"video": "Ma2hgTmveKQ.mkv", "clip": [873, 996], "conversations": [{"from": "human", "timestamps": 903.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.116, 0.155, 0.502, 0.966]: sit, watch (a person), listen to (a person), carry/hold (an object), talk to (e.g., self, a person, a group); [0.462, 0.151, 0.89, 0.989]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 996.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.32, 0.207, 0.534, 0.916]?"}, {"from": "gpt", "value": ["In the current frame, [0.32, 0.207, 0.534, 0.916]: dance.", "At 995.0s, [0.352, 0.18, 0.508, 0.933]: dance.", "At 994.0s, [0.384, 0.191, 0.561, 0.989]: walk.", "From 992.0s to 993.0s, [0.353, 0.215, 0.581, 0.993]->[0.308, 0.245, 0.541, 1.0]: walk, dance.", "At 991.0s, [0.315, 0.147, 0.586, 0.997]: dance.", "At 990.0s, [0.398, 0.118, 0.694, 0.985]: stand, touch (an object).", "At 989.0s, [0.406, 0.073, 0.708, 0.99]: stand, put down.", "From 986.0s to 988.0s, [0.373, 0.069, 0.593, 0.981]->[0.376, 0.053, 0.65, 0.99]->[0.4, 0.078, 0.678, 0.99]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 985.0s, [0.392, 0.128, 0.573, 0.99]: walk, 
carry/hold (an object), talk to (e.g., self, a person, a group).", "At 984.0s, [0.432, 0.148, 0.591, 0.953]: walk, carry/hold (an object)."]}]}, {"video": "Ma2hgTmveKQ.mkv", "clip": [945, 978], "conversations": [{"from": "human", "timestamps": 975.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.215, 0.086, 0.554, 0.987]?"}, {"from": "gpt", "value": ["In the current frame, [0.215, 0.086, 0.554, 0.987]: stand, listen to (a person), watch (a person).", "From 973.0s to 974.0s, [0.189, 0.094, 0.576, 0.987]->[0.179, 0.097, 0.545, 0.994]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 972.0s, [0.203, 0.091, 0.529, 0.993]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 971.0s, [0.197, 0.092, 0.543, 0.995]: stand, listen to (a person), watch (a person).", "From 969.0s to 970.0s, [0.203, 0.085, 0.557, 1.0]->[0.189, 0.083, 0.531, 0.987]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 967.0s to 968.0s, [0.222, 0.109, 0.544, 0.99]->[0.215, 0.095, 0.51, 0.977]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 957.0s to 966.0s, [0.18, 0.116, 0.464, 0.971]->[0.194, 0.116, 0.578, 0.979]->[0.187, 0.107, 0.527, 0.993]->[0.187, 0.109, 0.539, 1.0]->[0.184, 0.113, 0.549, 0.986]->[0.214, 0.099, 0.554, 0.983]->[0.215, 0.106, 0.552, 0.989]->[0.206, 0.109, 0.573, 0.994]->[0.22, 0.098, 0.544, 0.986]->[0.225, 0.092, 0.545, 0.986]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 956.0s, [0.184, 0.104, 0.467, 0.989]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 955.0s, [0.179, 0.107, 0.468, 0.991]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 953.0s to 954.0s, [0.177, 0.103, 0.478, 0.988]->[0.186, 0.08, 0.475, 0.979]: stand, listen to (a person), watch (a person).", "At 952.0s, [0.184, 0.103, 0.453, 0.975]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 951.0s, [0.093, 0.209, 0.298, 0.978]: walk, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 978.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.207, 0.077, 0.507, 0.992]: listen to (a person), stand, watch (a person); [0.446, 0.093, 0.84, 0.998]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "Ma2hgTmveKQ.mkv", "clip": [1000, 1042], "conversations": [{"from": "human", "timestamps": 1030.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.32, 0.13, 0.568, 0.937]: dance."}, {"from": "human", "timestamps": 1042.0, "value": "What actions is the person in bbox [0.434, 0.112, 0.613, 0.922] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.434, 0.112, 0.613, 0.922]: walk, touch (an object).", "At 1041.0s, [0.417, 0.108, 0.61, 0.928]: stand, touch (an object).", "From 1019.0s to 1040.0s, [0.292, 0.116, 0.534, 0.937]->[0.306, 0.131, 0.535, 0.919]->[0.33, 0.143, 0.488, 0.937]->[0.354, 0.164, 0.527, 0.921]->[0.352, 0.141, 0.507, 0.958]->[0.171, 0.119, 0.622, 0.956]->[0.208, 0.127, 0.628, 0.954]->[0.243, 0.124, 0.641, 0.969]->[0.392, 0.145, 0.54, 0.928]->[0.32, 0.1, 0.553, 0.936]->[0.327, 0.108, 0.544, 0.926]->[0.32, 0.13, 0.568, 0.937]->[0.368, 0.146, 0.587, 
0.943]->[0.372, 0.145, 0.639, 0.955]->[0.464, 0.182, 0.609, 0.913]->[0.469, 0.1, 0.679, 0.967]->[0.253, 0.112, 0.433, 0.929]->[0.44, 0.155, 0.569, 0.897]->[0.4, 0.18, 0.658, 0.925]->[0.422, 0.188, 0.671, 0.917]->[0.366, 0.136, 0.71, 0.941]->[0.419, 0.102, 0.609, 0.914]: dance.", "At 1018.0s, [0.321, 0.141, 0.483, 0.935]: dance, hand clap.", "At 1017.0s, [0.322, 0.135, 0.486, 0.932]: dance, sing to (e.g., self, a person, a group).", "From 1015.0s to 1016.0s, [0.304, 0.123, 0.48, 0.923]->[0.315, 0.125, 0.482, 0.926]: dance.", "From 1012.0s to 1014.0s, [0.305, 0.206, 0.555, 0.967]->[0.363, 0.209, 0.567, 0.968]->[0.369, 0.138, 0.514, 0.999]: walk.", "At 1011.0s, [0.377, 0.129, 0.669, 0.999]: stand, watch (e.g., TV).", "At 1010.0s, [0.369, 0.108, 0.621, 0.991]: stand, touch (an object).", "From 1007.0s to 1009.0s, [0.443, 0.072, 0.797, 0.841]->[0.387, 0.056, 0.728, 0.979]->[0.399, 0.067, 0.737, 0.996]: stand, carry/hold (an object).", "At 1006.0s, [0.451, 0.247, 0.75, 0.815]: bend/bow (at the waist), carry/hold (an object).", "At 1005.0s, [0.336, 0.359, 0.563, 0.918]: walk.", "At 1004.0s, [0.41, 0.124, 0.689, 0.993]: stand.", "At 1003.0s, [0.427, 0.093, 0.691, 0.974]: stand, touch (an object).", "At 1002.0s, [0.351, 0.096, 0.567, 0.984]: walk, hand clap.", "At 1001.0s, [0.345, 0.097, 0.488, 0.994]: walk, listen (e.g., to music), talk to (e.g., self, a person, a group).", "From 995.0s to 1000.0s, [0.352, 0.18, 0.508, 0.933]->[0.32, 0.207, 0.534, 0.916]->[0.317, 0.191, 0.535, 0.932]->[0.259, 0.197, 0.549, 0.929]->[0.307, 0.208, 0.528, 0.933]->[0.309, 0.192, 0.526, 0.938]: dance.", "At 994.0s, [0.384, 0.191, 0.561, 0.989]: walk.", "From 992.0s to 993.0s, [0.353, 0.215, 0.581, 0.993]->[0.308, 0.245, 0.541, 1.0]: walk, dance.", "At 991.0s, [0.315, 0.147, 0.586, 0.997]: dance.", "At 990.0s, [0.398, 0.118, 0.694, 0.985]: stand, touch (an object).", "At 989.0s, [0.406, 0.073, 0.708, 0.99]: stand, put down.", "From 986.0s to 988.0s, [0.373, 0.069, 0.593, 0.981]->[0.376, 0.053, 0.65, 0.99]->[0.4, 0.078, 0.678, 0.99]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 985.0s, [0.392, 0.128, 0.573, 0.99]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 984.0s, [0.432, 0.148, 0.591, 0.953]: walk, carry/hold (an object)."]}]}, {"video": "Ma2hgTmveKQ.mkv", "clip": [1048, 1106], "conversations": [{"from": "human", "timestamps": 1078.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.492, 0.171, 0.755, 0.846]: carry/hold (an object), dance."}, {"from": "human", "timestamps": 1106.0, "value": "What actions did the person at bbox [0.424, 0.205, 0.754, 0.818] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.424, 0.205, 0.754, 0.818]: carry/hold (an object), dance.", "From 1101.0s to 1105.0s, [0.277, 0.165, 0.428, 0.873]->[0.336, 0.126, 0.498, 0.86]->[0.264, 0.204, 0.526, 0.917]->[0.313, 0.117, 0.651, 0.942]->[0.532, 0.16, 0.763, 0.99]: carry/hold (an object), dance.", "From 1099.0s to 1100.0s, [0.488, 0.147, 0.841, 0.87]->[0.292, 0.137, 0.502, 0.857]: dance, touch (an object).", "At 1098.0s, [0.534, 0.206, 0.77, 0.848]: dance, push (an object).", "At 1097.0s, [0.559, 0.186, 0.744, 0.823]: dance, sing to (e.g., self, a person, a group).", "From 1094.0s to 1096.0s, [0.425, 0.167, 0.559, 0.829]->[0.357, 0.161, 0.619, 0.823]->[0.407, 0.178, 0.62, 0.816]: carry/hold (an object), dance.", "At 1093.0s, [0.479, 0.153, 0.716, 0.852]: dance.", "At 1092.0s, [0.53, 
0.182, 0.704, 0.85]: dance, push (an object).", "From 1078.0s to 1091.0s, [0.492, 0.171, 0.755, 0.846]->[0.434, 0.215, 0.609, 0.789]->[0.354, 0.188, 0.548, 0.846]->[0.437, 0.167, 0.622, 0.889]->[0.524, 0.187, 0.662, 0.863]->[0.475, 0.162, 0.722, 0.867]->[0.428, 0.168, 0.639, 0.862]->[0.498, 0.166, 0.698, 0.855]->[0.392, 0.201, 0.625, 0.887]->[0.451, 0.158, 0.593, 0.83]->[0.34, 0.138, 0.693, 0.843]->[0.353, 0.117, 0.708, 0.879]->[0.287, 0.179, 0.508, 0.833]->[0.469, 0.186, 0.631, 0.809]: carry/hold (an object), dance.", "At 1077.0s, [0.318, 0.182, 0.493, 0.848]: carry/hold (an object), dance, sing to (e.g., self, a person, a group).", "At 1076.0s, [0.505, 0.198, 0.662, 0.791]: carry/hold (an object), dance.", "At 1075.0s, [0.485, 0.205, 0.701, 0.838]: carry/hold (an object), dance, touch (an object).", "At 1074.0s, [0.466, 0.202, 0.604, 0.852]: carry/hold (an object), dance.", "From 1072.0s to 1073.0s, [0.456, 0.292, 0.612, 0.872]->[0.464, 0.243, 0.631, 0.874]: carry/hold (an object), crouch/kneel.", "At 1071.0s, [0.407, 0.291, 0.684, 0.891]: carry/hold (an object), crouch/kneel, talk to (e.g., self, a person, a group).", "At 1070.0s, [0.431, 0.294, 0.712, 0.904]: carry/hold (an object), dance, sing to (e.g., self, a person, a group).", "At 1069.0s, [0.511, 0.124, 0.715, 0.88]: carry/hold (an object), dance.", "At 1068.0s, [0.364, 0.123, 0.594, 0.811]: carry/hold (an object), dance, sing to (e.g., self, a person, a group).", "From 1056.0s to 1067.0s, [0.189, 0.092, 0.479, 0.964]->[0.291, 0.128, 0.493, 0.899]->[0.414, 0.097, 0.582, 0.881]->[0.266, 0.111, 0.618, 0.895]->[0.456, 0.154, 0.804, 0.872]->[0.568, 0.138, 0.834, 0.874]->[0.544, 0.072, 0.743, 0.88]->[0.509, 0.104, 0.806, 0.986]->[0.399, 0.127, 0.637, 0.832]->[0.231, 0.205, 0.479, 0.838]->[0.394, 0.165, 0.664, 0.83]->[0.358, 0.11, 0.6, 0.921]: carry/hold (an object), dance.", "At 1055.0s, [0.503, 0.136, 0.771, 0.93]: dance.", "From 1053.0s to 1054.0s, [0.464, 0.105, 0.662, 0.912]->[0.52, 0.126, 0.656, 0.883]: stand.", "From 1050.0s to 1052.0s, [0.241, 0.109, 0.693, 1.0]->[0.286, 0.078, 0.675, 0.989]->[0.274, 0.15, 0.548, 0.982]: walk, carry/hold (an object).", "From 1048.0s to 1049.0s, [0.428, 0.116, 0.859, 0.998]->[0.339, 0.087, 0.84, 0.984]: stand, carry/hold (an object).", "At 1047.0s, [0.509, 0.12, 0.883, 0.996]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1045.0s to 1046.0s, [0.174, 0.195, 0.648, 0.989]->[0.171, 0.099, 0.673, 0.994]: stand, carry/hold (an object).", "From 1043.0s to 1044.0s, [0.478, 0.168, 0.61, 0.896]->[0.34, 0.227, 0.467, 0.85]: walk.", "At 1042.0s, [0.434, 0.112, 0.613, 0.922]: walk, touch (an object).", "At 1041.0s, [0.417, 0.108, 0.61, 0.928]: stand, touch (an object).", "From 1019.0s to 1040.0s, [0.292, 0.116, 0.534, 0.937]->[0.306, 0.131, 0.535, 0.919]->[0.33, 0.143, 0.488, 0.937]->[0.354, 0.164, 0.527, 0.921]->[0.352, 0.141, 0.507, 0.958]->[0.171, 0.119, 0.622, 0.956]->[0.208, 0.127, 0.628, 0.954]->[0.243, 0.124, 0.641, 0.969]->[0.392, 0.145, 0.54, 0.928]->[0.32, 0.1, 0.553, 0.936]->[0.327, 0.108, 0.544, 0.926]->[0.32, 0.13, 0.568, 0.937]->[0.368, 0.146, 0.587, 0.943]->[0.372, 0.145, 0.639, 0.955]->[0.464, 0.182, 0.609, 0.913]->[0.469, 0.1, 0.679, 0.967]->[0.253, 0.112, 0.433, 0.929]->[0.44, 0.155, 0.569, 0.897]->[0.4, 0.18, 0.658, 0.925]->[0.422, 0.188, 0.671, 0.917]->[0.366, 0.136, 0.71, 0.941]->[0.419, 0.102, 0.609, 0.914]: dance.", "At 1018.0s, [0.321, 0.141, 0.483, 0.935]: dance, hand clap.", "At 1017.0s, [0.322, 0.135, 0.486, 0.932]: dance, sing to (e.g., self, a 
person, a group).", "From 1015.0s to 1016.0s, [0.304, 0.123, 0.48, 0.923]->[0.315, 0.125, 0.482, 0.926]: dance.", "From 1012.0s to 1014.0s, [0.305, 0.206, 0.555, 0.967]->[0.363, 0.209, 0.567, 0.968]->[0.369, 0.138, 0.514, 0.999]: walk.", "At 1011.0s, [0.377, 0.129, 0.669, 0.999]: stand, watch (e.g., TV).", "At 1010.0s, [0.369, 0.108, 0.621, 0.991]: stand, touch (an object).", "From 1007.0s to 1009.0s, [0.443, 0.072, 0.797, 0.841]->[0.387, 0.056, 0.728, 0.979]->[0.399, 0.067, 0.737, 0.996]: stand, carry/hold (an object).", "At 1006.0s, [0.451, 0.247, 0.75, 0.815]: bend/bow (at the waist), carry/hold (an object).", "At 1005.0s, [0.336, 0.359, 0.563, 0.918]: walk.", "At 1004.0s, [0.41, 0.124, 0.689, 0.993]: stand.", "At 1003.0s, [0.427, 0.093, 0.691, 0.974]: stand, touch (an object).", "At 1002.0s, [0.351, 0.096, 0.567, 0.984]: walk, hand clap.", "At 1001.0s, [0.345, 0.097, 0.488, 0.994]: walk, listen (e.g., to music), talk to (e.g., self, a person, a group).", "From 995.0s to 1000.0s, [0.352, 0.18, 0.508, 0.933]->[0.32, 0.207, 0.534, 0.916]->[0.317, 0.191, 0.535, 0.932]->[0.259, 0.197, 0.549, 0.929]->[0.307, 0.208, 0.528, 0.933]->[0.309, 0.192, 0.526, 0.938]: dance.", "At 994.0s, [0.384, 0.191, 0.561, 0.989]: walk.", "From 992.0s to 993.0s, [0.353, 0.215, 0.581, 0.993]->[0.308, 0.245, 0.541, 1.0]: walk, dance.", "At 991.0s, [0.315, 0.147, 0.586, 0.997]: dance.", "At 990.0s, [0.398, 0.118, 0.694, 0.985]: stand, touch (an object).", "At 989.0s, [0.406, 0.073, 0.708, 0.99]: stand, put down.", "From 986.0s to 988.0s, [0.373, 0.069, 0.593, 0.981]->[0.376, 0.053, 0.65, 0.99]->[0.4, 0.078, 0.678, 0.99]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 985.0s, [0.392, 0.128, 0.573, 0.99]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 984.0s, [0.432, 0.148, 0.591, 0.953]: walk, carry/hold (an object)."]}]}, {"video": "Ma2hgTmveKQ.mkv", "clip": [1059, 1123], "conversations": [{"from": "human", "timestamps": 1089.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.353, 0.117, 0.708, 0.879]?"}, {"from": "gpt", "value": ["In the current frame, [0.353, 0.117, 0.708, 0.879]: carry/hold (an object), dance.", "From 1078.0s to 1088.0s, [0.492, 0.171, 0.755, 0.846]->[0.434, 0.215, 0.609, 0.789]->[0.354, 0.188, 0.548, 0.846]->[0.437, 0.167, 0.622, 0.889]->[0.524, 0.187, 0.662, 0.863]->[0.475, 0.162, 0.722, 0.867]->[0.428, 0.168, 0.639, 0.862]->[0.498, 0.166, 0.698, 0.855]->[0.392, 0.201, 0.625, 0.887]->[0.451, 0.158, 0.593, 0.83]->[0.34, 0.138, 0.693, 0.843]: carry/hold (an object), dance.", "At 1077.0s, [0.318, 0.182, 0.493, 0.848]: carry/hold (an object), dance, sing to (e.g., self, a person, a group).", "At 1076.0s, [0.505, 0.198, 0.662, 0.791]: carry/hold (an object), dance.", "At 1075.0s, [0.485, 0.205, 0.701, 0.838]: carry/hold (an object), dance, touch (an object).", "At 1074.0s, [0.466, 0.202, 0.604, 0.852]: carry/hold (an object), dance.", "From 1072.0s to 1073.0s, [0.456, 0.292, 0.612, 0.872]->[0.464, 0.243, 0.631, 0.874]: carry/hold (an object), crouch/kneel.", "At 1071.0s, [0.407, 0.291, 0.684, 0.891]: carry/hold (an object), crouch/kneel, talk to (e.g., self, a person, a group).", "At 1070.0s, [0.431, 0.294, 0.712, 0.904]: carry/hold (an object), dance, sing to (e.g., self, a person, a group).", "At 1069.0s, [0.511, 0.124, 0.715, 0.88]: carry/hold (an object), dance.", "At 1068.0s, [0.364, 0.123, 0.594, 0.811]: carry/hold (an object), dance, sing to (e.g., self, a person, a group).", "From 1056.0s to 
1067.0s, [0.189, 0.092, 0.479, 0.964]->[0.291, 0.128, 0.493, 0.899]->[0.414, 0.097, 0.582, 0.881]->[0.266, 0.111, 0.618, 0.895]->[0.456, 0.154, 0.804, 0.872]->[0.568, 0.138, 0.834, 0.874]->[0.544, 0.072, 0.743, 0.88]->[0.509, 0.104, 0.806, 0.986]->[0.399, 0.127, 0.637, 0.832]->[0.231, 0.205, 0.479, 0.838]->[0.394, 0.165, 0.664, 0.83]->[0.358, 0.11, 0.6, 0.921]: carry/hold (an object), dance.", "At 1055.0s, [0.503, 0.136, 0.771, 0.93]: dance.", "From 1053.0s to 1054.0s, [0.464, 0.105, 0.662, 0.912]->[0.52, 0.126, 0.656, 0.883]: stand.", "From 1050.0s to 1052.0s, [0.241, 0.109, 0.693, 1.0]->[0.286, 0.078, 0.675, 0.989]->[0.274, 0.15, 0.548, 0.982]: walk, carry/hold (an object).", "From 1048.0s to 1049.0s, [0.428, 0.116, 0.859, 0.998]->[0.339, 0.087, 0.84, 0.984]: stand, carry/hold (an object).", "At 1047.0s, [0.509, 0.12, 0.883, 0.996]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1045.0s to 1046.0s, [0.174, 0.195, 0.648, 0.989]->[0.171, 0.099, 0.673, 0.994]: stand, carry/hold (an object).", "From 1043.0s to 1044.0s, [0.478, 0.168, 0.61, 0.896]->[0.34, 0.227, 0.467, 0.85]: walk.", "At 1042.0s, [0.434, 0.112, 0.613, 0.922]: walk, touch (an object).", "At 1041.0s, [0.417, 0.108, 0.61, 0.928]: stand, touch (an object).", "From 1019.0s to 1040.0s, [0.292, 0.116, 0.534, 0.937]->[0.306, 0.131, 0.535, 0.919]->[0.33, 0.143, 0.488, 0.937]->[0.354, 0.164, 0.527, 0.921]->[0.352, 0.141, 0.507, 0.958]->[0.171, 0.119, 0.622, 0.956]->[0.208, 0.127, 0.628, 0.954]->[0.243, 0.124, 0.641, 0.969]->[0.392, 0.145, 0.54, 0.928]->[0.32, 0.1, 0.553, 0.936]->[0.327, 0.108, 0.544, 0.926]->[0.32, 0.13, 0.568, 0.937]->[0.368, 0.146, 0.587, 0.943]->[0.372, 0.145, 0.639, 0.955]->[0.464, 0.182, 0.609, 0.913]->[0.469, 0.1, 0.679, 0.967]->[0.253, 0.112, 0.433, 0.929]->[0.44, 0.155, 0.569, 0.897]->[0.4, 0.18, 0.658, 0.925]->[0.422, 0.188, 0.671, 0.917]->[0.366, 0.136, 0.71, 0.941]->[0.419, 0.102, 0.609, 0.914]: dance.", "At 1018.0s, [0.321, 0.141, 0.483, 0.935]: dance, hand clap.", "At 1017.0s, [0.322, 0.135, 0.486, 0.932]: dance, sing to (e.g., self, a person, a group).", "From 1015.0s to 1016.0s, [0.304, 0.123, 0.48, 0.923]->[0.315, 0.125, 0.482, 0.926]: dance.", "From 1012.0s to 1014.0s, [0.305, 0.206, 0.555, 0.967]->[0.363, 0.209, 0.567, 0.968]->[0.369, 0.138, 0.514, 0.999]: walk.", "At 1011.0s, [0.377, 0.129, 0.669, 0.999]: stand, watch (e.g., TV).", "At 1010.0s, [0.369, 0.108, 0.621, 0.991]: stand, touch (an object).", "From 1007.0s to 1009.0s, [0.443, 0.072, 0.797, 0.841]->[0.387, 0.056, 0.728, 0.979]->[0.399, 0.067, 0.737, 0.996]: stand, carry/hold (an object).", "At 1006.0s, [0.451, 0.247, 0.75, 0.815]: bend/bow (at the waist), carry/hold (an object).", "At 1005.0s, [0.336, 0.359, 0.563, 0.918]: walk.", "At 1004.0s, [0.41, 0.124, 0.689, 0.993]: stand.", "At 1003.0s, [0.427, 0.093, 0.691, 0.974]: stand, touch (an object).", "At 1002.0s, [0.351, 0.096, 0.567, 0.984]: walk, hand clap.", "At 1001.0s, [0.345, 0.097, 0.488, 0.994]: walk, listen (e.g., to music), talk to (e.g., self, a person, a group).", "From 995.0s to 1000.0s, [0.352, 0.18, 0.508, 0.933]->[0.32, 0.207, 0.534, 0.916]->[0.317, 0.191, 0.535, 0.932]->[0.259, 0.197, 0.549, 0.929]->[0.307, 0.208, 0.528, 0.933]->[0.309, 0.192, 0.526, 0.938]: dance.", "At 994.0s, [0.384, 0.191, 0.561, 0.989]: walk.", "From 992.0s to 993.0s, [0.353, 0.215, 0.581, 0.993]->[0.308, 0.245, 0.541, 1.0]: walk, dance.", "At 991.0s, [0.315, 0.147, 0.586, 0.997]: dance.", "At 990.0s, [0.398, 0.118, 0.694, 0.985]: stand, touch (an object).", "At 989.0s, 
[0.406, 0.073, 0.708, 0.99]: stand, put down.", "From 986.0s to 988.0s, [0.373, 0.069, 0.593, 0.981]->[0.376, 0.053, 0.65, 0.99]->[0.4, 0.078, 0.678, 0.99]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 985.0s, [0.392, 0.128, 0.573, 0.99]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 984.0s, [0.432, 0.148, 0.591, 0.953]: walk, carry/hold (an object)."]}, {"from": "human", "timestamps": 1123.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.456, 0.095, 0.747, 0.822]: dance."}]}, {"video": "Ma2hgTmveKQ.mkv", "clip": [1115, 1186], "conversations": [{"from": "human", "timestamps": 1145.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.431, 0.181, 0.607, 0.95]: carry/hold (an object), dance."}, {"from": "human", "timestamps": 1186.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.48, 0.468, 0.615, 0.952]?"}, {"from": "gpt", "value": ["In the current frame, [0.48, 0.468, 0.615, 0.952]: dance.", "At 1185.0s, [0.402, 0.369, 0.597, 0.924]: carry/hold (an object), dance.", "At 1184.0s, [0.397, 0.28, 0.627, 0.955]: carry/hold (an object), dance, throw.", "From 1180.0s to 1183.0s, [0.359, 0.178, 0.658, 0.978]->[0.352, 0.118, 0.681, 0.947]->[0.494, 0.174, 0.793, 0.965]->[0.363, 0.178, 0.682, 0.943]: carry/hold (an object), dance.", "At 1179.0s, [0.35, 0.189, 0.581, 0.932]: walk.", "At 1178.0s, [0.619, 0.146, 0.762, 0.989]: kick (an object), dance, talk to (e.g., self, a person, a group).", "At 1177.0s, [0.597, 0.175, 0.756, 0.93]: dance.", "From 1175.0s to 1176.0s, [0.41, 0.136, 0.614, 1.0]->[0.51, 0.216, 0.758, 0.984]: dance, sing to (e.g., self, a person, a group).", "From 1173.0s to 1174.0s, [0.362, 0.153, 0.643, 0.923]->[0.439, 0.119, 0.635, 0.975]: dance.", "At 1172.0s, [0.442, 0.214, 0.852, 0.654]: carry/hold (an object), jump/leap.", "At 1171.0s, [0.38, 0.228, 0.723, 0.943]: jump/leap.", "At 1170.0s, [0.244, 0.217, 0.602, 0.936]: carry/hold (an object), dance.", "At 1169.0s, [0.433, 0.135, 0.863, 0.977]: dance.", "From 1167.0s to 1168.0s, [0.353, 0.238, 0.669, 0.968]->[0.347, 0.212, 0.577, 0.972]: carry/hold (an object), dance.", "At 1166.0s, [0.401, 0.114, 0.559, 0.969]: dance.", "At 1165.0s, [0.366, 0.138, 0.583, 0.9]: carry/hold (an object).", "At 1164.0s, [0.349, 0.133, 0.599, 0.816]: run/jog, carry/hold (an object).", "At 1163.0s, [0.366, 0.132, 0.533, 0.823]: carry/hold (an object), touch (an object).", "At 1162.0s, [0.335, 0.136, 0.642, 0.843]: carry/hold (an object).", "From 1156.0s to 1161.0s, [0.439, 0.172, 0.746, 0.981]->[0.397, 0.207, 0.675, 0.947]->[0.397, 0.024, 0.634, 0.894]->[0.459, 0.091, 0.648, 0.75]->[0.419, 0.115, 0.643, 0.987]->[0.344, 0.138, 0.649, 0.839]: carry/hold (an object), dance.", "At 1155.0s, [0.424, 0.207, 0.584, 0.757]: carry/hold (an object), dance, touch (an object).", "From 1153.0s to 1154.0s, [0.375, 0.198, 0.564, 0.885]->[0.437, 0.197, 0.604, 0.94]: carry/hold (an object), dance.", "At 1152.0s, [0.313, 0.128, 0.568, 0.936]: carry/hold (an object), dance, touch (an object).", "At 1151.0s, [0.409, 0.169, 0.55, 0.911]: carry/hold (an object), dance.", "From 1149.0s to 1150.0s, [0.51, 0.134, 0.656, 0.95]->[0.408, 0.173, 0.592, 0.93]: dance.", "At 1148.0s, [0.515, 0.17, 0.674, 0.945]: carry/hold (an object), dance, pull (an object).", "From 1145.0s to 1147.0s, [0.431, 0.181, 0.607, 
0.95]->[0.456, 0.008, 0.674, 0.945]->[0.45, 0.113, 0.62, 0.922]: carry/hold (an object), dance.", "From 1143.0s to 1144.0s, [0.488, 0.101, 0.656, 0.937]->[0.371, 0.127, 0.569, 0.981]: carry/hold (an object), dance, pull (an object).", "At 1142.0s, [0.424, 0.109, 0.617, 0.932]: dance, pull (an object).", "From 1140.0s to 1141.0s, [0.404, 0.161, 0.683, 0.963]->[0.401, 0.131, 0.649, 0.941]: carry/hold (an object), dance, pull (an object).", "At 1139.0s, [0.442, 0.158, 0.687, 0.935]: dance, pull (an object).", "At 1138.0s, [0.489, 0.149, 0.697, 0.951]: bend/bow (at the waist), carry/hold (an object), dance, pull (an object).", "At 1137.0s, [0.475, 0.127, 0.617, 0.944]: dance, pull (an object).", "At 1136.0s, [0.447, 0.102, 0.654, 0.938]: carry/hold (an object), dance, pull (an object).", "From 1134.0s to 1135.0s, [0.305, 0.084, 0.521, 0.95]->[0.39, 0.107, 0.576, 0.943]: carry/hold (an object), dance.", "From 1132.0s to 1133.0s, [0.437, 0.114, 0.713, 0.961]->[0.39, 0.122, 0.603, 0.949]: dance, pull (an object).", "At 1131.0s, [0.38, 0.043, 0.59, 0.914]: dance.", "From 1129.0s to 1130.0s, [0.476, 0.07, 0.671, 0.961]->[0.382, 0.08, 0.553, 0.967]: dance, pull (an object).", "From 1123.0s to 1128.0s, [0.456, 0.095, 0.747, 0.822]->[0.495, 0.273, 0.707, 0.843]->[0.469, 0.108, 0.616, 0.84]->[0.444, 0.186, 0.601, 0.836]->[0.455, 0.249, 0.6, 0.843]->[0.495, 0.316, 0.622, 0.82]: dance.", "From 1120.0s to 1122.0s, [0.391, 0.198, 0.584, 0.881]->[0.476, 0.103, 0.711, 0.939]->[0.427, 0.181, 0.752, 0.875]: carry/hold (an object), dance.", "At 1119.0s, [0.48, 0.148, 0.66, 0.868]: carry/hold (an object), lift/pick up, dance.", "At 1118.0s, [0.426, 0.127, 0.629, 0.86]: lift/pick up, dance.", "From 1116.0s to 1117.0s, [0.463, 0.099, 0.7, 0.975]->[0.456, 0.197, 0.765, 0.818]: dance.", "From 1114.0s to 1115.0s, [0.322, 0.123, 0.6, 1.0]->[0.334, 0.096, 0.611, 0.986]: stand."]}]}, {"video": "Ma2hgTmveKQ.mkv", "clip": [1134, 1233], "conversations": [{"from": "human", "timestamps": 1164.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.349, 0.133, 0.599, 0.816]: carry/hold (an object), run/jog."}, {"from": "human", "timestamps": 1233.0, "value": "Describe the actions of the person at bbox [0.449, 0.143, 0.62, 0.984] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.449, 0.143, 0.62, 0.984]: stand, carry/hold (an object).", "From 1231.0s to 1232.0s, [0.379, 0.223, 0.696, 0.953]->[0.452, 0.192, 0.72, 0.965]: stand, carry/hold (an object).", "At 1230.0s, [0.386, 0.239, 0.692, 0.926]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1218.0s to 1229.0s, [0.391, 0.173, 0.637, 0.944]->[0.278, 0.262, 0.658, 0.982]->[0.444, 0.379, 0.568, 0.861]->[0.466, 0.266, 0.629, 0.872]->[0.416, 0.171, 0.64, 0.9]->[0.424, 0.207, 0.62, 0.849]->[0.425, 0.203, 0.645, 0.895]->[0.489, 0.091, 0.657, 0.941]->[0.47, 0.277, 0.65, 0.875]->[0.472, 0.239, 0.715, 0.838]->[0.454, 0.224, 0.622, 0.866]->[0.402, 0.217, 0.698, 0.899]: carry/hold (an object), dance."]}]}, {"video": "Ma2hgTmveKQ.mkv", "clip": [1209, 1243], "conversations": [{"from": "human", "timestamps": 1239.0, "value": "For the person at bbox [0.458, 0.257, 0.681, 0.812], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.458, 0.257, 0.681, 0.812]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1238.0s, [0.423, 0.318, 
0.66, 0.897]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1237.0s, [0.374, 0.33, 0.604, 0.974]: walk, carry/hold (an object), watch (a person)."]}, {"from": "human", "timestamps": 1243.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.171, 0.2, 0.281, 0.592]: walk; [0.181, 0.195, 0.435, 0.932]: listen to (a person), stand, touch (an object), watch (a person); [0.426, 0.38, 0.675, 0.928]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.795, 0.507, 0.877, 0.956]: watch (a person), sit."}]}, {"video": "Ma2hgTmveKQ.mkv", "clip": [1273, 1334], "conversations": [{"from": "human", "timestamps": 1303.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.195, 0.817, 0.318, 0.997]: watch (a person), sit; [0.367, 0.424, 0.454, 0.698]: dance; [0.499, 0.65, 0.592, 0.923]: watch (a person), stand, hand clap; [0.58, 0.574, 0.688, 0.951]: stand, hand clap; [0.689, 0.569, 0.786, 0.785]: watch (a person), sit, hand clap; [0.766, 0.641, 0.881, 0.97]: watch (a person), hand clap."}, {"from": "human", "timestamps": 1334.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.527, 0.183, 0.858, 0.976]?"}, {"from": "gpt", "value": ["At Now, [0.527, 0.183, 0.858, 0.976]: stand, touch (an object), listen to (a person), watch (a person).", "At 1333.0s, [0.581, 0.209, 0.878, 0.973]: stand, touch (an object), listen to (a person).", "From 1331.0s to 1332.0s, [0.599, 0.173, 0.856, 0.977]->[0.581, 0.191, 0.858, 0.982]: stand, touch (an object), listen to (a person), watch (a person)."]}]}, {"video": "Ma2hgTmveKQ.mkv", "clip": [1305, 1346], "conversations": [{"from": "human", "timestamps": 1335.0, "value": "What actions is the person in bbox [0.191, 0.135, 0.42, 0.981] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.191, 0.135, 0.42, 0.981]: stand, listen to (a person), watch (a person).", "At 1334.0s, [0.198, 0.133, 0.438, 0.983]: stand, listen to (a person), watch (a person).", "At 1333.0s, [0.208, 0.134, 0.438, 0.977]: stand, listen to (a person).", "From 1331.0s to 1332.0s, [0.235, 0.139, 0.441, 0.978]->[0.201, 0.152, 0.427, 0.979]: stand, listen to (a person), watch (a person).", "At 1330.0s, [0.199, 0.104, 0.389, 0.993]: stand, listen to (a person).", "At 1329.0s, [0.151, 0.145, 0.336, 0.983]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1346.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.121, 0.167, 0.324, 0.979]: watch (a person), stand; [0.24, 0.292, 0.586, 0.986]: watch (a person), dance; [0.478, 0.228, 0.635, 0.742]: watch (a person), stand; [0.482, 0.587, 0.711, 0.983]: listen to (a person), sit, watch (a person); [0.57, 0.128, 0.882, 0.976]: listen to (a person), dance, watch (a person); [0.577, 0.319, 0.626, 0.431]: watch (a person), stand."}]}, {"video": "Ma2hgTmveKQ.mkv", "clip": [1447, 1497], "conversations": [{"from": "human", "timestamps": 1477.0, "value": "Describe the actions of the person at bbox [0.258, 0.191, 0.706, 0.977] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.258, 0.191, 0.706, 0.977]: dance.", "At 1476.0s, [0.402, 0.189, 0.738, 0.994]: dance.", "At 1475.0s, [0.489, 0.139, 0.717, 
0.996]: dance, listen to (a person), watch (a person).", "At 1474.0s, [0.456, 0.136, 0.712, 0.979]: stand, talk to (e.g., self, a person, a group).", "At 1473.0s, [0.471, 0.146, 0.72, 0.999]: stand, touch (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1472.0s, [0.467, 0.162, 0.791, 0.98]: dance, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1471.0s, [0.417, 0.32, 0.661, 0.969]: dance, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1497.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.134, 0.522, 0.18, 0.801]: watch (a person), sit; [0.193, 0.358, 0.284, 0.62]: watch (a person), stand; [0.284, 0.355, 0.364, 0.605]: carry/hold (an object), stand, watch (a person); [0.373, 0.329, 0.468, 0.676]: watch (a person), stand; [0.64, 0.335, 0.718, 0.646]: watch (a person), stand; [0.656, 0.474, 0.764, 0.754]: watch (a person), sit; [0.771, 0.458, 0.872, 0.748]: watch (a person), sit."}]}, {"video": "Ma2hgTmveKQ.mkv", "clip": [1490, 1527], "conversations": [{"from": "human", "timestamps": 1520.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.134, 0.365, 0.195, 0.609]: watch (a person), stand; [0.25, 0.328, 0.34, 0.679]: watch (a person), stand; [0.332, 0.453, 0.433, 0.883]: dance; [0.366, 0.456, 0.517, 0.991]: watch (a person), dance; [0.425, 0.268, 0.498, 0.505]: watch (a person), stand; [0.487, 0.354, 0.54, 0.621]: watch (a person), sit; [0.638, 0.197, 0.706, 0.449]: watch (a person), stand; [0.699, 0.165, 0.79, 0.51]: watch (a person), stand."}, {"from": "human", "timestamps": 1527.0, "value": "What actions did the person at bbox [0.598, 0.429, 0.653, 0.558] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.598, 0.429, 0.653, 0.558]: stand, watch (a person).", "At 1526.0s, [0.626, 0.375, 0.687, 0.495]: stand, listen to (a person), watch (a person)."]}]}, {"video": "Ma2hgTmveKQ.mkv", "clip": [1596, 1655], "conversations": [{"from": "human", "timestamps": 1626.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.178, 0.152, 0.383, 0.986]: listen to (a person), stand, watch (a person); [0.618, 0.182, 0.833, 0.984]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1655.0, "value": "What actions did the person at bbox [0.397, 0.124, 0.64, 0.991] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.397, 0.124, 0.64, 0.991]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1651.0s to 1654.0s, [0.429, 0.125, 0.71, 0.989]->[0.441, 0.111, 0.696, 0.97]->[0.439, 0.121, 0.699, 0.968]->[0.433, 0.127, 0.69, 0.991]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1650.0s, [0.43, 0.111, 0.685, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1649.0s, [0.391, 0.107, 0.644, 0.981]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1647.0s to 1648.0s, [0.406, 0.091, 0.686, 0.965]->[0.395, 0.127, 0.674, 0.99]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1646.0s, [0.4, 0.094, 0.662, 0.991]: stand, carry/hold (an object), talk to 
(e.g., self, a person, a group).", "From 1644.0s to 1645.0s, [0.405, 0.092, 0.658, 0.985]->[0.376, 0.08, 0.628, 0.991]: stand, carry/hold (an object), hand shake, listen to (a person), watch (a person).", "At 1643.0s, [0.295, 0.088, 0.633, 0.967]: stand, carry/hold (an object), hand shake, talk to (e.g., self, a person, a group), watch (a person).", "At 1642.0s, [0.138, 0.089, 0.464, 0.98]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Ma2hgTmveKQ.mkv", "clip": [1622, 1721], "conversations": [{"from": "human", "timestamps": 1652.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.239, 0.153, 0.462, 0.987]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.441, 0.111, 0.696, 0.97]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.652, 0.261, 0.842, 0.99]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1721.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.199, 0.148, 0.463, 0.985]?"}, {"from": "gpt", "value": ["In the current frame, [0.199, 0.148, 0.463, 0.985]: walk, listen to (a person).", "At 1720.0s, [0.136, 0.245, 0.52, 0.995]: walk, listen to (a person).", "From 1717.0s to 1719.0s, [0.16, 0.103, 0.339, 0.837]->[0.273, 0.079, 0.439, 0.949]->[0.201, 0.042, 0.498, 1.0]: stand, dress/put on clothing, listen to (a person), watch (a person).", "At 1716.0s, [0.185, 0.102, 0.381, 0.843]: stand, carry/hold (an object), dress/put on clothing, listen to (a person), watch (a person).", "From 1714.0s to 1715.0s, [0.183, 0.177, 0.511, 0.911]->[0.245, 0.14, 0.44, 0.845]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1713.0s, [0.334, 0.123, 0.575, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1712.0s, [0.264, 0.125, 0.497, 0.99]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Ma2hgTmveKQ.mkv", "clip": [1716, 1793], "conversations": [{"from": "human", "timestamps": 1746.0, "value": "For the person at bbox [0.458, 0.11, 0.894, 0.976], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.458, 0.11, 0.894, 0.976]: stand, watch (a person).", "At 1745.0s, [0.312, 0.134, 0.896, 0.97]: stand, carry/hold (an object), give/serve (an object) to (a person).", "At 1744.0s, [0.352, 0.17, 0.879, 0.967]: stand, carry/hold (an object).", "At 1743.0s, [0.392, 0.119, 0.856, 0.983]: stand, listen to (a person), watch (a person).", "From 1741.0s to 1742.0s, [0.451, 0.21, 0.792, 0.955]->[0.509, 0.189, 0.879, 0.992]: stand, listen to (a person)."]}, {"from": "human", "timestamps": 1793.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.148, 0.175, 0.477, 0.982]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.432, 0.238, 0.556, 0.636]: walk; [0.472, 0.067, 0.811, 0.989]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.78, 0.207, 0.878, 0.747]: carry/hold (an object), talk to (e.g., self, a person, a group), walk."}]}, {"video": "N0Dt9i9IUNg.mkv", "clip": [880, 934], "conversations": [{"from": "human", "timestamps": 910.0, "value": "Where is each person positioned, and what actions 
are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.142, 0.132, 0.907, 0.985]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 934.0, "value": "What actions did the person at bbox [0.042, 0.0, 0.939, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.042, 0.0, 0.939, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 928.0s to 933.0s, [0.033, 0.0, 0.919, 0.984]->[0.015, 0.0, 0.979, 0.981]->[0.023, 0.0, 0.951, 0.971]->[0.034, 0.0, 0.953, 0.976]->[0.038, 0.0, 0.954, 0.976]->[0.042, 0.0, 0.94, 0.975]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "N0Dt9i9IUNg.mkv", "clip": [926, 999], "conversations": [{"from": "human", "timestamps": 956.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.048, 0.945, 0.973]: sit, write."}, {"from": "human", "timestamps": 999.0, "value": "Describe the actions of the person at bbox [0.035, 0.01, 0.655, 0.982] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.035, 0.01, 0.655, 0.982]: sit, write, talk to (e.g., self, a person, a group).", "From 993.0s to 998.0s, [0.022, 0.039, 0.697, 1.0]->[0.028, 0.049, 0.687, 0.987]->[0.026, 0.043, 0.69, 1.0]->[0.025, 0.042, 0.672, 0.991]->[0.038, 0.029, 0.673, 0.99]->[0.031, 0.021, 0.682, 0.981]: sit, talk to (e.g., self, a person, a group).", "At 992.0s, [0.037, 0.051, 0.705, 0.989]: sit, read, talk to (e.g., self, a person, a group).", "From 987.0s to 991.0s, [0.043, 0.046, 0.651, 1.0]->[0.04, 0.03, 0.612, 1.0]->[0.025, 0.035, 0.656, 0.985]->[0.027, 0.038, 0.699, 0.988]->[0.032, 0.027, 0.666, 1.0]: sit, talk to (e.g., self, a person, a group).", "From 983.0s to 986.0s, [0.05, 0.055, 0.676, 0.992]->[0.056, 0.068, 0.67, 1.0]->[0.033, 0.057, 0.688, 1.0]->[0.02, 0.012, 0.627, 0.986]: sit.", "From 981.0s to 982.0s, [0.021, 0.0, 0.776, 0.995]->[0.055, 0.016, 0.658, 0.991]: sit, write."]}]}, {"video": "N0Dt9i9IUNg.mkv", "clip": [961, 1006], "conversations": [{"from": "human", "timestamps": 991.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.032, 0.027, 0.666, 1.0]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1006.0, "value": "What actions did the person at bbox [0.296, 0.244, 0.716, 0.745] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.296, 0.244, 0.716, 0.745]: sit, talk to (e.g., self, a person, a group).", "From 1002.0s to 1005.0s, [0.298, 0.244, 0.709, 0.751]->[0.295, 0.239, 0.71, 0.748]->[0.301, 0.247, 0.712, 0.744]->[0.297, 0.241, 0.719, 0.741]: sit, talk to (e.g., self, a person, a group).", "At 1001.0s, [0.3, 0.249, 0.707, 0.745]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1000.0s, [0.0, 0.0, 0.505, 0.99]: sit.", "At 999.0s, [0.035, 0.01, 0.655, 0.982]: sit, write, talk to (e.g., self, a person, a group).", "From 993.0s to 998.0s, [0.022, 0.039, 0.697, 1.0]->[0.028, 0.049, 0.687, 0.987]->[0.026, 0.043, 0.69, 1.0]->[0.025, 0.042, 0.672, 0.991]->[0.038, 0.029, 0.673, 0.99]->[0.031, 0.021, 0.682, 0.981]: sit, talk to (e.g., self, a person, a group).", "At 992.0s, [0.037, 0.051, 0.705, 0.989]: sit, read, talk to (e.g., self, a person, a group).", "From 987.0s to 991.0s, [0.043, 0.046, 0.651, 1.0]->[0.04, 0.03, 0.612, 1.0]->[0.025, 0.035, 0.656, 0.985]->[0.027, 
0.038, 0.699, 0.988]->[0.032, 0.027, 0.666, 1.0]: sit, talk to (e.g., self, a person, a group).", "From 983.0s to 986.0s, [0.05, 0.055, 0.676, 0.992]->[0.056, 0.068, 0.67, 1.0]->[0.033, 0.057, 0.688, 1.0]->[0.02, 0.012, 0.627, 0.986]: sit.", "From 981.0s to 982.0s, [0.021, 0.0, 0.776, 0.995]->[0.055, 0.016, 0.658, 0.991]: sit, write."]}]}, {"video": "N0Dt9i9IUNg.mkv", "clip": [1014, 1062], "conversations": [{"from": "human", "timestamps": 1044.0, "value": "What actions did the person at bbox [0.255, 0.193, 0.599, 0.983] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.255, 0.193, 0.599, 0.983]: stand, talk to (e.g., self, a person, a group).", "From 1035.0s to 1043.0s, [0.247, 0.146, 0.598, 0.985]->[0.244, 0.161, 0.604, 1.0]->[0.261, 0.166, 0.599, 1.0]->[0.259, 0.182, 0.626, 0.98]->[0.26, 0.192, 0.631, 0.984]->[0.261, 0.166, 0.649, 0.989]->[0.273, 0.17, 0.639, 0.999]->[0.261, 0.179, 0.627, 0.991]->[0.26, 0.182, 0.621, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1034.0s, [0.25, 0.192, 0.527, 0.977]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1033.0s, [0.239, 0.279, 0.427, 0.94]: walk, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1062.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.262, 0.202, 0.519, 0.975]: stand, talk to (e.g., self, a person, a group); [0.475, 0.413, 0.747, 0.992]: carry/hold (an object), stand, talk to (e.g., self, a person, a group), listen to (a person)."}]}, {"video": "N0Dt9i9IUNg.mkv", "clip": [1075, 1112], "conversations": [{"from": "human", "timestamps": 1105.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.17, 0.233, 0.503, 0.969]: listen to (a person), sit, drink; [0.545, 0.209, 0.847, 0.573]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1112.0, "value": "What actions is the person in bbox [0.016, 0.176, 0.693, 0.972] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.016, 0.176, 0.693, 0.972]: sit, carry/hold (an object).", "At 1111.0s, [0.033, 0.196, 0.849, 0.97]: stand, carry/hold (an object).", "From 1109.0s to 1110.0s, [0.029, 0.171, 0.84, 0.969]->[0.05, 0.181, 0.85, 0.979]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1107.0s to 1108.0s, [0.006, 0.2, 0.727, 0.967]->[0.003, 0.181, 0.849, 0.966]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1106.0s, [0.169, 0.219, 0.506, 0.978]: sit, carry/hold (an object), listen to (a person).", "At 1105.0s, [0.17, 0.233, 0.503, 0.969]: sit, drink, listen to (a person).", "At 1104.0s, [0.173, 0.227, 0.562, 0.982]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1103.0s, [0.183, 0.31, 0.515, 0.972]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1102.0s, [0.191, 0.407, 0.522, 0.982]: sit, carry/hold (an object), listen to (a person).", "From 1100.0s to 1101.0s, [0.219, 0.445, 0.517, 0.986]->[0.205, 0.459, 0.509, 0.992]: sit, write, listen to (a person).", "At 1099.0s, [0.214, 0.44, 0.527, 0.989]: sit, write, watch (a person).", "From 1097.0s to 1098.0s, [0.226, 0.453, 0.531, 0.988]->[0.215, 0.457, 0.526, 0.985]: sit, carry/hold (an object), eat, write.", "At 1096.0s, [0.223, 0.45, 0.529, 
0.99]: sit, carry/hold (an object), write.", "At 1095.0s, [0.229, 0.453, 0.536, 0.991]: sit, write.", "At 1094.0s, [0.233, 0.448, 0.54, 0.988]: sit, carry/hold (an object), write."]}]}, {"video": "N0Dt9i9IUNg.mkv", "clip": [1118, 1170], "conversations": [{"from": "human", "timestamps": 1148.0, "value": "What actions is the person in bbox [0.441, 0.235, 0.73, 0.985] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.441, 0.235, 0.73, 0.985]: stand, listen to (a person), watch (a person).", "From 1146.0s to 1147.0s, [0.54, 0.188, 0.794, 0.99]->[0.438, 0.24, 0.759, 0.981]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1145.0s, [0.398, 0.188, 0.738, 0.983]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1144.0s, [0.526, 0.232, 0.784, 0.987]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1170.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.306, 0.401, 0.459, 0.681]: sit; [0.502, 0.414, 0.652, 0.658]: sit."}]}, {"video": "N0Dt9i9IUNg.mkv", "clip": [1120, 1212], "conversations": [{"from": "human", "timestamps": 1150.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.261, 0.357, 0.56, 0.992]: watch (a person), listen to (a person), carry/hold (an object), stand, hug (a person); [0.402, 0.212, 0.738, 0.988]: watch (a person), stand, grab (a person)."}, {"from": "human", "timestamps": 1212.0, "value": "For the person at bbox [0.176, 0.158, 0.36, 0.742], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.176, 0.158, 0.36, 0.742]: walk, listen to (a person).", "At 1211.0s, [0.26, 0.143, 0.512, 0.605]: stand, carry/hold (an object), touch (an object).", "At 1210.0s, [0.263, 0.152, 0.516, 0.639]: stand, push (an object), listen to (a person).", "From 1208.0s to 1209.0s, [0.314, 0.198, 0.486, 0.583]->[0.274, 0.152, 0.506, 0.649]: walk, push (an object)."]}]}, {"video": "N0Dt9i9IUNg.mkv", "clip": [1256, 1308], "conversations": [{"from": "human", "timestamps": 1286.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.351, 0.173, 0.517, 0.476]: carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.519, 0.195, 0.63, 0.469]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1308.0, "value": "What actions did the person at bbox [0.289, 0.195, 0.432, 0.51] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.289, 0.195, 0.432, 0.51]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1306.0s to 1307.0s, [0.287, 0.211, 0.43, 0.504]->[0.285, 0.209, 0.443, 0.511]: stand, talk to (e.g., self, a person, a group).", "At 1305.0s, [0.287, 0.204, 0.419, 0.533]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1304.0s, [0.285, 0.193, 0.441, 0.52]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "N0Dt9i9IUNg.mkv", "clip": [1230, 1337], "conversations": [{"from": "human", "timestamps": 1260.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.528, 0.205, 0.65, 0.474]: stand."}, {"from": 
"human", "timestamps": 1337.0, "value": "For the person at bbox [0.616, 0.346, 0.735, 0.606], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.616, 0.346, 0.735, 0.606]: stand.", "From 1333.0s to 1336.0s, [0.648, 0.325, 0.805, 0.641]->[0.648, 0.333, 0.799, 0.646]->[0.625, 0.364, 0.786, 0.631]->[0.621, 0.357, 0.766, 0.613]: stand.", "At 1332.0s, [0.611, 0.343, 0.763, 0.623]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1331.0s, [0.636, 0.34, 0.763, 0.618]: stand, watch (a person)."]}]}, {"video": "N0Dt9i9IUNg.mkv", "clip": [1304, 1346], "conversations": [{"from": "human", "timestamps": 1334.0, "value": "What actions did the person at bbox [0.648, 0.333, 0.799, 0.646] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.648, 0.333, 0.799, 0.646]: stand.", "At 1333.0s, [0.648, 0.325, 0.805, 0.641]: stand.", "At 1332.0s, [0.611, 0.343, 0.763, 0.623]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1331.0s, [0.636, 0.34, 0.763, 0.618]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1346.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.456, 0.213, 0.597, 0.65]: walk; [0.555, 0.23, 0.646, 0.621]: carry/hold (an object), walk."}]}, {"video": "N0Dt9i9IUNg.mkv", "clip": [1361, 1487], "conversations": [{"from": "human", "timestamps": 1391.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.11, 0.329, 0.32, 0.957]: carry/hold (an object), touch (an object), walk; [0.263, 0.207, 0.374, 0.628]: walk."}, {"from": "human", "timestamps": 1487.0, "value": "What actions did the person at bbox [0.032, 0.145, 1.0, 0.975] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.032, 0.145, 1.0, 0.975]: bend/bow (at the waist), talk to (e.g., self, a person, a group).", "At 1486.0s, [0.0, 0.123, 0.975, 0.977]: stand, talk to (e.g., self, a person, a group).", "At 1485.0s, [0.004, 0.127, 0.95, 1.0]: bend/bow (at the waist), talk to (e.g., self, a person, a group).", "At 1484.0s, [0.041, 0.129, 0.98, 0.979]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "N0Dt9i9IUNg.mkv", "clip": [1497, 1614], "conversations": [{"from": "human", "timestamps": 1527.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.014, 0.152, 0.963, 0.968]: watch (a person), talk to (e.g., self, a person, a group), bend/bow (at the waist)."}, {"from": "human", "timestamps": 1614.0, "value": "For the person at bbox [0.37, 0.277, 0.527, 0.565], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.37, 0.277, 0.527, 0.565]: sit, ride (e.g., a bike, a car, a horse).", "At 1613.0s, [0.374, 0.295, 0.528, 0.565]: sit, ride (e.g., a bike, a car, a horse).", "From 1611.0s to 1612.0s, [0.364, 0.259, 0.528, 0.568]->[0.369, 0.289, 0.524, 0.562]: sit, ride (e.g., a bike, a car, a horse), listen to (a person).", "At 1610.0s, [0.364, 0.275, 0.527, 0.567]: sit, ride (e.g., a bike, a car, a horse), listen to (a person), watch (a person).", "At 1609.0s, [0.362, 0.27, 0.501, 0.555]: sit, ride (e.g., a bike, a car, a horse), listen to (a person).", "At 1608.0s, [0.359, 0.255, 0.496, 0.557]: sit, listen to (a person), watch 
(a person).", "At 1607.0s, [0.359, 0.254, 0.496, 0.553]: sit, ride (e.g., a bike, a car, a horse).", "At 1606.0s, [0.354, 0.255, 0.486, 0.551]: sit, ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group), watch (a person).", "At 1605.0s, [0.355, 0.258, 0.486, 0.549]: sit, ride (e.g., a bike, a car, a horse)."]}]}, {"video": "N0Dt9i9IUNg.mkv", "clip": [1560, 1598], "conversations": [{"from": "human", "timestamps": 1590.0, "value": "Describe the actions of the person at bbox [0.193, 0.344, 0.296, 0.636] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.193, 0.344, 0.296, 0.636]: stand, listen to (a person), watch (a person).", "At 1589.0s, [0.195, 0.344, 0.295, 0.635]: bend/bow (at the waist), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1586.0s to 1588.0s, [0.188, 0.326, 0.308, 0.664]->[0.195, 0.337, 0.292, 0.638]->[0.21, 0.341, 0.301, 0.628]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 1584.0s to 1585.0s, [0.188, 0.293, 0.325, 0.748]->[0.204, 0.311, 0.333, 0.717]: walk.", "At 1583.0s, [0.051, 0.246, 0.176, 0.589]: walk, watch (a person)."]}, {"from": "human", "timestamps": 1598.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.074, 0.168, 0.992, 0.974]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "N0Dt9i9IUNg.mkv", "clip": [1608, 1662], "conversations": [{"from": "human", "timestamps": 1638.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.459, 0.122, 0.989, 0.995]?"}, {"from": "gpt", "value": ["In the current frame, [0.459, 0.122, 0.989, 0.995]: sit, ride (e.g., a bike, a car, a horse), listen to (a person), watch (a person).", "At 1637.0s, [0.421, 0.101, 0.994, 1.0]: sit, listen to (a person), watch (a person).", "From 1634.0s to 1636.0s, [0.451, 0.13, 1.0, 1.0]->[0.468, 0.141, 0.999, 0.999]->[0.442, 0.134, 0.982, 1.0]: sit, ride (e.g., a bike, a car, a horse), listen to (a person), watch (a person).", "At 1633.0s, [0.407, 0.159, 0.993, 1.0]: sit, ride (e.g., a bike, a car, a horse), listen to (a person)."]}, {"from": "human", "timestamps": 1662.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.146, 0.014, 0.987, 0.997]: stand."}]}, {"video": "N0Dt9i9IUNg.mkv", "clip": [1643, 1707], "conversations": [{"from": "human", "timestamps": 1673.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.16, 0.501, 0.983]: ride (e.g., a bike, a car, a horse), sit; [0.36, 0.201, 0.995, 0.981]: ride (e.g., a bike, a car, a horse), sit; [0.362, 0.235, 0.597, 0.699]: ride (e.g., a bike, a car, a horse), sit."}, {"from": "human", "timestamps": 1707.0, "value": "Describe the actions of the person at bbox [0.364, 0.209, 1.0, 0.983] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.364, 0.209, 1.0, 0.983]: sit, ride (e.g., a bike, a car, a horse), listen to (a person).", "At 1706.0s, [0.386, 0.201, 0.997, 0.986]: sit, ride (e.g., a bike, a car, a horse).", "At 1705.0s, [0.359, 0.208, 1.0, 0.996]: sit.", "At 1704.0s, [0.375, 0.197, 1.0, 1.0]: sit, listen to (a person), watch (a person)."]}]}, {"video": "N0Dt9i9IUNg.mkv", "clip": [1663, 1734], "conversations": [{"from": "human", "timestamps": 1693.0, 
"value": "What actions is the person in bbox [0.365, 0.237, 0.586, 0.691] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.365, 0.237, 0.586, 0.691]: sit, ride (e.g., a bike, a car, a horse).", "From 1691.0s to 1692.0s, [0.373, 0.243, 0.59, 0.703]->[0.375, 0.245, 0.581, 0.699]: sit, ride (e.g., a bike, a car, a horse)."]}, {"from": "human", "timestamps": 1734.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.194, 0.15, 1.0, 0.973]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "N0Dt9i9IUNg.mkv", "clip": [1733, 1765], "conversations": [{"from": "human", "timestamps": 1763.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.159, 0.501, 0.266, 0.924]?"}, {"from": "gpt", "value": ["At Now, [0.159, 0.501, 0.266, 0.924]: walk, watch (a person).", "At 1762.0s, [0.254, 0.63, 0.359, 0.987]: walk."]}, {"from": "human", "timestamps": 1765.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.048, 0.393, 0.163, 0.913]: watch (a person), stand; [0.276, 0.439, 0.379, 0.906]: watch (a person), walk."}]}, {"video": "N1K2bEZLL_A.mkv", "clip": [901, 955], "conversations": [{"from": "human", "timestamps": 931.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.112, 0.413, 1.0]: stand; [0.528, 0.033, 0.996, 0.988]: watch (a person), stand."}, {"from": "human", "timestamps": 955.0, "value": "What actions is the person in bbox [0.24, 0.111, 0.548, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.24, 0.111, 0.548, 1.0]: stand, answer phone.", "From 952.0s to 954.0s, [0.245, 0.104, 0.533, 0.996]->[0.25, 0.111, 0.567, 0.984]->[0.275, 0.132, 0.596, 0.983]: stand, answer phone."]}]}, {"video": "N1K2bEZLL_A.mkv", "clip": [913, 989], "conversations": [{"from": "human", "timestamps": 943.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.437, 0.172, 0.602, 0.945]: stand; [0.651, 0.252, 0.784, 0.974]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 989.0, "value": "For the person at bbox [0.217, 0.203, 0.409, 0.831], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.217, 0.203, 0.409, 0.831]: stand, listen to (a person), watch (a person).", "From 987.0s to 988.0s, [0.193, 0.208, 0.374, 0.832]->[0.216, 0.205, 0.41, 0.842]: stand, listen to (a person), watch (a person).", "At 986.0s, [0.249, 0.209, 0.397, 0.811]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 984.0s to 985.0s, [0.289, 0.218, 0.422, 0.832]->[0.264, 0.218, 0.404, 0.815]: stand, talk to (e.g., self, a person, a group).", "At 983.0s, [0.275, 0.208, 0.444, 0.84]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 976.0s to 982.0s, [0.268, 0.2, 0.436, 0.827]->[0.266, 0.201, 0.443, 0.828]->[0.266, 0.205, 0.44, 0.834]->[0.266, 0.2, 0.441, 0.829]->[0.262, 0.201, 0.436, 0.829]->[0.264, 0.197, 0.436, 0.836]->[0.265, 0.196, 0.436, 0.832]: stand, listen to (a person), watch (a person).", "At 975.0s, [0.264, 0.199, 0.446, 0.832]: stand, listen to 
(a person), talk to (e.g., self, a person, a group), watch (a person).", "From 968.0s to 974.0s, [0.204, 0.212, 0.361, 0.831]->[0.25, 0.209, 0.441, 0.815]->[0.235, 0.209, 0.407, 0.827]->[0.252, 0.207, 0.439, 0.83]->[0.265, 0.201, 0.449, 0.831]->[0.261, 0.197, 0.447, 0.831]->[0.257, 0.203, 0.444, 0.83]: stand, listen to (a person), watch (a person).", "From 963.0s to 967.0s, [0.536, 0.127, 0.745, 0.876]->[0.436, 0.139, 0.683, 0.995]->[0.25, 0.174, 0.581, 0.943]->[0.292, 0.192, 0.467, 0.868]->[0.196, 0.21, 0.372, 0.829]: walk.", "At 962.0s, [0.458, 0.14, 0.707, 0.994]: walk, touch (an object).", "From 960.0s to 961.0s, [0.301, 0.12, 0.606, 0.989]->[0.411, 0.12, 0.683, 0.983]: stand.", "At 959.0s, [0.221, 0.142, 0.507, 0.978]: stand, carry/hold (an object).", "From 952.0s to 958.0s, [0.245, 0.104, 0.533, 0.996]->[0.25, 0.111, 0.567, 0.984]->[0.275, 0.132, 0.596, 0.983]->[0.24, 0.111, 0.548, 1.0]->[0.244, 0.115, 0.568, 0.992]->[0.253, 0.125, 0.555, 0.991]->[0.173, 0.112, 0.502, 1.0]: stand, answer phone."]}]}, {"video": "N1K2bEZLL_A.mkv", "clip": [936, 1007], "conversations": [{"from": "human", "timestamps": 966.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.292, 0.192, 0.467, 0.868]: walk."}, {"from": "human", "timestamps": 1007.0, "value": "What actions did the person at bbox [0.3, 0.128, 0.595, 0.97] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.3, 0.128, 0.595, 0.97]: walk, talk to (e.g., self, a person, a group).", "At 1006.0s, [0.331, 0.176, 0.538, 0.922]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 1001.0s to 1005.0s, [0.358, 0.192, 0.529, 0.85]->[0.364, 0.197, 0.528, 0.853]->[0.363, 0.194, 0.529, 0.859]->[0.363, 0.188, 0.529, 0.854]->[0.359, 0.198, 0.504, 0.802]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 999.0s, [0.363, 0.199, 0.524, 0.861]: stand, watch (a person).", "From 993.0s to 998.0s, [0.354, 0.185, 0.526, 0.845]->[0.369, 0.192, 0.532, 0.859]->[0.356, 0.187, 0.531, 0.842]->[0.364, 0.199, 0.524, 0.857]->[0.366, 0.182, 0.534, 0.852]->[0.361, 0.184, 0.532, 0.852]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 991.0s to 992.0s, [0.35, 0.195, 0.517, 0.829]->[0.367, 0.204, 0.524, 0.854]: stand, talk to (e.g., self, a person, a group).", "From 989.0s to 990.0s, [0.405, 0.204, 0.57, 0.847]->[0.395, 0.205, 0.552, 0.856]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 988.0s, [0.393, 0.206, 0.574, 0.853]: stand, talk to (e.g., self, a person, a group).", "At 987.0s, [0.382, 0.198, 0.533, 0.879]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 983.0s to 986.0s, [0.41, 0.199, 0.536, 0.823]->[0.402, 0.195, 0.534, 0.858]->[0.39, 0.203, 0.537, 0.856]->[0.389, 0.199, 0.533, 0.853]: stand, listen to (a person), watch (a person).", "At 982.0s, [0.397, 0.197, 0.537, 0.859]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 981.0s, [0.399, 0.19, 0.533, 0.85]: stand, talk to (e.g., self, a person, a group).", "At 980.0s, [0.403, 0.192, 0.538, 0.874]: stand, watch (a person).", "From 977.0s to 978.0s, [0.416, 0.205, 0.548, 0.779]->[0.408, 0.206, 0.542, 0.864]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 976.0s, [0.415, 0.204, 0.559, 0.856]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 974.0s, [0.466, 0.22, 0.613, 0.773]: walk, talk to (e.g., self, a person, 
a group), watch (a person).", "At 973.0s, [0.49, 0.223, 0.692, 0.75]: stand, close (e.g., a door, a box), talk to (e.g., self, a person, a group).", "From 969.0s to 972.0s, [0.468, 0.227, 0.616, 0.789]->[0.433, 0.233, 0.659, 0.775]->[0.502, 0.228, 0.673, 0.766]->[0.51, 0.235, 0.699, 0.755]: stand, close (e.g., a door, a box), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "N1K2bEZLL_A.mkv", "clip": [998, 1053], "conversations": [{"from": "human", "timestamps": 1028.0, "value": "For the person at bbox [0.51, 0.182, 0.792, 0.967], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.51, 0.182, 0.792, 0.967]: stand, listen to (a person), watch (a person).", "From 1020.0s to 1027.0s, [0.71, 0.137, 0.994, 0.994]->[0.433, 0.124, 0.694, 0.978]->[0.422, 0.113, 0.685, 0.971]->[0.419, 0.126, 0.674, 0.986]->[0.414, 0.139, 0.68, 0.971]->[0.414, 0.139, 0.677, 0.992]->[0.451, 0.164, 0.715, 0.981]->[0.48, 0.173, 0.754, 0.98]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1053.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.101, 0.23, 0.28, 0.895]: listen to (a person), stand, watch (a person); [0.413, 0.111, 0.941, 0.979]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "N1K2bEZLL_A.mkv", "clip": [1002, 1130], "conversations": [{"from": "human", "timestamps": 1032.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.05, 0.161, 0.384, 0.972]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.557, 0.169, 0.908, 0.979]: listen to (a person), watch (a person), walk."}, {"from": "human", "timestamps": 1130.0, "value": "For the person at bbox [0.271, 0.262, 0.462, 0.982], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.271, 0.262, 0.462, 0.982]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1129.0s, [0.336, 0.268, 0.508, 0.982]: walk, carry/hold (an object), listen to (a person), watch (a person).", "From 1127.0s to 1128.0s, [0.363, 0.222, 0.574, 0.586]->[0.341, 0.262, 0.552, 0.962]: stand, listen to (a person), watch (a person)."]}]}, {"video": "N1K2bEZLL_A.mkv", "clip": [1088, 1138], "conversations": [{"from": "human", "timestamps": 1118.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.1, 0.045, 0.477, 0.985]: listen to (a person), stand, grab (a person), watch (a person); [0.371, 0.164, 0.698, 0.958]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.608, 0.284, 0.957, 0.986]: listen to (a person), stand."}, {"from": "human", "timestamps": 1138.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.765, 0.019, 1.0, 0.981]?"}, {"from": "gpt", "value": ["In the current frame, [0.765, 0.019, 1.0, 0.981]: stand, hand shake, listen to (a person), watch (a person).", "At 1137.0s, [0.774, 0.039, 1.0, 0.984]: stand, listen to (a person), watch (a person).", "From 1134.0s to 1136.0s, [0.775, 0.023, 1.0, 0.981]->[0.765, 0.017, 1.0, 0.976]->[0.769, 0.019, 1.0, 1.0]: stand, watch (a person).", "From 1130.0s to 1133.0s, [0.78, 0.016, 1.0, 0.996]->[0.762, 0.02, 1.0, 0.996]->[0.769, 0.025, 1.0, 0.996]->[0.767, 0.029, 1.0, 1.0]: stand, listen to (a 
person), watch (a person)."]}]}, {"video": "N1K2bEZLL_A.mkv", "clip": [1144, 1184], "conversations": [{"from": "human", "timestamps": 1174.0, "value": "What actions did the person at bbox [0.0, 0.093, 0.476, 0.996] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.093, 0.476, 0.996]: stand.", "From 1169.0s to 1173.0s, [0.552, 0.083, 0.981, 0.982]->[0.555, 0.077, 0.988, 0.984]->[0.547, 0.108, 0.988, 0.982]->[0.544, 0.093, 0.989, 0.984]->[0.531, 0.105, 0.989, 0.979]: stand.", "At 1168.0s, [0.56, 0.082, 0.993, 0.977]: stand, listen to (a person)."]}, {"from": "human", "timestamps": 1184.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.149, 0.0, 0.991, 0.995]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "N1K2bEZLL_A.mkv", "clip": [1118, 1225], "conversations": [{"from": "human", "timestamps": 1148.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.044, 0.096, 0.298, 0.991]: listen to (a person), stand, watch (a person); [0.335, 0.237, 0.62, 0.981]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.591, 0.158, 0.855, 0.979]: listen to (a person), stand, watch (a person); [0.756, 0.027, 1.0, 0.994]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1225.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.357, 0.089, 0.934, 0.993]?"}, {"from": "gpt", "value": ["In the current frame, [0.357, 0.089, 0.934, 0.993]: stand, talk to (e.g., self, a person, a group).", "From 1221.0s to 1224.0s, [0.473, 0.082, 0.929, 0.985]->[0.184, 0.078, 0.957, 0.975]->[0.374, 0.088, 0.952, 0.986]->[0.36, 0.084, 0.924, 0.98]: stand, talk to (e.g., self, a person, a group).", "From 1219.0s to 1220.0s, [0.442, 0.072, 0.923, 0.972]->[0.493, 0.104, 0.917, 0.978]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1218.0s, [0.418, 0.06, 0.838, 0.968]: stand, point to (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "N1K2bEZLL_A.mkv", "clip": [1190, 1226], "conversations": [{"from": "human", "timestamps": 1220.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.057, 0.261, 0.47, 1.0]?"}, {"from": "gpt", "value": ["At Now, [0.057, 0.261, 0.47, 1.0]: stand, listen to (a person).", "From 1218.0s to 1219.0s, [0.056, 0.264, 0.446, 0.987]->[0.039, 0.263, 0.496, 0.986]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1226.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.357, 0.088, 0.928, 0.994]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "N1K2bEZLL_A.mkv", "clip": [1231, 1288], "conversations": [{"from": "human", "timestamps": 1261.0, "value": "For the person at bbox [0.194, 0.138, 0.472, 0.975], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.194, 0.138, 0.472, 0.975]: walk, carry/hold (an object).", "At 1260.0s, [0.194, 0.161, 0.411, 0.998]: walk, carry/hold (an object).", "At 1259.0s, [0.269, 0.174, 0.432, 0.958]: walk, listen to (a person), watch (a person).", "At 1258.0s, [0.195, 0.158, 0.41, 0.983]: walk."]}, {"from": "human", "timestamps": 1288.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, 
{"from": "gpt", "value": "In the current frame, [0.0, 0.019, 0.468, 0.968]: listen to (a person), stand, watch (a person); [0.431, 0.082, 1.0, 0.983]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "N1K2bEZLL_A.mkv", "clip": [1241, 1349], "conversations": [{"from": "human", "timestamps": 1271.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.008, 0.499, 0.99]: listen to (a person), stand, watch (a person); [0.36, 0.165, 0.597, 0.862]: listen to (a person), stand, watch (a person); [0.46, 0.14, 0.9, 0.983]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1349.0, "value": "Describe the actions of the person at bbox [0.311, 0.146, 0.832, 0.996] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.311, 0.146, 0.832, 0.996]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1346.0s to 1348.0s, [0.193, 0.227, 0.549, 0.994]->[0.094, 0.178, 0.486, 0.986]->[0.039, 0.174, 0.505, 0.974]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1344.0s to 1345.0s, [0.38, 0.261, 0.619, 0.982]->[0.332, 0.239, 0.612, 0.986]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1343.0s, [0.411, 0.35, 0.671, 0.988]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1342.0s, [0.41, 0.289, 0.67, 0.991]: stand, carry/hold (an object), put down, talk to (e.g., self, a person, a group).", "From 1339.0s to 1341.0s, [0.442, 0.262, 0.703, 0.984]->[0.428, 0.262, 0.695, 0.982]->[0.42, 0.27, 0.68, 0.983]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1338.0s, [0.449, 0.259, 0.687, 0.984]: stand, carry/hold (an object)."]}]}, {"video": "N1K2bEZLL_A.mkv", "clip": [1327, 1377], "conversations": [{"from": "human", "timestamps": 1357.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.008, 0.112, 0.557, 1.0]: listen to (a person), stand, watch (a person); [0.505, 0.164, 0.984, 0.998]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1377.0, "value": "Describe the actions of the person at bbox [0.083, 0.061, 0.986, 0.996] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.083, 0.061, 0.986, 0.996]: stand, talk to (e.g., self, a person, a group).", "From 1375.0s to 1376.0s, [0.109, 0.048, 1.0, 0.983]->[0.071, 0.08, 1.0, 0.971]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "N1K2bEZLL_A.mkv", "clip": [1394, 1436], "conversations": [{"from": "human", "timestamps": 1424.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.019, 0.331, 0.84]: listen to (a person), stand, watch (a person); [0.115, 0.026, 0.965, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1436.0, "value": "What actions is the person in bbox [0.697, 0.032, 0.994, 0.984] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.697, 0.032, 0.994, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1435.0s, [0.707, 0.038, 0.994, 
1.0]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "N1K2bEZLL_A.mkv", "clip": [1361, 1495], "conversations": [{"from": "human", "timestamps": 1391.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.0, 1.0, 0.988]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1495.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.102, 0.185, 1.0, 0.991]?"}, {"from": "gpt", "value": ["At Now, [0.102, 0.185, 1.0, 0.991]: stand, watch (a person).", "At 1494.0s, [0.258, 0.092, 0.987, 0.965]: stand, talk to (e.g., self, a person, a group).", "At 1492.0s, [0.039, 0.0, 0.956, 1.0]: stand, watch (a person).", "From 1490.0s to 1491.0s, [0.333, 0.07, 0.991, 0.979]->[0.056, 0.0, 0.952, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1489.0s, [0.17, 0.005, 0.972, 0.995]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "N1K2bEZLL_A.mkv", "clip": [1513, 1600], "conversations": [{"from": "human", "timestamps": 1543.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.039, 0.024, 1.0, 0.978]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1600.0, "value": "For the person at bbox [0.049, 0.0, 1.0, 0.978], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.049, 0.0, 1.0, 0.978]: stand, talk to (e.g., self, a person, a group).", "At 1599.0s, [0.056, 0.0, 0.998, 0.982]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "N1K2bEZLL_A.mkv", "clip": [1574, 1617], "conversations": [{"from": "human", "timestamps": 1604.0, "value": "What actions did the person at bbox [0.045, 0.006, 1.0, 0.976] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.045, 0.006, 1.0, 0.976]: stand, talk to (e.g., self, a person, a group).", "From 1602.0s to 1603.0s, [0.067, 0.002, 1.0, 0.999]->[0.069, 0.0, 1.0, 0.98]: stand.", "From 1600.0s to 1601.0s, [0.049, 0.0, 1.0, 0.978]->[0.044, 0.0, 1.0, 0.973]: stand, talk to (e.g., self, a person, a group).", "At 1599.0s, [0.056, 0.0, 0.998, 0.982]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1617.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.029, 0.0, 0.997, 0.99]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "N1K2bEZLL_A.mkv", "clip": [1598, 1668], "conversations": [{"from": "human", "timestamps": 1628.0, "value": "What actions did the person at bbox [0.022, 0.004, 1.0, 0.987] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.022, 0.004, 1.0, 0.987]: stand, talk to (e.g., self, a person, a group).", "From 1618.0s to 1627.0s, [0.073, 0.009, 1.0, 0.995]->[0.04, 0.014, 0.999, 0.971]->[0.038, 0.017, 1.0, 0.984]->[0.067, 0.0, 1.0, 0.988]->[0.042, 0.0, 0.995, 0.972]->[0.056, 0.0, 0.999, 0.989]->[0.058, 0.008, 0.997, 0.987]->[0.047, 0.004, 0.999, 0.99]->[0.037, 0.0, 0.991, 0.975]->[0.05, 0.005, 0.99, 0.975]: stand, talk to (e.g., self, a person, a group).", "At 1617.0s, [0.029, 0.0, 0.997, 0.99]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1604.0s to 1616.0s, [0.045, 0.006, 1.0, 
0.976]->[0.05, 0.022, 0.992, 0.966]->[0.041, 0.009, 1.0, 0.977]->[0.047, 0.003, 1.0, 0.992]->[0.06, 0.013, 1.0, 0.978]->[0.047, 0.0, 1.0, 0.979]->[0.032, 0.031, 1.0, 0.976]->[0.03, 0.0, 1.0, 0.991]->[0.04, 0.005, 1.0, 0.998]->[0.021, 0.01, 1.0, 0.969]->[0.021, 0.0, 0.997, 0.984]->[0.034, 0.0, 0.992, 0.987]->[0.034, 0.0, 1.0, 0.989]: stand, talk to (e.g., self, a person, a group).", "From 1602.0s to 1603.0s, [0.067, 0.002, 1.0, 0.999]->[0.069, 0.0, 1.0, 0.98]: stand.", "From 1600.0s to 1601.0s, [0.049, 0.0, 1.0, 0.978]->[0.044, 0.0, 1.0, 0.973]: stand, talk to (e.g., self, a person, a group).", "At 1599.0s, [0.056, 0.0, 0.998, 0.982]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1668.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.084, 0.462, 0.207, 0.629]: listen to (a person), sit, watch (a person); [0.357, 0.323, 0.489, 0.649]: watch (a person), talk to (e.g., self, a person, a group), walk; [0.597, 0.251, 0.711, 0.77]: listen to (a person), stand; [0.766, 0.22, 0.9, 0.983]: stand."}]}, {"video": "N1K2bEZLL_A.mkv", "clip": [1633, 1724], "conversations": [{"from": "human", "timestamps": 1663.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.001, 0.488, 0.997]: listen to (a person), stand, watch (a person); [0.341, 0.112, 0.975, 0.996]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1724.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.234, 0.079, 0.988, 0.98]?"}, {"from": "gpt", "value": ["At Now, [0.234, 0.079, 0.988, 0.98]: sit, talk to (e.g., self, a person, a group).", "At 1723.0s, [0.209, 0.078, 0.989, 0.974]: sit, talk to (e.g., self, a person, a group).", "From 1720.0s to 1722.0s, [0.281, 0.063, 0.974, 0.978]->[0.292, 0.066, 0.97, 0.971]->[0.232, 0.079, 0.987, 0.965]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "N1K2bEZLL_A.mkv", "clip": [1653, 1685], "conversations": [{"from": "human", "timestamps": 1683.0, "value": "What actions did the person at bbox [0.074, 0.125, 0.394, 0.998] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.074, 0.125, 0.394, 0.998]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1680.0s to 1682.0s, [0.102, 0.332, 0.416, 0.996]->[0.0, 0.265, 0.346, 0.984]->[0.004, 0.235, 0.375, 0.988]: walk, listen to (a person).", "At 1679.0s, [0.156, 0.45, 0.396, 0.973]: walk, watch (a person)."]}, {"from": "human", "timestamps": 1685.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.097, 0.151, 0.465, 0.984]: stand, talk to (e.g., self, a person, a group); [0.395, 0.181, 0.687, 0.98]: listen to (a person), stand, watch (a person); [0.687, 0.111, 0.996, 0.982]: listen to (a person), stand, watch (a person)."}]}, {"video": "N1K2bEZLL_A.mkv", "clip": [1715, 1783], "conversations": [{"from": "human", "timestamps": 1745.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.472, 0.07, 0.996, 0.993]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1783.0, "value": "For the person at bbox [0.187, 0.008, 0.989, 0.977], what are 
their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.187, 0.008, 0.989, 0.977]: sit.", "From 1781.0s to 1782.0s, [0.057, 0.091, 0.881, 0.988]->[0.2, 0.007, 0.988, 0.975]: sit.", "At 1780.0s, [0.0, 0.095, 0.849, 0.987]: sit, watch (a person)."]}]}, {"video": "N5UD8FGzDek.mkv", "clip": [877, 999], "conversations": [{"from": "human", "timestamps": 907.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.194, 0.252, 0.418, 0.865]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group); [0.362, 0.295, 0.484, 0.528]: listen to (a person), sit, eat, watch (a person); [0.444, 0.284, 0.631, 0.646]: listen to (a person), sit; [0.567, 0.302, 0.656, 0.557]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.606, 0.322, 0.82, 0.772]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 999.0, "value": "What actions did the person at bbox [0.027, 0.182, 0.502, 0.988] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.027, 0.182, 0.502, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 998.0s, [0.022, 0.194, 0.479, 0.997]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 997.0s, [0.059, 0.2, 0.516, 1.0]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "N5UD8FGzDek.mkv", "clip": [1026, 1077], "conversations": [{"from": "human", "timestamps": 1056.0, "value": "Describe the actions of the person at bbox [0.487, 0.011, 0.953, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.487, 0.011, 0.953, 1.0]: walk.", "From 1054.0s to 1055.0s, [0.518, 0.007, 0.905, 1.0]->[0.489, 0.0, 0.912, 0.991]: walk.", "From 1052.0s to 1053.0s, [0.53, 0.142, 0.86, 1.0]->[0.56, 0.024, 0.884, 1.0]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1077.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.456, 0.25, 0.811, 0.99]: lie/sleep, talk to (e.g., self, a person, a group)."}]}, {"video": "N5UD8FGzDek.mkv", "clip": [1123, 1192], "conversations": [{"from": "human", "timestamps": 1153.0, "value": "For the person at bbox [0.439, 0.14, 0.957, 0.973], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.439, 0.14, 0.957, 0.973]: lie/sleep.", "At 1152.0s, [0.451, 0.137, 0.948, 0.961]: lie/sleep.", "At 1151.0s, [0.446, 0.193, 0.932, 0.968]: talk to (e.g., self, a person, a group), lie/sleep."]}, {"from": "human", "timestamps": 1192.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.326, 0.046, 0.546, 0.587]: carry/hold (an object), stand."}]}, {"video": "N5UD8FGzDek.mkv", "clip": [1252, 1301], "conversations": [{"from": "human", "timestamps": 1282.0, "value": "Describe the actions of the person at bbox [0.463, 0.046, 0.878, 0.878] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.463, 0.046, 0.878, 0.878]: stand, carry/hold (an object), smoke, talk to (e.g., self, a person, a group).", "At 1281.0s, [0.445, 0.057, 0.74, 0.839]: stand, carry/hold (an object), smoke."]}, {"from": "human", 
"timestamps": 1301.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.009, 0.054, 0.494, 0.985]: walk; [0.354, 0.017, 0.897, 0.999]: talk to (e.g., self, a person, a group), walk."}]}, {"video": "N5UD8FGzDek.mkv", "clip": [1232, 1329], "conversations": [{"from": "human", "timestamps": 1262.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.625, 0.509, 0.912, 0.885]: carry/hold (an object), smoke, bend/bow (at the waist)."}, {"from": "human", "timestamps": 1329.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.527, 0.263, 0.643, 0.869]?"}, {"from": "gpt", "value": ["At Now, [0.527, 0.263, 0.643, 0.869]: walk, listen to (a person).", "At 1328.0s, [0.549, 0.285, 0.66, 0.84]: walk, listen to (a person), watch (a person).", "At 1327.0s, [0.581, 0.302, 0.682, 0.815]: walk, talk to (e.g., self, a person, a group).", "From 1325.0s to 1326.0s, [0.603, 0.325, 0.686, 0.769]->[0.597, 0.317, 0.692, 0.789]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 1322.0s to 1324.0s, [0.307, 0.229, 0.592, 0.982]->[0.2, 0.203, 0.59, 0.98]->[0.309, 0.075, 0.61, 0.974]: walk, talk to (e.g., self, a person, a group).", "At 1321.0s, [0.292, 0.116, 0.523, 1.0]: walk, listen to (a person), talk to (e.g., self, a person, a group).", "At 1320.0s, [0.113, 0.079, 0.511, 0.989]: walk, listen to (a person).", "At 1319.0s, [0.171, 0.031, 0.482, 0.975]: walk, listen to (a person), watch (a person)."]}]}, {"video": "N5UD8FGzDek.mkv", "clip": [1292, 1374], "conversations": [{"from": "human", "timestamps": 1322.0, "value": "Describe the actions of the person at bbox [0.484, 0.183, 0.988, 0.99] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.484, 0.183, 0.988, 0.99]: walk, listen to (a person).", "From 1319.0s to 1321.0s, [0.355, 0.074, 0.881, 0.987]->[0.408, 0.029, 0.913, 0.984]->[0.401, 0.042, 0.897, 0.988]: walk, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1374.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.026, 0.062, 0.238, 0.758]: carry/hold (an object), watch (a person), walk; [0.05, 0.533, 0.3, 0.99]: listen to (a person), sit, watch (a person); [0.268, 0.092, 0.472, 0.941]: listen to (a person), stand, write, watch (a person); [0.443, 0.323, 0.523, 0.54]: carry/hold (an object), sit, talk to (e.g., self, a person, a group); [0.445, 0.391, 0.65, 0.798]: listen to (a person), sit, watch (a person); [0.608, 0.392, 0.915, 0.981]: carry/hold (an object), sit, watch (a person), listen to (a person); [0.668, 0.283, 0.855, 0.661]: carry/hold (an object), sit."}]}, {"video": "N5UD8FGzDek.mkv", "clip": [1402, 1477], "conversations": [{"from": "human", "timestamps": 1432.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.068, 0.098, 0.663, 0.997]: listen to (a person), sit."}, {"from": "human", "timestamps": 1477.0, "value": "What actions did the person at bbox [0.504, 0.043, 0.897, 0.807] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.504, 0.043, 0.897, 0.807]: sit, drive (e.g., a car, a truck), sing to (e.g., self, a person, a group), talk to (e.g., self, a 
person, a group).", "From 1475.0s to 1476.0s, [0.516, 0.058, 0.968, 0.792]->[0.548, 0.072, 0.974, 0.82]: sit, drive (e.g., a car, a truck), sing to (e.g., self, a person, a group).", "At 1474.0s, [0.516, 0.068, 0.967, 0.801]: sit, drive (e.g., a car, a truck), talk to (e.g., self, a person, a group)."]}]}, {"video": "N5UD8FGzDek.mkv", "clip": [1472, 1565], "conversations": [{"from": "human", "timestamps": 1502.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.168, 0.0, 0.97, 0.988]: watch (a person), sit; [0.759, 0.182, 1.0, 0.919]: carry/hold (an object), sit, drink."}, {"from": "human", "timestamps": 1565.0, "value": "What actions is the person in bbox [0.614, 0.169, 0.727, 0.62] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.614, 0.169, 0.727, 0.62]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1564.0s, [0.622, 0.159, 0.741, 0.565]: sit, touch (an object), listen to (a person), take (an object) from (a person), talk to (e.g., self, a person, a group).", "At 1563.0s, [0.601, 0.186, 0.75, 0.612]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "N5UD8FGzDek.mkv", "clip": [1540, 1585], "conversations": [{"from": "human", "timestamps": 1570.0, "value": "What actions did the person at bbox [0.157, 0.0, 0.893, 0.993] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.157, 0.0, 0.893, 0.993]: sit, talk to (e.g., self, a person, a group).", "At 1569.0s, [0.0, 0.0, 0.72, 0.987]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1585.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.082, 0.284, 0.313, 0.914]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.161, 0.098, 0.311, 0.594]: watch (a person), walk; [0.377, 0.121, 0.663, 0.783]: talk to (e.g., self, a person, a group), put down, give/serve (an object) to (a person), bend/bow (at the waist); [0.563, 0.321, 0.895, 0.978]: carry/hold (an object), sit, listen to (a person)."}]}, {"video": "N5UD8FGzDek.mkv", "clip": [1606, 1717], "conversations": [{"from": "human", "timestamps": 1636.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.001, 0.27, 0.118, 0.704]: sit; [0.106, 0.273, 0.369, 0.937]: sit, talk to (e.g., self, a person, a group); [0.529, 0.001, 0.689, 0.728]: carry/hold (an object), stand, listen to (a person); [0.627, 0.275, 0.914, 0.993]: listen to (a person), sit, watch (a person); [0.671, 0.288, 0.761, 0.582]: carry/hold (an object), sit, talk to (e.g., self, a person, a group); [0.886, 0.267, 1.0, 0.716]: listen to (a person), sit."}, {"from": "human", "timestamps": 1717.0, "value": "Describe the actions of the person at bbox [0.003, 0.051, 0.384, 0.987] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.003, 0.051, 0.384, 0.987]: sit, talk to (e.g., self, a person, a group).", "At 1716.0s, [0.051, 0.016, 0.621, 0.973]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1714.0s to 1715.0s, [0.095, 0.028, 0.64, 0.972]->[0.129, 0.023, 0.662, 0.985]: sit, talk to (e.g., self, a person, a group).", "At 1713.0s, [0.0, 0.0, 0.414, 0.983]: sit.", "From 1711.0s to 1712.0s, [0.128, 0.296, 0.353, 0.969]->[0.118, 0.289, 0.353, 0.962]: sit, touch 
(an object), watch (a person).", "At 1710.0s, [0.115, 0.285, 0.372, 0.965]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1709.0s, [0.123, 0.295, 0.362, 0.97]: sit, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1707.0s to 1708.0s, [0.054, 0.278, 0.35, 0.947]->[0.05, 0.293, 0.34, 0.913]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1706.0s, [0.017, 0.29, 0.326, 0.963]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "N5UD8FGzDek.mkv", "clip": [1690, 1722], "conversations": [{"from": "human", "timestamps": 1720.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.219, 0.162, 0.901]: sit, talk to (e.g., self, a person, a group); [0.412, 0.078, 1.0, 0.996]: sit."}, {"from": "human", "timestamps": 1722.0, "value": "What actions did the person at bbox [0.004, 0.139, 0.206, 0.879] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.004, 0.139, 0.206, 0.879]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1721.0s, [0.001, 0.208, 0.187, 0.911]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1720.0s, [0.0, 0.219, 0.162, 0.901]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "N5UD8FGzDek.mkv", "clip": [1746, 1782], "conversations": [{"from": "human", "timestamps": 1776.0, "value": "Describe the actions of the person at bbox [0.0, 0.229, 0.262, 0.991] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.0, 0.229, 0.262, 0.991]: sit, listen to (a person).", "From 1772.0s to 1775.0s, [0.0, 0.233, 0.327, 0.976]->[0.0, 0.222, 0.286, 0.991]->[0.0, 0.197, 0.274, 0.985]->[0.005, 0.221, 0.29, 0.985]: sit, listen to (a person)."]}, {"from": "human", "timestamps": 1782.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.19, 0.008, 0.909, 0.987]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "N7baJsMszJ0.mkv", "clip": [911, 983], "conversations": [{"from": "human", "timestamps": 941.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.084, 0.101, 0.987]: watch (a person), stand; [0.221, 0.162, 0.325, 0.939]: watch (a person), stand; [0.281, 0.189, 0.445, 0.97]: watch (a person), martial art, fight/hit (a person); [0.597, 0.186, 0.716, 1.0]: watch (a person), stand; [0.848, 0.156, 0.978, 0.987]: watch (a person), stand."}, {"from": "human", "timestamps": 983.0, "value": "Describe the actions of the person at bbox [0.357, 0.006, 0.852, 0.986] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.357, 0.006, 0.852, 0.986]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 982.0s, [0.491, 0.013, 0.926, 0.978]: stand, watch (a person).", "At 981.0s, [0.449, 0.021, 0.938, 0.993]: fight/hit (a person), martial art."]}]}, {"video": "N7baJsMszJ0.mkv", "clip": [939, 1020], "conversations": [{"from": "human", "timestamps": 969.0, "value": "What actions did the person at bbox [0.001, 0.218, 0.103, 0.99] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.001, 0.218, 0.103, 0.99]: stand, watch (a person).", "From 967.0s to 968.0s, 
[0.0, 0.212, 0.096, 0.989]->[0.001, 0.212, 0.101, 0.991]: stand, watch (a person).", "From 964.0s to 966.0s, [0.001, 0.208, 0.093, 0.99]->[0.0, 0.21, 0.096, 0.976]->[0.0, 0.213, 0.096, 0.982]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1020.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.253, 0.097, 0.697, 0.972]: fight/hit (a person), run/jog; [0.609, 0.077, 0.881, 0.975]: carry/hold (an object), stand, watch (a person), push (another person); [0.869, 0.442, 0.961, 0.847]: watch (a person), sit."}]}, {"video": "N7baJsMszJ0.mkv", "clip": [1022, 1114], "conversations": [{"from": "human", "timestamps": 1052.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.129, 0.011, 0.76, 0.972]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1114.0, "value": "What actions is the person in bbox [0.32, 0.006, 0.888, 0.999] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.32, 0.006, 0.888, 0.999]: stand.", "At 1113.0s, [0.309, 0.011, 0.931, 0.984]: stand, listen to (a person).", "From 1111.0s to 1112.0s, [0.331, 0.011, 0.981, 0.982]->[0.268, 0.012, 0.937, 0.99]: stand.", "From 1109.0s to 1110.0s, [0.341, 0.026, 0.887, 0.966]->[0.341, 0.009, 0.956, 0.977]: stand, carry/hold (an object).", "At 1108.0s, [0.38, 0.005, 0.784, 0.966]: stand, carry/hold (an object), listen to (a person).", "At 1107.0s, [0.364, 0.002, 0.762, 0.994]: stand, carry/hold (an object).", "At 1106.0s, [0.36, 0.02, 0.743, 0.976]: stand, carry/hold (an object), watch (a person).", "From 1102.0s to 1105.0s, [0.35, 0.029, 0.791, 0.98]->[0.37, 0.013, 0.8, 0.989]->[0.349, 0.023, 0.768, 0.97]->[0.362, 0.001, 0.78, 0.981]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1101.0s, [0.353, 0.0, 0.747, 0.981]: stand, carry/hold (an object), listen to (a person)."]}]}, {"video": "N7baJsMszJ0.mkv", "clip": [1091, 1127], "conversations": [{"from": "human", "timestamps": 1121.0, "value": "Describe the actions of the person at bbox [0.032, 0.1, 0.362, 0.935] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.032, 0.1, 0.362, 0.935]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1117.0s to 1120.0s, [0.022, 0.139, 0.335, 0.967]->[0.033, 0.157, 0.319, 0.985]->[0.046, 0.04, 0.33, 0.973]->[0.048, 0.081, 0.353, 0.965]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1127.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.0, 0.589, 0.968]: listen to (a person), stand, watch (a person); [0.389, 0.016, 0.927, 0.977]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "N7baJsMszJ0.mkv", "clip": [1184, 1239], "conversations": [{"from": "human", "timestamps": 1214.0, "value": "What actions is the person in bbox [0.762, 0.037, 0.999, 0.979] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.762, 0.037, 0.999, 0.979]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1213.0s, [0.691, 0.083, 0.984, 0.989]: stand, listen to (a person)."]}, {"from": "human", "timestamps": 1239.0, "value": "Describe the locations and 
actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.003, 0.0, 0.181, 0.976]: listen to (a person), stand, watch (a person); [0.152, 0.105, 0.446, 0.981]: play board game, put down, carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.495, 0.104, 0.683, 0.904]: watch (a person), listen to (a person), play board game, carry/hold (an object), stand; [0.6, 0.042, 0.868, 0.977]: carry/hold (an object), stand, watch (a person), listen to (a person)."}]}, {"video": "N7baJsMszJ0.mkv", "clip": [1248, 1305], "conversations": [{"from": "human", "timestamps": 1278.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.497, 0.074, 0.996, 1.0]?"}, {"from": "gpt", "value": ["In the current frame, [0.497, 0.074, 0.996, 1.0]: stand, carry/hold (an object).", "At 1277.0s, [0.501, 0.09, 0.981, 0.983]: stand, carry/hold (an object), watch (a person)."]}, {"from": "human", "timestamps": 1305.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.012, 0.022, 0.355, 0.974]: listen to (a person), stand, watch (a person); [0.255, 0.176, 0.428, 0.85]: listen to (a person), stand, watch (a person); [0.425, 0.104, 0.646, 0.89]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.56, 0.036, 0.908, 0.958]: listen to (a person), stand, watch (a person)."}]}, {"video": "N7baJsMszJ0.mkv", "clip": [1242, 1320], "conversations": [{"from": "human", "timestamps": 1272.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.078, 0.02, 0.481, 0.965]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.405, 0.144, 0.539, 0.865]: stand; [0.445, 0.009, 0.97, 0.97]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1320.0, "value": "Describe the actions of the person at bbox [0.637, 0.027, 0.887, 0.977] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.637, 0.027, 0.887, 0.977]: stand, carry/hold (an object), fight/hit (a person), watch (a person).", "At 1319.0s, [0.538, 0.011, 0.949, 0.955]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1316.0s to 1318.0s, [0.623, 0.018, 0.933, 0.984]->[0.59, 0.006, 0.936, 0.992]->[0.539, 0.025, 0.938, 0.991]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "N7baJsMszJ0.mkv", "clip": [1456, 1493], "conversations": [{"from": "human", "timestamps": 1486.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.284, 0.065, 0.402, 0.963]: listen to (a person), stand, watch (a person); [0.334, 0.182, 0.564, 0.99]: martial art; [0.583, 0.112, 0.725, 0.986]: martial art, fight/hit (a person)."}, {"from": "human", "timestamps": 1493.0, "value": "For the person at bbox [0.305, 0.005, 0.704, 0.978], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.305, 0.005, 0.704, 0.978]: fight/hit (a person), martial art.", "From 1491.0s to 1492.0s, [0.333, 0.068, 0.505, 0.953]->[0.169, 0.316, 0.349, 0.939]: fight/hit (a person), martial art.", "At 1490.0s, [0.406, 0.058, 0.598, 0.917]: martial art.", "At 1489.0s, [0.367, 0.143, 0.599, 0.957]: fight/hit (a person), watch (a 
person), martial art.", "At 1488.0s, [0.37, 0.113, 0.582, 0.927]: fight/hit (a person), martial art.", "At 1487.0s, [0.423, 0.092, 0.687, 0.95]: fight/hit (a person), watch (a person), martial art."]}]}, {"video": "N7baJsMszJ0.mkv", "clip": [1531, 1580], "conversations": [{"from": "human", "timestamps": 1561.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.011, 0.319, 0.149, 0.892]: sit, hand clap; [0.149, 0.261, 0.268, 1.0]: sit, hand clap; [0.343, 0.084, 0.489, 0.594]: carry/hold (an object), stand, watch (a person); [0.46, 0.422, 0.633, 0.99]: listen to (a person), sit, hand clap; [0.806, 0.358, 0.993, 0.9]: watch (a person), sit, hand clap."}, {"from": "human", "timestamps": 1580.0, "value": "For the person at bbox [0.015, 0.006, 0.431, 0.943], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.015, 0.006, 0.431, 0.943]: bend/bow (at the waist), sit, listen to (a person), watch (a person).", "At 1579.0s, [0.023, 0.003, 0.462, 0.94]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person).", "At 1578.0s, [0.0, 0.046, 0.376, 0.996]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1577.0s, [0.003, 0.026, 0.299, 1.0]: sit, watch (a person).", "From 1575.0s to 1576.0s, [0.239, 0.263, 0.438, 0.981]->[0.03, 0.085, 0.355, 1.0]: sit, listen to (a person), watch (a person)."]}]}, {"video": "N7baJsMszJ0.mkv", "clip": [1610, 1653], "conversations": [{"from": "human", "timestamps": 1640.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.004, 0.048, 0.593, 0.992]: touch (an object), bend/bow (at the waist)."}, {"from": "human", "timestamps": 1653.0, "value": "What actions did the person at bbox [0.253, 0.008, 0.752, 0.99] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.253, 0.008, 0.752, 0.99]: stand, talk to (e.g., self, a person, a group).", "At 1652.0s, [0.272, 0.158, 0.472, 0.969]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1651.0s, [0.315, 0.024, 0.436, 0.677]: stand, carry/hold (an object)."]}]}, {"video": "N7baJsMszJ0.mkv", "clip": [1639, 1718], "conversations": [{"from": "human", "timestamps": 1669.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.317, 0.035, 0.558, 0.97]: martial art, fight/hit (a person)."}, {"from": "human", "timestamps": 1718.0, "value": "What actions did the person at bbox [0.009, 0.139, 0.487, 0.99] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.009, 0.139, 0.487, 0.99]: fight/hit (a person), martial art.", "From 1716.0s to 1717.0s, [0.164, 0.006, 0.717, 0.987]->[0.051, 0.268, 0.523, 1.0]: fight/hit (a person), martial art."]}]}, {"video": "N7baJsMszJ0.mkv", "clip": [1720, 1778], "conversations": [{"from": "human", "timestamps": 1750.0, "value": "What actions did the person at bbox [0.012, 0.016, 0.494, 0.973] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.012, 0.016, 0.494, 0.973]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1749.0s, [0.02, 0.064, 0.326, 0.961]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1748.0s, [0.076, 0.016, 0.553, 0.979]: stand, walk, watch (a person).", "At 1747.0s, [0.096, 0.021, 0.484, 0.975]: walk, 
watch (a person).", "At 1746.0s, [0.14, 0.032, 0.435, 0.956]: stand, talk to (e.g., self, a person, a group).", "At 1745.0s, [0.123, 0.0, 0.536, 0.978]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1743.0s to 1744.0s, [0.025, 0.001, 0.525, 0.99]->[0.086, 0.019, 0.589, 0.973]: stand, talk to (e.g., self, a person, a group).", "At 1742.0s, [0.306, 0.048, 0.552, 0.957]: walk, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1778.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.105, 0.575, 0.306, 0.988]: lie/sleep, talk to (e.g., self, a person, a group); [0.34, 0.251, 0.569, 0.998]: listen to (a person), sit, watch (a person); [0.723, 0.248, 0.887, 0.974]: listen to (a person), sit, watch (a person)."}]}, {"video": "NEQ7Wpf-EtI.mkv", "clip": [873, 915], "conversations": [{"from": "human", "timestamps": 903.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.105, 0.039, 0.897, 0.998]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 915.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.357, 0.035, 0.854, 0.964]?"}, {"from": "gpt", "value": ["At Now, [0.357, 0.035, 0.854, 0.964]: sit, talk to (e.g., self, a person, a group).", "At 914.0s, [0.369, 0.02, 0.907, 0.99]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 910.0s to 913.0s, [0.375, 0.017, 0.877, 0.99]->[0.382, 0.035, 0.872, 0.981]->[0.392, 0.036, 0.873, 0.994]->[0.411, 0.046, 0.864, 0.987]: sit, talk to (e.g., self, a person, a group).", "From 908.0s to 909.0s, [0.369, 0.04, 0.866, 0.999]->[0.339, 0.032, 0.867, 0.986]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 905.0s to 907.0s, [0.402, 0.068, 0.895, 0.981]->[0.398, 0.028, 0.915, 0.987]->[0.376, 0.014, 0.897, 0.99]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "NEQ7Wpf-EtI.mkv", "clip": [952, 1012], "conversations": [{"from": "human", "timestamps": 982.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.212, 0.355, 0.558, 0.98]: listen to (a person), sit, lie/sleep; [0.39, 0.082, 0.801, 0.887]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1012.0, "value": "Describe the actions of the person at bbox [0.207, 0.461, 0.715, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.207, 0.461, 0.715, 1.0]: listen to (a person), lie/sleep, watch (a person).", "At 1011.0s, [0.218, 0.45, 0.721, 1.0]: listen to (a person), lie/sleep, watch (a person).", "At 1010.0s, [0.227, 0.476, 0.698, 0.975]: listen to (a person), talk to (e.g., self, a person, a group), lie/sleep, watch (a person).", "At 1009.0s, [0.214, 0.422, 0.724, 0.995]: talk to (e.g., self, a person, a group), lie/sleep.", "At 1008.0s, [0.225, 0.425, 0.737, 1.0]: sit, touch (an object), listen to (a person).", "From 1006.0s to 1007.0s, [0.215, 0.39, 0.726, 0.995]->[0.214, 0.39, 0.731, 0.995]: sit, carry/hold (an object), listen to (a person).", "At 1005.0s, [0.213, 0.372, 0.732, 0.991]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1003.0s to 1004.0s, [0.167, 0.411, 0.751, 1.0]->[0.204, 0.423, 0.751, 1.0]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": 
"NEQ7Wpf-EtI.mkv", "clip": [1047, 1079], "conversations": [{"from": "human", "timestamps": 1077.0, "value": "What actions is the person in bbox [0.228, 0.035, 0.73, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.228, 0.035, 0.73, 1.0]: stand, touch (an object).", "At 1076.0s, [0.129, 0.001, 0.617, 1.0]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1079.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.181, 0.076, 0.893, 1.0]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "NEQ7Wpf-EtI.mkv", "clip": [1127, 1193], "conversations": [{"from": "human", "timestamps": 1157.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.004, 0.0, 0.281, 0.991]: sit, eat; [0.164, 0.129, 0.909, 0.968]: watch (a person), sit, eat."}, {"from": "human", "timestamps": 1193.0, "value": "What actions is the person in bbox [0.005, 0.008, 0.647, 0.988] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.005, 0.008, 0.647, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1192.0s, [0.004, 0.014, 0.556, 0.989]: stand, eat, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "NEQ7Wpf-EtI.mkv", "clip": [1111, 1221], "conversations": [{"from": "human", "timestamps": 1141.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.065, 0.417, 0.99]: carry/hold (an object), sit; [0.366, 0.154, 0.909, 0.993]: sit, eat."}, {"from": "human", "timestamps": 1221.0, "value": "What actions did the person at bbox [0.613, 0.0, 1.0, 0.989] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.613, 0.0, 1.0, 0.989]: stand.", "At 1220.0s, [0.588, 0.016, 1.0, 1.0]: stand, watch (a person).", "From 1218.0s to 1219.0s, [0.599, 0.0, 1.0, 0.997]->[0.616, 0.018, 0.997, 0.956]: stand.", "At 1217.0s, [0.607, 0.018, 1.0, 0.983]: stand, watch (a person)."]}]}, {"video": "NEQ7Wpf-EtI.mkv", "clip": [1194, 1240], "conversations": [{"from": "human", "timestamps": 1224.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.323, 0.047, 0.854, 1.0]: carry/hold (an object), sit, read, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1240.0, "value": "For the person at bbox [0.313, 0.182, 0.468, 0.601], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.313, 0.182, 0.468, 0.601]: stand, listen to (a person), watch (a person).", "At 1239.0s, [0.333, 0.183, 0.483, 0.605]: stand, touch (an object), listen to (a person), watch (a person).", "At 1238.0s, [0.362, 0.196, 0.512, 0.611]: stand, touch (an object), watch (a person)."]}]}, {"video": "NEQ7Wpf-EtI.mkv", "clip": [1356, 1494], "conversations": [{"from": "human", "timestamps": 1386.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.168, 0.701, 0.327, 0.996]: watch (a person), sit, stand; [0.413, 0.032, 0.878, 0.973]: stand."}, {"from": "human", "timestamps": 1494.0, "value": "Describe the actions of the person at bbox [0.437, 0.139, 0.747, 
0.992] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.437, 0.139, 0.747, 0.992]: walk.", "At 1493.0s, [0.442, 0.273, 0.638, 0.984]: walk."]}]}, {"video": "NEQ7Wpf-EtI.mkv", "clip": [1457, 1489], "conversations": [{"from": "human", "timestamps": 1487.0, "value": "What actions is the person in bbox [0.634, 0.66, 0.811, 0.99] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.634, 0.66, 0.811, 0.99]: sit, watch (a person).", "From 1482.0s to 1486.0s, [0.011, 0.496, 0.215, 0.987]->[0.177, 0.512, 0.362, 0.977]->[0.32, 0.55, 0.489, 0.945]->[0.426, 0.585, 0.604, 0.981]->[0.554, 0.629, 0.729, 0.991]: sit, watch (a person)."]}, {"from": "human", "timestamps": 1489.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.148, 0.669, 0.311, 0.994]: listen to (a person), sit; [0.681, 0.676, 0.855, 0.993]: watch (a person), sit; [0.728, 0.421, 0.824, 0.63]: stand."}]}, {"video": "NEQ7Wpf-EtI.mkv", "clip": [1523, 1579], "conversations": [{"from": "human", "timestamps": 1553.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.139, 0.041, 0.731, 0.978]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1579.0, "value": "What actions is the person in bbox [0.217, 0.003, 0.971, 0.983] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.217, 0.003, 0.971, 0.983]: stand, listen to (a person).", "From 1577.0s to 1578.0s, [0.051, 0.01, 0.725, 1.0]->[0.042, 0.015, 0.723, 1.0]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1574.0s to 1576.0s, [0.215, 0.075, 0.953, 0.971]->[0.216, 0.059, 0.963, 0.968]->[0.213, 0.054, 0.95, 0.973]: stand, talk to (e.g., self, a person, a group).", "At 1573.0s, [0.208, 0.07, 1.0, 0.948]: sit, carry/hold (an object), eat, listen to (a person).", "At 1572.0s, [0.244, 0.059, 0.975, 0.973]: sit, carry/hold (an object), eat, listen to (a person), watch (a person).", "At 1571.0s, [0.252, 0.086, 0.985, 0.952]: sit, carry/hold (an object), eat, listen to (a person).", "From 1567.0s to 1570.0s, [0.222, 0.03, 0.948, 0.979]->[0.219, 0.022, 0.949, 0.975]->[0.209, 0.025, 0.971, 0.984]->[0.2, 0.033, 0.954, 0.986]: stand, talk to (e.g., self, a person, a group).", "From 1564.0s to 1566.0s, [0.793, 0.235, 0.924, 0.836]->[0.797, 0.26, 0.928, 0.829]->[0.787, 0.246, 0.922, 0.817]: stand, watch (a person).", "From 1561.0s to 1563.0s, [0.789, 0.236, 0.925, 0.831]->[0.787, 0.23, 0.927, 0.829]->[0.784, 0.228, 0.924, 0.83]: stand, listen to (a person), watch (a person)."]}]}, {"video": "NEQ7Wpf-EtI.mkv", "clip": [1605, 1644], "conversations": [{"from": "human", "timestamps": 1635.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.223, 0.062, 0.983, 0.947]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1644.0, "value": "What actions is the person in bbox [0.723, 0.047, 0.933, 0.803] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.723, 0.047, 0.933, 0.803]: stand, listen to (a person), watch (a person).", "From 1642.0s to 1643.0s, [0.724, 0.038, 0.944, 0.805]->[0.725, 0.046, 
0.933, 0.794]: stand, listen to (a person).", "At 1641.0s, [0.722, 0.049, 0.935, 0.913]: stand.", "At 1640.0s, [0.72, 0.053, 0.931, 0.895]: stand, listen to (a person)."]}]}, {"video": "NEQ7Wpf-EtI.mkv", "clip": [1593, 1709], "conversations": [{"from": "human", "timestamps": 1623.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.207, 0.074, 0.653, 0.968]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1709.0, "value": "Describe the actions of the person at bbox [0.343, 0.033, 0.971, 0.909] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.343, 0.033, 0.971, 0.909]: sit.", "At 1708.0s, [0.319, 0.036, 0.964, 0.917]: sit."]}]}, {"video": "NEQ7Wpf-EtI.mkv", "clip": [1731, 1766], "conversations": [{"from": "human", "timestamps": 1761.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.051, 0.005, 0.723, 0.98]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1766.0, "value": "Describe the actions of the person at bbox [0.377, 0.07, 0.951, 0.924] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.377, 0.07, 0.951, 0.924]: sit, talk to (e.g., self, a person, a group).", "From 1763.0s to 1765.0s, [0.376, 0.078, 0.964, 0.947]->[0.373, 0.073, 0.959, 0.931]->[0.38, 0.074, 0.946, 0.917]: sit, talk to (e.g., self, a person, a group).", "At 1762.0s, [0.384, 0.082, 0.961, 0.94]: sit, listen to (a person)."]}]}, {"video": "O5m_0Yay4EU.mkv", "clip": [901, 958], "conversations": [{"from": "human", "timestamps": 931.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.02, 0.06, 0.607, 0.978]: watch (a person), stand; [0.509, 0.341, 0.972, 0.942]: listen to (a person), stand."}, {"from": "human", "timestamps": 958.0, "value": "What actions did the person at bbox [0.001, 0.406, 0.144, 0.924] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.001, 0.406, 0.144, 0.924]: stand, touch (an object).", "From 956.0s to 957.0s, [0.03, 0.351, 0.212, 0.885]->[0.037, 0.4, 0.201, 0.934]: stand, touch (an object), watch (a person)."]}]}, {"video": "O5m_0Yay4EU.mkv", "clip": [964, 1010], "conversations": [{"from": "human", "timestamps": 994.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.33, 0.362, 0.501, 0.915]: listen to (a person), stand, close (e.g., a door, a box), watch (a person); [0.459, 0.346, 0.662, 0.938]: carry/hold (an object), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1010.0, "value": "Describe the actions of the person at bbox [0.074, 0.126, 0.751, 0.983] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.074, 0.126, 0.751, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1007.0s to 1009.0s, [0.11, 0.139, 0.715, 0.981]->[0.035, 0.133, 0.754, 0.985]->[0.077, 0.137, 0.79, 0.974]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "O5m_0Yay4EU.mkv", "clip": [1010, 1082], "conversations": [{"from": "human", "timestamps": 1040.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", 
"value": "In the current frame, [0.255, 0.101, 1.0, 0.964]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1082.0, "value": "What actions did the person at bbox [0.079, 0.131, 0.443, 0.643] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.079, 0.131, 0.443, 0.643]: stand, close (e.g., a door, a box).", "From 1080.0s to 1081.0s, [0.424, 0.084, 0.916, 0.806]->[0.203, 0.162, 0.825, 0.887]: stand."]}]}, {"video": "O5m_0Yay4EU.mkv", "clip": [1087, 1129], "conversations": [{"from": "human", "timestamps": 1117.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.015, 0.121, 0.965, 0.956]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1129.0, "value": "What actions is the person in bbox [0.571, 0.498, 0.798, 0.948] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.571, 0.498, 0.798, 0.948]: sit, listen to (a person), watch (a person).", "At 1128.0s, [0.471, 0.398, 0.809, 0.949]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1125.0s to 1127.0s, [0.617, 0.226, 0.941, 0.956]->[0.519, 0.211, 0.912, 0.95]->[0.561, 0.229, 0.91, 0.95]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1124.0s, [0.668, 0.233, 0.934, 0.969]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "O5m_0Yay4EU.mkv", "clip": [1129, 1180], "conversations": [{"from": "human", "timestamps": 1159.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.124, 0.13, 0.919, 0.966]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1180.0, "value": "What actions is the person in bbox [0.188, 0.323, 0.65, 0.962] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.188, 0.323, 0.65, 0.962]: walk, listen to (a person).", "From 1178.0s to 1179.0s, [0.108, 0.306, 0.418, 0.96]->[0.336, 0.297, 0.568, 0.95]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "O5m_0Yay4EU.mkv", "clip": [1167, 1213], "conversations": [{"from": "human", "timestamps": 1197.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.42, 0.05, 0.656, 0.934]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1213.0, "value": "What actions is the person in bbox [0.634, 0.306, 0.768, 0.733] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.634, 0.306, 0.768, 0.733]: stand, carry/hold (an object), touch (an object).", "At 1212.0s, [0.618, 0.315, 0.759, 0.763]: stand, carry/hold (an object), touch (an object).", "At 1211.0s, [0.582, 0.327, 0.718, 0.667]: stand, touch (an object).", "At 1210.0s, [0.593, 0.315, 0.704, 0.669]: walk."]}]}, {"video": "O5m_0Yay4EU.mkv", "clip": [1182, 1220], "conversations": [{"from": "human", "timestamps": 1212.0, "value": "For the person at bbox [0.618, 0.315, 0.759, 0.763], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.618, 0.315, 0.759, 0.763]: stand, carry/hold (an object), touch (an object).", "At 
1211.0s, [0.582, 0.327, 0.718, 0.667]: stand, touch (an object).", "At 1210.0s, [0.593, 0.315, 0.704, 0.669]: walk."]}, {"from": "human", "timestamps": 1220.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.116, 0.176, 0.932, 0.958]: touch (an object), talk to (e.g., self, a person, a group), walk."}]}, {"video": "O5m_0Yay4EU.mkv", "clip": [1242, 1278], "conversations": [{"from": "human", "timestamps": 1272.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.048, 0.05, 1.0, 0.945]: stand, answer phone."}, {"from": "human", "timestamps": 1278.0, "value": "What actions is the person in bbox [0.0, 0.045, 0.895, 0.965] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.045, 0.895, 0.965]: stand, answer phone.", "From 1265.0s to 1277.0s, [0.406, 0.111, 0.663, 0.673]->[0.402, 0.111, 0.667, 0.677]->[0.41, 0.106, 0.66, 0.681]->[0.416, 0.109, 0.663, 0.684]->[0.353, 0.111, 0.624, 0.667]->[0.352, 0.13, 0.613, 0.671]->[0.084, 0.085, 0.982, 0.965]->[0.048, 0.05, 1.0, 0.945]->[0.105, 0.047, 0.944, 0.947]->[0.08, 0.055, 0.937, 0.948]->[0.076, 0.069, 0.946, 0.963]->[0.023, 0.162, 0.798, 0.981]->[0.0, 0.095, 0.879, 0.972]: stand, answer phone."]}]}, {"video": "O5m_0Yay4EU.mkv", "clip": [1310, 1369], "conversations": [{"from": "human", "timestamps": 1340.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.452, 0.261, 0.723, 0.934]: stand; [0.816, 0.307, 1.0, 0.969]: stand."}, {"from": "human", "timestamps": 1369.0, "value": "What actions did the person at bbox [0.008, 0.124, 0.973, 0.964] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.008, 0.124, 0.973, 0.964]: stand, touch (an object), listen to (a person).", "At 1368.0s, [0.018, 0.17, 0.998, 0.948]: stand, touch (an object), listen to (a person)."]}]}, {"video": "O5m_0Yay4EU.mkv", "clip": [1356, 1456], "conversations": [{"from": "human", "timestamps": 1386.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.065, 0.03, 0.713, 0.967]: watch (a person), crouch/kneel, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1456.0, "value": "Describe the actions of the person at bbox [0.462, 0.443, 0.633, 0.907] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.462, 0.443, 0.633, 0.907]: walk.", "From 1448.0s to 1455.0s, [0.349, 0.18, 0.554, 0.882]->[0.292, 0.183, 0.592, 0.959]->[0.104, 0.074, 0.46, 0.96]->[0.198, 0.187, 0.773, 0.962]->[0.084, 0.35, 0.445, 0.961]->[0.184, 0.352, 0.46, 0.957]->[0.267, 0.363, 0.537, 0.945]->[0.383, 0.419, 0.601, 0.948]: walk.", "At 1447.0s, [0.638, 0.203, 0.776, 0.832]: stand, watch (a person).", "At 1446.0s, [0.662, 0.229, 0.807, 0.841]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "O5m_0Yay4EU.mkv", "clip": [1418, 1449], "conversations": [{"from": "human", "timestamps": 1448.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.349, 0.18, 0.554, 0.882]: walk."}, {"from": "human", "timestamps": 1449.0, "value": "Describe the actions of the person at bbox [0.292, 0.183, 0.592, 0.959] in the current frame and the 
previous frames."}, {"from": "gpt", "value": ["At Now, [0.292, 0.183, 0.592, 0.959]: walk.", "At 1448.0s, [0.349, 0.18, 0.554, 0.882]: walk.", "At 1447.0s, [0.638, 0.203, 0.776, 0.832]: stand, watch (a person).", "At 1446.0s, [0.662, 0.229, 0.807, 0.841]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "O5m_0Yay4EU.mkv", "clip": [1476, 1516], "conversations": [{"from": "human", "timestamps": 1506.0, "value": "What actions did the person at bbox [0.072, 0.09, 1.0, 0.971] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.072, 0.09, 1.0, 0.971]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1505.0s, [0.108, 0.067, 0.998, 0.954]: sit, stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1503.0s to 1504.0s, [0.1, 0.04, 1.0, 0.962]->[0.162, 0.047, 1.0, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1502.0s, [0.054, 0.072, 0.983, 1.0]: stand.", "At 1501.0s, [0.109, 0.075, 1.0, 0.971]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1516.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.203, 0.099, 0.391, 0.903]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.575, 0.215, 0.853, 0.93]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "O5m_0Yay4EU.mkv", "clip": [1469, 1598], "conversations": [{"from": "human", "timestamps": 1499.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.156, 0.098, 0.993, 0.971]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1598.0, "value": "For the person at bbox [0.211, 0.076, 0.942, 0.996], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.211, 0.076, 0.942, 0.996]: stand, talk to (e.g., self, a person, a group).", "From 1591.0s to 1597.0s, [0.424, 0.143, 0.638, 0.802]->[0.411, 0.146, 0.621, 0.803]->[0.397, 0.14, 0.6, 0.802]->[0.339, 0.148, 0.584, 0.825]->[0.398, 0.131, 0.628, 0.821]->[0.425, 0.137, 0.653, 0.854]->[0.262, 0.155, 0.96, 0.963]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1590.0s, [0.393, 0.178, 0.628, 0.816]: stand, carry/hold (an object).", "At 1589.0s, [0.31, 0.242, 0.625, 0.817]: bend/bow (at the waist), carry/hold (an object), put down.", "At 1588.0s, [0.167, 0.117, 0.996, 0.966]: stand, carry/hold (an object), put down.", "From 1578.0s to 1587.0s, [0.078, 0.149, 0.916, 0.977]->[0.057, 0.144, 0.897, 0.965]->[0.033, 0.191, 0.887, 0.919]->[0.059, 0.147, 0.886, 0.935]->[0.069, 0.156, 0.949, 0.976]->[0.081, 0.144, 0.886, 0.97]->[0.078, 0.132, 0.892, 0.968]->[0.161, 0.222, 0.88, 0.942]->[0.043, 0.16, 0.939, 0.915]->[0.026, 0.175, 0.802, 0.942]: stand, drink.", "From 1575.0s to 1577.0s, [0.178, 0.124, 0.994, 0.974]->[0.007, 0.204, 0.849, 0.936]->[0.043, 0.177, 0.895, 0.985]: stand, drink, talk to (e.g., self, a person, a group).", "At 1574.0s, [0.182, 0.204, 0.936, 0.96]: bend/bow (at the waist), drink.", "From 1572.0s to 1573.0s, [0.397, 0.181, 0.63, 0.829]->[0.312, 0.214, 0.598, 0.815]: stand, carry/hold (an object).", "At 1571.0s, [0.36, 0.169, 0.596, 0.824]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1570.0s, [0.453, 0.173, 0.74, 0.854]: stand, carry/hold (an object).", "From 
1566.0s to 1569.0s, [0.476, 0.183, 0.699, 0.877]->[0.487, 0.178, 0.74, 0.883]->[0.502, 0.176, 0.725, 0.892]->[0.456, 0.178, 0.731, 0.865]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1564.0s to 1565.0s, [0.076, 0.171, 0.737, 0.949]->[0.145, 0.099, 0.825, 0.968]: stand, carry/hold (an object).", "From 1562.0s to 1563.0s, [0.058, 0.062, 0.7, 0.974]->[0.077, 0.081, 0.686, 0.978]: stand.", "At 1561.0s, [0.034, 0.092, 0.915, 0.978]: sit, stand.", "From 1559.0s to 1560.0s, [0.154, 0.055, 0.976, 0.959]->[0.163, 0.06, 0.976, 0.957]: stand.", "From 1557.0s to 1558.0s, [0.194, 0.105, 0.998, 0.965]->[0.169, 0.084, 0.975, 0.982]: stand, talk to (e.g., self, a person, a group).", "From 1553.0s to 1556.0s, [0.482, 0.191, 0.733, 0.913]->[0.469, 0.181, 0.745, 0.883]->[0.338, 0.165, 0.716, 0.869]->[0.166, 0.141, 0.989, 0.94]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1552.0s, [0.432, 0.207, 0.671, 0.894]: stand, carry/hold (an object).", "From 1543.0s to 1551.0s, [0.449, 0.192, 0.667, 0.901]->[0.358, 0.181, 0.694, 0.929]->[0.415, 0.203, 0.713, 0.902]->[0.435, 0.202, 0.727, 0.875]->[0.412, 0.198, 0.719, 0.912]->[0.446, 0.19, 0.732, 0.913]->[0.439, 0.181, 0.729, 0.914]->[0.43, 0.183, 0.724, 0.918]->[0.421, 0.18, 0.716, 0.914]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1539.0s to 1542.0s, [0.405, 0.185, 0.606, 0.904]->[0.386, 0.196, 0.621, 0.894]->[0.397, 0.212, 0.652, 0.91]->[0.461, 0.212, 0.732, 0.926]: stand, carry/hold (an object).", "At 1538.0s, [0.417, 0.184, 0.614, 0.886]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1531.0s to 1537.0s, [0.389, 0.262, 0.538, 0.698]->[0.49, 0.275, 0.639, 0.844]->[0.508, 0.261, 0.659, 0.812]->[0.518, 0.263, 0.657, 0.771]->[0.515, 0.261, 0.648, 0.702]->[0.52, 0.261, 0.652, 0.79]->[0.416, 0.193, 0.618, 0.902]: stand, carry/hold (an object).", "At 1530.0s, [0.434, 0.262, 0.558, 0.758]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "O5m_0Yay4EU.mkv", "clip": [1561, 1594], "conversations": [{"from": "human", "timestamps": 1591.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.424, 0.143, 0.638, 0.802]?"}, {"from": "gpt", "value": ["At Now, [0.424, 0.143, 0.638, 0.802]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1590.0s, [0.393, 0.178, 0.628, 0.816]: stand, carry/hold (an object).", "At 1589.0s, [0.31, 0.242, 0.625, 0.817]: bend/bow (at the waist), carry/hold (an object), put down.", "At 1588.0s, [0.167, 0.117, 0.996, 0.966]: stand, carry/hold (an object), put down.", "From 1578.0s to 1587.0s, [0.078, 0.149, 0.916, 0.977]->[0.057, 0.144, 0.897, 0.965]->[0.033, 0.191, 0.887, 0.919]->[0.059, 0.147, 0.886, 0.935]->[0.069, 0.156, 0.949, 0.976]->[0.081, 0.144, 0.886, 0.97]->[0.078, 0.132, 0.892, 0.968]->[0.161, 0.222, 0.88, 0.942]->[0.043, 0.16, 0.939, 0.915]->[0.026, 0.175, 0.802, 0.942]: stand, drink.", "From 1575.0s to 1577.0s, [0.178, 0.124, 0.994, 0.974]->[0.007, 0.204, 0.849, 0.936]->[0.043, 0.177, 0.895, 0.985]: stand, drink, talk to (e.g., self, a person, a group).", "At 1574.0s, [0.182, 0.204, 0.936, 0.96]: bend/bow (at the waist), drink.", "From 1572.0s to 1573.0s, [0.397, 0.181, 0.63, 0.829]->[0.312, 0.214, 0.598, 0.815]: stand, carry/hold (an object).", "At 1571.0s, [0.36, 0.169, 0.596, 0.824]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1570.0s, [0.453, 0.173, 
0.74, 0.854]: stand, carry/hold (an object).", "From 1566.0s to 1569.0s, [0.476, 0.183, 0.699, 0.877]->[0.487, 0.178, 0.74, 0.883]->[0.502, 0.176, 0.725, 0.892]->[0.456, 0.178, 0.731, 0.865]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1564.0s to 1565.0s, [0.076, 0.171, 0.737, 0.949]->[0.145, 0.099, 0.825, 0.968]: stand, carry/hold (an object).", "From 1562.0s to 1563.0s, [0.058, 0.062, 0.7, 0.974]->[0.077, 0.081, 0.686, 0.978]: stand.", "At 1561.0s, [0.034, 0.092, 0.915, 0.978]: sit, stand.", "From 1559.0s to 1560.0s, [0.154, 0.055, 0.976, 0.959]->[0.163, 0.06, 0.976, 0.957]: stand.", "From 1557.0s to 1558.0s, [0.194, 0.105, 0.998, 0.965]->[0.169, 0.084, 0.975, 0.982]: stand, talk to (e.g., self, a person, a group).", "From 1553.0s to 1556.0s, [0.482, 0.191, 0.733, 0.913]->[0.469, 0.181, 0.745, 0.883]->[0.338, 0.165, 0.716, 0.869]->[0.166, 0.141, 0.989, 0.94]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1552.0s, [0.432, 0.207, 0.671, 0.894]: stand, carry/hold (an object).", "From 1543.0s to 1551.0s, [0.449, 0.192, 0.667, 0.901]->[0.358, 0.181, 0.694, 0.929]->[0.415, 0.203, 0.713, 0.902]->[0.435, 0.202, 0.727, 0.875]->[0.412, 0.198, 0.719, 0.912]->[0.446, 0.19, 0.732, 0.913]->[0.439, 0.181, 0.729, 0.914]->[0.43, 0.183, 0.724, 0.918]->[0.421, 0.18, 0.716, 0.914]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1539.0s to 1542.0s, [0.405, 0.185, 0.606, 0.904]->[0.386, 0.196, 0.621, 0.894]->[0.397, 0.212, 0.652, 0.91]->[0.461, 0.212, 0.732, 0.926]: stand, carry/hold (an object).", "At 1538.0s, [0.417, 0.184, 0.614, 0.886]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1531.0s to 1537.0s, [0.389, 0.262, 0.538, 0.698]->[0.49, 0.275, 0.639, 0.844]->[0.508, 0.261, 0.659, 0.812]->[0.518, 0.263, 0.657, 0.771]->[0.515, 0.261, 0.648, 0.702]->[0.52, 0.261, 0.652, 0.79]->[0.416, 0.193, 0.618, 0.902]: stand, carry/hold (an object).", "At 1530.0s, [0.434, 0.262, 0.558, 0.758]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1594.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.339, 0.148, 0.584, 0.825]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "O5m_0Yay4EU.mkv", "clip": [1717, 1755], "conversations": [{"from": "human", "timestamps": 1747.0, "value": "For the person at bbox [0.501, 0.259, 0.656, 0.844], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.501, 0.259, 0.656, 0.844]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1746.0s, [0.524, 0.239, 0.692, 0.866]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1745.0s, [0.484, 0.25, 0.653, 0.865]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1740.0s to 1744.0s, [0.499, 0.238, 0.665, 0.859]->[0.471, 0.251, 0.641, 0.868]->[0.457, 0.259, 0.642, 0.853]->[0.45, 0.25, 0.621, 0.867]->[0.477, 0.262, 0.645, 0.866]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1739.0s, [0.508, 0.254, 0.692, 0.862]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1732.0s to 1738.0s, [0.497, 0.252, 0.65, 0.866]->[0.527, 0.252, 0.676, 0.859]->[0.492, 0.246, 0.658, 
0.863]->[0.481, 0.245, 0.65, 0.867]->[0.486, 0.244, 0.651, 0.86]->[0.5, 0.255, 0.679, 0.836]->[0.515, 0.261, 0.692, 0.85]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1730.0s to 1731.0s, [0.499, 0.264, 0.667, 0.883]->[0.512, 0.255, 0.684, 0.887]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1729.0s, [0.511, 0.26, 0.679, 0.905]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1728.0s, [0.498, 0.259, 0.67, 0.884]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1727.0s, [0.508, 0.279, 0.666, 0.924]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1725.0s to 1726.0s, [0.488, 0.283, 0.662, 0.934]->[0.512, 0.268, 0.668, 0.895]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1724.0s, [0.499, 0.332, 0.662, 0.924]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1722.0s to 1723.0s, [0.312, 0.323, 0.497, 0.857]->[0.523, 0.347, 0.722, 0.912]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1721.0s, [0.233, 0.353, 0.378, 0.843]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1719.0s to 1720.0s, [0.45, 0.34, 0.666, 0.859]->[0.386, 0.422, 0.512, 0.855]: walk, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1755.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.242, 0.255, 0.444, 0.893]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.476, 0.265, 0.643, 0.862]: carry/hold (an object), stand, watch (a person), listen to (a person)."}]}, {"video": "OfMdakd4bHI.mkv", "clip": [891, 945], "conversations": [{"from": "human", "timestamps": 921.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.023, 0.425, 0.27, 0.988]: listen to (a person), sit; [0.064, 0.003, 0.481, 0.98]: watch (a person), walk, carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.392, 0.544, 0.672, 0.995]: watch (a person), sit."}, {"from": "human", "timestamps": 945.0, "value": "Describe the actions of the person at bbox [0.375, 0.644, 0.62, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.375, 0.644, 0.62, 1.0]: sit.", "From 943.0s to 944.0s, [0.192, 0.237, 0.51, 0.791]->[0.108, 0.439, 0.429, 0.982]: sit, listen to (a person)."]}]}, {"video": "OfMdakd4bHI.mkv", "clip": [913, 1006], "conversations": [{"from": "human", "timestamps": 943.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.192, 0.237, 0.51, 0.791]: listen to (a person), sit; [0.27, 0.14, 0.867, 0.979]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1006.0, "value": "What actions is the person in bbox [0.655, 0.286, 0.942, 0.709] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.655, 0.286, 0.942, 0.709]: stand, read.", "At 1005.0s, [0.719, 0.29, 0.965, 0.659]: sit, 
carry/hold (an object), read, talk to (e.g., self, a person, a group).", "At 1004.0s, [0.675, 0.287, 0.936, 0.71]: stand, read.", "At 1003.0s, [0.591, 0.306, 0.862, 0.973]: walk, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "From 1001.0s to 1002.0s, [0.448, 0.328, 0.787, 0.998]->[0.507, 0.328, 0.824, 0.996]: walk, carry/hold (an object), read.", "From 999.0s to 1000.0s, [0.44, 0.227, 0.676, 0.934]->[0.456, 0.382, 0.749, 0.985]: walk, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "At 998.0s, [0.409, 0.295, 0.764, 0.992]: stand, walk, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "At 997.0s, [0.305, 0.291, 0.772, 0.983]: walk, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "From 995.0s to 996.0s, [0.297, 0.346, 0.724, 0.983]->[0.425, 0.315, 0.807, 0.991]: walk, carry/hold (an object), read.", "At 994.0s, [0.415, 0.3, 0.879, 0.986]: walk, carry/hold (an object), read, talk to (e.g., self, a person, a group)."]}]}, {"video": "OfMdakd4bHI.mkv", "clip": [960, 1010], "conversations": [{"from": "human", "timestamps": 990.0, "value": "What actions did the person at bbox [0.041, 0.298, 0.264, 0.959] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.041, 0.298, 0.264, 0.959]: walk, carry/hold (an object).", "At 989.0s, [0.164, 0.217, 0.498, 0.996]: walk, carry/hold (an object), listen to (a person)."]}, {"from": "human", "timestamps": 1010.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.122, 0.181, 0.649, 0.972]: read, stand, talk to (e.g., self, a person, a group); [0.511, 0.188, 0.93, 0.819]: listen to (a person), stand, watch (a person)."}]}, {"video": "OfMdakd4bHI.mkv", "clip": [1000, 1136], "conversations": [{"from": "human", "timestamps": 1030.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.031, 0.141, 0.522, 0.997]: stand, touch (an object), write."}, {"from": "human", "timestamps": 1136.0, "value": "For the person at bbox [0.216, 0.104, 0.741, 0.914], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.216, 0.104, 0.741, 0.914]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1131.0s to 1135.0s, [0.277, 0.119, 0.648, 0.75]->[0.275, 0.115, 0.648, 0.755]->[0.277, 0.111, 0.651, 0.695]->[0.263, 0.109, 0.714, 0.897]->[0.275, 0.129, 0.67, 0.758]: stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "OfMdakd4bHI.mkv", "clip": [1090, 1138], "conversations": [{"from": "human", "timestamps": 1120.0, "value": "For the person at bbox [0.3, 0.205, 0.768, 0.89], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.3, 0.205, 0.768, 0.89]: sit, carry/hold (an object), drink, touch (an object).", "At 1119.0s, [0.261, 0.201, 0.777, 0.907]: sit, drink, touch (an object).", "At 1118.0s, [0.243, 0.214, 0.811, 0.913]: sit, drink.", "At 1117.0s, [0.167, 0.16, 0.792, 0.902]: sit, carry/hold (an object).", "From 1114.0s to 1116.0s, [0.102, 0.004, 0.833, 0.84]->[0.174, 0.055, 0.81, 0.914]->[0.203, 0.101, 0.809, 0.896]: sit, carry/hold (an object), read.", "At 1113.0s, [0.076, 0.0, 0.84, 0.84]: sit, carry/hold (an object), eat, read.", "At 1112.0s, [0.056, 0.0, 0.832, 0.873]: sit, read."]}, {"from": "human", "timestamps": 1138.0, "value": 
"Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.228, 0.11, 0.865, 0.968]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "OfMdakd4bHI.mkv", "clip": [1126, 1198], "conversations": [{"from": "human", "timestamps": 1156.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.139, 0.023, 0.838, 0.976]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1198.0, "value": "What actions did the person at bbox [0.038, 0.151, 0.453, 0.985] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.038, 0.151, 0.453, 0.985]: sit, listen to (a person), watch (a person).", "At 1197.0s, [0.029, 0.155, 0.378, 0.915]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "OfMdakd4bHI.mkv", "clip": [1133, 1225], "conversations": [{"from": "human", "timestamps": 1163.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.028, 0.149, 0.295, 0.955]: listen to (a person), sit, watch (a person); [0.476, 0.133, 0.831, 0.622]: watch (a person), sit, touch (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1225.0, "value": "What actions did the person at bbox [0.025, 0.104, 0.235, 0.977] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.025, 0.104, 0.235, 0.977]: sit.", "At 1224.0s, [0.024, 0.106, 0.249, 0.972]: sit, watch (a person)."]}]}, {"video": "OfMdakd4bHI.mkv", "clip": [1272, 1318], "conversations": [{"from": "human", "timestamps": 1302.0, "value": "What actions is the person in bbox [0.091, 0.139, 0.33, 0.493] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.091, 0.139, 0.33, 0.493]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1301.0s, [0.099, 0.134, 0.34, 0.499]: sit."]}, {"from": "human", "timestamps": 1318.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.035, 0.529, 0.294, 0.981]: listen to (a person), stand, watch (a person); [0.039, 0.045, 0.974, 0.971]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "OfMdakd4bHI.mkv", "clip": [1251, 1370], "conversations": [{"from": "human", "timestamps": 1281.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.144, 0.396, 0.396, 0.99]: watch (a person), stand; [0.284, 0.075, 0.897, 0.979]: carry/hold (an object), stand, watch (a person)."}, {"from": "human", "timestamps": 1370.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.564, 0.17, 0.867, 0.547]?"}, {"from": "gpt", "value": ["In the current frame, [0.564, 0.17, 0.867, 0.547]: sit, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "At 1369.0s, [0.614, 0.159, 0.89, 0.553]: sit, carry/hold (an object), touch (an object), listen to (a person).", "At 1368.0s, [0.609, 0.168, 0.911, 0.559]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1367.0s, [0.608, 0.2, 0.905, 0.556]: sit, carry/hold (an object), write, talk to (e.g., self, a person, a group)."]}]}, {"video": "OfMdakd4bHI.mkv", "clip": [1345, 1376], "conversations": [{"from": "human", "timestamps": 1375.0, "value": "Describe 
the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.111, 0.439, 0.293, 0.988]: listen to (a person), stand, watch (a person); [0.274, 0.204, 0.75, 1.0]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1376.0, "value": "For the person at bbox [0.264, 0.248, 0.724, 0.994], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.264, 0.248, 0.724, 0.994]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1372.0s to 1375.0s, [0.343, 0.156, 0.955, 1.0]->[0.316, 0.148, 0.813, 0.997]->[0.284, 0.179, 0.767, 0.989]->[0.274, 0.204, 0.75, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1371.0s, [0.341, 0.136, 0.874, 1.0]: stand, carry/hold (an object)."]}]}, {"video": "OfMdakd4bHI.mkv", "clip": [1413, 1469], "conversations": [{"from": "human", "timestamps": 1443.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.342, 0.061, 0.965, 0.996]: ride (e.g., a bike, a car, a horse), sit; [0.346, 0.059, 0.599, 0.478]: sit; [0.471, 0.0, 0.943, 0.603]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1469.0, "value": "What actions did the person at bbox [0.423, 0.294, 0.735, 0.936] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.423, 0.294, 0.735, 0.936]: bend/bow (at the waist), lift (a person).", "At 1468.0s, [0.456, 0.297, 0.771, 0.929]: bend/bow (at the waist), lift (a person)."]}]}, {"video": "OfMdakd4bHI.mkv", "clip": [1499, 1588], "conversations": [{"from": "human", "timestamps": 1529.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.043, 0.012, 0.395, 0.971]: watch (a person), stand; [0.587, 0.046, 0.849, 0.715]: watch (a person), bend/bow (at the waist)."}, {"from": "human", "timestamps": 1588.0, "value": "What actions did the person at bbox [0.164, 0.17, 0.49, 0.866] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.164, 0.17, 0.49, 0.866]: stand, watch (a person).", "From 1585.0s to 1587.0s, [0.092, 0.111, 0.358, 0.962]->[0.12, 0.164, 0.36, 0.736]->[0.047, 0.108, 0.361, 0.914]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1582.0s to 1584.0s, [0.105, 0.098, 0.357, 0.942]->[0.096, 0.091, 0.37, 0.958]->[0.098, 0.104, 0.368, 0.804]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "OfMdakd4bHI.mkv", "clip": [1539, 1613], "conversations": [{"from": "human", "timestamps": 1569.0, "value": "What actions is the person in bbox [0.446, 0.476, 0.605, 0.653] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.446, 0.476, 0.605, 0.653]: sit.", "At 1568.0s, [0.544, 0.521, 0.651, 0.645]: sit, watch (a person).", "At 1567.0s, [0.515, 0.438, 0.606, 0.539]: sit, listen to (a person), watch (a person).", "From 1565.0s to 1566.0s, [0.488, 0.378, 0.554, 0.457]->[0.492, 0.384, 0.573, 0.471]: sit, watch (a person)."]}, {"from": "human", "timestamps": 1613.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.478, 0.341, 0.923, 0.979]: listen to (a person), sit, 
watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "OfMdakd4bHI.mkv", "clip": [1606, 1667], "conversations": [{"from": "human", "timestamps": 1636.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.245, 0.012, 0.994, 1.0]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1667.0, "value": "What actions did the person at bbox [0.644, 0.068, 0.962, 0.989] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.644, 0.068, 0.962, 0.989]: stand, watch (a person).", "From 1665.0s to 1666.0s, [0.588, 0.056, 0.975, 0.981]->[0.607, 0.051, 0.968, 0.978]: stand, watch (a person).", "From 1658.0s to 1664.0s, [0.578, 0.186, 0.886, 0.976]->[0.556, 0.161, 0.89, 1.0]->[0.571, 0.119, 0.931, 1.0]->[0.586, 0.067, 0.976, 0.978]->[0.598, 0.052, 0.97, 0.983]->[0.604, 0.056, 0.97, 0.984]->[0.612, 0.048, 0.971, 0.986]: stand, listen to (a person), watch (a person).", "At 1657.0s, [0.589, 0.187, 0.918, 0.979]: stand, watch (a person).", "From 1655.0s to 1656.0s, [0.609, 0.193, 0.952, 0.981]->[0.556, 0.189, 0.907, 0.995]: stand, listen to (a person), watch (a person)."]}]}, {"video": "OfMdakd4bHI.mkv", "clip": [1656, 1714], "conversations": [{"from": "human", "timestamps": 1686.0, "value": "What actions is the person in bbox [0.129, 0.129, 0.345, 0.611] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.129, 0.129, 0.345, 0.611]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1680.0s to 1685.0s, [0.121, 0.113, 0.35, 0.627]->[0.131, 0.099, 0.346, 0.624]->[0.118, 0.087, 0.345, 0.618]->[0.083, 0.079, 0.332, 0.602]->[0.091, 0.076, 0.324, 0.604]->[0.086, 0.094, 0.31, 0.615]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1714.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.037, 0.06, 0.667, 1.0]: carry/hold (an object), bend/bow (at the waist); [0.495, 0.254, 0.915, 0.986]: bend/bow (at the waist)."}]}, {"video": "OfMdakd4bHI.mkv", "clip": [1714, 1770], "conversations": [{"from": "human", "timestamps": 1744.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.526, 0.069, 0.873, 0.998]?"}, {"from": "gpt", "value": ["At Now, [0.526, 0.069, 0.873, 0.998]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1741.0s to 1743.0s, [0.523, 0.068, 0.861, 0.98]->[0.52, 0.074, 0.874, 0.985]->[0.516, 0.071, 0.885, 0.994]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1740.0s, [0.52, 0.081, 0.859, 0.985]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1738.0s to 1739.0s, [0.447, 0.115, 0.767, 0.835]->[0.533, 0.114, 0.81, 0.933]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1734.0s to 1737.0s, [0.541, 0.117, 0.796, 0.805]->[0.534, 0.117, 0.792, 0.81]->[0.534, 0.128, 0.788, 0.815]->[0.525, 0.117, 0.775, 0.835]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1731.0s to 1733.0s, [0.579, 0.117, 0.836, 0.803]->[0.559, 0.129, 0.821, 0.818]->[0.552, 0.126, 0.81, 0.821]: stand, carry/hold (an object), listen to (a person).", "At 1730.0s, [0.696, 0.109, 0.945, 0.797]: stand, carry/hold (an object), listen to (a person), watch (a 
person)."]}, {"from": "human", "timestamps": 1770.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.318, 0.178, 0.587, 0.987]: carry/hold (an object), stand, listen to (a person); [0.553, 0.016, 0.853, 0.964]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "P60OxWahxBQ.mkv", "clip": [891, 927], "conversations": [{"from": "human", "timestamps": 921.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.15, 0.457, 0.247, 1.0]: listen to (a person), stand, watch (a person); [0.196, 0.524, 0.327, 0.999]: listen to (a person), crouch/kneel, watch (a person); [0.525, 0.437, 0.617, 0.622]: listen to (a person), sit, watch (a person); [0.626, 0.216, 0.861, 0.987]: sit, watch (a person), give/serve (an object) to (a person), carry/hold (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 927.0, "value": "For the person at bbox [0.194, 0.526, 0.446, 0.997], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.194, 0.526, 0.446, 0.997]: carry/hold (an object), crouch/kneel, watch (a person).", "At 926.0s, [0.192, 0.532, 0.448, 0.993]: crouch/kneel, listen to (a person), watch (a person).", "At 925.0s, [0.189, 0.53, 0.444, 0.991]: crouch/kneel, talk to (e.g., self, a person, a group), watch (a person).", "From 923.0s to 924.0s, [0.2, 0.529, 0.516, 0.993]->[0.189, 0.531, 0.453, 0.991]: crouch/kneel, take (an object) from (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 922.0s, [0.195, 0.529, 0.324, 1.0]: crouch/kneel, listen to (a person).", "From 918.0s to 921.0s, [0.193, 0.525, 0.327, 1.0]->[0.196, 0.527, 0.326, 1.0]->[0.195, 0.526, 0.326, 0.999]->[0.196, 0.524, 0.327, 0.999]: crouch/kneel, listen to (a person), watch (a person).", "At 917.0s, [0.188, 0.518, 0.301, 0.994]: stand, listen to (a person), watch (a person).", "At 916.0s, [0.192, 0.533, 0.325, 1.0]: stand, watch (a person).", "At 915.0s, [0.19, 0.533, 0.326, 1.0]: crouch/kneel, watch (a person).", "At 914.0s, [0.19, 0.528, 0.325, 1.0]: stand, listen to (a person), watch (a person).", "At 913.0s, [0.19, 0.533, 0.325, 1.0]: talk to (e.g., self, a person, a group), watch (a person).", "From 911.0s to 912.0s, [0.114, 0.007, 0.714, 1.0]->[0.11, 0.003, 0.689, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 910.0s, [0.116, 0.022, 0.696, 0.996]: sit, talk to (e.g., self, a person, a group).", "At 909.0s, [0.109, 0.0, 0.729, 0.982]: stand, watch (a person).", "At 908.0s, [0.131, 0.017, 0.723, 0.967]: crouch/kneel, listen to (a person), watch (a person)."]}]}, {"video": "P60OxWahxBQ.mkv", "clip": [937, 1004], "conversations": [{"from": "human", "timestamps": 967.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.192, 0.456, 0.26, 0.916]: listen to (a person), stand, watch (a person); [0.349, 0.393, 0.434, 0.848]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.518, 0.433, 0.58, 0.66]: listen to (a person), stand, watch (a person); [0.646, 0.4, 0.69, 0.625]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1004.0, "value": "What actions is the person in bbox [0.453, 0.292, 0.563, 0.79] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", 
"value": ["In the current frame, [0.453, 0.292, 0.563, 0.79]: stand, listen to (a person), watch (a person).", "At 1003.0s, [0.438, 0.289, 0.564, 0.851]: stand, talk to (e.g., self, a person, a group).", "At 1002.0s, [0.437, 0.287, 0.552, 0.792]: stand, listen to (a person), watch (a person).", "From 1000.0s to 1001.0s, [0.423, 0.2, 0.543, 0.784]->[0.418, 0.244, 0.544, 0.767]: walk, watch (a person).", "At 999.0s, [0.416, 0.198, 0.517, 0.714]: stand, listen to (a person), watch (a person)."]}]}, {"video": "P60OxWahxBQ.mkv", "clip": [1040, 1133], "conversations": [{"from": "human", "timestamps": 1070.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.168, 0.259, 0.423, 0.98]: carry/hold (an object), stand, give/serve (an object) to (a person), watch (a person); [0.55, 0.231, 0.734, 0.968]: listen to (a person), stand."}, {"from": "human", "timestamps": 1133.0, "value": "What actions did the person at bbox [0.279, 0.102, 0.393, 0.761] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.279, 0.102, 0.393, 0.761]: stand, watch (a person).", "At 1132.0s, [0.275, 0.113, 0.396, 0.768]: stand, watch (a person)."]}]}, {"video": "P60OxWahxBQ.mkv", "clip": [1141, 1181], "conversations": [{"from": "human", "timestamps": 1171.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.111, 0.019, 0.543, 0.945]: stand, talk to (e.g., self, a person, a group); [0.474, 0.047, 0.864, 0.988]: listen to (a person), sit."}, {"from": "human", "timestamps": 1181.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.161, 0.023, 0.584, 0.947]?"}, {"from": "gpt", "value": ["In the current frame, [0.161, 0.023, 0.584, 0.947]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1180.0s, [0.122, 0.0, 0.498, 0.921]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1179.0s, [0.127, 0.0, 0.529, 0.944]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1176.0s to 1178.0s, [0.122, 0.0, 0.528, 0.943]->[0.125, 0.0, 0.513, 0.941]->[0.134, 0.0, 0.537, 0.935]: stand, listen to (a person), watch (a person).", "At 1175.0s, [0.119, 0.0, 0.579, 0.959]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1172.0s to 1174.0s, [0.121, 0.021, 0.563, 0.96]->[0.111, 0.001, 0.556, 0.96]->[0.119, 0.0, 0.562, 0.96]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1170.0s to 1171.0s, [0.118, 0.005, 0.556, 0.953]->[0.111, 0.019, 0.543, 0.945]: stand, talk to (e.g., self, a person, a group).", "From 1166.0s to 1169.0s, [0.11, 0.01, 0.583, 0.978]->[0.094, 0.008, 0.557, 0.971]->[0.11, 0.002, 0.581, 0.961]->[0.108, 0.011, 0.57, 0.978]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "P60OxWahxBQ.mkv", "clip": [1152, 1203], "conversations": [{"from": "human", "timestamps": 1182.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.229, 0.097, 0.703, 0.981]: watch (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1203.0, "value": "What actions is the person in bbox [0.112, 0.116, 0.455, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, 
[0.112, 0.116, 0.455, 1.0]: stand, listen to (a person), watch (a person).", "At 1202.0s, [0.112, 0.102, 0.462, 0.998]: stand, listen to (a person), watch (a person).", "At 1201.0s, [0.118, 0.096, 0.469, 0.982]: stand, listen to (a person).", "At 1200.0s, [0.141, 0.093, 0.475, 1.0]: stand, listen to (a person), watch (a person)."]}]}, {"video": "P60OxWahxBQ.mkv", "clip": [1175, 1213], "conversations": [{"from": "human", "timestamps": 1205.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.13, 0.119, 0.473, 1.0]: listen to (a person), stand, watch (a person); [0.403, 0.59, 0.569, 0.986]: listen to (a person), stand, watch (a person); [0.45, 0.083, 0.879, 0.985]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1213.0, "value": "What actions did the person at bbox [0.348, 0.047, 0.869, 0.98] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.348, 0.047, 0.869, 0.98]: stand, talk to (e.g., self, a person, a group).", "From 1210.0s to 1212.0s, [0.331, 0.039, 0.855, 0.959]->[0.332, 0.045, 0.857, 0.961]->[0.313, 0.043, 0.834, 0.967]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "P60OxWahxBQ.mkv", "clip": [1286, 1357], "conversations": [{"from": "human", "timestamps": 1316.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.48, 0.079, 0.843, 0.969]: watch (a person), stand."}, {"from": "human", "timestamps": 1357.0, "value": "Describe the actions of the person at bbox [0.34, 0.0, 0.877, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.34, 0.0, 0.877, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1356.0s, [0.361, 0.0, 0.894, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "P60OxWahxBQ.mkv", "clip": [1389, 1430], "conversations": [{"from": "human", "timestamps": 1419.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.465, 0.158, 0.599, 0.475]: sit, drink; [0.613, 0.314, 0.802, 0.937]: carry/hold (an object), sit, watch (a person)."}, {"from": "human", "timestamps": 1430.0, "value": "What actions is the person in bbox [0.394, 0.158, 0.655, 0.995] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.394, 0.158, 0.655, 0.995]: sit.", "At 1429.0s, [0.435, 0.196, 0.792, 0.905]: sit, listen to (a person).", "From 1427.0s to 1428.0s, [0.527, 0.29, 0.738, 0.677]->[0.49, 0.261, 0.744, 0.776]: sit."]}]}, {"video": "P60OxWahxBQ.mkv", "clip": [1418, 1481], "conversations": [{"from": "human", "timestamps": 1448.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.247, 0.541, 0.405, 0.993]: stand, talk to (e.g., self, a person, a group); [0.431, 0.471, 0.58, 0.973]: stand, talk to (e.g., self, a person, a group); [0.617, 0.347, 0.871, 0.986]: listen to (a person), stand."}, {"from": "human", "timestamps": 1481.0, "value": "For the person at bbox [0.274, 0.063, 0.878, 0.973], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.274, 0.063, 0.878, 0.973]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1480.0s, [0.42, 
0.075, 0.893, 0.972]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1479.0s, [0.309, 0.004, 0.871, 0.981]: stand, listen to (a person), watch (a person)."]}]}, {"video": "P60OxWahxBQ.mkv", "clip": [1478, 1527], "conversations": [{"from": "human", "timestamps": 1508.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.123, 0.103, 0.329, 0.989]: listen to (a person), stand, watch (a person); [0.228, 0.24, 0.56, 0.996]: listen to (a person), stand, watch (a person); [0.396, 0.007, 0.898, 0.992]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1527.0, "value": "For the person at bbox [0.119, 0.278, 0.383, 0.98], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.119, 0.278, 0.383, 0.98]: stand, listen to (a person).", "From 1525.0s to 1526.0s, [0.129, 0.0, 0.268, 0.96]->[0.119, 0.256, 0.361, 0.957]: stand, listen to (a person)."]}]}, {"video": "P60OxWahxBQ.mkv", "clip": [1553, 1598], "conversations": [{"from": "human", "timestamps": 1583.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.13, 0.594, 0.254, 0.939]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.279, 0.713, 0.366, 0.998]: listen to (a person), stand, watch (a person); [0.352, 0.446, 0.461, 0.989]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.415, 0.056, 0.867, 1.0]: listen to (a person), stand."}, {"from": "human", "timestamps": 1598.0, "value": "What actions did the person at bbox [0.091, 0.001, 0.626, 0.994] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.091, 0.001, 0.626, 0.994]: stand.", "At 1597.0s, [0.155, 0.055, 0.734, 0.991]: stand, talk to (e.g., self, a person, a group).", "From 1595.0s to 1596.0s, [0.186, 0.038, 0.805, 0.993]->[0.188, 0.049, 0.781, 0.978]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1594.0s, [0.155, 0.07, 0.747, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1592.0s to 1593.0s, [0.31, 0.08, 0.868, 1.0]->[0.287, 0.069, 0.881, 0.992]: stand."]}]}, {"video": "P60OxWahxBQ.mkv", "clip": [1600, 1691], "conversations": [{"from": "human", "timestamps": 1630.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.125, 0.012, 0.214, 0.461]: watch (a person), stand; [0.131, 0.058, 0.634, 0.993]: listen to (a person), stand; [0.55, 0.221, 0.871, 0.971]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1691.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.674, 0.247, 0.869, 0.956]?"}, {"from": "gpt", "value": ["In the current frame, [0.674, 0.247, 0.869, 0.956]: stand, watch (a person).", "At 1690.0s, [0.65, 0.21, 0.87, 0.969]: stand, watch (a person)."]}]}, {"video": "P90hF2S1JzA.mkv", "clip": [896, 955], "conversations": [{"from": "human", "timestamps": 926.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.084, 0.973, 0.978]: sit, talk to (e.g., self, a person, a group), stand."}, {"from": "human", "timestamps": 955.0, "value": "For the person at bbox [0.09, 0.111, 0.95, 0.953], what are their actions in the current frame and in 
prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.09, 0.111, 0.95, 0.953]: stand, talk to (e.g., self, a person, a group).", "From 953.0s to 954.0s, [0.31, 0.05, 0.992, 0.961]->[0.14, 0.158, 0.996, 0.963]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "P90hF2S1JzA.mkv", "clip": [895, 1014], "conversations": [{"from": "human", "timestamps": 925.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.064, 0.117, 1.0, 0.99]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1014.0, "value": "What actions is the person in bbox [0.452, 0.549, 0.81, 0.954] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.452, 0.549, 0.81, 0.954]: sit, listen to (a person), watch (a person).", "At 1013.0s, [0.447, 0.535, 0.815, 0.952]: sit, listen to (a person).", "From 1010.0s to 1012.0s, [0.45, 0.556, 0.818, 0.956]->[0.453, 0.545, 0.811, 0.964]->[0.448, 0.544, 0.809, 0.956]: sit, listen to (a person), watch (a person)."]}]}, {"video": "P90hF2S1JzA.mkv", "clip": [1009, 1076], "conversations": [{"from": "human", "timestamps": 1039.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.173, 0.083, 1.0, 0.974]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1076.0, "value": "What actions did the person at bbox [0.252, 0.076, 1.0, 0.971] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.252, 0.076, 1.0, 0.971]: stand, talk to (e.g., self, a person, a group).", "From 1074.0s to 1075.0s, [0.21, 0.074, 0.994, 0.956]->[0.319, 0.058, 0.988, 0.965]: sit, stand, talk to (e.g., self, a person, a group).", "From 1072.0s to 1073.0s, [0.136, 0.074, 1.0, 0.959]->[0.209, 0.051, 0.998, 0.971]: sit, talk to (e.g., self, a person, a group).", "From 1070.0s to 1071.0s, [0.126, 0.062, 1.0, 0.976]->[0.045, 0.037, 0.959, 0.965]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "P90hF2S1JzA.mkv", "clip": [1051, 1085], "conversations": [{"from": "human", "timestamps": 1081.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.029, 0.082, 0.987, 0.964]: watch (a person), sit."}, {"from": "human", "timestamps": 1085.0, "value": "For the person at bbox [0.081, 0.078, 0.93, 0.966], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.081, 0.078, 0.93, 0.966]: sit, talk to (e.g., self, a person, a group).", "At 1084.0s, [0.017, 0.062, 0.943, 0.951]: sit, talk to (e.g., self, a person, a group).", "At 1083.0s, [0.066, 0.086, 0.971, 0.969]: sit, stand, talk to (e.g., self, a person, a group).", "At 1082.0s, [0.021, 0.063, 0.958, 0.96]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "P90hF2S1JzA.mkv", "clip": [1111, 1181], "conversations": [{"from": "human", "timestamps": 1141.0, "value": "What actions did the person at bbox [0.004, 0.06, 0.949, 0.963] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.004, 0.06, 0.949, 0.963]: stand, talk to (e.g., self, a person, a group).", "At 1140.0s, [0.04, 0.058, 0.93, 0.978]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1181.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.043, 0.454, 0.408, 
0.786]: listen to (a person), sit, watch (a person); [0.289, 0.241, 0.537, 0.53]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.402, 0.393, 0.808, 0.876]: listen to (a person), sit, watch (a person); [0.74, 0.096, 0.992, 0.694]: listen to (a person), sit, watch (a person)."}]}, {"video": "P90hF2S1JzA.mkv", "clip": [1119, 1246], "conversations": [{"from": "human", "timestamps": 1149.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.043, 0.077, 0.879, 0.979]: listen to (a person), sit."}, {"from": "human", "timestamps": 1246.0, "value": "What actions is the person in bbox [0.012, 0.111, 0.927, 0.963] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.012, 0.111, 0.927, 0.963]: stand, talk to (e.g., self, a person, a group).", "At 1245.0s, [0.097, 0.121, 0.953, 0.981]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "P90hF2S1JzA.mkv", "clip": [1201, 1256], "conversations": [{"from": "human", "timestamps": 1231.0, "value": "What actions did the person at bbox [0.01, 0.227, 0.626, 0.981] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.01, 0.227, 0.626, 0.981]: stand, grab (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1230.0s, [0.0, 0.064, 0.878, 0.963]: stand, walk, watch (a person)."]}, {"from": "human", "timestamps": 1256.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.04, 0.061, 0.534, 0.95]: listen to (a person), stand, watch (a person); [0.397, 0.12, 0.983, 0.961]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "P90hF2S1JzA.mkv", "clip": [1238, 1325], "conversations": [{"from": "human", "timestamps": 1268.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.13, 0.04, 0.982, 0.972]: sit, stand."}, {"from": "human", "timestamps": 1325.0, "value": "What actions did the person at bbox [0.003, 0.083, 0.9, 0.978] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.003, 0.083, 0.9, 0.978]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1322.0s to 1324.0s, [0.294, 0.219, 0.449, 0.623]->[0.296, 0.239, 0.444, 0.624]->[0.299, 0.244, 0.447, 0.627]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1321.0s, [0.293, 0.239, 0.443, 0.618]: stand, listen to (a person).", "At 1320.0s, [0.298, 0.238, 0.444, 0.617]: stand, listen to (a person), watch (a person)."]}]}, {"video": "P90hF2S1JzA.mkv", "clip": [1298, 1361], "conversations": [{"from": "human", "timestamps": 1328.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.135, 0.926, 0.968]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1361.0, "value": "What actions is the person in bbox [0.103, 0.126, 0.827, 0.97] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.103, 0.126, 0.827, 0.97]: sit, talk to (e.g., self, a person, a group).", "From 1356.0s to 1360.0s, [0.08, 0.12, 0.896, 0.963]->[0.017, 0.07, 0.91, 0.965]->[0.037, 0.103, 0.912, 0.97]->[0.091, 0.114, 0.915, 0.961]->[0.088, 0.126, 
0.89, 0.971]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "P90hF2S1JzA.mkv", "clip": [1375, 1430], "conversations": [{"from": "human", "timestamps": 1405.0, "value": "What actions did the person at bbox [0.0, 0.062, 0.861, 0.955] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.062, 0.861, 0.955]: stand, talk to (e.g., self, a person, a group).", "From 1402.0s to 1404.0s, [0.071, 0.093, 0.993, 0.97]->[0.076, 0.048, 0.991, 0.978]->[0.012, 0.067, 0.868, 0.97]: stand, talk to (e.g., self, a person, a group).", "From 1400.0s to 1401.0s, [0.205, 0.058, 1.0, 0.968]->[0.038, 0.107, 0.928, 0.96]: stand.", "At 1399.0s, [0.242, 0.074, 0.996, 0.964]: stand, listen to (a person)."]}, {"from": "human", "timestamps": 1430.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.004, 0.063, 0.529, 0.958]: listen to (a person), stand; [0.364, 0.143, 0.974, 0.97]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "P90hF2S1JzA.mkv", "clip": [1404, 1468], "conversations": [{"from": "human", "timestamps": 1434.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.07, 0.04, 0.545, 0.949]: listen to (a person), walk; [0.753, 0.181, 0.942, 0.848]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1468.0, "value": "For the person at bbox [0.0, 0.095, 0.897, 0.956], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.095, 0.897, 0.956]: sit.", "From 1460.0s to 1467.0s, [0.03, 0.084, 0.87, 0.96]->[0.172, 0.043, 0.833, 0.982]->[0.04, 0.043, 0.777, 0.965]->[0.033, 0.069, 0.863, 0.931]->[0.018, 0.086, 0.909, 0.935]->[0.012, 0.087, 0.937, 0.94]->[0.013, 0.086, 0.901, 0.953]->[0.0, 0.104, 0.91, 0.959]: sit.", "From 1458.0s to 1459.0s, [0.084, 0.205, 0.972, 0.976]->[0.097, 0.199, 0.94, 1.0]: sit, lie/sleep.", "From 1456.0s to 1457.0s, [0.075, 0.206, 0.947, 0.964]->[0.051, 0.208, 0.971, 0.954]: lie/sleep.", "From 1454.0s to 1455.0s, [0.003, 0.186, 0.863, 0.969]->[0.052, 0.201, 0.961, 0.99]: sit."]}]}, {"video": "P90hF2S1JzA.mkv", "clip": [1459, 1496], "conversations": [{"from": "human", "timestamps": 1489.0, "value": "What actions is the person in bbox [0.759, 0.293, 0.891, 0.685] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.759, 0.293, 0.891, 0.685]: walk.", "From 1487.0s to 1488.0s, [0.793, 0.305, 0.901, 0.64]->[0.793, 0.296, 0.911, 0.639]: walk, close (e.g., a door, a box)."]}, {"from": "human", "timestamps": 1496.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.399, 0.115, 1.0, 0.979]: stand."}]}, {"video": "P90hF2S1JzA.mkv", "clip": [1507, 1543], "conversations": [{"from": "human", "timestamps": 1537.0, "value": "What actions did the person at bbox [0.012, 0.049, 1.0, 0.953] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.012, 0.049, 1.0, 0.953]: sit, talk to (e.g., self, a person, a group).", "From 1535.0s to 1536.0s, [0.0, 0.049, 1.0, 0.961]->[0.0, 0.062, 1.0, 0.956]: stand, talk to (e.g., self, a person, a group).", "At 1534.0s, [0.0, 0.074, 1.0, 0.956]: sit, talk to (e.g., self, a person, a group).", "At 1533.0s, [0.058, 0.092, 0.754, 0.975]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": 
"human", "timestamps": 1543.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.328, 0.3, 0.552, 0.777]: watch (a person), sit; [0.485, 0.301, 0.683, 0.817]: listen to (a person), sit."}]}, {"video": "P90hF2S1JzA.mkv", "clip": [1528, 1586], "conversations": [{"from": "human", "timestamps": 1558.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.357, 0.139, 0.897, 0.967]: sit."}, {"from": "human", "timestamps": 1586.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.005, 0.088, 0.572, 0.983]?"}, {"from": "gpt", "value": ["In the current frame, [0.005, 0.088, 0.572, 0.983]: sit, talk to (e.g., self, a person, a group).", "At 1585.0s, [0.004, 0.072, 0.569, 0.973]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "P90hF2S1JzA.mkv", "clip": [1540, 1598], "conversations": [{"from": "human", "timestamps": 1570.0, "value": "What actions is the person in bbox [0.146, 0.178, 0.348, 0.918] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.146, 0.178, 0.348, 0.918]: walk, close (e.g., a door, a box), watch (a person).", "At 1569.0s, [0.187, 0.169, 0.354, 0.899]: stand, close (e.g., a door, a box), watch (a person).", "At 1568.0s, [0.17, 0.163, 0.357, 0.904]: stand, close (e.g., a door, a box), listen to (a person), watch (a person).", "At 1567.0s, [0.187, 0.177, 0.446, 0.873]: stand, close (e.g., a door, a box), watch (a person).", "At 1566.0s, [0.172, 0.186, 0.38, 0.864]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1598.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.006, 0.046, 0.972, 0.966]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "P90hF2S1JzA.mkv", "clip": [1617, 1650], "conversations": [{"from": "human", "timestamps": 1647.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.069, 0.042, 0.971, 0.958]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1650.0, "value": "What actions did the person at bbox [0.043, 0.023, 1.0, 0.984] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.043, 0.023, 1.0, 0.984]: sit, talk to (e.g., self, a person, a group).", "From 1646.0s to 1649.0s, [0.054, 0.033, 1.0, 0.994]->[0.069, 0.042, 0.971, 0.958]->[0.04, 0.041, 1.0, 0.957]->[0.046, 0.035, 0.991, 0.966]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "P90hF2S1JzA.mkv", "clip": [1615, 1713], "conversations": [{"from": "human", "timestamps": 1645.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.013, 0.07, 0.98, 0.955]: listen to (a person), sit."}, {"from": "human", "timestamps": 1713.0, "value": "What actions did the person at bbox [0.0, 0.021, 0.989, 0.975] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.021, 0.989, 0.975]: sit, talk to (e.g., self, a person, a group).", "At 1712.0s, [0.0, 0.009, 0.978, 0.989]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "P90hF2S1JzA.mkv", "clip": [1676, 1730], "conversations": [{"from": "human", "timestamps": 1706.0, "value": "Describe the 
actions of the person at bbox [0.006, 0.028, 1.0, 0.948] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.006, 0.028, 1.0, 0.948]: sit, talk to (e.g., self, a person, a group).", "At 1705.0s, [0.0, 0.023, 0.988, 0.942]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1730.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.073, 0.99, 0.981]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "P90hF2S1JzA.mkv", "clip": [1730, 1762], "conversations": [{"from": "human", "timestamps": 1760.0, "value": "What actions is the person in bbox [0.141, 0.038, 0.948, 0.93] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.141, 0.038, 0.948, 0.93]: sit, talk to (e.g., self, a person, a group).", "From 1755.0s to 1759.0s, [0.013, 0.053, 0.922, 0.973]->[0.064, 0.03, 0.899, 0.946]->[0.016, 0.04, 0.981, 0.949]->[0.052, 0.032, 0.935, 0.936]->[0.083, 0.036, 0.948, 0.961]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1762.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.125, 0.045, 0.97, 0.949]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "PNZQ2UJfyQE.mp4", "clip": [961, 997], "conversations": [{"from": "human", "timestamps": 991.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.023, 0.156, 0.564, 0.993]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 997.0, "value": "What actions did the person at bbox [0.365, 0.147, 0.873, 0.985] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.365, 0.147, 0.873, 0.985]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 994.0s to 996.0s, [0.455, 0.212, 0.856, 0.973]->[0.342, 0.17, 0.814, 0.997]->[0.364, 0.165, 0.864, 0.995]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 993.0s, [0.451, 0.211, 0.858, 0.966]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "PNZQ2UJfyQE.mp4", "clip": [1208, 1259], "conversations": [{"from": "human", "timestamps": 1238.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.03, 0.029, 0.908, 0.99]?"}, {"from": "gpt", "value": ["In the current frame, [0.03, 0.029, 0.908, 0.99]: stand, talk to (e.g., self, a person, a group).", "From 1233.0s to 1237.0s, [0.022, 0.025, 0.865, 0.988]->[0.026, 0.032, 0.833, 0.988]->[0.03, 0.033, 0.815, 0.98]->[0.045, 0.065, 0.886, 0.984]->[0.041, 0.042, 0.888, 0.973]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1259.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.004, 0.021, 0.955, 1.0]: stand."}]}, {"video": "PNZQ2UJfyQE.mp4", "clip": [1249, 1307], "conversations": [{"from": "human", "timestamps": 1279.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.465, 0.18, 0.81, 1.0]?"}, {"from": "gpt", "value": ["In the current frame, [0.465, 0.18, 0.81, 1.0]: walk.", "At 1278.0s, [0.618, 0.249, 0.798, 0.925]: walk.", "From 1273.0s to 1277.0s, 
[0.559, 0.193, 0.739, 0.974]->[0.564, 0.202, 0.739, 0.97]->[0.562, 0.211, 0.735, 0.981]->[0.56, 0.206, 0.738, 0.98]->[0.536, 0.233, 0.741, 0.979]: stand, carry/hold (an object).", "From 1270.0s to 1272.0s, [0.591, 0.193, 0.755, 0.972]->[0.569, 0.203, 0.758, 0.975]->[0.567, 0.182, 0.741, 0.951]: stand."]}, {"from": "human", "timestamps": 1307.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.092, 0.237, 0.616, 0.982]: carry/hold (an object), stand, close (e.g., a door, a box)."}]}, {"video": "PNZQ2UJfyQE.mp4", "clip": [1306, 1356], "conversations": [{"from": "human", "timestamps": 1336.0, "value": "Describe the actions of the person at bbox [0.324, 0.059, 0.987, 0.991] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.324, 0.059, 0.987, 0.991]: sit.", "At 1335.0s, [0.357, 0.071, 1.0, 1.0]: sit.", "From 1333.0s to 1334.0s, [0.108, 0.405, 0.322, 0.919]->[0.335, 0.064, 1.0, 1.0]: sit, listen to (a person).", "At 1332.0s, [0.116, 0.408, 0.327, 0.927]: sit, touch (an object), listen to (a person).", "From 1330.0s to 1331.0s, [0.117, 0.413, 0.328, 0.978]->[0.114, 0.417, 0.339, 0.962]: sit, listen to (a person).", "At 1329.0s, [0.121, 0.413, 0.351, 0.966]: sit, touch (an object), listen to (a person).", "From 1327.0s to 1328.0s, [0.125, 0.415, 0.327, 0.964]->[0.135, 0.415, 0.37, 0.979]: sit, listen to (a person).", "At 1326.0s, [0.126, 0.419, 0.321, 0.97]: sit, touch (an object), listen to (a person).", "From 1324.0s to 1325.0s, [0.11, 0.428, 0.302, 0.838]->[0.124, 0.429, 0.313, 0.82]: sit, listen to (a person).", "At 1323.0s, [0.118, 0.42, 0.324, 0.923]: sit, touch (an object), listen to (a person).", "From 1320.0s to 1322.0s, [0.142, 0.367, 0.317, 0.898]->[0.142, 0.384, 0.343, 0.975]->[0.113, 0.406, 0.333, 0.864]: sit, listen to (a person).", "At 1319.0s, [0.131, 0.351, 0.323, 0.794]: sit, touch (an object), listen to (a person).", "From 1316.0s to 1318.0s, [0.017, 0.361, 0.167, 0.771]->[0.094, 0.348, 0.249, 0.919]->[0.124, 0.345, 0.312, 0.807]: sit, listen to (a person)."]}, {"from": "human", "timestamps": 1356.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.375, 0.0, 1.0, 1.0]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "PNZQ2UJfyQE.mp4", "clip": [1352, 1419], "conversations": [{"from": "human", "timestamps": 1382.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.202, 0.02, 1.0, 0.976]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1419.0, "value": "For the person at bbox [0.169, 0.169, 0.405, 0.963], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.169, 0.169, 0.405, 0.963]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1418.0s, [0.159, 0.146, 0.395, 0.969]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 1415.0s to 1417.0s, [0.151, 0.112, 0.405, 0.955]->[0.155, 0.116, 0.412, 0.957]->[0.159, 0.112, 0.415, 0.956]: stand, listen to (a person), watch (a person).", "From 1410.0s to 1414.0s, [0.148, 0.109, 0.401, 0.951]->[0.15, 0.107, 0.403, 0.951]->[0.15, 0.105, 0.404, 0.952]->[0.151, 0.108, 0.404, 0.95]->[0.151, 0.115, 0.403, 0.953]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1407.0s to 1409.0s, [0.186, 0.116, 0.418, 0.924]->[0.146, 
0.12, 0.411, 0.953]->[0.149, 0.111, 0.403, 0.951]: stand, talk to (e.g., self, a person, a group).", "From 1405.0s to 1406.0s, [0.236, 0.143, 0.401, 0.925]->[0.074, 0.124, 0.386, 0.913]: walk.", "From 1401.0s to 1404.0s, [0.276, 0.12, 0.445, 0.95]->[0.26, 0.152, 0.41, 0.952]->[0.24, 0.177, 0.392, 0.954]->[0.237, 0.158, 0.392, 0.955]: stand, close (e.g., a door, a box).", "At 1400.0s, [0.177, 0.152, 0.437, 0.958]: stand, open (e.g., a window, a car door), watch (a person).", "At 1399.0s, [0.15, 0.17, 0.394, 0.965]: stand, carry/hold (an object), open (e.g., a window, a car door), watch (a person).", "At 1398.0s, [0.205, 0.15, 0.269, 0.805]: stand, open (e.g., a window, a car door)."]}]}, {"video": "PNZQ2UJfyQE.mp4", "clip": [1379, 1476], "conversations": [{"from": "human", "timestamps": 1409.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.149, 0.111, 0.403, 0.951]: stand, talk to (e.g., self, a person, a group); [0.525, 0.344, 0.992, 0.987]: listen to (a person), sit."}, {"from": "human", "timestamps": 1476.0, "value": "What actions did the person at bbox [0.465, 0.128, 0.89, 0.994] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.465, 0.128, 0.89, 0.994]: sit, listen to (a person).", "At 1475.0s, [0.465, 0.13, 0.883, 0.993]: sit, listen to (a person)."]}]}, {"video": "PNZQ2UJfyQE.mp4", "clip": [1589, 1636], "conversations": [{"from": "human", "timestamps": 1619.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.293, 0.4, 0.326, 0.571]: watch (a person), stand; [0.412, 0.084, 1.0, 0.991]: stand, answer phone."}, {"from": "human", "timestamps": 1636.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.512, 0.201, 0.658, 0.852]?"}, {"from": "gpt", "value": ["At Now, [0.512, 0.201, 0.658, 0.852]: walk, listen to (a person).", "From 1631.0s to 1635.0s, [0.097, 0.234, 0.238, 0.867]->[0.139, 0.218, 0.294, 0.748]->[0.189, 0.218, 0.326, 0.727]->[0.279, 0.199, 0.393, 0.663]->[0.407, 0.209, 0.532, 0.673]: walk, listen to (a person), watch (a person).", "At 1629.0s, [0.148, 0.243, 0.353, 0.98]: stand, close (e.g., a door, a box), listen to (a person), watch (a person).", "At 1628.0s, [0.199, 0.201, 0.381, 0.952]: stand, close (e.g., a door, a box), watch (a person).", "At 1627.0s, [0.267, 0.238, 0.433, 0.911]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "PNZQ2UJfyQE.mp4", "clip": [1712, 1792], "conversations": [{"from": "human", "timestamps": 1742.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.01, 0.134, 0.543, 0.978]: listen to (a person), stand, watch (a person); [0.382, 0.106, 0.982, 0.985]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1792.0, "value": "Describe the actions of the person at bbox [0.216, 0.06, 1.0, 0.982] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.216, 0.06, 1.0, 0.982]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1787.0s to 1791.0s, [0.178, 0.071, 1.0, 1.0]->[0.199, 0.067, 1.0, 0.99]->[0.192, 0.056, 1.0, 0.981]->[0.194, 0.058, 1.0, 0.979]->[0.217, 0.057, 1.0, 0.981]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1786.0s, [0.21, 0.055, 1.0, 0.982]: stand, watch (a 
person)."]}]}, {"video": "PcFEhUKhN6g.mkv", "clip": [999, 1036], "conversations": [{"from": "human", "timestamps": 1029.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.334, 0.183, 0.547, 0.46]: run/jog."}, {"from": "human", "timestamps": 1036.0, "value": "Describe the actions of the person at bbox [0.531, 0.498, 0.597, 0.636] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.531, 0.498, 0.597, 0.636]: fall down.", "At 1035.0s, [0.55, 0.397, 0.627, 0.636]: walk, carry/hold (an object).", "At 1034.0s, [0.517, 0.362, 0.596, 0.655]: run/jog, carry/hold (an object).", "At 1033.0s, [0.461, 0.368, 0.551, 0.605]: run/jog, shoot, watch (a person)."]}]}, {"video": "PcFEhUKhN6g.mkv", "clip": [1025, 1111], "conversations": [{"from": "human", "timestamps": 1055.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.053, 0.224, 0.757, 0.896]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1111.0, "value": "Describe the actions of the person at bbox [0.445, 0.344, 0.753, 0.852] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.445, 0.344, 0.753, 0.852]: walk, listen to (a person).", "At 1110.0s, [0.495, 0.343, 0.719, 0.841]: walk, touch (an object), listen to (a person)."]}]}, {"video": "PcFEhUKhN6g.mkv", "clip": [1188, 1238], "conversations": [{"from": "human", "timestamps": 1218.0, "value": "What actions is the person in bbox [0.644, 0.19, 0.966, 0.847] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.644, 0.19, 0.966, 0.847]: walk, carry/hold (an object), watch (a person).", "At 1217.0s, [0.013, 0.143, 0.349, 0.846]: walk."]}, {"from": "human", "timestamps": 1238.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.441, 0.17, 0.866, 0.849]: carry/hold (an object), stand, smoke, drink."}]}, {"video": "PcFEhUKhN6g.mkv", "clip": [1258, 1307], "conversations": [{"from": "human", "timestamps": 1288.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.124, 0.249, 0.397, 0.843]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.463, 0.166, 0.923, 0.857]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1307.0, "value": "What actions did the person at bbox [0.544, 0.189, 0.832, 0.851] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.544, 0.189, 0.832, 0.851]: stand, smoke, talk to (e.g., self, a person, a group).", "At 1306.0s, [0.507, 0.2, 0.803, 0.845]: stand, smoke, talk to (e.g., self, a person, a group).", "At 1305.0s, [0.401, 0.222, 0.724, 0.852]: walk, carry/hold (an object), smoke, talk to (e.g., self, a person, a group).", "From 1301.0s to 1304.0s, [0.38, 0.229, 0.655, 0.865]->[0.343, 0.233, 0.615, 0.85]->[0.408, 0.234, 0.653, 0.856]->[0.384, 0.226, 0.661, 0.833]: walk, smoke, talk to (e.g., self, a person, a group).", "At 1300.0s, [0.498, 0.187, 0.968, 0.904]: walk, talk to (e.g., self, a person, a group).", "At 1299.0s, [0.379, 0.183, 0.93, 0.882]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a 
person).", "At 1298.0s, [0.359, 0.185, 0.922, 0.914]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1291.0s to 1297.0s, [0.534, 0.172, 0.846, 0.872]->[0.538, 0.184, 0.824, 0.864]->[0.515, 0.192, 0.836, 0.848]->[0.451, 0.178, 0.849, 0.843]->[0.466, 0.18, 0.85, 0.843]->[0.462, 0.177, 0.846, 0.836]->[0.461, 0.179, 0.853, 0.843]: stand, listen to (a person), watch (a person).", "At 1290.0s, [0.419, 0.181, 0.908, 0.856]: stand, talk to (e.g., self, a person, a group).", "At 1289.0s, [0.457, 0.173, 0.934, 0.878]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1288.0s, [0.463, 0.166, 0.923, 0.857]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1286.0s to 1287.0s, [0.476, 0.158, 0.929, 0.849]->[0.464, 0.159, 0.934, 0.874]: stand, listen to (a person).", "From 1281.0s to 1285.0s, [0.527, 0.215, 0.966, 0.921]->[0.46, 0.161, 0.921, 0.927]->[0.456, 0.172, 0.922, 0.855]->[0.451, 0.149, 0.927, 0.915]->[0.468, 0.163, 0.923, 0.883]: stand, talk to (e.g., self, a person, a group).", "From 1279.0s to 1280.0s, [0.373, 0.18, 0.848, 0.882]->[0.438, 0.181, 0.93, 0.932]: walk, talk to (e.g., self, a person, a group).", "At 1278.0s, [0.495, 0.203, 0.925, 0.853]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1277.0s, [0.431, 0.242, 0.876, 0.873]: walk, smoke, talk to (e.g., self, a person, a group).", "From 1271.0s to 1276.0s, [0.602, 0.268, 0.896, 0.959]->[0.538, 0.275, 0.904, 0.874]->[0.456, 0.238, 0.89, 0.868]->[0.59, 0.232, 0.953, 0.873]->[0.349, 0.233, 0.9, 0.89]->[0.38, 0.219, 0.846, 0.939]: stand, smoke, talk to (e.g., self, a person, a group), watch (a person).", "At 1270.0s, [0.593, 0.261, 0.878, 0.916]: walk, smoke, talk to (e.g., self, a person, a group), watch (a person).", "At 1269.0s, [0.734, 0.288, 0.963, 0.95]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1268.0s, [0.768, 0.307, 0.959, 0.915]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1267.0s, [0.771, 0.309, 0.92, 0.902]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1266.0s, [0.774, 0.309, 0.915, 0.869]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1264.0s to 1265.0s, [0.783, 0.326, 0.97, 0.918]->[0.771, 0.312, 0.916, 0.888]: stand, smoke, talk to (e.g., self, a person, a group), watch (a person).", "At 1263.0s, [0.747, 0.334, 0.976, 0.933]: stand, listen to (a person).", "At 1262.0s, [0.768, 0.318, 0.973, 0.884]: walk, listen to (a person).", "From 1258.0s to 1261.0s, [0.543, 0.292, 0.927, 0.983]->[0.631, 0.303, 0.951, 0.945]->[0.637, 0.31, 0.912, 0.932]->[0.767, 0.332, 0.978, 0.934]: walk.", "At 1257.0s, [0.468, 0.282, 0.934, 0.931]: walk, talk to (e.g., self, a person, a group).", "At 1256.0s, [0.473, 0.194, 0.954, 0.856]: stand, talk to (e.g., self, a person, a group).", "From 1252.0s to 1255.0s, [0.431, 0.18, 0.835, 0.853]->[0.365, 0.163, 0.901, 0.875]->[0.557, 0.165, 0.941, 0.841]->[0.559, 0.164, 0.938, 0.851]: stand, carry/hold (an object), give/serve (an object) to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1251.0s, [0.548, 0.186, 0.903, 0.899]: walk, drink, talk to (e.g., self, a person, a group), watch (a person).", "At 1250.0s, [0.51, 0.154, 0.873, 0.876]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1248.0s to 1249.0s, [0.48, 0.216, 
0.69, 0.849]->[0.487, 0.213, 0.804, 0.855]: walk, talk to (e.g., self, a person, a group).", "At 1247.0s, [0.457, 0.237, 0.721, 0.861]: walk, drink, talk to (e.g., self, a person, a group).", "At 1246.0s, [0.59, 0.212, 0.811, 0.809]: walk, drink, smoke, talk to (e.g., self, a person, a group).", "At 1245.0s, [0.612, 0.194, 0.905, 0.862]: stand, drink, smoke, talk to (e.g., self, a person, a group).", "From 1243.0s to 1244.0s, [0.519, 0.175, 0.833, 0.847]->[0.531, 0.186, 0.806, 0.861]: stand, drink, smoke.", "At 1242.0s, [0.501, 0.167, 0.83, 0.849]: stand, carry/hold (an object), smoke.", "At 1241.0s, [0.49, 0.176, 0.835, 0.848]: stand, carry/hold (an object), drink, smoke.", "From 1239.0s to 1240.0s, [0.433, 0.171, 0.866, 0.85]->[0.442, 0.176, 0.856, 0.851]: stand, carry/hold (an object), smoke.", "At 1238.0s, [0.441, 0.17, 0.866, 0.849]: stand, carry/hold (an object), drink, smoke."]}]}, {"video": "PcFEhUKhN6g.mkv", "clip": [1279, 1347], "conversations": [{"from": "human", "timestamps": 1309.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.48, 0.195, 0.769, 0.828]: stand, smoke, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1347.0, "value": "For the person at bbox [0.246, 0.487, 0.488, 0.84], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.246, 0.487, 0.488, 0.84]: sit, touch (an object), listen to (a person), watch (a person).", "From 1344.0s to 1346.0s, [0.205, 0.468, 0.461, 0.79]->[0.219, 0.488, 0.462, 0.824]->[0.254, 0.535, 0.475, 0.835]: sit, touch (an object), listen to (a person), watch (a person).", "At 1343.0s, [0.247, 0.434, 0.43, 0.728]: sit, touch (an object), watch (a person)."]}]}, {"video": "PcFEhUKhN6g.mkv", "clip": [1309, 1353], "conversations": [{"from": "human", "timestamps": 1339.0, "value": "What actions did the person at bbox [0.211, 0.184, 0.787, 0.85] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.211, 0.184, 0.787, 0.85]: stand, drink, watch (a person).", "From 1335.0s to 1338.0s, [0.33, 0.184, 0.729, 0.851]->[0.186, 0.194, 0.745, 0.868]->[0.166, 0.185, 0.786, 0.879]->[0.127, 0.177, 0.796, 0.88]: stand, drink.", "At 1334.0s, [0.381, 0.223, 0.702, 0.848]: stand, carry/hold (an object), drink."]}, {"from": "human", "timestamps": 1353.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.249, 0.493, 0.5, 0.849]: sit, touch (an object), talk to (e.g., self, a person, a group); [0.511, 0.198, 0.67, 0.757]: listen to (a person), stand, watch (a person)."}]}, {"video": "PcFEhUKhN6g.mkv", "clip": [1361, 1415], "conversations": [{"from": "human", "timestamps": 1391.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.019, 0.21, 0.702, 0.884]: talk to (e.g., self, a person, a group), touch (an object), bend/bow (at the waist), answer phone."}, {"from": "human", "timestamps": 1415.0, "value": "What actions did the person at bbox [0.483, 0.365, 0.837, 0.839] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.483, 0.365, 0.837, 0.839]: walk.", "From 1413.0s to 1414.0s, [0.332, 0.229, 0.673, 0.844]->[0.412, 0.284, 0.808, 0.837]: walk.", "At 1412.0s, [0.137, 0.314, 0.494, 0.855]: walk, listen to (a person), watch (a person).", "At 1411.0s, [0.161, 0.313, 0.454, 0.826]: walk, listen to (a 
person), talk to (e.g., self, a person, a group).", "At 1410.0s, [0.204, 0.336, 0.419, 0.832]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1409.0s, [0.248, 0.325, 0.421, 0.835]: walk, listen to (a person), talk to (e.g., self, a person, a group).", "At 1408.0s, [0.286, 0.329, 0.42, 0.836]: walk, talk to (e.g., self, a person, a group).", "At 1407.0s, [0.305, 0.354, 0.419, 0.826]: walk, listen to (a person), watch (a person).", "At 1406.0s, [0.347, 0.351, 0.442, 0.768]: walk, talk to (e.g., self, a person, a group)."]}]}, {"video": "PcFEhUKhN6g.mkv", "clip": [1407, 1469], "conversations": [{"from": "human", "timestamps": 1437.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.09, 0.323, 0.191, 0.562]: listen to (a person), stand, watch (a person); [0.184, 0.381, 0.475, 0.74]: sit, touch (an object); [0.209, 0.256, 0.333, 0.522]: watch (a person), stand; [0.45, 0.223, 0.6, 0.791]: listen to (a person), stand, watch (a person); [0.537, 0.23, 0.715, 0.844]: sit."}, {"from": "human", "timestamps": 1469.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.043, 0.255, 0.535, 0.839]?"}, {"from": "gpt", "value": ["In the current frame, [0.043, 0.255, 0.535, 0.839]: stand, listen to (a person).", "From 1467.0s to 1468.0s, [0.037, 0.237, 0.576, 0.845]->[0.051, 0.231, 0.617, 0.848]: stand, listen to (a person).", "At 1466.0s, [0.036, 0.245, 0.555, 0.836]: stand, watch (a person)."]}]}, {"video": "PcFEhUKhN6g.mkv", "clip": [1550, 1593], "conversations": [{"from": "human", "timestamps": 1580.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.057, 0.166, 0.888, 0.869]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.149, 0.41, 0.262, 0.743]: sit."}, {"from": "human", "timestamps": 1593.0, "value": "What actions is the person in bbox [0.379, 0.272, 0.768, 0.855] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.379, 0.272, 0.768, 0.855]: stand.", "At 1592.0s, [0.346, 0.395, 0.64, 0.856]: stand."]}]}, {"video": "PmElx9ZVByw.mp4", "clip": [1013, 1049], "conversations": [{"from": "human", "timestamps": 1043.0, "value": "Describe the actions of the person at bbox [0.0, 0.117, 0.552, 0.927] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.0, 0.117, 0.552, 0.927]: sit, listen to (a person).", "At 1042.0s, [0.0, 0.114, 0.567, 0.92]: sit, listen to (a person)."]}, {"from": "human", "timestamps": 1049.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.017, 0.157, 0.529, 0.893]: listen to (a person), sit, eat; [0.338, 0.189, 0.551, 0.584]: carry/hold (an object), walk."}]}, {"video": "PmElx9ZVByw.mp4", "clip": [1005, 1127], "conversations": [{"from": "human", "timestamps": 1035.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.001, 0.15, 0.632, 0.883]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1127.0, "value": "What actions is the person in bbox [0.002, 0.124, 0.602, 0.892] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.002, 0.124, 0.602, 0.892]: sit.", "From 1114.0s to 1126.0s, [0.024, 0.135, 0.645, 
0.891]->[0.02, 0.125, 0.641, 0.881]->[0.015, 0.118, 0.647, 0.883]->[0.018, 0.13, 0.646, 0.885]->[0.009, 0.122, 0.647, 0.883]->[0.014, 0.12, 0.652, 0.893]->[0.0, 0.098, 0.667, 0.905]->[0.02, 0.117, 0.649, 0.885]->[0.016, 0.118, 0.658, 0.876]->[0.013, 0.108, 0.643, 0.908]->[0.009, 0.115, 0.61, 0.889]->[0.005, 0.116, 0.613, 0.902]->[0.006, 0.132, 0.599, 0.905]: sit.", "At 1113.0s, [0.008, 0.127, 0.627, 0.897]: sit, listen to (a person)."]}]}, {"video": "PmElx9ZVByw.mp4", "clip": [1067, 1099], "conversations": [{"from": "human", "timestamps": 1097.0, "value": "What actions is the person in bbox [0.665, 0.285, 0.99, 0.855] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.665, 0.285, 0.99, 0.855]: sit, carry/hold (an object), eat.", "From 1095.0s to 1096.0s, [0.66, 0.265, 1.0, 0.879]->[0.655, 0.299, 0.996, 0.861]: sit, carry/hold (an object), eat.", "From 1091.0s to 1094.0s, [0.705, 0.252, 0.992, 0.849]->[0.709, 0.245, 0.994, 0.737]->[0.678, 0.261, 0.997, 0.907]->[0.649, 0.268, 0.993, 0.879]: sit, carry/hold (an object)."]}, {"from": "human", "timestamps": 1099.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.179, 0.107, 1.0, 0.893]: sit; [0.257, 0.508, 0.336, 0.695]: stand; [0.336, 0.474, 0.463, 0.673]: stand."}]}, {"video": "PmElx9ZVByw.mp4", "clip": [1112, 1152], "conversations": [{"from": "human", "timestamps": 1142.0, "value": "For the person at bbox [0.012, 0.143, 0.546, 0.906], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.012, 0.143, 0.546, 0.906]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1141.0s, [0.009, 0.129, 0.559, 0.911]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1138.0s to 1140.0s, [0.02, 0.134, 0.561, 0.886]->[0.014, 0.139, 0.561, 0.908]->[0.01, 0.132, 0.572, 0.89]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1152.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.015, 0.136, 0.589, 0.892]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.747, 0.387, 0.897, 0.802]: walk."}]}, {"video": "PmElx9ZVByw.mp4", "clip": [1111, 1239], "conversations": [{"from": "human", "timestamps": 1141.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.009, 0.129, 0.559, 0.911]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1239.0, "value": "What actions is the person in bbox [0.022, 0.122, 0.552, 0.947] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.022, 0.122, 0.552, 0.947]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1238.0s, [0.0, 0.121, 0.368, 0.916]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1237.0s, [0.013, 0.138, 0.417, 0.945]: stand, talk to (e.g., self, a person, a group).", "At 1236.0s, [0.001, 0.134, 0.56, 0.891]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1235.0s, [0.0, 0.132, 0.557, 0.927]: stand, listen to (a person)."]}]}, {"video": "PmElx9ZVByw.mp4", "clip": [1173, 1205], "conversations": [{"from": "human", "timestamps": 1203.0, "value": "Describe the actions of 
the person at bbox [0.025, 0.341, 0.452, 0.923] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.025, 0.341, 0.452, 0.923]: sit, carry/hold (an object), put down, listen to (a person).", "From 1199.0s to 1202.0s, [0.13, 0.371, 0.4, 0.851]->[0.093, 0.34, 0.387, 0.871]->[0.065, 0.336, 0.415, 0.949]->[0.038, 0.347, 0.453, 0.927]: sit, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1205.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.033, 0.357, 0.393, 0.906]: listen to (a person), sit, watch (a person); [0.337, 0.075, 0.983, 0.922]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "PmElx9ZVByw.mp4", "clip": [1268, 1302], "conversations": [{"from": "human", "timestamps": 1298.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.155, 0.531, 0.421, 0.884]: listen to (a person), touch (an object), crouch/kneel; [0.544, 0.221, 1.0, 0.866]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1302.0, "value": "For the person at bbox [0.49, 0.355, 0.823, 0.898], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.49, 0.355, 0.823, 0.898]: sit, carry/hold (an object).", "At 1301.0s, [0.531, 0.404, 0.886, 0.889]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1300.0s, [0.544, 0.252, 0.897, 0.82]: sit, carry/hold (an object), lift/pick up, listen to (a person).", "At 1299.0s, [0.556, 0.217, 0.994, 0.811]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 1297.0s to 1298.0s, [0.593, 0.223, 1.0, 0.851]->[0.544, 0.221, 1.0, 0.866]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1296.0s, [0.538, 0.229, 0.923, 0.886]: sit, talk to (e.g., self, a person, a group).", "At 1295.0s, [0.518, 0.248, 0.902, 0.86]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1294.0s, [0.541, 0.318, 0.91, 0.909]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1293.0s, [0.499, 0.394, 0.891, 0.906]: sit, listen to (a person), watch (a person)."]}]}, {"video": "PmElx9ZVByw.mp4", "clip": [1356, 1419], "conversations": [{"from": "human", "timestamps": 1386.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.237, 0.138, 0.967, 0.89]?"}, {"from": "gpt", "value": ["At Now, [0.237, 0.138, 0.967, 0.89]: stand, talk to (e.g., self, a person, a group).", "From 1382.0s to 1385.0s, [0.094, 0.123, 0.838, 0.879]->[0.267, 0.138, 0.984, 0.891]->[0.393, 0.131, 0.993, 0.881]->[0.232, 0.128, 0.994, 0.88]: stand, talk to (e.g., self, a person, a group).", "From 1377.0s to 1381.0s, [0.026, 0.184, 0.726, 0.91]->[0.001, 0.123, 0.693, 0.897]->[0.081, 0.194, 0.796, 0.885]->[0.125, 0.196, 0.859, 0.895]->[0.157, 0.164, 0.889, 0.882]: walk, talk to (e.g., self, a person, a group).", "From 1370.0s to 1376.0s, [0.053, 0.266, 0.478, 0.893]->[0.043, 0.246, 0.59, 0.884]->[0.019, 0.152, 0.638, 0.902]->[0.0, 0.137, 0.616, 0.918]->[0.011, 0.118, 0.75, 0.882]->[0.019, 0.114, 0.761, 0.879]->[0.018, 0.167, 0.814, 0.908]: stand, talk to (e.g., self, a person, a group).", "From 1367.0s to 1369.0s, [0.018, 0.213, 0.898, 0.872]->[0.043, 0.264, 0.7, 0.894]->[0.045, 
0.246, 0.597, 0.905]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1366.0s, [0.05, 0.122, 0.937, 0.9]: stand, carry/hold (an object).", "From 1362.0s to 1365.0s, [0.03, 0.15, 0.63, 0.889]->[0.008, 0.155, 0.563, 0.907]->[0.002, 0.127, 0.604, 0.909]->[0.0, 0.105, 0.528, 0.905]: stand, talk to (e.g., self, a person, a group).", "At 1361.0s, [0.007, 0.215, 0.825, 0.885]: stand, point to (an object), talk to (e.g., self, a person, a group).", "At 1360.0s, [0.035, 0.212, 0.783, 0.894]: stand, point to (an object), watch (e.g., TV), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1419.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.011, 0.118, 0.502, 0.925]: listen to (a person), stand, watch (a person); [0.488, 0.173, 0.988, 0.885]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "PmElx9ZVByw.mp4", "clip": [1394, 1446], "conversations": [{"from": "human", "timestamps": 1424.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.0, 0.097, 0.884, 0.882]: stand."}, {"from": "human", "timestamps": 1446.0, "value": "For the person at bbox [0.338, 0.198, 0.841, 0.886], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.338, 0.198, 0.841, 0.886]: walk.", "At 1445.0s, [0.354, 0.182, 0.889, 0.885]: walk.", "At 1444.0s, [0.434, 0.169, 0.999, 0.875]: walk, listen to (a person)."]}]}, {"video": "PmElx9ZVByw.mp4", "clip": [1414, 1493], "conversations": [{"from": "human", "timestamps": 1444.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.434, 0.169, 0.999, 0.875]: listen to (a person), walk."}, {"from": "human", "timestamps": 1493.0, "value": "For the person at bbox [0.405, 0.419, 0.76, 0.774], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.405, 0.419, 0.76, 0.774]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1492.0s, [0.404, 0.418, 0.762, 0.771]: sit, carry/hold (an object), listen to (a person).", "From 1490.0s to 1491.0s, [0.411, 0.421, 0.76, 0.774]->[0.407, 0.418, 0.761, 0.77]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1489.0s, [0.405, 0.425, 0.759, 0.781]: sit, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "At 1488.0s, [0.431, 0.43, 0.761, 0.787]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1486.0s to 1487.0s, [0.519, 0.435, 0.753, 0.782]->[0.519, 0.436, 0.755, 0.777]: sit, touch (an object), listen to (a person), watch (a person).", "From 1484.0s to 1485.0s, [0.497, 0.417, 0.808, 0.784]->[0.518, 0.432, 0.76, 0.786]: sit, listen to (a person), watch (a person).", "At 1483.0s, [0.464, 0.324, 0.792, 0.796]: carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "At 1482.0s, [0.42, 0.288, 0.783, 0.797]: bend/bow (at the waist), pull (an object), touch (an object), listen to (a person).", "At 1481.0s, [0.427, 0.297, 0.706, 0.797]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person).", "From 1479.0s to 1480.0s, [0.407, 0.222, 0.659, 0.796]->[0.448, 0.251, 0.719, 0.784]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 
1478.0s, [0.407, 0.211, 0.622, 0.754]: stand, carry/hold (an object), put down, talk to (e.g., self, a person, a group), watch (a person).", "From 1475.0s to 1477.0s, [0.644, 0.157, 0.84, 0.715]->[0.595, 0.136, 0.878, 0.74]->[0.495, 0.224, 0.67, 0.748]: stand, carry/hold (an object).", "At 1474.0s, [0.6, 0.174, 0.883, 0.695]: walk.", "At 1473.0s, [0.611, 0.233, 0.782, 0.619]: walk, talk to (e.g., self, a person, a group).", "At 1472.0s, [0.542, 0.171, 0.813, 0.877]: walk, touch (an object).", "At 1471.0s, [0.467, 0.115, 0.88, 0.847]: walk.", "From 1464.0s to 1469.0s, [0.126, 0.124, 0.982, 0.918]->[0.122, 0.128, 1.0, 0.92]->[0.187, 0.125, 1.0, 0.895]->[0.273, 0.119, 0.998, 0.887]->[0.235, 0.126, 1.0, 0.902]->[0.232, 0.125, 1.0, 0.923]: stand, talk to (e.g., self, a person, a group).", "From 1462.0s to 1463.0s, [0.164, 0.129, 0.989, 0.897]->[0.158, 0.106, 0.998, 0.908]: stand."]}]}, {"video": "PmElx9ZVByw.mp4", "clip": [1477, 1537], "conversations": [{"from": "human", "timestamps": 1507.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.515, 0.149, 1.0, 0.889]?"}, {"from": "gpt", "value": ["In the current frame, [0.515, 0.149, 1.0, 0.889]: walk.", "From 1503.0s to 1506.0s, [0.517, 0.244, 0.888, 0.894]->[0.478, 0.195, 0.944, 0.922]->[0.436, 0.211, 0.983, 0.897]->[0.478, 0.135, 1.0, 0.896]: walk.", "At 1502.0s, [0.596, 0.223, 0.921, 0.865]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1537.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.043, 0.181, 0.662, 0.897]: carry/hold (an object), stand."}]}, {"video": "PmElx9ZVByw.mp4", "clip": [1493, 1601], "conversations": [{"from": "human", "timestamps": 1523.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.545, 0.138, 0.995, 0.903]: stand."}, {"from": "human", "timestamps": 1601.0, "value": "For the person at bbox [0.038, 0.139, 0.764, 0.87], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.038, 0.139, 0.764, 0.87]: stand, answer phone.", "From 1599.0s to 1600.0s, [0.057, 0.136, 0.715, 0.87]->[0.075, 0.154, 0.737, 0.868]: stand, answer phone."]}]}, {"video": "PmElx9ZVByw.mp4", "clip": [1577, 1616], "conversations": [{"from": "human", "timestamps": 1607.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.366, 0.171, 0.911, 0.895]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1616.0, "value": "What actions did the person at bbox [0.258, 0.2, 0.472, 0.892] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.258, 0.2, 0.472, 0.892]: walk, answer phone.", "From 1613.0s to 1615.0s, [0.297, 0.26, 0.568, 0.877]->[0.311, 0.263, 0.574, 0.877]->[0.307, 0.239, 0.581, 0.877]: walk, answer phone."]}]}, {"video": "PmElx9ZVByw.mp4", "clip": [1624, 1659], "conversations": [{"from": "human", "timestamps": 1654.0, "value": "For the person at bbox [0.01, 0.124, 0.563, 0.892], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.01, 0.124, 0.563, 0.892]: stand, answer phone.", "From 1651.0s to 1653.0s, [0.011, 0.156, 0.581, 0.891]->[0.018, 0.139, 0.582, 0.898]->[0.015, 0.122, 0.574, 0.887]: stand, answer phone.", 
"At 1650.0s, [0.036, 0.188, 0.62, 0.879]: stand, answer phone, talk to (e.g., self, a person, a group).", "From 1647.0s to 1649.0s, [0.071, 0.182, 0.638, 0.884]->[0.057, 0.178, 0.633, 0.89]->[0.03, 0.174, 0.627, 0.888]: stand, answer phone.", "From 1645.0s to 1646.0s, [0.088, 0.161, 0.614, 0.887]->[0.078, 0.15, 0.632, 0.885]: stand, answer phone, talk to (e.g., self, a person, a group).", "From 1643.0s to 1644.0s, [0.089, 0.159, 0.631, 0.876]->[0.085, 0.165, 0.616, 0.886]: stand, answer phone."]}, {"from": "human", "timestamps": 1659.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.037, 0.151, 0.733, 0.907]: sit."}]}, {"video": "PmElx9ZVByw.mp4", "clip": [1639, 1689], "conversations": [{"from": "human", "timestamps": 1669.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.012, 0.134, 0.737, 0.905]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1689.0, "value": "What actions is the person in bbox [0.434, 0.299, 0.877, 0.923] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.434, 0.299, 0.877, 0.923]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1688.0s, [0.415, 0.301, 0.877, 0.915]: stand, carry/hold (an object), watch (a person).", "At 1687.0s, [0.478, 0.294, 0.85, 0.897]: stand, watch (a person).", "At 1686.0s, [0.439, 0.296, 0.84, 0.886]: stand, carry/hold (an object), watch (a person).", "From 1680.0s to 1685.0s, [0.484, 0.26, 0.903, 0.912]->[0.499, 0.257, 0.911, 0.912]->[0.492, 0.287, 0.881, 0.893]->[0.462, 0.289, 0.865, 0.902]->[0.452, 0.295, 0.9, 0.901]->[0.455, 0.299, 0.831, 0.895]: stand, listen to (a person), watch (a person).", "At 1679.0s, [0.416, 0.296, 0.883, 0.899]: stand, carry/hold (an object), give/serve (an object) to (a person).", "At 1678.0s, [0.569, 0.312, 0.949, 0.896]: stand, carry/hold (an object).", "From 1670.0s to 1677.0s, [0.65, 0.221, 0.816, 0.88]->[0.636, 0.204, 0.818, 0.91]->[0.578, 0.22, 0.791, 0.885]->[0.591, 0.233, 0.821, 0.918]->[0.584, 0.223, 0.834, 0.902]->[0.588, 0.234, 0.841, 0.914]->[0.586, 0.237, 0.897, 0.923]->[0.531, 0.304, 0.87, 0.891]: walk, answer phone."]}]}, {"video": "PmElx9ZVByw.mp4", "clip": [1655, 1735], "conversations": [{"from": "human", "timestamps": 1685.0, "value": "What actions is the person in bbox [0.162, 0.151, 0.466, 0.859] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.162, 0.151, 0.466, 0.859]: stand, answer phone.", "At 1684.0s, [0.133, 0.134, 0.447, 0.869]: stand, answer phone, talk to (e.g., self, a person, a group).", "At 1683.0s, [0.124, 0.13, 0.441, 0.881]: stand, answer phone.", "From 1680.0s to 1682.0s, [0.112, 0.142, 0.421, 0.884]->[0.132, 0.167, 0.441, 0.882]->[0.115, 0.125, 0.435, 0.878]: stand, answer phone, carry/hold (an object).", "At 1679.0s, [0.148, 0.14, 0.436, 0.889]: stand, carry/hold (an object), take (an object) from (a person), watch (a person).", "At 1678.0s, [0.172, 0.138, 0.466, 0.889]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1677.0s, [0.179, 0.109, 0.487, 0.887]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1735.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", 
"value": "At now, [0.559, 0.37, 0.693, 0.859]: walk; [0.726, 0.346, 0.893, 0.893]: carry/hold (an object), walk."}]}, {"video": "PmElx9ZVByw.mp4", "clip": [1719, 1782], "conversations": [{"from": "human", "timestamps": 1749.0, "value": "What actions did the person at bbox [0.293, 0.19, 0.831, 0.906] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.293, 0.19, 0.831, 0.906]: stand, touch (an object), listen to (a person).", "At 1748.0s, [0.339, 0.167, 0.787, 0.888]: stand, touch (an object), listen to (a person).", "At 1747.0s, [0.373, 0.191, 0.803, 0.889]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1746.0s, [0.327, 0.174, 0.783, 0.886]: stand, point to (an object), touch (an object), watch (e.g., TV), talk to (e.g., self, a person, a group).", "At 1745.0s, [0.332, 0.172, 0.786, 0.896]: stand, touch (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1782.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.309, 0.13, 1.0, 0.918]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "QD3L10bUnBo.mkv", "clip": [888, 948], "conversations": [{"from": "human", "timestamps": 918.0, "value": "Describe the actions of the person at bbox [0.138, 0.076, 0.945, 0.968] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.138, 0.076, 0.945, 0.968]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 917.0s, [0.119, 0.078, 0.938, 0.959]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 948.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.081, 0.097, 0.794, 0.967]: listen to (a person), sit, watch (a person)."}]}, {"video": "QD3L10bUnBo.mkv", "clip": [932, 1018], "conversations": [{"from": "human", "timestamps": 962.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.142, 0.035, 0.818, 0.97]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1018.0, "value": "For the person at bbox [0.124, 0.03, 0.965, 0.952], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.124, 0.03, 0.965, 0.952]: lie/sleep.", "From 1016.0s to 1017.0s, [0.108, 0.033, 0.976, 0.965]->[0.312, 0.011, 1.0, 0.991]: lie/sleep.", "At 1015.0s, [0.374, 0.278, 0.983, 0.953]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "QD3L10bUnBo.mkv", "clip": [1000, 1052], "conversations": [{"from": "human", "timestamps": 1030.0, "value": "What actions is the person in bbox [0.15, 0.098, 0.994, 0.968] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.15, 0.098, 0.994, 0.968]: stand, talk to (e.g., self, a person, a group).", "From 1028.0s to 1029.0s, [0.168, 0.08, 1.0, 0.966]->[0.182, 0.104, 0.993, 0.982]: stand, talk to (e.g., self, a person, a group).", "At 1027.0s, [0.186, 0.076, 0.994, 0.963]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1052.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.173, 0.067, 0.933, 0.972]: 
walk."}]}, {"video": "QD3L10bUnBo.mkv", "clip": [1032, 1121], "conversations": [{"from": "human", "timestamps": 1062.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.111, 0.01, 0.963, 0.978]: lie/sleep."}, {"from": "human", "timestamps": 1121.0, "value": "What actions did the person at bbox [0.327, 0.149, 0.859, 0.958] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.327, 0.149, 0.859, 0.958]: stand.", "At 1120.0s, [0.31, 0.158, 0.86, 0.966]: stand."]}]}, {"video": "QD3L10bUnBo.mkv", "clip": [1051, 1083], "conversations": [{"from": "human", "timestamps": 1081.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.069, 0.104, 0.756, 0.974]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1083.0, "value": "What actions is the person in bbox [0.058, 0.132, 0.734, 0.973] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.058, 0.132, 0.734, 0.973]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1081.0s to 1082.0s, [0.069, 0.104, 0.756, 0.974]->[0.059, 0.125, 0.75, 0.977]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "QD3L10bUnBo.mkv", "clip": [1255, 1317], "conversations": [{"from": "human", "timestamps": 1285.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.133, 0.096, 0.93, 0.953]?"}, {"from": "gpt", "value": ["In the current frame, [0.133, 0.096, 0.93, 0.953]: lie/sleep.", "From 1283.0s to 1284.0s, [0.281, 0.653, 0.976, 0.965]->[0.122, 0.147, 0.928, 0.951]: lie/sleep."]}, {"from": "human", "timestamps": 1317.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.133, 0.09, 0.917, 0.971]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "QD3L10bUnBo.mkv", "clip": [1322, 1365], "conversations": [{"from": "human", "timestamps": 1352.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.12, 0.109, 0.897, 0.948]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1365.0, "value": "For the person at bbox [0.115, 0.183, 0.332, 0.977], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.115, 0.183, 0.332, 0.977]: stand, carry/hold (an object), watch (a person).", "From 1362.0s to 1364.0s, [0.343, 0.158, 0.599, 0.967]->[0.222, 0.199, 0.447, 0.957]->[0.144, 0.176, 0.355, 0.949]: stand, carry/hold (an object), watch (a person)."]}]}, {"video": "QD3L10bUnBo.mkv", "clip": [1434, 1493], "conversations": [{"from": "human", "timestamps": 1464.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.301, 0.151, 0.798, 0.984]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1493.0, "value": "What actions is the person in bbox [0.286, 0.172, 0.726, 0.972] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, 
[0.286, 0.172, 0.726, 0.972]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1490.0s to 1492.0s, [0.262, 0.152, 0.729, 0.983]->[0.262, 0.149, 0.766, 0.976]->[0.248, 0.15, 0.748, 0.975]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1488.0s to 1489.0s, [0.284, 0.161, 0.744, 0.988]->[0.284, 0.151, 0.756, 0.99]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "QD3L10bUnBo.mkv", "clip": [1515, 1548], "conversations": [{"from": "human", "timestamps": 1545.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.048, 0.129, 0.98, 0.991]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1548.0, "value": "For the person at bbox [0.12, 0.08, 0.954, 0.987], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.12, 0.08, 0.954, 0.987]: sit, talk to (e.g., self, a person, a group).", "At 1547.0s, [0.074, 0.09, 0.961, 0.973]: sit, talk to (e.g., self, a person, a group).", "From 1545.0s to 1546.0s, [0.048, 0.129, 0.98, 0.991]->[0.079, 0.111, 0.968, 0.976]: stand, talk to (e.g., self, a person, a group).", "At 1544.0s, [0.043, 0.164, 0.921, 0.975]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1543.0s, [0.053, 0.06, 0.949, 0.988]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "QD3L10bUnBo.mkv", "clip": [1520, 1609], "conversations": [{"from": "human", "timestamps": 1550.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.017, 0.053, 0.933, 0.985]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1609.0, "value": "For the person at bbox [0.362, 0.24, 0.653, 0.833], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.362, 0.24, 0.653, 0.833]: get up, talk to (e.g., self, a person, a group).", "At 1607.0s, [0.0, 0.02, 0.912, 0.991]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person).", "At 1606.0s, [0.0, 0.075, 0.887, 0.969]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1605.0s, [0.071, 0.087, 0.934, 0.967]: stand, talk to (e.g., self, a person, a group).", "From 1603.0s to 1604.0s, [0.035, 0.114, 0.886, 0.966]->[0.074, 0.109, 0.911, 0.97]: sit, talk to (e.g., self, a person, a group).", "From 1601.0s to 1602.0s, [0.113, 0.074, 0.91, 0.977]->[0.062, 0.087, 0.942, 0.969]: stand, talk to (e.g., self, a person, a group).", "From 1598.0s to 1600.0s, [0.089, 0.077, 0.992, 0.988]->[0.151, 0.129, 0.978, 1.0]->[0.138, 0.11, 0.991, 0.968]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "QD3L10bUnBo.mkv", "clip": [1533, 1593], "conversations": [{"from": "human", "timestamps": 1563.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.025, 0.127, 0.846, 0.978]: sit."}, {"from": "human", "timestamps": 1593.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.0, 0.852, 0.961]?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.0, 0.852, 0.961]: stand.", "At 1592.0s, [0.038, 0.067, 0.831, 0.956]: stand.", "From 1590.0s to 1591.0s, [0.0, 0.03, 1.0, 0.987]->[0.067, 
0.017, 0.8, 0.974]: stand, talk to (e.g., self, a person, a group).", "At 1589.0s, [0.105, 0.0, 1.0, 0.955]: stand, listen to (a person)."]}]}, {"video": "QD3L10bUnBo.mkv", "clip": [1612, 1722], "conversations": [{"from": "human", "timestamps": 1642.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.112, 0.255, 0.943, 0.954]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1722.0, "value": "What actions did the person at bbox [0.055, 0.078, 0.869, 0.969] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.055, 0.078, 0.869, 0.969]: sit, talk to (e.g., self, a person, a group).", "At 1721.0s, [0.035, 0.112, 0.913, 0.959]: sit, talk to (e.g., self, a person, a group).", "At 1720.0s, [0.026, 0.071, 0.889, 0.976]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "QD3L10bUnBo.mkv", "clip": [1697, 1730], "conversations": [{"from": "human", "timestamps": 1727.0, "value": "For the person at bbox [0.001, 0.114, 0.969, 0.952], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.001, 0.114, 0.969, 0.952]: sit, listen to (a person), watch (a person).", "From 1725.0s to 1726.0s, [0.136, 0.075, 0.917, 0.97]->[0.11, 0.082, 0.935, 0.968]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1730.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.047, 0.137, 0.912, 0.986]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "QD3L10bUnBo.mkv", "clip": [1715, 1768], "conversations": [{"from": "human", "timestamps": 1745.0, "value": "What actions is the person in bbox [0.113, 0.117, 0.98, 0.968] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.113, 0.117, 0.98, 0.968]: stand, talk to (e.g., self, a person, a group).", "From 1742.0s to 1744.0s, [0.169, 0.077, 0.988, 0.963]->[0.179, 0.057, 0.965, 0.977]->[0.15, 0.077, 0.984, 0.972]: stand, talk to (e.g., self, a person, a group).", "At 1741.0s, [0.212, 0.073, 0.968, 0.976]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1740.0s, [0.07, 0.121, 0.974, 0.987]: get up, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1768.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.233, 0.079, 0.479, 0.873]: listen to (a person), stand, watch (a person); [0.527, 0.144, 0.807, 0.866]: carry/hold (an object), stand, watch (a person), listen to (a person)."}]}, {"video": "QJzocCGLdHU.mp4", "clip": [906, 952], "conversations": [{"from": "human", "timestamps": 936.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.303, 0.259, 0.538, 0.936]?"}, {"from": "gpt", "value": ["At Now, [0.303, 0.259, 0.538, 0.936]: bend/bow (at the waist), hit (an object), listen to (a person).", "At 935.0s, [0.341, 0.185, 0.614, 0.64]: stand, talk to (e.g., self, a person, a group).", "At 934.0s, [0.317, 0.133, 0.59, 0.915]: stand, listen to (a person).", "At 933.0s, [0.333, 0.134, 0.535, 0.921]: stand, listen to (a person), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 952.0, "value": "In the current frame, 
list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.133, 0.133, 0.363, 0.947]: listen to (a person), stand, touch (an object); [0.287, 0.188, 0.528, 0.885]: stand, talk to (e.g., self, a person, a group); [0.579, 0.166, 0.831, 0.95]: listen to (a person), stand, touch (an object)."}]}, {"video": "QJzocCGLdHU.mp4", "clip": [913, 980], "conversations": [{"from": "human", "timestamps": 943.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.145, 0.13, 0.365, 0.962]: listen to (a person), stand, touch (an object); [0.298, 0.157, 0.5, 0.874]: stand, talk to (e.g., self, a person, a group); [0.6, 0.163, 0.86, 0.965]: listen to (a person), stand, touch (an object)."}, {"from": "human", "timestamps": 980.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.067, 0.104, 0.849, 0.996]?"}, {"from": "gpt", "value": ["At Now, [0.067, 0.104, 0.849, 0.996]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 978.0s to 979.0s, [0.033, 0.093, 0.791, 0.991]->[0.059, 0.1, 0.814, 0.977]: stand, talk to (e.g., self, a person, a group).", "At 977.0s, [0.08, 0.126, 0.785, 0.99]: stand, watch (a person)."]}]}, {"video": "QJzocCGLdHU.mp4", "clip": [1042, 1074], "conversations": [{"from": "human", "timestamps": 1072.0, "value": "For the person at bbox [0.104, 0.078, 0.958, 0.981], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.104, 0.078, 0.958, 0.981]: stand, talk to (e.g., self, a person, a group).", "From 1066.0s to 1071.0s, [0.292, 0.224, 0.472, 0.886]->[0.295, 0.206, 0.497, 0.904]->[0.287, 0.201, 0.494, 0.891]->[0.295, 0.206, 0.471, 0.888]->[0.299, 0.208, 0.479, 0.887]->[0.024, 0.092, 0.916, 0.974]: stand, talk to (e.g., self, a person, a group).", "From 1064.0s to 1065.0s, [0.296, 0.205, 0.508, 0.908]->[0.308, 0.213, 0.486, 0.887]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1074.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.056, 0.076, 0.937, 0.979]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "QJzocCGLdHU.mp4", "clip": [1019, 1081], "conversations": [{"from": "human", "timestamps": 1049.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.105, 0.083, 0.825, 1.0]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1081.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.181, 0.126, 0.98, 0.997]?"}, {"from": "gpt", "value": ["In the current frame, [0.181, 0.126, 0.98, 0.997]: sit, stand, talk to (e.g., self, a person, a group).", "From 1066.0s to 1080.0s, [0.292, 0.224, 0.472, 0.886]->[0.295, 0.206, 0.497, 0.904]->[0.287, 0.201, 0.494, 0.891]->[0.295, 0.206, 0.471, 0.888]->[0.299, 0.208, 0.479, 0.887]->[0.024, 0.092, 0.916, 0.974]->[0.104, 0.078, 0.958, 0.981]->[0.111, 0.083, 0.951, 0.976]->[0.056, 0.076, 0.937, 0.979]->[0.067, 0.076, 0.972, 0.98]->[0.119, 0.103, 0.975, 0.973]->[0.103, 0.105, 0.966, 0.962]->[0.061, 0.094, 0.819, 0.985]->[0.08, 0.117, 0.914, 0.991]->[0.166, 0.173, 0.961, 0.984]: stand, talk to (e.g., self, a person, a group).", "From 1064.0s to 1065.0s, [0.296, 0.205, 0.508, 0.908]->[0.308, 0.213, 0.486, 0.887]: 
stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "QJzocCGLdHU.mp4", "clip": [1113, 1161], "conversations": [{"from": "human", "timestamps": 1143.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.214, 0.04, 0.994, 0.976]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1161.0, "value": "What actions is the person in bbox [0.432, 0.242, 0.922, 0.963] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.432, 0.242, 0.922, 0.963]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1160.0s, [0.527, 0.147, 0.99, 0.944]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1159.0s, [0.598, 0.097, 0.947, 0.925]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1158.0s, [0.606, 0.113, 0.954, 0.919]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1157.0s, [0.592, 0.101, 0.972, 0.94]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1155.0s to 1156.0s, [0.611, 0.126, 0.933, 0.964]->[0.582, 0.138, 0.942, 0.933]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1154.0s, [0.64, 0.09, 0.988, 0.912]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1153.0s, [0.629, 0.039, 0.962, 0.909]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "QJzocCGLdHU.mp4", "clip": [1160, 1246], "conversations": [{"from": "human", "timestamps": 1190.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.007, 0.01, 0.234, 0.989]: dance, drink; [0.19, 0.059, 0.402, 0.786]: carry/hold (an object), dance, watch (a person); [0.428, 0.126, 0.774, 0.935]: dance; [0.435, 0.095, 0.546, 0.405]: dance; [0.682, 0.147, 0.864, 0.714]: dance; [0.804, 0.029, 1.0, 0.735]: dance."}, {"from": "human", "timestamps": 1246.0, "value": "What actions did the person at bbox [0.283, 0.021, 0.997, 0.983] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.283, 0.021, 0.997, 0.983]: stand, talk to (e.g., self, a person, a group).", "From 1242.0s to 1245.0s, [0.722, 0.18, 0.967, 0.959]->[0.72, 0.176, 0.968, 0.973]->[0.325, 0.025, 0.981, 0.987]->[0.211, 0.039, 0.999, 0.993]: stand, watch (a person)."]}]}, {"video": "QJzocCGLdHU.mp4", "clip": [1172, 1225], "conversations": [{"from": "human", "timestamps": 1202.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.0, 0.309, 0.228, 0.964]: carry/hold (an object), dance; [0.137, 0.268, 0.478, 0.957]: carry/hold (an object), dance, drink; [0.482, 0.271, 0.912, 0.979]: dance, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1225.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.211, 0.223, 0.582, 0.97]?"}, {"from": "gpt", "value": ["At Now, [0.211, 0.223, 0.582, 0.97]: carry/hold (an object), dance, watch (a person).", "At 1224.0s, [0.23, 0.092, 0.629, 0.985]: carry/hold (an object), dance, talk to 
(e.g., self, a person, a group), watch (a person).", "At 1223.0s, [0.131, 0.227, 0.444, 0.971]: carry/hold (an object), drink, dance, listen to (a person), watch (a person).", "At 1222.0s, [0.195, 0.137, 0.497, 0.979]: carry/hold (an object), drink, dance, watch (a person).", "At 1221.0s, [0.357, 0.133, 0.661, 0.977]: drink, dance, watch (a person).", "At 1220.0s, [0.547, 0.154, 0.869, 0.962]: carry/hold (an object), dance, listen to (a person), sing to (e.g., self, a person, a group), watch (a person).", "At 1219.0s, [0.577, 0.115, 0.972, 0.963]: dance."]}]}, {"video": "QJzocCGLdHU.mp4", "clip": [1280, 1360], "conversations": [{"from": "human", "timestamps": 1310.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.09, 0.059, 0.381, 0.801]: watch (a person), dance, drink; [0.358, 0.044, 0.913, 0.973]: watch (a person), dance."}, {"from": "human", "timestamps": 1360.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.335, 0.146, 0.725, 0.928]?"}, {"from": "gpt", "value": ["At Now, [0.335, 0.146, 0.725, 0.928]: stand.", "From 1358.0s to 1359.0s, [0.466, 0.115, 0.623, 0.741]->[0.439, 0.125, 0.621, 0.741]: stand, watch (a person).", "At 1357.0s, [0.477, 0.121, 0.634, 0.753]: stand, hug (a person), watch (a person).", "From 1350.0s to 1355.0s, [0.527, 0.124, 0.681, 0.753]->[0.475, 0.124, 0.66, 0.762]->[0.478, 0.123, 0.639, 0.751]->[0.506, 0.119, 0.647, 0.749]->[0.505, 0.128, 0.654, 0.746]->[0.495, 0.13, 0.642, 0.735]: stand, watch (a person).", "At 1349.0s, [0.542, 0.126, 0.687, 0.754]: stand, listen to (a person), watch (a person).", "At 1348.0s, [0.539, 0.124, 0.684, 0.76]: walk, listen to (a person), watch (a person).", "From 1346.0s to 1347.0s, [0.496, 0.097, 0.688, 0.758]->[0.47, 0.11, 0.667, 0.765]: stand, watch (a person).", "At 1345.0s, [0.568, 0.087, 0.74, 0.772]: stand, listen to (a person), watch (a person).", "At 1344.0s, [0.493, 0.077, 0.733, 0.766]: stand, watch (a person)."]}]}, {"video": "QJzocCGLdHU.mp4", "clip": [1322, 1361], "conversations": [{"from": "human", "timestamps": 1352.0, "value": "Describe the actions of the person at bbox [0.385, 0.148, 0.525, 0.745] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.385, 0.148, 0.525, 0.745]: stand, carry/hold (an object), watch (a person).", "At 1351.0s, [0.396, 0.145, 0.538, 0.742]: stand, carry/hold (an object), watch (a person).", "At 1350.0s, [0.41, 0.144, 0.593, 0.744]: stand, carry/hold (an object), drink, watch (a person).", "At 1349.0s, [0.428, 0.144, 0.582, 0.747]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1347.0s to 1348.0s, [0.428, 0.128, 0.601, 0.771]->[0.439, 0.144, 0.573, 0.743]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1345.0s to 1346.0s, [0.448, 0.119, 0.617, 0.738]->[0.459, 0.128, 0.562, 0.717]: stand, carry/hold (an object), watch (a person)."]}, {"from": "human", "timestamps": 1361.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.044, 0.207, 0.448, 0.962]: stand; [0.27, 0.109, 0.723, 0.962]: listen to (a person), stand, watch (a person)."}]}, {"video": "QJzocCGLdHU.mp4", "clip": [1377, 1421], "conversations": [{"from": "human", "timestamps": 1407.0, "value": "What actions is each person performing, and where are they 
positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.162, 0.163, 0.618, 0.967]: stand, touch (an object); [0.191, 0.13, 0.825, 0.968]: watch (a person), stand."}, {"from": "human", "timestamps": 1421.0, "value": "Describe the actions of the person at bbox [0.661, 0.056, 0.864, 0.849] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.661, 0.056, 0.864, 0.849]: stand, push (another person), watch (a person).", "At 1420.0s, [0.071, 0.054, 0.841, 0.997]: stand, watch (a person)."]}]}, {"video": "QJzocCGLdHU.mp4", "clip": [1403, 1493], "conversations": [{"from": "human", "timestamps": 1433.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.262, 0.446, 0.85, 0.88]: lie/sleep."}, {"from": "human", "timestamps": 1493.0, "value": "What actions did the person at bbox [0.134, 0.003, 0.938, 0.987] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.134, 0.003, 0.938, 0.987]: lie/sleep, watch (a person).", "From 1488.0s to 1492.0s, [0.303, 0.583, 0.875, 0.905]->[0.083, 0.027, 0.988, 0.989]->[0.089, 0.045, 0.964, 1.0]->[0.133, 0.043, 1.0, 0.989]->[0.108, 0.043, 0.982, 0.973]: lie/sleep, watch (a person).", "From 1486.0s to 1487.0s, [0.314, 0.641, 0.806, 0.913]->[0.315, 0.63, 0.806, 0.918]: listen to (a person), lie/sleep, watch (a person)."]}]}, {"video": "QJzocCGLdHU.mp4", "clip": [1482, 1616], "conversations": [{"from": "human", "timestamps": 1512.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.058, 0.11, 0.978, 0.988]: watch (a person), lie/sleep."}, {"from": "human", "timestamps": 1616.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.243, 0.173, 0.459, 0.776]?"}, {"from": "gpt", "value": ["At Now, [0.243, 0.173, 0.459, 0.776]: stand, touch (an object).", "At 1615.0s, [0.113, 0.181, 0.512, 0.803]: stand, touch (an object).", "At 1614.0s, [0.316, 0.289, 0.633, 0.756]: bend/bow (at the waist), open (e.g., a window, a car door), touch (an object).", "At 1613.0s, [0.337, 0.193, 0.587, 0.76]: stand, open (e.g., a window, a car door).", "From 1610.0s to 1612.0s, [0.25, 0.055, 0.998, 0.97]->[0.237, 0.069, 0.988, 0.978]->[0.347, 0.151, 0.582, 0.763]: stand, carry/hold (an object).", "From 1602.0s to 1609.0s, [0.346, 0.152, 0.565, 0.752]->[0.346, 0.153, 0.555, 0.753]->[0.353, 0.154, 0.557, 0.768]->[0.277, 0.049, 0.985, 0.982]->[0.246, 0.039, 1.0, 0.976]->[0.231, 0.072, 0.99, 0.97]->[0.252, 0.046, 0.998, 0.972]->[0.244, 0.044, 1.0, 0.967]: stand, answer phone."]}]}, {"video": "QJzocCGLdHU.mp4", "clip": [1529, 1561], "conversations": [{"from": "human", "timestamps": 1559.0, "value": "What actions did the person at bbox [0.555, 0.08, 0.846, 0.822] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.555, 0.08, 0.846, 0.822]: crouch/kneel.", "At 1558.0s, [0.55, 0.078, 0.851, 0.831]: crouch/kneel.", "From 1556.0s to 1557.0s, [0.562, 0.091, 0.875, 0.829]->[0.552, 0.093, 0.845, 0.831]: crouch/kneel, watch (a person)."]}, {"from": "human", "timestamps": 1561.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.035, 0.281, 0.951, 0.928]: lie/sleep; [0.543, 0.097, 0.864, 0.85]: crouch/kneel."}]}, {"video": "QJzocCGLdHU.mp4", "clip": [1596, 1664], "conversations": [{"from": "human", 
"timestamps": 1626.0, "value": "Describe the actions of the person at bbox [0.003, 0.042, 0.818, 0.962] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.003, 0.042, 0.818, 0.962]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1625.0s, [0.0, 0.038, 0.821, 0.977]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1664.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.797, 0.075, 0.999, 0.936]: watch (a person), stand."}]}, {"video": "QMwT7DFA5O4.mkv", "clip": [941, 987], "conversations": [{"from": "human", "timestamps": 971.0, "value": "For the person at bbox [0.041, 0.26, 0.592, 0.988], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.041, 0.26, 0.592, 0.988]: stand, listen to (a person), watch (a person).", "From 968.0s to 970.0s, [0.051, 0.254, 0.642, 0.985]->[0.026, 0.249, 0.73, 0.983]->[0.065, 0.241, 0.613, 0.982]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 987.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.216, 0.036, 0.98, 0.975]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "QMwT7DFA5O4.mkv", "clip": [1147, 1199], "conversations": [{"from": "human", "timestamps": 1177.0, "value": "What actions did the person at bbox [0.291, 0.014, 0.981, 0.985] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.291, 0.014, 0.981, 0.985]: walk.", "From 1175.0s to 1176.0s, [0.176, 0.085, 1.0, 0.979]->[0.269, 0.104, 0.958, 0.975]: walk.", "From 1171.0s to 1173.0s, [0.407, 0.273, 0.855, 0.989]->[0.302, 0.252, 0.894, 0.987]->[0.314, 0.169, 0.945, 0.994]: walk."]}, {"from": "human", "timestamps": 1199.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.435, 0.056, 0.985, 0.994]: hand wave, stand."}]}, {"video": "QMwT7DFA5O4.mkv", "clip": [1176, 1250], "conversations": [{"from": "human", "timestamps": 1206.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.527, 0.475, 0.549, 0.541]: walk."}, {"from": "human", "timestamps": 1250.0, "value": "What actions did the person at bbox [0.253, 0.278, 0.834, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.253, 0.278, 0.834, 1.0]: walk, talk to (e.g., self, a person, a group).", "At 1249.0s, [0.33, 0.291, 0.781, 0.981]: walk, talk to (e.g., self, a person, a group).", "From 1244.0s to 1248.0s, [0.266, 0.28, 0.471, 0.859]->[0.335, 0.273, 0.545, 0.909]->[0.32, 0.282, 0.533, 0.971]->[0.195, 0.331, 0.495, 1.0]->[0.12, 0.23, 0.45, 0.998]: walk."]}]}, {"video": "QMwT7DFA5O4.mkv", "clip": [1269, 1344], "conversations": [{"from": "human", "timestamps": 1299.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.198, 0.242, 0.386, 0.735]: listen to (a person), sit, watch (a person); [0.398, 0.238, 0.544, 0.666]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1344.0, "value": "Describe the actions of the person at bbox [0.136, 0.0, 0.975, 0.989] in the current frame and the previous frames."}, {"from": 
"gpt", "value": ["At Now, [0.136, 0.0, 0.975, 0.989]: sit, talk to (e.g., self, a person, a group).", "At 1343.0s, [0.097, 0.002, 0.978, 0.995]: sit, stand, talk to (e.g., self, a person, a group).", "At 1342.0s, [0.078, 0.03, 1.0, 0.978]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "QMwT7DFA5O4.mkv", "clip": [1380, 1444], "conversations": [{"from": "human", "timestamps": 1410.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.019, 0.059, 0.875, 0.997]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1444.0, "value": "What actions did the person at bbox [0.088, 0.044, 0.879, 0.982] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.088, 0.044, 0.879, 0.982]: stand, talk to (e.g., self, a person, a group).", "At 1443.0s, [0.037, 0.05, 0.876, 0.982]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "QMwT7DFA5O4.mkv", "clip": [1409, 1450], "conversations": [{"from": "human", "timestamps": 1439.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.237, 0.06, 0.655, 0.936]: watch (a person), sit, touch (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1450.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.026, 0.024, 0.941, 0.994]?"}, {"from": "gpt", "value": ["At Now, [0.026, 0.024, 0.941, 0.994]: stand, talk to (e.g., self, a person, a group).", "At 1449.0s, [0.0, 0.0, 0.705, 0.995]: stand, talk to (e.g., self, a person, a group).", "At 1448.0s, [0.0, 0.006, 0.841, 0.994]: stand, watch (a person)."]}]}, {"video": "QMwT7DFA5O4.mkv", "clip": [1474, 1558], "conversations": [{"from": "human", "timestamps": 1504.0, "value": "What actions did the person at bbox [0.007, 0.014, 0.569, 0.955] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.007, 0.014, 0.569, 0.955]: sit, carry/hold (an object).", "From 1500.0s to 1503.0s, [0.082, 0.163, 0.449, 0.641]->[0.027, 0.111, 0.488, 0.971]->[0.021, 0.115, 0.529, 0.934]->[0.018, 0.012, 0.592, 0.955]: sit, carry/hold (an object).", "At 1499.0s, [0.047, 0.209, 0.553, 0.967]: sit, talk to (e.g., self, a person, a group).", "At 1498.0s, [0.132, 0.246, 0.512, 0.893]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1497.0s, [0.233, 0.306, 0.557, 0.933]: sit, talk to (e.g., self, a person, a group).", "At 1496.0s, [0.41, 0.321, 0.595, 0.615]: sit, carry/hold (an object).", "At 1495.0s, [0.482, 0.345, 0.662, 0.934]: sit, watch (a person).", "From 1493.0s to 1494.0s, [0.485, 0.326, 0.636, 0.553]->[0.472, 0.346, 0.688, 0.894]: sit, listen to (a person)."]}, {"from": "human", "timestamps": 1558.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.114, 0.148, 0.958, 0.989]: listen to (a person), sit, watch (a person)."}]}, {"video": "QMwT7DFA5O4.mkv", "clip": [1632, 1729], "conversations": [{"from": "human", "timestamps": 1662.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.314, 0.018, 0.939, 0.981]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1729.0, "value": "What actions did the person at bbox [0.09, 0.215, 0.492, 0.982] perform 
now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.09, 0.215, 0.492, 0.982]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1728.0s, [0.064, 0.197, 0.537, 0.975]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "QMwT7DFA5O4.mkv", "clip": [1697, 1728], "conversations": [{"from": "human", "timestamps": 1727.0, "value": "For the person at bbox [0.366, 0.214, 0.993, 0.939], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.366, 0.214, 0.993, 0.939]: sit, touch (an object), listen to (a person), watch (a person).", "At 1726.0s, [0.631, 0.3, 0.928, 0.914]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1725.0s, [0.633, 0.319, 0.925, 0.91]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1724.0s, [0.623, 0.325, 0.913, 0.819]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1723.0s, [0.609, 0.353, 0.901, 0.96]: sit, stand, carry/hold (an object), listen to (a person).", "At 1722.0s, [0.598, 0.352, 0.906, 0.938]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1721.0s, [0.616, 0.313, 0.899, 0.945]: sit, carry/hold (an object), touch (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1720.0s, [0.597, 0.309, 0.903, 0.94]: sit, listen to (a person).", "From 1718.0s to 1719.0s, [0.57, 0.309, 0.887, 0.926]->[0.6, 0.294, 0.912, 0.923]: sit, listen to (a person), watch (a person).", "At 1717.0s, [0.596, 0.312, 0.908, 0.804]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1716.0s, [0.601, 0.302, 0.983, 0.798]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1715.0s, [0.6, 0.33, 0.899, 0.948]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1714.0s, [0.587, 0.331, 0.896, 0.955]: stand, carry/hold (an object), touch (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1728.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.064, 0.197, 0.537, 0.975]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.335, 0.11, 0.985, 0.942]: listen to (a person), sit, watch (a person)."}]}, {"video": "QaIMUi-elFo.mkv", "clip": [875, 914], "conversations": [{"from": "human", "timestamps": 905.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.22, 0.069, 0.458, 1.0]: watch (a person), listen to (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.411, 0.122, 0.634, 0.979]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.603, 0.21, 0.985, 0.987]: carry/hold (an object), sit, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 914.0, "value": "What actions did the person at bbox [0.289, 0.175, 0.606, 0.978] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.289, 0.175, 0.606, 0.978]: stand, carry/hold (an object).", "At 913.0s, [0.265, 0.079, 0.57, 0.997]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 911.0s to 912.0s, [0.367, 0.066, 0.616, 1.0]->[0.369, 0.066, 0.597, 1.0]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a 
group), watch (a person)."]}]}, {"video": "QaIMUi-elFo.mkv", "clip": [879, 1011], "conversations": [{"from": "human", "timestamps": 909.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.152, 0.1, 0.381, 0.985]: carry/hold (an object), stand, watch (a person); [0.383, 0.13, 0.609, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.641, 0.226, 0.98, 0.964]: carry/hold (an object), sit, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1011.0, "value": "Describe the actions of the person at bbox [0.197, 0.232, 0.562, 0.806] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.197, 0.232, 0.562, 0.806]: sit, carry/hold (an object).", "At 1010.0s, [0.223, 0.233, 0.549, 0.799]: sit.", "At 1009.0s, [0.257, 0.237, 0.568, 0.804]: sit, touch (an object), watch (a person)."]}]}, {"video": "QaIMUi-elFo.mkv", "clip": [940, 1004], "conversations": [{"from": "human", "timestamps": 970.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.001, 0.515, 0.145, 1.0]: watch (a person), sit; [0.007, 0.246, 0.195, 0.703]: listen to (a person), watch (a person), walk; [0.213, 0.18, 0.394, 0.697]: listen to (a person), stand, watch (a person); [0.368, 0.174, 0.494, 0.668]: listen to (a person), watch (a person), walk; [0.49, 0.512, 0.721, 0.983]: listen to (a person), sit, watch (a person); [0.491, 0.343, 0.604, 0.676]: listen to (a person), stand, watch (a person); [0.648, 0.28, 0.804, 0.671]: watch (a person), stand, touch (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1004.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.582, 0.144, 0.973]?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.582, 0.144, 0.973]: sit.", "From 1001.0s to 1003.0s, [0.0, 0.59, 0.141, 0.97]->[0.0, 0.584, 0.142, 0.954]->[0.0, 0.598, 0.146, 0.966]: sit."]}]}, {"video": "QaIMUi-elFo.mkv", "clip": [1045, 1079], "conversations": [{"from": "human", "timestamps": 1075.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.059, 0.055, 0.4, 0.969]: listen to (a person), stand, watch (a person); [0.382, 0.115, 0.712, 0.96]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.653, 0.505, 0.948, 1.0]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1079.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.012, 0.066, 0.846, 0.991]?"}, {"from": "gpt", "value": ["In the current frame, [0.012, 0.066, 0.846, 0.991]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1078.0s, [0.691, 0.411, 0.989, 0.991]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1076.0s to 1077.0s, [0.669, 0.465, 0.969, 0.992]->[0.692, 0.438, 0.978, 0.995]: stand, listen to (a person), watch (a person).", "From 1074.0s to 1075.0s, [0.647, 0.504, 0.939, 1.0]->[0.653, 0.505, 0.948, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1073.0s, [0.67, 0.503, 0.934, 0.996]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1071.0s to 1072.0s, [0.18, 0.063, 0.803, 0.973]->[0.202, 0.069, 0.81, 0.985]: stand, carry/hold (an object), talk to (e.g., self, a person, a 
group).", "From 1069.0s to 1070.0s, [0.21, 0.068, 0.818, 0.981]->[0.188, 0.059, 0.801, 0.979]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1068.0s, [0.652, 0.467, 0.883, 0.974]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1066.0s to 1067.0s, [0.635, 0.453, 0.886, 0.989]->[0.646, 0.442, 0.894, 0.981]: stand, listen to (a person), watch (a person).", "At 1065.0s, [0.579, 0.37, 0.865, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1064.0s, [0.599, 0.343, 0.872, 0.978]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1062.0s to 1063.0s, [0.608, 0.365, 0.887, 0.998]->[0.536, 0.381, 0.887, 0.965]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1061.0s, [0.488, 0.364, 0.9, 1.0]: stand, hand shake, listen to (a person), watch (a person).", "At 1060.0s, [0.486, 0.363, 0.895, 0.998]: stand, hand shake, talk to (e.g., self, a person, a group), watch (a person).", "From 1056.0s to 1059.0s, [0.513, 0.359, 0.906, 0.993]->[0.479, 0.34, 0.895, 0.989]->[0.502, 0.374, 0.898, 0.972]->[0.516, 0.352, 0.885, 1.0]: stand, carry/hold (an object), hand shake, listen to (a person), watch (a person).", "At 1055.0s, [0.686, 0.45, 0.913, 0.983]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1053.0s to 1054.0s, [0.0, 0.054, 0.84, 1.0]->[0.0, 0.091, 0.874, 0.999]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1051.0s to 1052.0s, [0.647, 0.431, 0.89, 0.983]->[0.621, 0.432, 0.879, 0.984]: stand, listen to (a person), watch (a person).", "From 1048.0s to 1050.0s, [0.64, 0.446, 0.899, 0.986]->[0.663, 0.45, 0.897, 0.99]->[0.649, 0.482, 0.881, 0.993]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1047.0s, [0.622, 0.469, 0.89, 0.98]: stand, listen to (a person), watch (a person).", "From 1044.0s to 1046.0s, [0.581, 0.367, 0.685, 0.737]->[0.501, 0.357, 0.626, 0.718]->[0.646, 0.479, 0.878, 0.991]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1043.0s, [0.795, 0.365, 0.899, 0.74]: walk, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "QaIMUi-elFo.mkv", "clip": [1021, 1112], "conversations": [{"from": "human", "timestamps": 1051.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.078, 0.048, 0.4, 0.969]: listen to (a person), stand, watch (a person); [0.345, 0.187, 0.669, 0.98]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.647, 0.431, 0.89, 0.983]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1112.0, "value": "Describe the actions of the person at bbox [0.568, 0.041, 0.886, 0.839] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.568, 0.041, 0.886, 0.839]: carry/hold (an object), crouch/kneel, touch (an object), talk to (e.g., self, a person, a group).", "From 1108.0s to 1111.0s, [0.512, 0.119, 0.811, 0.943]->[0.513, 0.055, 0.83, 0.897]->[0.42, 0.023, 0.84, 0.93]->[0.557, 0.055, 0.87, 0.845]: carry/hold (an object), crouch/kneel, talk to (e.g., self, a person, a group).", "From 1106.0s to 1107.0s, [0.498, 0.215, 0.804, 0.999]->[0.54, 0.084, 0.794, 0.926]: carry/hold (an object), crouch/kneel, play with pets, talk 
to (e.g., self, a person, a group).", "At 1105.0s, [0.538, 0.24, 0.764, 0.992]: stand, carry/hold (an object), crouch/kneel, play with pets, talk to (e.g., self, a person, a group).", "From 1103.0s to 1104.0s, [0.481, 0.316, 0.752, 0.979]->[0.477, 0.319, 0.76, 0.97]: carry/hold (an object), crouch/kneel, touch (an object), talk to (e.g., self, a person, a group).", "At 1102.0s, [0.553, 0.084, 0.779, 0.903]: walk, carry/hold (an object).", "At 1101.0s, [0.583, 0.267, 0.82, 0.853]: bend/bow (at the waist), carry/hold (an object)."]}]}, {"video": "QaIMUi-elFo.mkv", "clip": [1061, 1092], "conversations": [{"from": "human", "timestamps": 1091.0, "value": "What actions is the person in bbox [0.684, 0.51, 0.967, 0.986] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.684, 0.51, 0.967, 0.986]: stand, listen to (a person), watch (a person).", "At 1090.0s, [0.698, 0.511, 0.965, 0.985]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1089.0s, [0.651, 0.41, 0.954, 0.992]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1087.0s to 1088.0s, [0.658, 0.406, 0.953, 0.998]->[0.64, 0.396, 0.934, 0.992]: stand, listen to (a person), watch (a person).", "From 1083.0s to 1086.0s, [0.656, 0.409, 0.972, 0.986]->[0.675, 0.38, 0.973, 0.992]->[0.669, 0.414, 0.963, 0.995]->[0.656, 0.413, 0.955, 0.998]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1082.0s, [0.012, 0.063, 0.844, 0.989]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1081.0s, [0.002, 0.06, 0.844, 0.992]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1080.0s, [0.0, 0.078, 0.879, 0.988]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1092.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.026, 0.078, 0.437, 0.993]: stand, talk to (e.g., self, a person, a group); [0.381, 0.221, 0.717, 0.918]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.688, 0.539, 0.948, 0.998]: listen to (a person), stand, watch (a person)."}]}, {"video": "QaIMUi-elFo.mkv", "clip": [1152, 1199], "conversations": [{"from": "human", "timestamps": 1182.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.127, 0.08, 0.497, 0.996]: stand, touch (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1199.0, "value": "For the person at bbox [0.146, 0.11, 0.644, 0.953], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.146, 0.11, 0.644, 0.953]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1198.0s, [0.248, 0.391, 0.459, 1.0]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1197.0s, [0.241, 0.391, 0.454, 0.994]: stand, carry/hold (an object), listen to (a person).", "At 1196.0s, [0.242, 0.394, 0.438, 0.992]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1195.0s, [0.247, 0.381, 0.45, 0.968]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1193.0s to 1194.0s, [0.163, 0.392, 0.369, 0.982]->[0.173, 0.387, 0.391, 0.987]: stand, carry/hold (an object), listen 
to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1191.0s to 1192.0s, [0.038, 0.478, 0.269, 0.978]->[0.172, 0.427, 0.392, 0.987]: stand, carry/hold (an object), watch (a person)."]}]}, {"video": "QaIMUi-elFo.mkv", "clip": [1112, 1203], "conversations": [{"from": "human", "timestamps": 1142.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.051, 0.671, 0.973]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.61, 0.386, 1.0, 0.98]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1203.0, "value": "For the person at bbox [0.124, 0.081, 0.719, 0.98], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.124, 0.081, 0.719, 0.98]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1202.0s, [0.14, 0.085, 0.734, 0.97]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1199.0s to 1201.0s, [0.146, 0.11, 0.644, 0.953]->[0.131, 0.091, 0.71, 0.99]->[0.138, 0.088, 0.728, 0.976]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1198.0s, [0.248, 0.391, 0.459, 1.0]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1197.0s, [0.241, 0.391, 0.454, 0.994]: stand, carry/hold (an object), listen to (a person).", "At 1196.0s, [0.242, 0.394, 0.438, 0.992]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1195.0s, [0.247, 0.381, 0.45, 0.968]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1193.0s to 1194.0s, [0.163, 0.392, 0.369, 0.982]->[0.173, 0.387, 0.391, 0.987]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1191.0s to 1192.0s, [0.038, 0.478, 0.269, 0.978]->[0.172, 0.427, 0.392, 0.987]: stand, carry/hold (an object), watch (a person)."]}]}, {"video": "QaIMUi-elFo.mkv", "clip": [1243, 1278], "conversations": [{"from": "human", "timestamps": 1273.0, "value": "For the person at bbox [0.252, 0.322, 0.46, 0.966], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.252, 0.322, 0.46, 0.966]: stand, carry/hold (an object), listen to (a person), take (an object) from (a person), watch (a person).", "At 1272.0s, [0.292, 0.323, 0.489, 0.982]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1271.0s, [0.264, 0.314, 0.444, 0.923]: stand, listen to (a person), watch (a person).", "At 1270.0s, [0.295, 0.352, 0.425, 0.893]: stand, watch (a person).", "At 1269.0s, [0.171, 0.414, 0.358, 0.901]: dance.", "At 1268.0s, [0.158, 0.351, 0.324, 0.939]: dance, listen to (a person).", "At 1267.0s, [0.316, 0.319, 0.531, 0.979]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1266.0s, [0.305, 0.329, 0.492, 0.968]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1265.0s, [0.311, 0.309, 0.495, 0.967]: stand, listen to (a person), sing to (e.g., self, a person, a group), talk to (e.g., self, a person, a group).", "At 1263.0s, [0.373, 0.275, 0.589, 0.942]: dance, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1262.0s, [0.353, 0.294, 0.577, 0.944]: dance, listen to (a person), watch (a person).", "From 1260.0s to 1261.0s, [0.317, 
0.323, 0.574, 0.94]->[0.33, 0.328, 0.529, 0.923]: dance, listen to (a person), sing to (e.g., self, a person, a group).", "From 1257.0s to 1259.0s, [0.071, 0.111, 0.706, 0.988]->[0.177, 0.085, 0.976, 1.0]->[0.209, 0.086, 0.953, 0.961]: dance, sing to (e.g., self, a person, a group), watch (a person).", "At 1256.0s, [0.269, 0.086, 0.69, 0.994]: stand, dance, talk to (e.g., self, a person, a group), watch (a person).", "From 1254.0s to 1255.0s, [0.303, 0.364, 0.531, 0.946]->[0.267, 0.336, 0.513, 0.944]: dance, listen to (a person), watch (a person).", "At 1253.0s, [0.278, 0.369, 0.489, 0.955]: dance, listen to (a person), sing to (e.g., self, a person, a group), watch (a person).", "At 1252.0s, [0.297, 0.371, 0.469, 0.95]: stand, sing to (e.g., self, a person, a group).", "From 1250.0s to 1251.0s, [0.301, 0.368, 0.484, 0.946]->[0.291, 0.395, 0.441, 0.944]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1249.0s, [0.281, 0.39, 0.522, 0.948]: dance, listen to (a person), sing to (e.g., self, a person, a group), watch (a person).", "At 1248.0s, [0.302, 0.376, 0.54, 0.951]: dance, listen to (a person).", "At 1247.0s, [0.322, 0.366, 0.499, 0.93]: dance, listen to (a person), watch (a person).", "At 1246.0s, [0.278, 0.35, 0.463, 0.904]: dance, listen to (a person), sing to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1278.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.106, 0.205, 0.278, 0.721]: walk."}]}, {"video": "QaIMUi-elFo.mkv", "clip": [1257, 1336], "conversations": [{"from": "human", "timestamps": 1287.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.258, 0.2, 0.423, 0.926]: read, stand, talk to (e.g., self, a person, a group); [0.656, 0.123, 0.744, 0.531]: watch (a person), listen to (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1336.0, "value": "For the person at bbox [0.645, 0.008, 0.997, 0.944], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.645, 0.008, 0.997, 0.944]: stand, listen to (a person), watch (a person).", "From 1326.0s to 1335.0s, [0.576, 0.0, 0.997, 0.996]->[0.49, 0.035, 0.992, 0.99]->[0.656, 0.006, 1.0, 0.978]->[0.635, 0.003, 0.996, 0.971]->[0.64, 0.017, 1.0, 0.954]->[0.649, 0.023, 1.0, 0.962]->[0.621, 0.01, 0.993, 0.947]->[0.637, 0.008, 1.0, 0.955]->[0.626, 0.012, 1.0, 0.959]->[0.64, 0.016, 1.0, 0.996]: stand, listen to (a person), watch (a person).", "At 1325.0s, [0.471, 0.056, 0.986, 1.0]: stand, listen to (a person)."]}]}, {"video": "QaIMUi-elFo.mkv", "clip": [1311, 1359], "conversations": [{"from": "human", "timestamps": 1341.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.138, 0.091, 0.704, 0.965]: read, stand; [0.602, 0.0, 0.992, 0.981]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1359.0, "value": "For the person at bbox [0.524, 0.043, 0.941, 0.994], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.524, 0.043, 0.941, 0.994]: stand, listen to (a person), watch (a person).", "At 1358.0s, [0.497, 0.062, 0.941, 0.986]: stand, listen to (a person), watch (a person).", "At 1357.0s, [0.503, 0.075, 0.952, 
0.972]: stand, touch (an object), listen to (a person), watch (a person).", "At 1356.0s, [0.433, 0.101, 0.934, 0.988]: sit, stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1355.0s, [0.453, 0.089, 0.931, 0.984]: sit, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1354.0s, [0.416, 0.067, 0.911, 0.987]: stand, kiss (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1347.0s to 1353.0s, [0.615, 0.018, 0.99, 0.982]->[0.638, 0.0, 0.997, 0.993]->[0.662, 0.031, 1.0, 0.964]->[0.687, 0.01, 1.0, 0.981]->[0.607, 0.002, 0.999, 0.996]->[0.625, 0.0, 0.999, 0.996]->[0.496, 0.032, 0.937, 0.991]: stand, listen to (a person), watch (a person).", "At 1346.0s, [0.629, 0.0, 0.996, 0.991]: stand, watch (a person).", "At 1345.0s, [0.643, 0.011, 1.0, 0.995]: stand, listen to (a person), watch (a person)."]}]}, {"video": "QaIMUi-elFo.mkv", "clip": [1400, 1435], "conversations": [{"from": "human", "timestamps": 1430.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.197, 0.371, 0.522, 0.998]: stand; [0.47, 0.049, 0.894, 0.977]: carry/hold (an object), stand, watch (a person)."}, {"from": "human", "timestamps": 1435.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.142, 0.106, 0.822, 0.989]?"}, {"from": "gpt", "value": ["At Now, [0.142, 0.106, 0.822, 0.989]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1433.0s, [0.189, 0.133, 0.778, 0.996]: watch (a person)."]}]}, {"video": "QaIMUi-elFo.mkv", "clip": [1397, 1468], "conversations": [{"from": "human", "timestamps": 1427.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.122, 0.038, 0.793, 0.982]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1468.0, "value": "For the person at bbox [0.637, 0.134, 0.811, 0.992], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.637, 0.134, 0.811, 0.992]: stand, listen to (a person), watch (a person).", "At 1467.0s, [0.632, 0.138, 0.805, 0.988]: stand, listen to (a person), watch (a person).", "At 1466.0s, [0.628, 0.136, 0.803, 0.988]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1463.0s to 1465.0s, [0.582, 0.155, 0.758, 0.945]->[0.585, 0.134, 0.761, 0.923]->[0.607, 0.127, 0.776, 0.932]: stand, listen to (a person), watch (a person).", "From 1461.0s to 1462.0s, [0.683, 0.394, 0.896, 0.994]->[0.595, 0.295, 0.806, 0.981]: walk, listen to (a person), watch (a person)."]}]}, {"video": "QaIMUi-elFo.mkv", "clip": [1472, 1546], "conversations": [{"from": "human", "timestamps": 1502.0, "value": "For the person at bbox [0.323, 0.3, 0.487, 0.846], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.323, 0.3, 0.487, 0.846]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1501.0s, [0.334, 0.35, 0.488, 0.848]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1500.0s, [0.38, 0.398, 0.504, 0.748]: walk."]}, {"from": "human", "timestamps": 1546.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.193, 0.199, 
0.511, 0.971]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.343, 0.093, 0.929, 1.0]: listen to (a person), sit, watch (a person)."}]}, {"video": "QaIMUi-elFo.mkv", "clip": [1528, 1593], "conversations": [{"from": "human", "timestamps": 1558.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.097, 0.145, 0.271, 0.99]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.583, 0.328, 0.662, 0.781]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.586, 0.129, 0.825, 0.969]: stand."}, {"from": "human", "timestamps": 1593.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.065, 0.073, 0.473, 0.982]?"}, {"from": "gpt", "value": ["In the current frame, [0.065, 0.073, 0.473, 0.982]: stand, listen to (a person), watch (a person).", "At 1592.0s, [0.067, 0.07, 0.48, 0.991]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1590.0s to 1591.0s, [0.067, 0.075, 0.475, 0.999]->[0.055, 0.04, 0.467, 0.987]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "QaIMUi-elFo.mkv", "clip": [1539, 1570], "conversations": [{"from": "human", "timestamps": 1569.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.637, 0.085, 0.904, 0.988]?"}, {"from": "gpt", "value": ["In the current frame, [0.637, 0.085, 0.904, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1568.0s, [0.621, 0.093, 0.899, 0.976]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1567.0s, [0.622, 0.121, 0.909, 0.974]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1565.0s to 1566.0s, [0.662, 0.088, 0.916, 0.983]->[0.62, 0.108, 0.911, 0.976]: stand, listen to (a person), watch (a person).", "At 1564.0s, [0.646, 0.114, 0.952, 0.973]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1563.0s, [0.57, 0.124, 0.958, 0.976]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1561.0s to 1562.0s, [0.541, 0.081, 0.762, 0.921]->[0.577, 0.058, 0.854, 0.942]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1570.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.071, 0.099, 0.4, 0.972]: watch (a person), listen to (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.38, 0.471, 0.71, 0.992]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.658, 0.085, 0.907, 0.983]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "QaIMUi-elFo.mkv", "clip": [1589, 1668], "conversations": [{"from": "human", "timestamps": 1619.0, "value": "Describe the actions of the person at bbox [0.288, 0.475, 0.503, 0.989] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.288, 0.475, 0.503, 0.989]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1617.0s to 1618.0s, [0.237, 0.493, 0.463, 1.0]->[0.266, 0.482, 0.497, 0.994]: stand, talk to (e.g., self, a person, a group).", "At 1616.0s, [0.202, 0.517, 0.406, 0.992]: stand, talk to (e.g., self, a person, a group), watch (a 
person)."]}, {"from": "human", "timestamps": 1668.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.223, 0.111, 0.737, 0.984]: listen to (a person), sit; [0.542, 0.007, 0.996, 0.986]: listen to (a person), sit, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "QaIMUi-elFo.mkv", "clip": [1619, 1710], "conversations": [{"from": "human", "timestamps": 1649.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.112, 0.18, 0.435, 0.977]: sit, watch (a person), listen to (a person), carry/hold (an object), talk to (e.g., self, a person, a group); [0.365, 0.04, 0.946, 0.985]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1710.0, "value": "Describe the actions of the person at bbox [0.537, 0.093, 0.98, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.537, 0.093, 0.98, 1.0]: stand, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1708.0s to 1709.0s, [0.529, 0.107, 0.976, 0.997]->[0.531, 0.09, 0.978, 1.0]: stand, touch (an object), listen to (a person), watch (a person).", "At 1707.0s, [0.518, 0.128, 0.976, 0.986]: stand, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1704.0s to 1706.0s, [0.351, 0.191, 0.892, 0.989]->[0.533, 0.112, 0.939, 0.985]->[0.571, 0.1, 0.967, 0.989]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1703.0s, [0.661, 0.271, 0.904, 0.993]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1702.0s, [0.635, 0.315, 0.79, 0.907]: run/jog, watch (a person).", "At 1701.0s, [0.636, 0.297, 0.78, 0.767]: run/jog, talk to (e.g., self, a person, a group), watch (a person).", "At 1700.0s, [0.674, 0.25, 0.762, 0.646]: walk, close (e.g., a door, a box), watch (a person).", "At 1699.0s, [0.68, 0.262, 0.801, 0.63]: stand, close (e.g., a door, a box)."]}]}, {"video": "QaIMUi-elFo.mkv", "clip": [1652, 1685], "conversations": [{"from": "human", "timestamps": 1682.0, "value": "What actions is the person in bbox [0.457, 0.143, 0.867, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.457, 0.143, 0.867, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1681.0s, [0.422, 0.209, 0.816, 0.946]: carry/hold (an object), get up, talk to (e.g., self, a person, a group), watch (a person).", "At 1680.0s, [0.429, 0.07, 0.825, 0.969]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1685.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.113, 0.302, 0.481, 0.993]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.382, 0.074, 0.888, 0.979]: listen to (a person), stand, watch (a person)."}]}, {"video": "QaIMUi-elFo.mkv", "clip": [1723, 1795], "conversations": [{"from": "human", "timestamps": 1753.0, "value": "What actions is the person in bbox [0.011, 0.106, 0.373, 0.978] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.011, 0.106, 0.373, 0.978]: sit, carry/hold (an object), touch (an object), watch (a person).", "At 1752.0s, 
[0.017, 0.126, 0.346, 0.977]: sit, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "At 1751.0s, [0.025, 0.146, 0.382, 0.86]: sit, carry/hold (an object), listen to (a person).", "At 1750.0s, [0.024, 0.176, 0.383, 0.99]: sit, carry/hold (an object), touch (an object), listen to (a person).", "From 1748.0s to 1749.0s, [0.033, 0.132, 0.407, 0.973]->[0.013, 0.162, 0.352, 0.949]: sit, carry/hold (an object), read, listen to (a person).", "At 1747.0s, [0.071, 0.116, 0.42, 0.849]: sit, carry/hold (an object), listen to (a person).", "At 1746.0s, [0.064, 0.051, 0.625, 0.975]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1745.0s, [0.07, 0.071, 0.611, 0.959]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1741.0s to 1744.0s, [0.063, 0.12, 0.61, 0.988]->[0.064, 0.117, 0.597, 0.988]->[0.066, 0.111, 0.607, 0.99]->[0.065, 0.07, 0.62, 0.981]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1740.0s, [0.074, 0.117, 0.611, 0.985]: sit, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1795.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.25, 0.125, 0.556, 0.791]: sit, watch (a person), listen to (a person), carry/hold (an object), talk to (e.g., self, a person, a group); [0.48, 0.088, 0.918, 0.939]: listen to (a person), sit, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "QotkBTEePI8.mkv", "clip": [880, 919], "conversations": [{"from": "human", "timestamps": 910.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.063, 0.097, 0.469, 0.959]?"}, {"from": "gpt", "value": ["In the current frame, [0.063, 0.097, 0.469, 0.959]: stand.", "From 905.0s to 908.0s, [0.104, 0.327, 0.267, 0.972]->[0.085, 0.325, 0.253, 0.97]->[0.073, 0.362, 0.291, 0.957]->[0.072, 0.341, 0.295, 0.97]: stand.", "From 902.0s to 904.0s, [0.073, 0.245, 0.278, 0.975]->[0.133, 0.311, 0.276, 0.944]->[0.09, 0.323, 0.275, 0.961]: stand, watch (a person)."]}, {"from": "human", "timestamps": 919.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.351, 0.011, 0.872, 0.963]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "QotkBTEePI8.mkv", "clip": [879, 1015], "conversations": [{"from": "human", "timestamps": 909.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.306, 0.088, 0.713, 0.959]: stand."}, {"from": "human", "timestamps": 1015.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.348, 0.268, 0.488, 0.857]?"}, {"from": "gpt", "value": ["In the current frame, [0.348, 0.268, 0.488, 0.857]: walk.", "At 1014.0s, [0.487, 0.235, 0.737, 0.793]: stand, touch (an object).", "From 1012.0s to 1013.0s, [0.493, 0.264, 0.694, 0.958]->[0.583, 0.339, 0.86, 0.954]: stand, carry/hold (an object).", "At 1011.0s, [0.341, 0.356, 0.424, 0.927]: walk, carry/hold (an object).", "At 1010.0s, [0.108, 0.254, 0.263, 0.946]: walk, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "QotkBTEePI8.mkv", "clip": [1001, 1069], "conversations": [{"from": "human", "timestamps": 1031.0, "value": "Describe the locations and 
actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.427, 0.229, 0.844, 0.924]: carry/hold (an object), sit."}, {"from": "human", "timestamps": 1069.0, "value": "What actions is the person in bbox [0.22, 0.096, 0.467, 0.973] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.22, 0.096, 0.467, 0.973]: sit, eat, listen to (a person).", "At 1068.0s, [0.226, 0.147, 0.498, 0.935]: sit, eat.", "At 1067.0s, [0.26, 0.159, 0.51, 0.929]: sit, carry/hold (an object), eat, read.", "At 1066.0s, [0.279, 0.151, 0.523, 0.817]: sit, eat, read, touch (an object).", "At 1065.0s, [0.274, 0.136, 0.52, 0.827]: sit, eat, touch (an object).", "From 1063.0s to 1064.0s, [0.259, 0.149, 0.595, 0.871]->[0.265, 0.125, 0.638, 0.858]: sit, carry/hold (an object), eat, read.", "At 1062.0s, [0.246, 0.161, 0.638, 0.929]: sit, eat, read.", "At 1061.0s, [0.245, 0.163, 0.63, 0.935]: sit, eat, read, touch (an object).", "At 1060.0s, [0.244, 0.166, 0.63, 0.96]: sit, read, touch (an object).", "At 1059.0s, [0.249, 0.173, 0.629, 0.946]: sit, eat, touch (an object).", "From 1057.0s to 1058.0s, [0.232, 0.115, 0.449, 0.983]->[0.238, 0.154, 0.488, 0.991]: sit, carry/hold (an object), touch (an object).", "From 1055.0s to 1056.0s, [0.21, 0.122, 0.567, 0.956]->[0.202, 0.108, 0.539, 0.936]: sit, carry/hold (an object), eat.", "From 1053.0s to 1054.0s, [0.204, 0.125, 0.495, 0.921]->[0.219, 0.131, 0.514, 0.935]: sit, carry/hold (an object), eat, listen to (a person).", "At 1052.0s, [0.203, 0.119, 0.452, 0.932]: sit, eat, listen to (a person), watch (a person).", "At 1051.0s, [0.164, 0.134, 0.436, 0.901]: sit, eat, listen to (a person).", "At 1050.0s, [0.255, 0.135, 0.472, 0.909]: sit, eat, touch (an object), listen to (a person).", "At 1049.0s, [0.266, 0.146, 0.492, 0.794]: sit, eat, touch (an object).", "At 1048.0s, [0.257, 0.145, 0.489, 0.914]: sit, eat, touch (an object), listen to (a person).", "At 1047.0s, [0.268, 0.146, 0.489, 0.809]: sit, eat, touch (an object).", "From 1045.0s to 1046.0s, [0.256, 0.141, 0.597, 0.837]->[0.25, 0.141, 0.595, 0.79]: sit, eat, listen to (a person).", "From 1042.0s to 1044.0s, [0.235, 0.126, 0.529, 0.814]->[0.261, 0.166, 0.623, 0.829]->[0.26, 0.147, 0.619, 0.834]: sit, eat, touch (an object), listen to (a person).", "From 1038.0s to 1041.0s, [0.219, 0.127, 0.459, 0.972]->[0.222, 0.108, 0.455, 0.841]->[0.224, 0.105, 0.469, 0.852]->[0.24, 0.121, 0.466, 0.928]: sit, eat, talk to (e.g., self, a person, a group).", "At 1037.0s, [0.209, 0.154, 0.478, 0.907]: sit, eat, touch (an object), listen to (a person).", "From 1035.0s to 1036.0s, [0.212, 0.143, 0.466, 0.857]->[0.207, 0.136, 0.469, 0.886]: sit, carry/hold (an object), eat, touch (an object), watch (a person).", "At 1034.0s, [0.215, 0.149, 0.467, 0.927]: sit, eat, touch (an object).", "At 1033.0s, [0.212, 0.136, 0.467, 0.824]: sit, carry/hold (an object), eat, touch (an object), talk to (e.g., self, a person, a group).", "At 1032.0s, [0.185, 0.138, 0.422, 0.86]: sit, carry/hold (an object), eat, talk to (e.g., self, a person, a group).", "At 1031.0s, [0.427, 0.229, 0.844, 0.924]: sit, carry/hold (an object).", "At 1030.0s, [0.512, 0.363, 0.828, 0.937]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1029.0s, [0.581, 0.287, 0.85, 0.936]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "QotkBTEePI8.mkv", "clip": [1012, 1095], "conversations": [{"from": "human", "timestamps": 1042.0, 
"value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.235, 0.126, 0.529, 0.814]: listen to (a person), sit, eat, touch (an object); [0.502, 0.21, 0.686, 0.643]: watch (a person), sit, eat, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1095.0, "value": "What actions is the person in bbox [0.406, 0.696, 0.449, 0.962] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.406, 0.696, 0.449, 0.962]: walk, carry/hold (an object).", "From 1088.0s to 1094.0s, [0.641, 0.677, 0.716, 0.847]->[0.637, 0.657, 0.714, 0.847]->[0.645, 0.639, 0.729, 0.847]->[0.655, 0.623, 0.733, 0.845]->[0.657, 0.589, 0.745, 0.845]->[0.658, 0.55, 0.754, 0.847]->[0.663, 0.52, 0.762, 0.851]: walk, carry/hold (an object)."]}]}, {"video": "QotkBTEePI8.mkv", "clip": [1069, 1137], "conversations": [{"from": "human", "timestamps": 1099.0, "value": "For the person at bbox [0.544, 0.719, 0.584, 0.931], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.544, 0.719, 0.584, 0.931]: walk, carry/hold (an object).", "From 1088.0s to 1098.0s, [0.641, 0.677, 0.716, 0.847]->[0.637, 0.657, 0.714, 0.847]->[0.645, 0.639, 0.729, 0.847]->[0.655, 0.623, 0.733, 0.845]->[0.657, 0.589, 0.745, 0.845]->[0.658, 0.55, 0.754, 0.847]->[0.663, 0.52, 0.762, 0.851]->[0.406, 0.696, 0.449, 0.962]->[0.449, 0.704, 0.493, 0.943]->[0.492, 0.713, 0.537, 0.937]->[0.522, 0.713, 0.562, 0.933]: walk, carry/hold (an object)."]}, {"from": "human", "timestamps": 1137.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.402, 0.154, 0.728, 0.81]: carry/hold (an object), watch (a person), talk to (e.g., self, a person, a group), walk."}]}, {"video": "QotkBTEePI8.mkv", "clip": [1209, 1240], "conversations": [{"from": "human", "timestamps": 1239.0, "value": "For the person at bbox [0.355, 0.059, 0.862, 0.959], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.355, 0.059, 0.862, 0.959]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1238.0s, [0.372, 0.062, 0.871, 0.95]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1236.0s to 1237.0s, [0.394, 0.07, 0.887, 0.958]->[0.342, 0.078, 0.862, 0.948]: sit, talk to (e.g., self, a person, a group).", "From 1234.0s to 1235.0s, [0.378, 0.027, 0.833, 0.968]->[0.421, 0.078, 0.875, 0.96]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1240.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.133, 0.039, 0.579, 0.98]: sit, watch (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "QotkBTEePI8.mkv", "clip": [1240, 1317], "conversations": [{"from": "human", "timestamps": 1270.0, "value": "For the person at bbox [0.116, 0.08, 0.415, 0.96], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.116, 0.08, 0.415, 0.96]: sit, smoke, listen to (a person), watch (a person).", "At 1269.0s, [0.136, 0.094, 0.478, 0.932]: sit, smoke, talk to (e.g., self, a person, a group).", "At 1268.0s, [0.136, 0.086, 0.473, 0.917]: sit, smoke, listen to (a person), talk 
to (e.g., self, a person, a group).", "At 1267.0s, [0.125, 0.082, 0.461, 0.925]: sit, smoke, talk to (e.g., self, a person, a group), watch (a person).", "At 1266.0s, [0.117, 0.096, 0.464, 0.894]: sit, carry/hold (an object), smoke, talk to (e.g., self, a person, a group), watch (a person).", "At 1265.0s, [0.109, 0.129, 0.519, 0.968]: sit, smoke, listen to (a person), talk to (e.g., self, a person, a group).", "From 1263.0s to 1264.0s, [0.16, 0.121, 0.391, 0.93]->[0.123, 0.116, 0.509, 0.919]: sit, smoke, talk to (e.g., self, a person, a group).", "At 1262.0s, [0.147, 0.08, 0.379, 0.961]: sit, smoke, talk to (e.g., self, a person, a group), watch (a person).", "From 1255.0s to 1261.0s, [0.178, 0.13, 0.4, 0.94]->[0.159, 0.124, 0.389, 0.916]->[0.135, 0.136, 0.383, 0.934]->[0.153, 0.114, 0.388, 0.916]->[0.159, 0.126, 0.387, 0.94]->[0.149, 0.114, 0.387, 0.928]->[0.158, 0.117, 0.383, 0.939]: sit, smoke, listen to (a person), watch (a person).", "At 1254.0s, [0.23, 0.022, 0.754, 0.993]: sit, smoke, talk to (e.g., self, a person, a group), watch (a person).", "At 1253.0s, [0.187, 0.008, 0.613, 0.977]: sit, smoke, listen to (a person)."]}, {"from": "human", "timestamps": 1317.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.257, 0.005, 0.925, 0.978]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "QotkBTEePI8.mkv", "clip": [1266, 1355], "conversations": [{"from": "human", "timestamps": 1296.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.336, 0.019, 0.93, 0.97]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1355.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.16, 0.061, 0.436, 0.956]?"}, {"from": "gpt", "value": ["At Now, [0.16, 0.061, 0.436, 0.956]: stand.", "At 1354.0s, [0.16, 0.061, 0.424, 0.961]: stand.", "From 1350.0s to 1353.0s, [0.334, 0.517, 0.377, 0.771]->[0.336, 0.52, 0.379, 0.785]->[0.337, 0.517, 0.382, 0.707]->[0.165, 0.048, 0.44, 0.962]: sit.", "At 1349.0s, [0.338, 0.521, 0.378, 0.722]: stand, touch (an object)."]}]}, {"video": "QotkBTEePI8.mkv", "clip": [1304, 1374], "conversations": [{"from": "human", "timestamps": 1334.0, "value": "What actions did the person at bbox [0.357, 0.022, 0.496, 0.887] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.357, 0.022, 0.496, 0.887]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1326.0s to 1333.0s, [0.354, 0.02, 0.509, 0.787]->[0.32, 0.028, 0.468, 0.79]->[0.32, 0.026, 0.464, 0.8]->[0.328, 0.032, 0.471, 0.792]->[0.33, 0.023, 0.477, 0.86]->[0.334, 0.021, 0.479, 0.836]->[0.335, 0.027, 0.483, 0.884]->[0.365, 0.027, 0.511, 0.86]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1374.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.38, 0.212, 0.553, 0.874]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.514, 0.117, 0.695, 0.905]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "QotkBTEePI8.mkv", "clip": [1440, 1494], "conversations": [{"from": "human", "timestamps": 1470.0, "value": "In the current frame, list each person's location and 
actions."}, {"from": "gpt", "value": "In the current frame, [0.5, 0.385, 0.639, 0.924]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1494.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.642, 0.174, 0.92, 0.948]?"}, {"from": "gpt", "value": ["At Now, [0.642, 0.174, 0.92, 0.948]: stand, watch (a person).", "From 1492.0s to 1493.0s, [0.518, 0.13, 0.693, 0.834]->[0.674, 0.146, 0.921, 0.964]: stand, watch (a person).", "At 1491.0s, [0.528, 0.138, 0.701, 0.813]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1489.0s to 1490.0s, [0.519, 0.147, 0.688, 0.835]->[0.531, 0.126, 0.707, 0.817]: stand.", "At 1488.0s, [0.549, 0.142, 0.705, 0.837]: stand, watch (a person).", "At 1487.0s, [0.551, 0.115, 0.729, 0.836]: stand, listen to (a person), watch (a person).", "At 1486.0s, [0.547, 0.118, 0.719, 0.795]: stand, listen to (a person).", "From 1484.0s to 1485.0s, [0.55, 0.116, 0.721, 0.828]->[0.54, 0.12, 0.71, 0.808]: stand, listen to (a person), watch (a person).", "From 1482.0s to 1483.0s, [0.52, 0.135, 0.671, 0.894]->[0.598, 0.12, 0.728, 0.874]: walk, listen to (a person), watch (a person).", "From 1479.0s to 1481.0s, [0.445, 0.251, 0.663, 0.963]->[0.417, 0.118, 0.587, 0.733]->[0.368, 0.121, 0.534, 0.821]: walk.", "At 1478.0s, [0.549, 0.203, 0.721, 0.955]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1477.0s, [0.597, 0.196, 0.757, 0.964]: stand, watch (a person).", "From 1474.0s to 1476.0s, [0.283, 0.195, 0.521, 0.897]->[0.279, 0.188, 0.512, 0.814]->[0.254, 0.193, 0.477, 0.785]: stand, listen to (a person), watch (a person)."]}]}, {"video": "QotkBTEePI8.mkv", "clip": [1482, 1536], "conversations": [{"from": "human", "timestamps": 1512.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.367, 0.076, 0.728, 0.962]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1536.0, "value": "Describe the actions of the person at bbox [0.617, 0.197, 0.794, 0.95] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.617, 0.197, 0.794, 0.95]: stand, touch (an object).", "From 1533.0s to 1535.0s, [0.634, 0.202, 0.829, 0.953]->[0.637, 0.204, 0.835, 0.953]->[0.625, 0.201, 0.808, 0.952]: stand, touch (an object), watch (a person).", "From 1529.0s to 1532.0s, [0.618, 0.201, 0.801, 0.936]->[0.632, 0.197, 0.818, 0.937]->[0.634, 0.205, 0.819, 0.927]->[0.634, 0.196, 0.825, 0.955]: stand, touch (an object), listen to (a person), watch (a person).", "At 1528.0s, [0.602, 0.202, 0.794, 0.958]: stand, touch (an object), watch (a person)."]}]}, {"video": "QotkBTEePI8.mkv", "clip": [1499, 1612], "conversations": [{"from": "human", "timestamps": 1529.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.264, 0.017, 0.559, 0.914]: stand, touch (an object), talk to (e.g., self, a person, a group); [0.618, 0.201, 0.801, 0.936]: listen to (a person), stand, touch (an object), watch (a person)."}, {"from": "human", "timestamps": 1612.0, "value": "What actions is the person in bbox [0.751, 0.332, 0.874, 0.978] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.751, 0.332, 0.874, 0.978]: stand, talk to (e.g., self, a person, a group), watch (a person).", 
"At 1611.0s, [0.75, 0.338, 0.878, 0.97]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1605.0s to 1610.0s, [0.759, 0.332, 0.874, 0.973]->[0.729, 0.335, 0.854, 0.966]->[0.708, 0.36, 0.849, 0.951]->[0.701, 0.351, 0.842, 0.973]->[0.749, 0.341, 0.879, 0.972]->[0.754, 0.335, 0.871, 0.975]: stand, listen to (a person), watch (a person).", "From 1603.0s to 1604.0s, [0.764, 0.327, 0.877, 0.955]->[0.768, 0.338, 0.88, 0.973]: stand, listen to (a person).", "At 1602.0s, [0.762, 0.337, 0.875, 0.966]: stand, talk to (e.g., self, a person, a group).", "At 1601.0s, [0.756, 0.34, 0.87, 0.962]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1599.0s to 1600.0s, [0.736, 0.332, 0.854, 0.968]->[0.749, 0.329, 0.862, 0.975]: stand, listen to (a person), watch (a person).", "At 1598.0s, [0.744, 0.329, 0.855, 0.97]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1597.0s, [0.74, 0.321, 0.842, 0.974]: stand, listen to (a person), watch (a person).", "From 1595.0s to 1596.0s, [0.622, 0.019, 0.925, 0.961]->[0.677, 0.04, 0.919, 0.98]: stand, listen to (a person).", "At 1594.0s, [0.64, 0.018, 0.934, 0.964]: stand."]}]}, {"video": "QotkBTEePI8.mkv", "clip": [1546, 1588], "conversations": [{"from": "human", "timestamps": 1576.0, "value": "For the person at bbox [0.257, 0.37, 0.464, 0.923], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.257, 0.37, 0.464, 0.923]: bend/bow (at the waist), carry/hold (an object), listen to (a person), watch (a person).", "At 1575.0s, [0.264, 0.395, 0.464, 0.962]: bend/bow (at the waist), lift/pick up.", "At 1574.0s, [0.269, 0.495, 0.491, 0.955]: bend/bow (at the waist), carry/hold (an object), lift/pick up.", "At 1573.0s, [0.286, 0.641, 0.487, 0.977]: bend/bow (at the waist)."]}, {"from": "human", "timestamps": 1588.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.472, 0.377, 0.653, 0.888]: carry/hold (an object), bend/bow (at the waist), listen to (a person); [0.573, 0.334, 0.786, 0.918]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "QotkBTEePI8.mkv", "clip": [1588, 1664], "conversations": [{"from": "human", "timestamps": 1618.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.424, 0.374, 0.615, 0.96]: listen to (a person), stand, touch (an object), talk to (e.g., self, a person, a group); [0.72, 0.313, 0.853, 0.96]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1664.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.319, 0.113, 0.584, 0.971]?"}, {"from": "gpt", "value": ["At Now, [0.319, 0.113, 0.584, 0.971]: walk, carry/hold (an object), touch (an object).", "From 1662.0s to 1663.0s, [0.202, 0.129, 0.419, 0.959]->[0.268, 0.059, 0.482, 0.943]: walk, carry/hold (an object).", "From 1660.0s to 1661.0s, [0.174, 0.136, 0.41, 0.956]->[0.162, 0.136, 0.406, 0.97]: stand, carry/hold (an object).", "From 1658.0s to 1659.0s, [0.193, 0.204, 0.424, 0.974]->[0.206, 0.152, 0.443, 0.968]: walk, carry/hold (an object).", "At 1657.0s, [0.207, 0.289, 0.389, 0.975]: stand, walk, carry/hold (an object).", "From 1654.0s to 1656.0s, [0.222, 0.294, 0.403, 0.959]->[0.483, 0.297, 0.65, 0.966]->[0.227, 0.394, 0.376, 0.979]: walk, carry/hold (an object).", "At 
1653.0s, [0.182, 0.256, 0.286, 0.959]: stand, carry/hold (an object), read.", "At 1652.0s, [0.173, 0.259, 0.276, 0.968]: stand, carry/hold (an object).", "At 1651.0s, [0.141, 0.279, 0.243, 0.964]: stand, carry/hold (an object), watch (a person)."]}]}, {"video": "QotkBTEePI8.mkv", "clip": [1615, 1725], "conversations": [{"from": "human", "timestamps": 1645.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.324, 0.039, 0.76, 0.962]: watch (a person), stand."}, {"from": "human", "timestamps": 1725.0, "value": "What actions did the person at bbox [0.311, 0.054, 0.658, 0.962] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.311, 0.054, 0.658, 0.962]: walk, listen to (a person), watch (a person).", "At 1724.0s, [0.413, 0.069, 0.801, 0.951]: walk, listen to (a person), watch (a person).", "At 1723.0s, [0.435, 0.072, 0.775, 0.931]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1715.0s to 1722.0s, [0.753, 0.102, 0.901, 0.96]->[0.744, 0.088, 0.888, 0.969]->[0.747, 0.073, 0.896, 0.984]->[0.764, 0.081, 0.898, 0.954]->[0.76, 0.08, 0.888, 0.953]->[0.75, 0.085, 0.883, 0.961]->[0.757, 0.094, 0.894, 0.967]->[0.758, 0.086, 0.89, 0.959]: stand, listen to (a person), watch (a person).", "At 1714.0s, [0.7, 0.222, 0.916, 0.982]: bend/bow (at the waist), listen to (a person), watch (a person).", "From 1712.0s to 1713.0s, [0.73, 0.091, 0.897, 0.937]->[0.718, 0.103, 0.897, 0.965]: stand, listen to (a person), watch (a person).", "At 1711.0s, [0.735, 0.12, 0.889, 0.809]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "QotkBTEePI8.mkv", "clip": [1663, 1733], "conversations": [{"from": "human", "timestamps": 1693.0, "value": "What actions is the person in bbox [0.376, 0.124, 0.498, 0.876] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.376, 0.124, 0.498, 0.876]: stand, listen to (a person), watch (a person).", "At 1692.0s, [0.402, 0.126, 0.519, 0.872]: stand, listen to (a person), watch (a person).", "At 1691.0s, [0.4, 0.149, 0.512, 0.879]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1690.0s, [0.382, 0.225, 0.489, 0.856]: walk, watch (a person)."]}, {"from": "human", "timestamps": 1733.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.173, 0.26, 0.287, 0.604]: carry/hold (an object), stand; [0.192, 0.0, 0.882, 0.973]: walk."}]}, {"video": "Riu4ZKk4YdQ.webm", "clip": [880, 951], "conversations": [{"from": "human", "timestamps": 910.0, "value": "What actions is the person in bbox [0.117, 0.11, 0.99, 0.864] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.117, 0.11, 0.99, 0.864]: stand, talk to (e.g., self, a person, a group).", "At 909.0s, [0.106, 0.109, 0.908, 0.866]: stand, talk to (e.g., self, a person, a group).", "At 908.0s, [0.161, 0.112, 0.969, 0.861]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 951.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.123, 0.116, 0.98, 0.871]: watch (a person), sit."}]}, {"video": "Riu4ZKk4YdQ.webm", "clip": [927, 1006], "conversations": [{"from": "human", "timestamps": 957.0, "value": "Where is each person 
positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.061, 0.105, 0.996, 0.879]: watch (a person)."}, {"from": "human", "timestamps": 1006.0, "value": "For the person at bbox [0.228, 0.121, 0.911, 0.856], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.228, 0.121, 0.911, 0.856]: stand, talk to (e.g., self, a person, a group).", "At 1005.0s, [0.207, 0.092, 0.923, 0.874]: stand, talk to (e.g., self, a person, a group).", "At 1004.0s, [0.213, 0.116, 0.925, 0.87]: sit, talk to (e.g., self, a person, a group).", "From 1002.0s to 1003.0s, [0.121, 0.115, 0.987, 0.88]->[0.175, 0.113, 0.965, 0.884]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Riu4ZKk4YdQ.webm", "clip": [942, 986], "conversations": [{"from": "human", "timestamps": 972.0, "value": "For the person at bbox [0.122, 0.118, 0.906, 0.864], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.122, 0.118, 0.906, 0.864]: sit, talk to (e.g., self, a person, a group).", "At 971.0s, [0.104, 0.115, 0.936, 0.866]: sit, talk to (e.g., self, a person, a group).", "At 970.0s, [0.107, 0.121, 0.96, 0.863]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 986.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.016, 0.104, 0.992, 0.886]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "Riu4ZKk4YdQ.webm", "clip": [1014, 1121], "conversations": [{"from": "human", "timestamps": 1044.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.441, 0.17, 0.922, 0.875]: walk."}, {"from": "human", "timestamps": 1121.0, "value": "Describe the actions of the person at bbox [0.181, 0.108, 0.98, 0.877] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.181, 0.108, 0.98, 0.877]: sit, talk to (e.g., self, a person, a group).", "From 1118.0s to 1120.0s, [0.16, 0.12, 0.899, 0.871]->[0.157, 0.121, 1.0, 0.872]->[0.181, 0.129, 0.937, 0.866]: sit, talk to (e.g., self, a person, a group).", "At 1117.0s, [0.153, 0.108, 0.846, 0.857]: sit, stand, talk to (e.g., self, a person, a group).", "From 1113.0s to 1116.0s, [0.19, 0.115, 0.973, 0.889]->[0.19, 0.126, 0.938, 0.884]->[0.182, 0.127, 0.846, 0.861]->[0.205, 0.117, 0.91, 0.867]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "Riu4ZKk4YdQ.webm", "clip": [1080, 1126], "conversations": [{"from": "human", "timestamps": 1110.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.066, 0.121, 0.983, 0.914]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1126.0, "value": "What actions did the person at bbox [0.0, 0.106, 1.0, 0.898] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.106, 1.0, 0.898]: sit, talk to (e.g., self, a person, a group).", "From 1122.0s to 1125.0s, [0.0, 0.089, 1.0, 0.88]->[0.01, 0.11, 0.984, 0.89]->[0.0, 0.104, 0.994, 0.871]->[0.0, 0.105, 0.995, 0.886]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "Riu4ZKk4YdQ.webm", "clip": [1110, 1180], "conversations": [{"from": "human", "timestamps": 1140.0, "value": "What actions did the person at bbox [0.112, 0.202, 0.715, 0.9] perform now and earlier?"}, 
{"from": "gpt", "value": ["In the current frame, [0.112, 0.202, 0.715, 0.9]: sit, talk to (e.g., self, a person, a group).", "At 1139.0s, [0.071, 0.078, 1.0, 0.887]: sit, talk to (e.g., self, a person, a group).", "At 1138.0s, [0.0, 0.106, 0.989, 0.881]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1180.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.198, 0.114, 0.957, 0.872]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "Riu4ZKk4YdQ.webm", "clip": [1165, 1240], "conversations": [{"from": "human", "timestamps": 1195.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.102, 1.0, 0.875]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1240.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.284, 0.438, 0.354, 0.707]?"}, {"from": "gpt", "value": ["At Now, [0.284, 0.438, 0.354, 0.707]: walk.", "At 1239.0s, [0.285, 0.444, 0.354, 0.699]: walk, listen to (a person), watch (a person).", "From 1233.0s to 1238.0s, [0.299, 0.51, 0.343, 0.681]->[0.292, 0.509, 0.335, 0.684]->[0.276, 0.491, 0.33, 0.687]->[0.285, 0.473, 0.339, 0.67]->[0.297, 0.453, 0.355, 0.67]->[0.293, 0.451, 0.356, 0.68]: walk.", "At 1232.0s, [0.292, 0.511, 0.338, 0.672]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1230.0s to 1231.0s, [0.291, 0.522, 0.331, 0.664]->[0.293, 0.516, 0.335, 0.667]: walk, listen to (a person), watch (a person).", "From 1226.0s to 1229.0s, [0.288, 0.524, 0.317, 0.646]->[0.293, 0.523, 0.328, 0.652]->[0.288, 0.524, 0.323, 0.658]->[0.29, 0.524, 0.329, 0.661]: walk."]}]}, {"video": "Riu4ZKk4YdQ.webm", "clip": [1274, 1317], "conversations": [{"from": "human", "timestamps": 1304.0, "value": "What actions did the person at bbox [0.02, 0.113, 0.878, 0.855] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.02, 0.113, 0.878, 0.855]: stand, talk to (e.g., self, a person, a group).", "From 1301.0s to 1303.0s, [0.008, 0.105, 0.903, 0.854]->[0.014, 0.129, 0.932, 0.868]->[0.0, 0.113, 0.865, 0.867]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1317.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.019, 0.094, 0.722, 0.862]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "Riu4ZKk4YdQ.webm", "clip": [1251, 1353], "conversations": [{"from": "human", "timestamps": 1281.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.035, 0.126, 0.897, 0.861]: watch (a person), stand."}, {"from": "human", "timestamps": 1353.0, "value": "For the person at bbox [0.021, 0.132, 0.842, 0.868], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.021, 0.132, 0.842, 0.868]: stand, talk to (e.g., self, a person, a group).", "At 1352.0s, [0.029, 0.129, 0.836, 0.875]: stand, talk to (e.g., self, a person, a group).", "At 1351.0s, [0.012, 0.098, 0.861, 0.873]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Riu4ZKk4YdQ.webm", "clip": [1292, 1338], "conversations": [{"from": "human", "timestamps": 1322.0, "value": "Describe the locations and actions of each 
person in the current frame."}, {"from": "gpt", "value": "At now, [0.0, 0.118, 0.98, 0.862]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1338.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.107, 0.153, 0.43, 0.87]?"}, {"from": "gpt", "value": ["At Now, [0.107, 0.153, 0.43, 0.87]: stand, listen to (a person), watch (a person).", "At 1337.0s, [0.097, 0.156, 0.43, 0.88]: stand, listen to (a person), watch (a person).", "At 1336.0s, [0.104, 0.156, 0.437, 0.877]: stand, listen to (a person).", "At 1335.0s, [0.212, 0.228, 0.413, 0.876]: stand, listen to (a person), watch (a person).", "From 1333.0s to 1334.0s, [0.213, 0.226, 0.42, 0.879]->[0.209, 0.225, 0.417, 0.878]: stand, listen to (a person).", "At 1332.0s, [0.208, 0.229, 0.42, 0.88]: stand, listen to (a person), watch (a person)."]}]}, {"video": "Riu4ZKk4YdQ.webm", "clip": [1376, 1427], "conversations": [{"from": "human", "timestamps": 1406.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.033, 0.116, 0.956, 0.871]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1427.0, "value": "Describe the actions of the person at bbox [0.0, 0.113, 0.731, 0.878] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.0, 0.113, 0.731, 0.878]: stand, talk to (e.g., self, a person, a group).", "From 1421.0s to 1426.0s, [0.006, 0.112, 0.854, 0.872]->[0.019, 0.096, 1.0, 0.881]->[0.004, 0.106, 0.951, 0.852]->[0.002, 0.097, 0.967, 0.858]->[0.04, 0.12, 0.908, 0.87]->[0.019, 0.115, 0.824, 0.878]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "Riu4ZKk4YdQ.webm", "clip": [1366, 1483], "conversations": [{"from": "human", "timestamps": 1396.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.109, 0.844, 0.86]: listen to (a person), stand."}, {"from": "human", "timestamps": 1483.0, "value": "Describe the actions of the person at bbox [0.428, 0.228, 0.732, 0.865] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.428, 0.228, 0.732, 0.865]: stand.", "At 1482.0s, [0.54, 0.235, 0.816, 0.86]: stand, talk to (e.g., self, a person, a group).", "From 1479.0s to 1481.0s, [0.068, 0.129, 0.998, 0.859]->[0.604, 0.223, 0.821, 0.867]->[0.595, 0.22, 0.815, 0.875]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1478.0s, [0.071, 0.126, 0.989, 0.862]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Riu4ZKk4YdQ.webm", "clip": [1443, 1478], "conversations": [{"from": "human", "timestamps": 1473.0, "value": "For the person at bbox [0.063, 0.123, 1.0, 0.862], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.063, 0.123, 1.0, 0.862]: stand, talk to (e.g., self, a person, a group).", "From 1468.0s to 1472.0s, [0.075, 0.114, 0.996, 0.876]->[0.068, 0.122, 0.995, 0.864]->[0.05, 0.123, 0.994, 0.862]->[0.048, 0.111, 0.994, 0.864]->[0.023, 0.12, 0.99, 0.857]: stand, talk to (e.g., self, a person, a group).", "At 1467.0s, [0.091, 0.129, 1.0, 0.857]: stand, listen to (a person)."]}, {"from": "human", "timestamps": 1478.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current 
frame, [0.071, 0.126, 0.989, 0.862]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "Riu4ZKk4YdQ.webm", "clip": [1505, 1549], "conversations": [{"from": "human", "timestamps": 1535.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.4, 0.224, 0.775, 0.895]: walk."}, {"from": "human", "timestamps": 1549.0, "value": "What actions is the person in bbox [0.096, 0.132, 0.989, 0.878] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.096, 0.132, 0.989, 0.878]: sit, stand.", "From 1547.0s to 1548.0s, [0.088, 0.168, 0.995, 0.848]->[0.078, 0.135, 0.982, 0.851]: sit."]}]}, {"video": "Riu4ZKk4YdQ.webm", "clip": [1482, 1609], "conversations": [{"from": "human", "timestamps": 1512.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.064, 0.145, 0.999, 0.87]: stand."}, {"from": "human", "timestamps": 1609.0, "value": "Describe the actions of the person at bbox [0.478, 0.206, 0.952, 0.856] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.478, 0.206, 0.952, 0.856]: sit, talk to (e.g., self, a person, a group).", "From 1606.0s to 1608.0s, [0.468, 0.209, 0.95, 0.869]->[0.492, 0.206, 0.929, 0.859]->[0.479, 0.207, 0.955, 0.866]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "Riu4ZKk4YdQ.webm", "clip": [1534, 1602], "conversations": [{"from": "human", "timestamps": 1564.0, "value": "For the person at bbox [0.016, 0.137, 0.781, 0.883], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.016, 0.137, 0.781, 0.883]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1563.0s, [0.022, 0.127, 0.817, 0.909]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1561.0s to 1562.0s, [0.012, 0.12, 0.776, 0.904]->[0.03, 0.115, 0.793, 0.9]: stand, talk to (e.g., self, a person, a group).", "At 1560.0s, [0.006, 0.128, 0.866, 0.902]: sit, talk to (e.g., self, a person, a group).", "At 1559.0s, [0.013, 0.14, 0.786, 0.889]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1558.0s, [0.024, 0.109, 0.623, 0.893]: sit, talk to (e.g., self, a person, a group).", "At 1557.0s, [0.019, 0.112, 0.738, 0.894]: stand, talk to (e.g., self, a person, a group).", "At 1556.0s, [0.003, 0.123, 0.706, 0.87]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1555.0s, [0.018, 0.13, 0.766, 0.882]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1602.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.028, 0.082, 0.746, 0.886]: sit."}]}, {"video": "Riu4ZKk4YdQ.webm", "clip": [1627, 1674], "conversations": [{"from": "human", "timestamps": 1657.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.006, 0.119, 0.998, 0.889]: listen to (a person), sit, stand."}, {"from": "human", "timestamps": 1674.0, "value": "What actions did the person at bbox [0.009, 0.105, 0.708, 0.895] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.009, 0.105, 0.708, 0.895]: sit, talk to (e.g., self, a person, a group).", "From 1672.0s to 
1673.0s, [0.014, 0.092, 0.756, 0.875]->[0.021, 0.098, 0.768, 0.887]: sit, talk to (e.g., self, a person, a group).", "From 1669.0s to 1671.0s, [0.23, 0.162, 0.493, 0.606]->[0.24, 0.172, 0.507, 0.567]->[0.015, 0.108, 0.754, 0.87]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1666.0s to 1668.0s, [0.015, 0.109, 0.887, 0.886]->[0.14, 0.162, 0.487, 0.853]->[0.161, 0.171, 0.445, 0.807]: sit, talk to (e.g., self, a person, a group).", "At 1665.0s, [0.009, 0.107, 0.865, 0.877]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1664.0s, [0.021, 0.058, 0.907, 0.886]: sit, talk to (e.g., self, a person, a group).", "From 1660.0s to 1663.0s, [0.016, 0.098, 0.965, 0.892]->[0.004, 0.129, 0.97, 0.887]->[0.003, 0.109, 0.948, 0.909]->[0.002, 0.117, 0.902, 0.881]: stand, talk to (e.g., self, a person, a group).", "At 1659.0s, [0.004, 0.087, 0.993, 0.886]: sit, talk to (e.g., self, a person, a group).", "At 1658.0s, [0.006, 0.089, 0.975, 0.881]: sit.", "At 1657.0s, [0.006, 0.119, 0.998, 0.889]: sit, stand, listen to (a person)."]}]}, {"video": "Rm518TUhbRY.mkv", "clip": [1174, 1212], "conversations": [{"from": "human", "timestamps": 1204.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.111, 0.049, 0.989, 0.972]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1212.0, "value": "Describe the actions of the person at bbox [0.0, 0.085, 0.863, 0.994] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.0, 0.085, 0.863, 0.994]: sit, talk to (e.g., self, a person, a group).", "At 1211.0s, [0.042, 0.154, 0.873, 0.965]: sit, talk to (e.g., self, a person, a group).", "At 1210.0s, [0.07, 0.188, 0.733, 0.976]: sit."]}]}, {"video": "Rm518TUhbRY.mkv", "clip": [1259, 1325], "conversations": [{"from": "human", "timestamps": 1289.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.356, 0.109, 0.936, 0.976]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1325.0, "value": "Describe the actions of the person at bbox [0.218, 0.059, 0.995, 0.986] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.218, 0.059, 0.995, 0.986]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1324.0s, [0.169, 0.064, 0.991, 0.997]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1323.0s, [0.228, 0.066, 0.986, 0.974]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "Rm518TUhbRY.mkv", "clip": [1315, 1377], "conversations": [{"from": "human", "timestamps": 1345.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.173, 0.111, 0.634, 0.961]: watch (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1377.0, "value": "What actions did the person at bbox [0.505, 0.419, 0.673, 0.734] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.505, 0.419, 0.673, 0.734]: carry/hold (an object), crouch/kneel, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1376.0s, [0.51, 0.425, 0.635, 0.737]: crouch/kneel, throw, talk to (e.g., self, a person, a group), watch (a person).", "At 1375.0s, [0.513, 0.424, 0.636, 0.735]: 
carry/hold (an object), crouch/kneel, talk to (e.g., self, a person, a group), watch (a person).", "At 1374.0s, [0.498, 0.421, 0.652, 0.732]: crouch/kneel, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Rm518TUhbRY.mkv", "clip": [1353, 1445], "conversations": [{"from": "human", "timestamps": 1383.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.195, 0.457, 0.396, 0.861]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1445.0, "value": "For the person at bbox [0.239, 0.265, 0.676, 0.983], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.239, 0.265, 0.676, 0.983]: stand, touch (an object).", "From 1443.0s to 1444.0s, [0.142, 0.283, 0.515, 0.969]->[0.186, 0.31, 0.592, 0.979]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1442.0s, [0.176, 0.289, 0.609, 0.941]: stand, push (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1441.0s, [0.236, 0.28, 0.648, 0.96]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1440.0s, [0.487, 0.216, 0.88, 0.977]: stand, talk to (e.g., self, a person, a group).", "From 1432.0s to 1439.0s, [0.118, 0.189, 0.685, 0.984]->[0.0, 0.091, 0.568, 0.997]->[0.0, 0.18, 0.616, 0.97]->[0.031, 0.036, 0.706, 0.974]->[0.136, 0.131, 0.713, 0.981]->[0.367, 0.124, 0.915, 0.988]->[0.506, 0.179, 0.908, 0.986]->[0.524, 0.112, 0.92, 0.979]: walk.", "From 1428.0s to 1431.0s, [0.0, 0.037, 0.716, 0.977]->[0.003, 0.0, 0.59, 1.0]->[0.0, 0.037, 0.603, 0.987]->[0.007, 0.037, 0.623, 0.973]: walk, talk to (e.g., self, a person, a group).", "At 1427.0s, [0.002, 0.022, 0.589, 0.989]: walk.", "At 1426.0s, [0.014, 0.066, 0.611, 0.976]: stand.", "At 1425.0s, [0.002, 0.082, 0.494, 0.978]: stand, touch (an object).", "At 1424.0s, [0.026, 0.126, 0.703, 0.986]: stand, hit (an object), touch (an object).", "At 1423.0s, [0.009, 0.062, 0.562, 0.987]: stand, touch (an object)."]}]}, {"video": "Rm518TUhbRY.mkv", "clip": [1419, 1456], "conversations": [{"from": "human", "timestamps": 1449.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.359, 0.267, 0.726, 0.943]: watch (a person), stand, open (e.g., a window, a car door), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1456.0, "value": "What actions did the person at bbox [0.39, 0.099, 0.999, 0.967] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.39, 0.099, 0.999, 0.967]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1455.0s, [0.351, 0.138, 0.97, 0.967]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1454.0s, [0.176, 0.108, 0.962, 0.982]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Rm518TUhbRY.mkv", "clip": [1476, 1543], "conversations": [{"from": "human", "timestamps": 1506.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.112, 0.11, 0.891, 0.975]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1543.0, "value": "Describe the actions of the person at bbox [0.704, 0.221, 0.866, 0.798] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.704, 0.221, 0.866, 0.798]: walk, 
listen to (a person), watch (a person).", "At 1542.0s, [0.552, 0.208, 0.765, 0.833]: walk, listen to (a person), watch (a person)."]}]}, {"video": "Rm518TUhbRY.mkv", "clip": [1478, 1590], "conversations": [{"from": "human", "timestamps": 1508.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.188, 0.15, 0.846, 0.972]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1590.0, "value": "Describe the actions of the person at bbox [0.351, 0.134, 0.841, 0.96] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.351, 0.134, 0.841, 0.96]: walk, listen to (a person), talk to (e.g., self, a person, a group).", "At 1589.0s, [0.182, 0.226, 0.699, 0.948]: walk, talk to (e.g., self, a person, a group).", "At 1588.0s, [0.254, 0.159, 0.728, 0.966]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1586.0s to 1587.0s, [0.133, 0.07, 0.672, 0.97]->[0.273, 0.143, 0.688, 0.982]: walk, talk to (e.g., self, a person, a group).", "At 1585.0s, [0.111, 0.145, 0.447, 0.916]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1582.0s to 1584.0s, [0.278, 0.216, 0.648, 0.957]->[0.243, 0.142, 0.671, 0.971]->[0.197, 0.119, 0.62, 0.954]: walk, talk to (e.g., self, a person, a group).", "From 1580.0s to 1581.0s, [0.042, 0.176, 0.653, 0.947]->[0.276, 0.229, 0.591, 0.913]: stand, talk to (e.g., self, a person, a group).", "From 1578.0s to 1579.0s, [0.165, 0.192, 0.522, 0.955]->[0.19, 0.209, 0.626, 0.961]: walk, talk to (e.g., self, a person, a group).", "From 1576.0s to 1577.0s, [0.069, 0.161, 0.597, 0.983]->[0.022, 0.159, 0.537, 0.973]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1575.0s, [0.014, 0.181, 0.62, 0.978]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1573.0s to 1574.0s, [0.109, 0.025, 0.783, 0.955]->[0.065, 0.166, 0.837, 0.989]: stand, talk to (e.g., self, a person, a group).", "At 1572.0s, [0.084, 0.054, 0.899, 0.952]: stand.", "From 1570.0s to 1571.0s, [0.025, 0.462, 0.983, 0.967]->[0.031, 0.083, 0.967, 0.97]: get up.", "At 1569.0s, [0.112, 0.129, 0.754, 1.0]: sit.", "From 1565.0s to 1568.0s, [0.08, 0.11, 0.801, 0.959]->[0.076, 0.106, 0.806, 1.0]->[0.041, 0.154, 0.702, 0.977]->[0.154, 0.094, 0.717, 0.969]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "Rm518TUhbRY.mkv", "clip": [1601, 1651], "conversations": [{"from": "human", "timestamps": 1631.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.557, 0.071, 0.977, 0.896]: run/jog."}, {"from": "human", "timestamps": 1651.0, "value": "Describe the actions of the person at bbox [0.301, 0.032, 1.0, 0.928] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.301, 0.032, 1.0, 0.928]: bend/bow (at the waist).", "At 1649.0s, [0.397, 0.067, 1.0, 1.0]: stand.", "At 1648.0s, [0.487, 0.046, 0.998, 0.946]: stand, talk to (e.g., self, a person, a group).", "At 1647.0s, [0.393, 0.012, 0.983, 0.972]: bend/bow (at the waist).", "At 1646.0s, [0.248, 0.109, 0.981, 0.96]: bend/bow (at the waist), listen to (a person)."]}]}, {"video": "Rm518TUhbRY.mkv", "clip": [1628, 1679], "conversations": [{"from": "human", "timestamps": 1658.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", 
"value": "At now, [0.138, 0.016, 0.998, 0.969]: run/jog."}, {"from": "human", "timestamps": 1679.0, "value": "What actions is the person in bbox [0.682, 0.364, 0.776, 0.676] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.682, 0.364, 0.776, 0.676]: stand, carry/hold (an object), watch (a person).", "At 1678.0s, [0.68, 0.382, 0.784, 0.67]: stand, carry/hold (an object), close (e.g., a door, a box), listen to (a person).", "At 1677.0s, [0.66, 0.384, 0.748, 0.612]: stand, carry/hold (an object).", "At 1676.0s, [0.66, 0.401, 0.75, 0.618]: walk, carry/hold (an object), watch (a person).", "At 1675.0s, [0.694, 0.396, 0.811, 0.611]: walk, carry/hold (an object), touch (an object), watch (a person)."]}]}, {"video": "S0tkhGJjwLA.mkv", "clip": [882, 941], "conversations": [{"from": "human", "timestamps": 912.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.066, 0.018, 0.621, 0.986]: listen to (a person), stand; [0.511, 0.079, 0.993, 0.98]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 941.0, "value": "For the person at bbox [0.191, 0.047, 0.58, 0.993], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.191, 0.047, 0.58, 0.993]: walk.", "From 932.0s to 940.0s, [0.367, 0.08, 0.587, 0.986]->[0.476, 0.424, 0.653, 0.976]->[0.467, 0.454, 0.64, 0.984]->[0.357, 0.165, 0.595, 0.982]->[0.333, 0.149, 0.589, 1.0]->[0.383, 0.179, 0.584, 0.985]->[0.187, 0.056, 0.58, 0.975]->[0.193, 0.059, 0.594, 0.984]->[0.179, 0.082, 0.566, 0.989]: walk.", "At 931.0s, [0.368, 0.08, 0.588, 0.97]: walk, listen (e.g., to music).", "At 930.0s, [0.37, 0.081, 0.575, 0.955]: walk."]}]}, {"video": "S0tkhGJjwLA.mkv", "clip": [917, 995], "conversations": [{"from": "human", "timestamps": 947.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.308, 0.49, 0.626, 0.984]: sit, play musical instrument."}, {"from": "human", "timestamps": 995.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.523, 0.097, 0.989, 0.994]?"}, {"from": "gpt", "value": ["In the current frame, [0.523, 0.097, 0.989, 0.994]: stand, listen to (a person), watch (a person).", "From 990.0s to 994.0s, [0.606, 0.009, 1.0, 0.976]->[0.598, 0.0, 1.0, 0.985]->[0.52, 0.019, 1.0, 0.967]->[0.483, 0.092, 0.957, 0.985]->[0.488, 0.092, 0.965, 0.979]: stand, listen to (a person), watch (a person).", "At 988.0s, [0.523, 0.094, 1.0, 0.977]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 987.0s, [0.47, 0.106, 0.987, 0.982]: stand, talk to (e.g., self, a person, a group).", "At 986.0s, [0.635, 0.126, 1.0, 0.979]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 984.0s to 985.0s, [0.353, 0.16, 0.753, 0.98]->[0.506, 0.182, 0.819, 0.97]: stand, listen to (a person), watch (a person).", "At 983.0s, [0.67, 0.213, 1.0, 0.987]: stand, talk to (e.g., self, a person, a group).", "At 982.0s, [0.656, 0.192, 0.997, 0.998]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 981.0s, [0.288, 0.191, 0.741, 0.975]: stand, watch (a person).", "From 976.0s to 980.0s, [0.221, 0.038, 0.726, 0.96]->[0.299, 0.011, 0.763, 0.976]->[0.223, 0.02, 0.67, 0.967]->[0.355, 0.079, 0.567, 1.0]->[0.308, 0.062, 0.565, 0.964]: dance.", "From 965.0s to 974.0s, [0.137, 
0.015, 0.774, 1.0]->[0.308, 0.032, 0.871, 0.997]->[0.235, 0.04, 0.863, 0.991]->[0.431, 0.017, 0.824, 0.963]->[0.327, 0.027, 0.848, 0.975]->[0.611, 0.02, 0.887, 0.977]->[0.565, 0.021, 0.995, 1.0]->[0.57, 0.018, 0.983, 0.997]->[0.072, 0.0, 0.785, 0.993]->[0.38, 0.0, 1.0, 0.984]: dance.", "From 963.0s to 964.0s, [0.015, 0.004, 0.844, 0.984]->[0.052, 0.0, 0.95, 0.965]: stand.", "From 958.0s to 962.0s, [0.284, 0.036, 0.637, 0.997]->[0.201, 0.01, 0.68, 0.973]->[0.215, 0.001, 0.675, 0.983]->[0.198, 0.021, 0.629, 0.966]->[0.179, 0.052, 0.616, 0.998]: dance.", "At 957.0s, [0.329, 0.046, 0.684, 0.998]: stand."]}]}, {"video": "S0tkhGJjwLA.mkv", "clip": [942, 979], "conversations": [{"from": "human", "timestamps": 972.0, "value": "What actions did the person at bbox [0.57, 0.018, 0.983, 0.997] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.57, 0.018, 0.983, 0.997]: dance.", "From 965.0s to 971.0s, [0.137, 0.015, 0.774, 1.0]->[0.308, 0.032, 0.871, 0.997]->[0.235, 0.04, 0.863, 0.991]->[0.431, 0.017, 0.824, 0.963]->[0.327, 0.027, 0.848, 0.975]->[0.611, 0.02, 0.887, 0.977]->[0.565, 0.021, 0.995, 1.0]: dance.", "From 963.0s to 964.0s, [0.015, 0.004, 0.844, 0.984]->[0.052, 0.0, 0.95, 0.965]: stand.", "From 958.0s to 962.0s, [0.284, 0.036, 0.637, 0.997]->[0.201, 0.01, 0.68, 0.973]->[0.215, 0.001, 0.675, 0.983]->[0.198, 0.021, 0.629, 0.966]->[0.179, 0.052, 0.616, 0.998]: dance.", "At 957.0s, [0.329, 0.046, 0.684, 0.998]: stand."]}, {"from": "human", "timestamps": 979.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.355, 0.079, 0.567, 1.0]: dance."}]}, {"video": "S0tkhGJjwLA.mkv", "clip": [1002, 1074], "conversations": [{"from": "human", "timestamps": 1032.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.007, 0.018, 0.539, 0.998]: listen to (a person), stand, watch (a person); [0.501, 0.109, 0.99, 0.989]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1074.0, "value": "Describe the actions of the person at bbox [0.383, 0.089, 0.731, 0.992] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.383, 0.089, 0.731, 0.992]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1073.0s, [0.346, 0.049, 0.627, 0.989]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1072.0s, [0.007, 0.0, 0.465, 0.986]: stand, talk to (e.g., self, a person, a group).", "From 1069.0s to 1071.0s, [0.488, 0.019, 1.0, 0.995]->[0.508, 0.04, 0.993, 0.995]->[0.546, 0.041, 0.988, 0.974]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1068.0s, [0.486, 0.016, 0.999, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1066.0s to 1067.0s, [0.531, 0.021, 0.981, 0.981]->[0.572, 0.018, 0.998, 0.992]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1065.0s, [0.481, 0.074, 0.975, 0.935]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1062.0s to 1064.0s, [0.0, 0.153, 0.404, 0.985]->[0.0, 0.09, 0.343, 0.961]->[0.0, 0.21, 0.322, 0.973]: stand, watch (a person).", "At 1061.0s, [0.0, 0.182, 0.468, 0.999]: stand, hand shake, listen to (a person), watch (a person)."]}]}, {"video": "S0tkhGJjwLA.mkv", "clip": [1093, 1138], "conversations": [{"from": "human", "timestamps": 1123.0, "value": 
"What actions did the person at bbox [0.361, 0.056, 0.947, 0.986] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.361, 0.056, 0.947, 0.986]: stand, open (e.g., a window, a car door), talk to (e.g., self, a person, a group).", "At 1122.0s, [0.546, 0.066, 0.925, 0.99]: stand, open (e.g., a window, a car door), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1138.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.51, 0.064, 0.773, 0.511]: sit, answer phone."}]}, {"video": "S0tkhGJjwLA.mkv", "clip": [1119, 1190], "conversations": [{"from": "human", "timestamps": 1149.0, "value": "What actions did the person at bbox [0.113, 0.021, 1.0, 0.966] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.113, 0.021, 1.0, 0.966]: sit, answer phone.", "At 1148.0s, [0.368, 0.0, 0.978, 1.0]: sit, stand, answer phone.", "From 1145.0s to 1147.0s, [0.38, 0.0, 0.961, 1.0]->[0.344, 0.006, 0.982, 0.996]->[0.291, 0.002, 0.96, 0.986]: sit, answer phone.", "From 1138.0s to 1143.0s, [0.51, 0.064, 0.773, 0.511]->[0.305, 0.001, 0.983, 1.0]->[0.287, 0.0, 0.953, 0.99]->[0.296, 0.0, 0.971, 0.993]->[0.347, 0.0, 0.967, 0.994]->[0.356, 0.0, 0.932, 1.0]: sit, answer phone.", "At 1137.0s, [0.529, 0.078, 0.774, 0.511]: sit, answer phone, carry/hold (an object).", "At 1136.0s, [0.484, 0.073, 0.735, 0.486]: sit, answer phone."]}, {"from": "human", "timestamps": 1190.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.016, 0.099, 0.415, 0.95]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group); [0.394, 0.006, 0.996, 0.9]: listen to (a person), stand, lift/pick up."}]}, {"video": "S0tkhGJjwLA.mkv", "clip": [1250, 1296], "conversations": [{"from": "human", "timestamps": 1280.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.441, 0.108, 0.986, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1296.0, "value": "For the person at bbox [0.754, 0.169, 1.0, 0.989], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.754, 0.169, 1.0, 0.989]: stand, listen to (a person), watch (a person).", "From 1294.0s to 1295.0s, [0.745, 0.16, 1.0, 0.984]->[0.775, 0.162, 1.0, 0.985]: stand, listen to (a person), watch (a person).", "At 1293.0s, [0.285, 0.023, 0.957, 0.974]: stand, watch (a person)."]}]}, {"video": "S0tkhGJjwLA.mkv", "clip": [1360, 1392], "conversations": [{"from": "human", "timestamps": 1390.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.012, 0.0, 0.668, 0.991]: watch (a person), stand; [0.535, 0.169, 0.991, 1.0]: watch (a person), stand."}, {"from": "human", "timestamps": 1392.0, "value": "What actions is the person in bbox [0.239, 0.029, 0.909, 0.958] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.239, 0.029, 0.909, 0.958]: stand.", "From 1383.0s to 1391.0s, [0.155, 0.004, 0.956, 0.972]->[0.415, 0.002, 0.989, 0.993]->[0.504, 0.174, 1.0, 0.995]->[0.533, 0.178, 0.997, 1.0]->[0.394, 0.012, 0.996, 0.988]->[0.367, 0.01, 0.993, 0.988]->[0.53, 0.166, 1.0, 0.995]->[0.535, 0.169, 0.991, 1.0]->[0.521, 0.123, 0.999, 1.0]: stand, watch (a 
person).", "From 1379.0s to 1382.0s, [0.27, 0.026, 0.924, 0.972]->[0.577, 0.223, 1.0, 1.0]->[0.572, 0.228, 1.0, 0.998]->[0.576, 0.23, 1.0, 0.98]: stand, listen to (a person), watch (a person).", "At 1378.0s, [0.146, 0.006, 0.929, 0.966]: stand, watch (a person).", "At 1376.0s, [0.285, 0.02, 0.987, 0.976]: stand.", "From 1371.0s to 1375.0s, [0.563, 0.197, 0.996, 0.986]->[0.283, 0.008, 0.947, 0.964]->[0.253, 0.011, 0.942, 0.968]->[0.567, 0.224, 0.994, 0.989]->[0.538, 0.256, 1.0, 0.996]: stand, watch (a person).", "At 1370.0s, [0.563, 0.219, 0.999, 0.994]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1365.0s to 1369.0s, [0.315, 0.01, 1.0, 0.974]->[0.308, 0.011, 1.0, 0.977]->[0.298, 0.014, 1.0, 0.991]->[0.299, 0.012, 1.0, 0.983]->[0.185, 0.021, 1.0, 0.983]: stand, talk to (e.g., self, a person, a group).", "At 1364.0s, [0.244, 0.014, 0.917, 0.985]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1363.0s, [0.825, 0.14, 1.0, 0.996]: stand, listen to (a person).", "At 1362.0s, [0.759, 0.154, 1.0, 0.952]: stand, listen to (a person), watch (a person).", "From 1359.0s to 1361.0s, [0.358, 0.001, 0.994, 0.992]->[0.381, 0.001, 1.0, 0.981]->[0.434, 0.0, 1.0, 0.98]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "S0tkhGJjwLA.mkv", "clip": [1411, 1481], "conversations": [{"from": "human", "timestamps": 1441.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.059, 0.009, 0.808, 0.967]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.539, 0.005, 0.997, 0.99]: listen to (a person), stand, kiss (a person), watch (a person)."}, {"from": "human", "timestamps": 1481.0, "value": "For the person at bbox [0.47, 0.0, 1.0, 0.994], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.47, 0.0, 1.0, 0.994]: stand, listen to (a person), watch (a person).", "At 1480.0s, [0.359, 0.061, 0.503, 0.961]: stand.", "From 1478.0s to 1479.0s, [0.391, 0.035, 0.64, 0.968]->[0.354, 0.042, 0.577, 0.983]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1476.0s, [0.616, 0.007, 1.0, 0.978]: walk, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "S0tkhGJjwLA.mkv", "clip": [1478, 1582], "conversations": [{"from": "human", "timestamps": 1508.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.101, 0.031, 1.0, 0.959]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1582.0, "value": "What actions is the person in bbox [0.063, 0.0, 0.671, 0.984] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.063, 0.0, 0.671, 0.984]: stand, watch (a person).", "From 1579.0s to 1581.0s, [0.037, 0.007, 0.651, 0.99]->[0.083, 0.0, 0.687, 0.99]->[0.088, 0.0, 0.671, 0.992]: stand.", "At 1578.0s, [0.004, 0.005, 0.381, 0.988]: stand, watch (a person)."]}]}, {"video": "S0tkhGJjwLA.mkv", "clip": [1607, 1685], "conversations": [{"from": "human", "timestamps": 1637.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.631, 0.395, 0.988, 0.974]: carry/hold (an object), sit, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1685.0, "value": "Can you tell me the current and past actions of the person 
located in bbox [0.06, 0.001, 0.928, 0.961]?"}, {"from": "gpt", "value": ["At Now, [0.06, 0.001, 0.928, 0.961]: sit.", "At 1684.0s, [0.0, 0.006, 0.858, 0.98]: sit.", "From 1682.0s to 1683.0s, [0.0, 0.0, 0.736, 0.986]->[0.006, 0.016, 0.764, 0.965]: stand."]}]}, {"video": "S0tkhGJjwLA.mkv", "clip": [1733, 1797], "conversations": [{"from": "human", "timestamps": 1763.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.021, 0.006, 0.76, 0.995]: sit, eat."}, {"from": "human", "timestamps": 1797.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.019, 0.905, 0.979]?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.019, 0.905, 0.979]: sit.", "At 1796.0s, [0.003, 0.009, 0.868, 0.991]: sit."]}]}, {"video": "SHBMiL5f_3Q.mkv", "clip": [873, 907], "conversations": [{"from": "human", "timestamps": 903.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.034, 0.374, 0.709, 0.977]: stand."}, {"from": "human", "timestamps": 907.0, "value": "For the person at bbox [0.083, 0.263, 0.749, 0.991], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.083, 0.263, 0.749, 0.991]: stand.", "At 906.0s, [0.13, 0.439, 0.714, 0.988]: stand.", "At 905.0s, [0.141, 0.531, 0.728, 0.994]: bend/bow (at the waist).", "From 902.0s to 903.0s, [0.102, 0.266, 0.825, 0.984]->[0.034, 0.374, 0.709, 0.977]: stand."]}]}, {"video": "SHBMiL5f_3Q.mkv", "clip": [906, 1015], "conversations": [{"from": "human", "timestamps": 936.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.37, 0.11, 0.902, 0.991]: walk."}, {"from": "human", "timestamps": 1015.0, "value": "What actions did the person at bbox [0.369, 0.214, 0.994, 0.99] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.369, 0.214, 0.994, 0.99]: stand, watch (a person).", "At 1014.0s, [0.447, 0.144, 0.99, 0.97]: stand, watch (a person).", "From 1004.0s to 1013.0s, [0.448, 0.186, 0.999, 0.974]->[0.483, 0.172, 0.996, 0.993]->[0.461, 0.184, 0.998, 0.968]->[0.461, 0.18, 0.996, 0.971]->[0.456, 0.176, 0.997, 0.969]->[0.431, 0.167, 0.996, 0.977]->[0.442, 0.155, 1.0, 1.0]->[0.481, 0.161, 1.0, 0.964]->[0.432, 0.169, 0.998, 0.973]->[0.445, 0.167, 1.0, 0.967]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1002.0s to 1003.0s, [0.405, 0.211, 0.965, 0.978]->[0.43, 0.19, 1.0, 0.971]: stand, listen to (a person), watch (a person).", "At 1001.0s, [0.424, 0.217, 0.975, 0.972]: stand, watch (a person).", "From 997.0s to 1000.0s, [0.459, 0.219, 0.987, 0.981]->[0.459, 0.222, 1.0, 0.986]->[0.47, 0.219, 0.995, 0.977]->[0.413, 0.227, 0.986, 0.966]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "SHBMiL5f_3Q.mkv", "clip": [959, 1004], "conversations": [{"from": "human", "timestamps": 989.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.0, 0.02, 0.494, 0.979]: listen to (a person), stand, grab (a person), watch (a person); [0.373, 0.284, 0.967, 0.981]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1004.0, "value": "What actions is the person in bbox [0.448, 0.186, 0.999, 0.974] doing in the current frame, and what were the person's previous actions?"}, 
{"from": "gpt", "value": ["At Now, [0.448, 0.186, 0.999, 0.974]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1002.0s to 1003.0s, [0.405, 0.211, 0.965, 0.978]->[0.43, 0.19, 1.0, 0.971]: stand, listen to (a person), watch (a person).", "At 1001.0s, [0.424, 0.217, 0.975, 0.972]: stand, watch (a person).", "From 997.0s to 1000.0s, [0.459, 0.219, 0.987, 0.981]->[0.459, 0.222, 1.0, 0.986]->[0.47, 0.219, 0.995, 0.977]->[0.413, 0.227, 0.986, 0.966]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "SHBMiL5f_3Q.mkv", "clip": [996, 1063], "conversations": [{"from": "human", "timestamps": 1026.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.073, 0.061, 0.628, 0.985]?"}, {"from": "gpt", "value": ["At Now, [0.073, 0.061, 0.628, 0.985]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1024.0s to 1025.0s, [0.056, 0.054, 0.637, 0.98]->[0.063, 0.064, 0.61, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1022.0s to 1023.0s, [0.07, 0.091, 0.662, 0.984]->[0.055, 0.082, 0.621, 0.994]: stand, listen to (a person), watch (a person).", "From 1020.0s to 1021.0s, [0.126, 0.152, 0.741, 0.987]->[0.082, 0.09, 0.657, 0.973]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1017.0s to 1019.0s, [0.048, 0.188, 0.891, 0.973]->[0.055, 0.036, 0.857, 0.98]->[0.064, 0.078, 0.814, 0.975]: stand, watch (a person).", "At 1016.0s, [0.0, 0.14, 0.73, 0.986]: get up, watch (a person).", "From 1014.0s to 1015.0s, [0.003, 0.0, 0.646, 0.989]->[0.001, 0.005, 0.535, 0.993]: stand, watch (a person).", "From 1004.0s to 1013.0s, [0.0, 0.0, 0.588, 0.994]->[0.01, 0.0, 0.68, 0.99]->[0.003, 0.01, 0.606, 0.986]->[0.003, 0.004, 0.619, 0.991]->[0.002, 0.001, 0.599, 0.987]->[0.007, 0.0, 0.616, 0.988]->[0.0, 0.0, 0.559, 0.986]->[0.0, 0.0, 0.634, 0.988]->[0.006, 0.008, 0.657, 0.994]->[0.012, 0.003, 0.692, 0.99]: stand, listen to (a person), watch (a person).", "From 1002.0s to 1003.0s, [0.0, 0.0, 0.479, 0.971]->[0.0, 0.0, 0.525, 0.987]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1001.0s, [0.0, 0.002, 0.495, 0.99]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 997.0s to 1000.0s, [0.0, 0.0, 0.53, 0.982]->[0.0, 0.0, 0.516, 0.98]->[0.0, 0.003, 0.529, 0.982]->[0.0, 0.004, 0.52, 0.971]: stand, listen to (a person), watch (a person).", "From 995.0s to 996.0s, [0.02, 0.082, 0.66, 0.986]->[0.058, 0.076, 0.763, 1.0]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1063.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.331, 0.449, 0.692, 0.996]: walk."}]}, {"video": "SHBMiL5f_3Q.mkv", "clip": [1023, 1106], "conversations": [{"from": "human", "timestamps": 1053.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.119, 0.262, 0.471, 0.985]: watch (a person), stand; [0.409, 0.162, 0.835, 1.0]: stand."}, {"from": "human", "timestamps": 1106.0, "value": "For the person at bbox [0.136, 0.103, 0.497, 0.964], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.136, 0.103, 0.497, 0.964]: bend/bow (at the waist), touch (an object), listen to (a person), watch (a person).", "From 1104.0s to 1105.0s, [0.138, 0.093, 0.495, 0.975]->[0.139, 0.104, 0.497, 0.972]: bend/bow (at the waist), touch (an object), 
listen to (a person), watch (a person).", "At 1103.0s, [0.13, 0.117, 0.494, 0.978]: bend/bow (at the waist), touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1102.0s, [0.111, 0.208, 0.479, 0.98]: bend/bow (at the waist), touch (an object), listen to (a person), watch (a person).", "At 1101.0s, [0.105, 0.094, 0.433, 0.983]: stand, watch (a person).", "At 1100.0s, [0.111, 0.058, 0.396, 0.994]: stand, listen to (a person), watch (a person).", "At 1099.0s, [0.166, 0.117, 0.455, 0.993]: stand, watch (a person).", "At 1098.0s, [0.188, 0.201, 0.434, 0.988]: walk, listen to (a person), watch (a person).", "At 1097.0s, [0.332, 0.302, 0.558, 0.993]: walk, watch (a person).", "From 1091.0s to 1096.0s, [0.222, 0.26, 0.777, 0.99]->[0.151, 0.239, 0.604, 0.974]->[0.184, 0.138, 0.565, 0.981]->[0.131, 0.111, 0.494, 0.953]->[0.18, 0.334, 0.41, 0.986]->[0.309, 0.313, 0.476, 0.985]: walk.", "From 1089.0s to 1090.0s, [0.147, 0.156, 0.797, 0.984]->[0.151, 0.163, 0.786, 0.976]: stand.", "At 1088.0s, [0.155, 0.166, 0.797, 0.987]: stand, watch (a person).", "At 1087.0s, [0.033, 0.007, 1.0, 0.999]: bend/bow (at the waist), carry/hold (an object), watch (a person).", "At 1086.0s, [0.033, 0.024, 0.963, 0.959]: crouch/kneel."]}]}, {"video": "SHBMiL5f_3Q.mkv", "clip": [1063, 1112], "conversations": [{"from": "human", "timestamps": 1093.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.184, 0.138, 0.565, 0.981]: walk."}, {"from": "human", "timestamps": 1112.0, "value": "Describe the actions of the person at bbox [0.165, 0.206, 0.577, 0.979] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.165, 0.206, 0.577, 0.979]: bend/bow (at the waist), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1108.0s to 1111.0s, [0.126, 0.123, 0.489, 0.963]->[0.142, 0.179, 0.551, 0.987]->[0.143, 0.195, 0.551, 0.974]->[0.159, 0.212, 0.604, 0.988]: bend/bow (at the waist), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1104.0s to 1107.0s, [0.138, 0.093, 0.495, 0.975]->[0.139, 0.104, 0.497, 0.972]->[0.136, 0.103, 0.497, 0.964]->[0.14, 0.099, 0.49, 0.984]: bend/bow (at the waist), touch (an object), listen to (a person), watch (a person).", "At 1103.0s, [0.13, 0.117, 0.494, 0.978]: bend/bow (at the waist), touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1102.0s, [0.111, 0.208, 0.479, 0.98]: bend/bow (at the waist), touch (an object), listen to (a person), watch (a person).", "At 1101.0s, [0.105, 0.094, 0.433, 0.983]: stand, watch (a person).", "At 1100.0s, [0.111, 0.058, 0.396, 0.994]: stand, listen to (a person), watch (a person).", "At 1099.0s, [0.166, 0.117, 0.455, 0.993]: stand, watch (a person).", "At 1098.0s, [0.188, 0.201, 0.434, 0.988]: walk, listen to (a person), watch (a person).", "At 1097.0s, [0.332, 0.302, 0.558, 0.993]: walk, watch (a person).", "From 1091.0s to 1096.0s, [0.222, 0.26, 0.777, 0.99]->[0.151, 0.239, 0.604, 0.974]->[0.184, 0.138, 0.565, 0.981]->[0.131, 0.111, 0.494, 0.953]->[0.18, 0.334, 0.41, 0.986]->[0.309, 0.313, 0.476, 0.985]: walk.", "From 1089.0s to 1090.0s, [0.147, 0.156, 0.797, 0.984]->[0.151, 0.163, 0.786, 0.976]: stand.", "At 1088.0s, [0.155, 0.166, 0.797, 0.987]: stand, watch (a person).", "At 1087.0s, [0.033, 0.007, 1.0, 0.999]: bend/bow (at the waist), carry/hold (an object), watch (a 
person).", "At 1086.0s, [0.033, 0.024, 0.963, 0.959]: crouch/kneel."]}]}, {"video": "SHBMiL5f_3Q.mkv", "clip": [1212, 1258], "conversations": [{"from": "human", "timestamps": 1242.0, "value": "What actions is the person in bbox [0.419, 0.336, 0.544, 0.586] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.419, 0.336, 0.544, 0.586]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1241.0s, [0.494, 0.28, 0.61, 0.523]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1240.0s, [0.673, 0.338, 0.782, 0.634]: walk, carry/hold (an object), watch (a person)."]}, {"from": "human", "timestamps": 1258.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.005, 0.2, 0.185, 0.985]: listen to (a person), stand, watch (a person); [0.416, 0.342, 0.531, 0.601]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.611, 0.149, 0.911, 0.99]: listen to (a person), stand, watch (a person)."}]}, {"video": "SHBMiL5f_3Q.mkv", "clip": [1253, 1285], "conversations": [{"from": "human", "timestamps": 1283.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.51, 0.174, 0.99, 0.996]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1285.0, "value": "For the person at bbox [0.0, 0.011, 0.592, 0.992], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.011, 0.592, 0.992]: stand, carry/hold (an object), listen to (a person), take (an object) from (a person).", "At 1284.0s, [0.0, 0.01, 0.331, 1.0]: walk, listen to (a person)."]}]}, {"video": "SHBMiL5f_3Q.mkv", "clip": [1239, 1365], "conversations": [{"from": "human", "timestamps": 1269.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.196, 0.142, 0.808, 0.987]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.789, 0.035, 1.0, 0.989]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1365.0, "value": "For the person at bbox [0.022, 0.115, 0.304, 0.982], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.022, 0.115, 0.304, 0.982]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1363.0s to 1364.0s, [0.014, 0.109, 0.298, 0.984]->[0.022, 0.103, 0.304, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1362.0s, [0.016, 0.109, 0.3, 1.0]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1361.0s, [0.013, 0.107, 0.305, 0.986]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1360.0s, [0.017, 0.11, 0.288, 0.988]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1356.0s to 1359.0s, [0.011, 0.104, 0.286, 0.979]->[0.019, 0.104, 0.283, 0.992]->[0.012, 0.115, 0.29, 0.976]->[0.016, 0.113, 0.292, 0.983]: stand, talk to (e.g., self, a person, a group).", "From 1352.0s to 1355.0s, [0.015, 0.104, 0.287, 0.986]->[0.013, 0.108, 0.287, 0.996]->[0.018, 0.109, 0.285, 0.984]->[0.014, 0.112, 0.282, 0.992]: stand, listen to (a person).", 
"At 1351.0s, [0.09, 0.151, 0.309, 0.975]: stand, watch (e.g., TV), listen to (a person).", "At 1350.0s, [0.03, 0.153, 0.25, 0.969]: stand, listen to (a person).", "From 1348.0s to 1349.0s, [0.011, 0.175, 0.249, 0.969]->[0.014, 0.157, 0.231, 0.964]: stand.", "At 1347.0s, [0.053, 0.191, 0.276, 0.973]: stand, listen to (a person)."]}]}, {"video": "SHBMiL5f_3Q.mkv", "clip": [1317, 1374], "conversations": [{"from": "human", "timestamps": 1347.0, "value": "What actions did the person at bbox [0.286, 0.211, 0.484, 0.985] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.286, 0.211, 0.484, 0.985]: walk, carry/hold (an object), read.", "From 1345.0s to 1346.0s, [0.02, 0.079, 0.396, 0.976]->[0.082, 0.225, 0.326, 0.985]: walk, read.", "From 1343.0s to 1344.0s, [0.558, 0.107, 1.0, 0.986]->[0.149, 0.016, 0.834, 0.98]: walk, carry/hold (an object)."]}, {"from": "human", "timestamps": 1374.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.591, 0.092, 1.0, 0.972]: walk."}]}, {"video": "SHBMiL5f_3Q.mkv", "clip": [1372, 1403], "conversations": [{"from": "human", "timestamps": 1402.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.425, 0.358, 0.994]: listen to (a person), sit, watch (a person); [0.71, 0.039, 1.0, 0.994]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1403.0, "value": "What actions did the person at bbox [0.0, 0.427, 0.349, 0.994] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.427, 0.349, 0.994]: sit, listen to (a person), watch (a person).", "From 1400.0s to 1402.0s, [0.0, 0.426, 0.46, 0.994]->[0.0, 0.428, 0.494, 0.998]->[0.0, 0.425, 0.358, 0.994]: sit, listen to (a person), watch (a person).", "At 1399.0s, [0.014, 0.427, 0.411, 0.994]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1398.0s, [0.084, 0.418, 0.423, 0.998]: sit, carry/hold (an object), watch (a person).", "At 1397.0s, [0.138, 0.463, 0.484, 0.991]: sit, carry/hold (an object).", "From 1394.0s to 1396.0s, [0.137, 0.504, 0.513, 0.99]->[0.14, 0.5, 0.51, 0.987]->[0.141, 0.482, 0.505, 0.991]: sit, read.", "At 1393.0s, [0.137, 0.5, 0.516, 0.988]: sit, read, touch (an object).", "At 1392.0s, [0.134, 0.514, 0.527, 0.995]: sit, read.", "From 1390.0s to 1391.0s, [0.133, 0.471, 0.58, 1.0]->[0.143, 0.5, 0.544, 0.998]: sit, carry/hold (an object), read.", "At 1389.0s, [0.132, 0.483, 0.583, 0.999]: sit, read.", "At 1388.0s, [0.052, 0.42, 0.446, 0.992]: sit, carry/hold (an object).", "At 1387.0s, [0.198, 0.39, 0.631, 0.994]: bend/bow (at the waist), carry/hold (an object)."]}]}, {"video": "SHBMiL5f_3Q.mkv", "clip": [1497, 1554], "conversations": [{"from": "human", "timestamps": 1527.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.005, 0.016, 0.355, 0.942]: carry/hold (an object), stand, listen to (a person)."}, {"from": "human", "timestamps": 1554.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.358, 0.0, 0.997, 0.996]?"}, {"from": "gpt", "value": ["In the current frame, [0.358, 0.0, 0.997, 0.996]: stand.", "From 1552.0s to 1553.0s, [0.369, 0.0, 0.996, 0.998]->[0.472, 0.035, 1.0, 0.969]: stand."]}]}, {"video": "T-Fc9ctuNVI.mkv", "clip": [907, 949], "conversations": [{"from": "human", "timestamps": 937.0, 
"value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.046, 0.203, 0.271, 0.679]: sit; [0.246, 0.136, 0.878, 0.991]: sit."}, {"from": "human", "timestamps": 949.0, "value": "Describe the actions of the person at bbox [0.252, 0.103, 0.961, 0.942] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.252, 0.103, 0.961, 0.942]: sit, talk to (e.g., self, a person, a group).", "From 938.0s to 948.0s, [0.166, 0.049, 0.881, 0.974]->[0.189, 0.134, 0.823, 0.955]->[0.16, 0.12, 0.832, 0.977]->[0.171, 0.073, 0.821, 0.96]->[0.173, 0.065, 0.852, 0.974]->[0.224, 0.047, 0.913, 0.973]->[0.223, 0.117, 0.933, 0.975]->[0.211, 0.1, 0.946, 0.936]->[0.244, 0.072, 0.969, 0.937]->[0.225, 0.089, 0.945, 0.922]->[0.244, 0.055, 0.95, 0.956]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "T-Fc9ctuNVI.mkv", "clip": [901, 975], "conversations": [{"from": "human", "timestamps": 931.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.239, 0.153, 0.947, 0.98]: sit, talk to (e.g., self, a person, a group); [0.798, 0.354, 0.953, 0.713]: carry/hold (an object), sit."}, {"from": "human", "timestamps": 975.0, "value": "What actions did the person at bbox [0.463, 0.211, 0.719, 0.557] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.463, 0.211, 0.719, 0.557]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 969.0s to 974.0s, [0.526, 0.222, 0.714, 0.545]->[0.529, 0.231, 0.723, 0.569]->[0.538, 0.221, 0.712, 0.555]->[0.505, 0.21, 0.711, 0.572]->[0.508, 0.221, 0.706, 0.545]->[0.503, 0.213, 0.711, 0.549]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 968.0s, [0.548, 0.224, 0.723, 0.619]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 967.0s, [0.531, 0.217, 0.719, 0.603]: sit, touch (an object), listen to (a person).", "At 966.0s, [0.541, 0.222, 0.716, 0.585]: sit, touch (an object), listen to (a person), watch (a person).", "At 965.0s, [0.542, 0.216, 0.715, 0.585]: sit, carry/hold (an object), listen to (a person).", "At 964.0s, [0.549, 0.214, 0.714, 0.569]: sit, touch (an object), listen to (a person), watch (a person).", "At 963.0s, [0.546, 0.239, 0.716, 0.598]: sit, carry/hold (an object), listen to (a person).", "At 962.0s, [0.546, 0.227, 0.716, 0.581]: sit, listen to (a person)."]}]}, {"video": "T-Fc9ctuNVI.mkv", "clip": [932, 1011], "conversations": [{"from": "human", "timestamps": 962.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.039, 0.241, 0.425, 0.994]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.247, 0.196, 0.397, 0.588]: listen to (a person), sit; [0.546, 0.227, 0.716, 0.581]: listen to (a person), sit; [0.606, 0.216, 0.827, 0.713]: listen to (a person), sit, watch (a person); [0.728, 0.172, 0.971, 0.985]: listen to (a person), sit, touch (an object), watch (a person)."}, {"from": "human", "timestamps": 1011.0, "value": "Describe the actions of the person at bbox [0.081, 0.224, 0.461, 0.889] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.081, 0.224, 0.461, 0.889]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 999.0s to 
1010.0s, [0.034, 0.215, 0.451, 0.91]->[0.049, 0.234, 0.491, 0.905]->[0.046, 0.246, 0.499, 0.915]->[0.064, 0.243, 0.541, 0.901]->[0.048, 0.234, 0.495, 0.908]->[0.042, 0.228, 0.49, 0.918]->[0.048, 0.237, 0.494, 0.904]->[0.037, 0.221, 0.433, 0.921]->[0.072, 0.216, 0.496, 0.892]->[0.1, 0.212, 0.487, 0.886]->[0.09, 0.214, 0.505, 0.872]->[0.07, 0.224, 0.465, 0.895]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "T-Fc9ctuNVI.mkv", "clip": [991, 1030], "conversations": [{"from": "human", "timestamps": 1021.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.04, 0.193, 0.222, 0.625]: carry/hold (an object), sit; [0.052, 0.232, 0.483, 0.902]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.386, 0.052, 0.679, 0.722]: listen to (a person), sit; [0.753, 0.112, 0.965, 0.983]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1030.0, "value": "What actions did the person at bbox [0.522, 0.112, 0.841, 0.894] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.522, 0.112, 0.841, 0.894]: sit, listen to (a person), watch (a person).", "From 1026.0s to 1029.0s, [0.463, 0.136, 0.729, 0.79]->[0.48, 0.129, 0.75, 0.794]->[0.514, 0.11, 0.784, 0.816]->[0.518, 0.116, 0.837, 0.846]: sit, listen to (a person), watch (a person).", "From 1021.0s to 1025.0s, [0.386, 0.052, 0.679, 0.722]->[0.442, 0.135, 0.712, 0.79]->[0.443, 0.127, 0.716, 0.811]->[0.461, 0.137, 0.728, 0.796]->[0.469, 0.146, 0.729, 0.801]: sit, listen to (a person)."]}]}, {"video": "T-Fc9ctuNVI.mkv", "clip": [1007, 1100], "conversations": [{"from": "human", "timestamps": 1037.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.073, 0.15, 0.447, 0.699]: sit; [0.354, 0.194, 0.671, 0.741]: watch (a person), sit; [0.639, 0.155, 0.952, 0.855]: sit."}, {"from": "human", "timestamps": 1100.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.038, 0.148, 0.32, 0.985]?"}, {"from": "gpt", "value": ["In the current frame, [0.038, 0.148, 0.32, 0.985]: stand, listen to (a person), watch (a person).", "At 1099.0s, [0.035, 0.136, 0.414, 1.0]: stand, listen to (a person), watch (a person).", "At 1098.0s, [0.029, 0.131, 0.398, 0.993]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1097.0s, [0.067, 0.129, 0.579, 0.986]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "T-Fc9ctuNVI.mkv", "clip": [1091, 1134], "conversations": [{"from": "human", "timestamps": 1121.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.204, 0.165, 0.695, 0.996]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.825, 0.021, 0.982, 0.988]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1134.0, "value": "For the person at bbox [0.065, 0.197, 0.482, 0.988], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.065, 0.197, 0.482, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1133.0s, [0.023, 0.191, 0.46, 0.992]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "T-Fc9ctuNVI.mkv", "clip": [1140, 1180], "conversations": [{"from": "human", "timestamps": 1170.0, 
"value": "Describe the actions of the person at bbox [0.034, 0.23, 0.348, 0.989] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.034, 0.23, 0.348, 0.989]: stand, listen to (a person), watch (a person).", "From 1166.0s to 1169.0s, [0.039, 0.236, 0.343, 0.987]->[0.04, 0.24, 0.347, 0.99]->[0.042, 0.232, 0.348, 0.984]->[0.042, 0.221, 0.348, 0.992]: stand, listen to (a person), watch (a person).", "From 1163.0s to 1165.0s, [0.036, 0.214, 0.325, 0.98]->[0.036, 0.23, 0.328, 0.989]->[0.035, 0.255, 0.363, 0.987]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1180.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.103, 0.055, 0.92, 0.991]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "T-Fc9ctuNVI.mkv", "clip": [1117, 1241], "conversations": [{"from": "human", "timestamps": 1147.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.04, 0.2, 0.346, 0.996]: listen to (a person), stand, watch (a person); [0.731, 0.189, 0.965, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1241.0, "value": "What actions is the person in bbox [0.344, 0.13, 0.968, 0.989] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.344, 0.13, 0.968, 0.989]: stand, talk to (e.g., self, a person, a group).", "From 1238.0s to 1240.0s, [0.333, 0.167, 0.971, 0.98]->[0.325, 0.152, 0.971, 0.975]->[0.352, 0.15, 0.977, 0.984]: stand, talk to (e.g., self, a person, a group).", "At 1237.0s, [0.394, 0.087, 0.937, 0.972]: walk, talk to (e.g., self, a person, a group)."]}]}, {"video": "T-Fc9ctuNVI.mkv", "clip": [1212, 1257], "conversations": [{"from": "human", "timestamps": 1242.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.335, 0.128, 0.977, 0.982]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1257.0, "value": "Describe the actions of the person at bbox [0.36, 0.112, 0.971, 0.993] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.36, 0.112, 0.971, 0.993]: stand, talk to (e.g., self, a person, a group).", "From 1253.0s to 1256.0s, [0.252, 0.117, 0.96, 0.98]->[0.376, 0.111, 0.968, 0.979]->[0.378, 0.085, 0.965, 0.997]->[0.345, 0.094, 0.969, 0.983]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "T-Fc9ctuNVI.mkv", "clip": [1248, 1286], "conversations": [{"from": "human", "timestamps": 1278.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.093, 0.399, 0.558, 0.988]: listen to (a person), sit, watch (a person); [0.575, 0.117, 0.752, 0.812]: watch (a person), sing to (e.g., self, a person, a group), dance."}, {"from": "human", "timestamps": 1286.0, "value": "Describe the actions of the person at bbox [0.167, 0.516, 0.482, 0.99] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.167, 0.516, 0.482, 0.99]: sit, listen to (a person), watch (a person).", "At 1285.0s, [0.136, 0.515, 0.424, 0.999]: sit, listen to (a person), watch (a person).", "At 1284.0s, [0.144, 0.516, 0.422, 0.996]: sit, listen (e.g., to music), listen to (a person), watch (a person).", 
"At 1283.0s, [0.144, 0.498, 0.428, 0.993]: sit, listen to (a person), watch (a person).", "From 1279.0s to 1282.0s, [0.128, 0.412, 0.518, 0.984]->[0.13, 0.444, 0.49, 0.998]->[0.128, 0.473, 0.45, 0.991]->[0.134, 0.521, 0.424, 0.998]: sit, watch (a person).", "At 1278.0s, [0.093, 0.399, 0.558, 0.988]: sit, listen to (a person), watch (a person).", "From 1276.0s to 1277.0s, [0.056, 0.309, 0.6, 0.98]->[0.07, 0.362, 0.583, 0.975]: sit, watch (a person).", "From 1273.0s to 1275.0s, [0.045, 0.201, 0.638, 1.0]->[0.045, 0.263, 0.627, 0.981]->[0.053, 0.28, 0.606, 0.979]: sit, listen to (a person), watch (a person).", "At 1272.0s, [0.031, 0.23, 0.623, 0.982]: sit, stand, listen to (a person), watch (a person).", "At 1271.0s, [0.039, 0.159, 0.649, 0.978]: sit, listen to (a person), watch (a person).", "At 1270.0s, [0.038, 0.151, 0.672, 0.979]: sit, watch (a person).", "At 1269.0s, [0.052, 0.154, 0.661, 0.996]: sit, stand, listen to (a person), watch (a person).", "At 1268.0s, [0.083, 0.186, 0.872, 0.98]: sit, watch (a person).", "At 1267.0s, [0.087, 0.205, 0.856, 0.979]: sit, press."]}]}, {"video": "T-Fc9ctuNVI.mkv", "clip": [1241, 1351], "conversations": [{"from": "human", "timestamps": 1271.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.039, 0.159, 0.649, 0.978]: listen to (a person), sit, watch (a person); [0.526, 0.2, 0.786, 0.711]: watch (a person), sing to (e.g., self, a person, a group), dance."}, {"from": "human", "timestamps": 1351.0, "value": "Describe the actions of the person at bbox [0.043, 0.223, 0.282, 0.902] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.043, 0.223, 0.282, 0.902]: sit, talk to (e.g., self, a person, a group).", "From 1347.0s to 1350.0s, [0.043, 0.14, 0.517, 0.979]->[0.0, 0.095, 0.395, 0.957]->[0.025, 0.079, 0.478, 0.99]->[0.047, 0.239, 0.313, 0.966]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1346.0s, [0.029, 0.124, 0.543, 0.974]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1343.0s to 1345.0s, [0.043, 0.183, 0.684, 0.981]->[0.06, 0.152, 0.738, 0.951]->[0.097, 0.112, 0.746, 0.961]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1342.0s, [0.068, 0.18, 0.599, 0.967]: sit, talk to (e.g., self, a person, a group).", "From 1340.0s to 1341.0s, [0.076, 0.168, 0.638, 0.957]->[0.11, 0.172, 0.646, 0.957]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1339.0s, [0.049, 0.117, 0.696, 0.992]: sit, talk to (e.g., self, a person, a group).", "At 1338.0s, [0.124, 0.14, 0.681, 0.967]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1335.0s to 1337.0s, [0.048, 0.248, 0.414, 0.971]->[0.052, 0.24, 0.411, 0.966]->[0.053, 0.246, 0.405, 0.972]: sit.", "From 1332.0s to 1334.0s, [0.054, 0.251, 0.419, 0.951]->[0.051, 0.244, 0.408, 0.983]->[0.052, 0.241, 0.415, 0.978]: sit, touch (an object).", "From 1330.0s to 1331.0s, [0.045, 0.248, 0.372, 0.96]->[0.049, 0.227, 0.386, 0.964]: sit.", "At 1329.0s, [0.042, 0.234, 0.309, 0.975]: sit, carry/hold (an object), watch (a person).", "From 1327.0s to 1328.0s, [0.04, 0.227, 0.288, 0.979]->[0.04, 0.231, 0.291, 0.991]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1326.0s, [0.039, 0.228, 0.289, 0.981]: sit, carry/hold (an object), watch (a person)."]}]}, {"video": "T-Fc9ctuNVI.mkv", "clip": [1334, 1378], 
"conversations": [{"from": "human", "timestamps": 1364.0, "value": "What actions did the person at bbox [0.014, 0.104, 0.472, 0.981] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.014, 0.104, 0.472, 0.981]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1363.0s, [0.057, 0.149, 0.57, 0.955]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1361.0s to 1362.0s, [0.017, 0.125, 0.48, 0.97]->[0.013, 0.125, 0.448, 0.965]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1360.0s, [0.027, 0.119, 0.536, 0.966]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1355.0s to 1359.0s, [0.079, 0.185, 0.624, 0.963]->[0.104, 0.177, 0.608, 0.952]->[0.073, 0.155, 0.622, 1.0]->[0.082, 0.173, 0.645, 0.958]->[0.02, 0.115, 0.554, 0.977]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1353.0s to 1354.0s, [0.084, 0.162, 0.673, 0.978]->[0.084, 0.171, 0.633, 0.974]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1351.0s to 1352.0s, [0.043, 0.223, 0.282, 0.902]->[0.046, 0.221, 0.309, 0.891]: sit, talk to (e.g., self, a person, a group).", "From 1347.0s to 1350.0s, [0.043, 0.14, 0.517, 0.979]->[0.0, 0.095, 0.395, 0.957]->[0.025, 0.079, 0.478, 0.99]->[0.047, 0.239, 0.313, 0.966]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1346.0s, [0.029, 0.124, 0.543, 0.974]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1343.0s to 1345.0s, [0.043, 0.183, 0.684, 0.981]->[0.06, 0.152, 0.738, 0.951]->[0.097, 0.112, 0.746, 0.961]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1342.0s, [0.068, 0.18, 0.599, 0.967]: sit, talk to (e.g., self, a person, a group).", "From 1340.0s to 1341.0s, [0.076, 0.168, 0.638, 0.957]->[0.11, 0.172, 0.646, 0.957]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1339.0s, [0.049, 0.117, 0.696, 0.992]: sit, talk to (e.g., self, a person, a group).", "At 1338.0s, [0.124, 0.14, 0.681, 0.967]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1335.0s to 1337.0s, [0.048, 0.248, 0.414, 0.971]->[0.052, 0.24, 0.411, 0.966]->[0.053, 0.246, 0.405, 0.972]: sit.", "From 1332.0s to 1334.0s, [0.054, 0.251, 0.419, 0.951]->[0.051, 0.244, 0.408, 0.983]->[0.052, 0.241, 0.415, 0.978]: sit, touch (an object).", "From 1330.0s to 1331.0s, [0.045, 0.248, 0.372, 0.96]->[0.049, 0.227, 0.386, 0.964]: sit.", "At 1329.0s, [0.042, 0.234, 0.309, 0.975]: sit, carry/hold (an object), watch (a person).", "From 1327.0s to 1328.0s, [0.04, 0.227, 0.288, 0.979]->[0.04, 0.231, 0.291, 0.991]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1326.0s, [0.039, 0.228, 0.289, 0.981]: sit, carry/hold (an object), watch (a person)."]}, {"from": "human", "timestamps": 1378.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.041, 0.183, 0.468, 1.0]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.041, 0.255, 0.184, 0.653]: listen to (a person), stand, watch (a person); [0.043, 0.257, 0.096, 0.489]: stand; [0.48, 0.12, 0.977, 0.983]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.508, 0.293, 0.575, 0.635]: carry/hold (an object), stand, watch (a person)."}]}, 
{"video": "T-Fc9ctuNVI.mkv", "clip": [1364, 1420], "conversations": [{"from": "human", "timestamps": 1394.0, "value": "What actions did the person at bbox [0.314, 0.173, 0.809, 0.973] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.314, 0.173, 0.809, 0.973]: stand, carry/hold (an object), listen to (a person).", "From 1392.0s to 1393.0s, [0.39, 0.185, 0.828, 0.976]->[0.31, 0.183, 0.762, 0.974]: stand, carry/hold (an object), listen to (a person).", "At 1391.0s, [0.441, 0.157, 0.966, 0.997]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1389.0s to 1390.0s, [0.406, 0.163, 0.904, 0.988]->[0.424, 0.179, 0.976, 0.982]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1385.0s to 1388.0s, [0.368, 0.167, 0.922, 0.987]->[0.44, 0.174, 0.938, 0.99]->[0.412, 0.141, 0.885, 0.986]->[0.413, 0.199, 0.897, 0.981]: stand, listen to (a person).", "At 1384.0s, [0.363, 0.159, 0.927, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1383.0s, [0.458, 0.154, 0.976, 0.993]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1382.0s, [0.439, 0.139, 0.942, 0.988]: walk, listen to (a person), talk to (e.g., self, a person, a group).", "At 1381.0s, [0.504, 0.138, 0.975, 0.983]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "From 1379.0s to 1380.0s, [0.468, 0.112, 0.973, 0.983]->[0.514, 0.126, 0.976, 0.978]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1377.0s to 1378.0s, [0.482, 0.127, 0.977, 0.979]->[0.48, 0.12, 0.977, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1376.0s, [0.507, 0.125, 0.976, 0.981]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1375.0s, [0.497, 0.115, 0.976, 0.982]: stand, listen to (a person).", "At 1374.0s, [0.521, 0.111, 0.972, 0.974]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1373.0s, [0.449, 0.128, 0.97, 0.984]: stand, hand shake, listen to (a person), watch (a person).", "At 1372.0s, [0.445, 0.13, 0.969, 0.993]: stand, hand shake, talk to (e.g., self, a person, a group), watch (a person).", "At 1371.0s, [0.472, 0.16, 0.968, 0.995]: stand, hand shake, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1370.0s, [0.397, 0.137, 0.973, 0.973]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1420.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.038, 0.202, 0.159, 0.974]: carry/hold (an object), stand, watch (a person); [0.119, 0.281, 0.644, 0.993]: carry/hold (an object), walk; [0.19, 0.167, 0.405, 0.566]: listen to (a person), talk to (e.g., self, a person, a group), walk; [0.421, 0.184, 0.69, 0.98]: watch (a person), talk to (e.g., self, a person, a group), walk."}]}, {"video": "T-Fc9ctuNVI.mkv", "clip": [1350, 1446], "conversations": [{"from": "human", "timestamps": 1380.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.071, 0.318, 0.207, 0.968]: watch (a person); [0.122, 0.199, 0.556, 0.987]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.149, 0.257, 0.29, 0.533]: listen to (a person), stand, watch (a person); [0.49, 0.282, 0.581, 0.785]: 
carry/hold (an object), stand; [0.514, 0.126, 0.976, 0.978]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1446.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.095, 0.114, 0.904, 0.97]?"}, {"from": "gpt", "value": ["At Now, [0.095, 0.114, 0.904, 0.97]: sit, talk to (e.g., self, a person, a group).", "From 1443.0s to 1445.0s, [0.108, 0.076, 0.888, 0.976]->[0.105, 0.15, 0.884, 0.989]->[0.106, 0.125, 0.888, 0.975]: sit, talk to (e.g., self, a person, a group).", "From 1441.0s to 1442.0s, [0.124, 0.114, 0.887, 0.983]->[0.091, 0.09, 0.872, 0.981]: stand, talk to (e.g., self, a person, a group).", "At 1440.0s, [0.08, 0.159, 0.538, 0.978]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1439.0s, [0.087, 0.124, 0.521, 1.0]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1438.0s, [0.067, 0.121, 0.552, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1437.0s, [0.079, 0.121, 0.53, 0.976]: stand, listen to (a person), watch (a person).", "From 1435.0s to 1436.0s, [0.205, 0.149, 0.615, 0.967]->[0.21, 0.133, 0.635, 0.984]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1433.0s to 1434.0s, [0.215, 0.132, 0.644, 0.974]->[0.195, 0.155, 0.634, 0.97]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1431.0s to 1432.0s, [0.202, 0.145, 0.648, 0.955]->[0.216, 0.138, 0.638, 0.991]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1430.0s, [0.202, 0.163, 0.638, 0.912]: stand, carry/hold (an object), watch (a person).", "At 1429.0s, [0.217, 0.153, 0.598, 0.946]: stand, carry/hold (an object).", "From 1427.0s to 1428.0s, [0.18, 0.202, 0.571, 0.979]->[0.217, 0.198, 0.56, 0.976]: walk, carry/hold (an object), listen to (a person).", "At 1426.0s, [0.251, 0.178, 0.54, 0.987]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1425.0s, [0.336, 0.158, 0.597, 0.973]: walk, carry/hold (an object), listen to (a person).", "From 1423.0s to 1424.0s, [0.446, 0.184, 0.68, 0.939]->[0.346, 0.187, 0.67, 0.979]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1422.0s, [0.14, 0.231, 0.252, 0.652]: stand, carry/hold (an object)."]}]}, {"video": "T-Fc9ctuNVI.mkv", "clip": [1426, 1487], "conversations": [{"from": "human", "timestamps": 1456.0, "value": "What actions did the person at bbox [0.616, 0.191, 0.938, 0.992] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.616, 0.191, 0.938, 0.992]: stand.", "At 1455.0s, [0.699, 0.254, 0.921, 0.956]: stand."]}, {"from": "human", "timestamps": 1487.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.022, 0.0, 0.886, 0.97]: stand."}]}, {"video": "T-Fc9ctuNVI.mkv", "clip": [1473, 1530], "conversations": [{"from": "human", "timestamps": 1503.0, "value": "What actions did the person at bbox [0.26, 0.113, 0.896, 0.983] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.26, 0.113, 0.896, 0.983]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1500.0s to 1502.0s, [0.081, 0.111, 0.837, 0.999]->[0.285, 0.13, 0.96, 0.978]->[0.231, 0.129, 0.919, 0.978]: sit, talk to (e.g., self, a person, a 
group).", "At 1499.0s, [0.098, 0.109, 0.856, 0.982]: sit.", "At 1498.0s, [0.184, 0.104, 0.877, 0.985]: sit, listen to (a person).", "At 1497.0s, [0.237, 0.087, 0.957, 0.98]: stand."]}, {"from": "human", "timestamps": 1530.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.02, 0.142, 0.415, 1.0]: listen to (a person), sit, watch (a person); [0.533, 0.207, 0.953, 1.0]: watch (a person), sit."}]}, {"video": "T-Fc9ctuNVI.mkv", "clip": [1507, 1606], "conversations": [{"from": "human", "timestamps": 1537.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.037, 0.168, 0.603, 0.949]: sit, talk to (e.g., self, a person, a group); [0.447, 0.152, 0.962, 0.971]: listen to (a person), sit, grab (a person); [0.531, 0.202, 0.68, 0.543]: sit."}, {"from": "human", "timestamps": 1606.0, "value": "Describe the actions of the person at bbox [0.083, 0.101, 0.856, 0.993] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.083, 0.101, 0.856, 0.993]: sit, talk to (e.g., self, a person, a group).", "At 1605.0s, [0.092, 0.099, 0.877, 0.984]: sit, talk to (e.g., self, a person, a group).", "From 1603.0s to 1604.0s, [0.095, 0.104, 0.869, 0.99]->[0.094, 0.103, 0.857, 0.981]: stand, talk to (e.g., self, a person, a group).", "At 1602.0s, [0.097, 0.1, 0.851, 0.98]: talk to (e.g., self, a person, a group).", "From 1600.0s to 1601.0s, [0.093, 0.1, 0.895, 0.986]->[0.075, 0.097, 0.875, 0.984]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "T-Fc9ctuNVI.mkv", "clip": [1552, 1596], "conversations": [{"from": "human", "timestamps": 1582.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.033, 0.154, 0.645, 0.943]: sit, touch (an object), talk to (e.g., self, a person, a group); [0.476, 0.204, 0.65, 0.523]: sit, touch (an object); [0.527, 0.131, 0.952, 0.98]: listen to (a person), sit, touch (an object), grab (a person)."}, {"from": "human", "timestamps": 1596.0, "value": "Describe the actions of the person at bbox [0.09, 0.108, 0.851, 0.989] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.09, 0.108, 0.851, 0.989]: sit, talk to (e.g., self, a person, a group).", "At 1595.0s, [0.088, 0.102, 0.866, 0.994]: sit, stand, talk to (e.g., self, a person, a group).", "At 1594.0s, [0.097, 0.105, 0.868, 0.986]: sit, talk to (e.g., self, a person, a group).", "At 1593.0s, [0.086, 0.084, 0.889, 0.985]: sit, stand, talk to (e.g., self, a person, a group).", "From 1591.0s to 1592.0s, [0.078, 0.105, 0.854, 0.989]->[0.076, 0.105, 0.859, 0.988]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "T-Fc9ctuNVI.mkv", "clip": [1615, 1733], "conversations": [{"from": "human", "timestamps": 1645.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.074, 0.081, 0.873, 0.972]: listen to (a person), sit."}, {"from": "human", "timestamps": 1733.0, "value": "What actions did the person at bbox [0.11, 0.155, 0.671, 0.959] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.11, 0.155, 0.671, 0.959]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1732.0s, [0.117, 0.161, 0.664, 0.962]: sit, talk to (e.g., self, a person, a group).", "At 1731.0s, [0.096, 0.156, 0.641, 0.973]: sit, touch (an object), talk to (e.g., 
self, a person, a group).", "From 1726.0s to 1730.0s, [0.149, 0.162, 0.686, 0.938]->[0.083, 0.112, 0.645, 0.955]->[0.057, 0.079, 0.628, 0.966]->[0.075, 0.103, 0.662, 0.969]->[0.096, 0.11, 0.668, 0.972]: sit, talk to (e.g., self, a person, a group).", "From 1724.0s to 1725.0s, [0.116, 0.15, 0.696, 0.935]->[0.113, 0.117, 0.668, 0.96]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1719.0s to 1723.0s, [0.15, 0.125, 0.679, 0.948]->[0.136, 0.141, 0.681, 0.949]->[0.105, 0.141, 0.704, 0.949]->[0.113, 0.14, 0.691, 0.945]->[0.117, 0.137, 0.684, 0.956]: sit, talk to (e.g., self, a person, a group).", "At 1718.0s, [0.15, 0.125, 0.676, 0.951]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1717.0s, [0.059, 0.141, 0.682, 0.956]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1716.0s, [0.064, 0.075, 0.726, 0.966]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1715.0s, [0.064, 0.117, 0.689, 0.955]: sit, touch (an object), listen to (a person), watch (a person)."]}]}, {"video": "T-Fc9ctuNVI.mkv", "clip": [1664, 1718], "conversations": [{"from": "human", "timestamps": 1694.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.04, 0.19, 0.215, 0.64]: sit; [0.051, 0.189, 0.674, 0.999]: listen to (a person), sit, watch (a person); [0.591, 0.128, 0.956, 0.985]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1718.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.521, 0.197, 0.72, 0.563]?"}, {"from": "gpt", "value": ["In the current frame, [0.521, 0.197, 0.72, 0.563]: sit.", "At 1717.0s, [0.535, 0.198, 0.701, 0.515]: sit, watch (a person).", "At 1716.0s, [0.561, 0.212, 0.712, 0.496]: sit."]}]}, {"video": "T26G6_AjJZ4.mkv", "clip": [895, 1010], "conversations": [{"from": "human", "timestamps": 925.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.001, 0.049, 0.392, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.404, 0.185, 0.955, 0.984]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1010.0, "value": "What actions did the person at bbox [0.344, 0.39, 0.594, 0.936] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.344, 0.39, 0.594, 0.936]: bend/bow (at the waist), listen to (a person), watch (a person).", "At 1009.0s, [0.347, 0.398, 0.616, 0.912]: listen to (a person), watch (a person).", "At 1008.0s, [0.363, 0.379, 0.6, 0.98]: bend/bow (at the waist), listen to (a person), watch (a person).", "At 1007.0s, [0.351, 0.383, 0.552, 0.972]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person).", "At 1006.0s, [0.351, 0.385, 0.577, 0.978]: bend/bow (at the waist), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1004.0s, [0.358, 0.423, 0.646, 0.961]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "T26G6_AjJZ4.mkv", "clip": [957, 992], "conversations": [{"from": "human", "timestamps": 987.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.223, 0.28, 0.828, 0.925]?"}, {"from": "gpt", "value": ["In the current frame, [0.223, 0.28, 0.828, 0.925]: fight/hit (a person), watch (a person), martial art.", "At 986.0s, [0.008, 0.149, 0.518, 0.982]: 
fight/hit (a person), martial art.", "From 984.0s to 985.0s, [0.101, 0.301, 0.591, 0.982]->[0.186, 0.245, 0.581, 0.99]: fight/hit (a person), watch (a person), martial art."]}, {"from": "human", "timestamps": 992.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.018, 0.169, 0.607, 1.0]: watch (a person), martial art, fight/hit (a person); [0.465, 0.103, 0.981, 0.969]: watch (a person), fight/hit (a person)."}]}, {"video": "T26G6_AjJZ4.mkv", "clip": [1163, 1212], "conversations": [{"from": "human", "timestamps": 1193.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.016, 0.402, 0.132, 0.855]: walk; [0.209, 0.389, 0.278, 0.663]: watch (a person), walk; [0.259, 0.448, 0.356, 0.833]: watch (a person), stand; [0.294, 0.465, 0.454, 1.0]: listen to (a person), stand; [0.418, 0.438, 0.545, 0.705]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.504, 0.427, 0.62, 0.692]: touch (an object), watch (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.566, 0.461, 0.763, 0.982]: carry/hold (an object), stand, cook; [0.805, 0.496, 0.993, 0.996]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1212.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.014, 0.088, 0.428, 0.976]?"}, {"from": "gpt", "value": ["At Now, [0.014, 0.088, 0.428, 0.976]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1211.0s, [0.008, 0.1, 0.442, 0.984]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1210.0s, [0.0, 0.103, 0.422, 0.988]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1209.0s, [0.005, 0.09, 0.401, 0.971]: stand, listen to (a person), watch (a person).", "At 1208.0s, [0.013, 0.111, 0.481, 0.972]: stand, listen to (a person).", "At 1207.0s, [0.0, 0.118, 0.453, 0.985]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1206.0s, [0.374, 0.259, 0.652, 0.987]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1203.0s to 1205.0s, [0.492, 0.228, 0.696, 0.991]->[0.484, 0.23, 0.689, 0.974]->[0.468, 0.227, 0.646, 0.987]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "T26G6_AjJZ4.mkv", "clip": [1220, 1257], "conversations": [{"from": "human", "timestamps": 1250.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.03, 0.003, 0.935, 0.979]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1257.0, "value": "What actions is the person in bbox [0.018, 0.007, 0.885, 0.985] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.018, 0.007, 0.885, 0.985]: stand, talk to (e.g., self, a person, a group).", "From 1253.0s to 1256.0s, [0.0, 0.015, 0.827, 0.983]->[0.0, 0.003, 0.87, 0.984]->[0.034, 0.0, 0.881, 0.99]->[0.054, 0.02, 0.902, 0.999]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "T26G6_AjJZ4.mkv", "clip": [1265, 1299], "conversations": [{"from": "human", "timestamps": 1295.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, 
{"from": "gpt", "value": "In the current frame, [0.038, 0.1, 0.709, 0.982]: watch (a person), stand, touch (an object), talk to (e.g., self, a person, a group); [0.687, 0.024, 0.988, 1.0]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1299.0, "value": "What actions is the person in bbox [0.004, 0.171, 0.278, 0.99] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.004, 0.171, 0.278, 0.99]: stand, talk to (e.g., self, a person, a group).", "At 1298.0s, [0.006, 0.181, 0.256, 0.983]: stand, talk to (e.g., self, a person, a group).", "At 1297.0s, [0.002, 0.166, 0.154, 1.0]: stand, listen to (a person), watch (a person).", "From 1292.0s to 1295.0s, [0.426, 0.012, 0.977, 0.967]->[0.466, 0.009, 0.994, 0.963]->[0.555, 0.021, 0.993, 0.976]->[0.687, 0.024, 0.988, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1291.0s, [0.436, 0.011, 0.97, 0.976]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1289.0s to 1290.0s, [0.494, 0.031, 0.984, 0.984]->[0.464, 0.029, 0.993, 0.973]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1288.0s, [0.477, 0.025, 0.986, 0.982]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1287.0s, [0.469, 0.018, 0.989, 0.974]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1286.0s, [0.005, 0.008, 0.185, 0.959]: walk, watch (a person).", "At 1285.0s, [0.429, 0.058, 0.798, 0.973]: stand, watch (a person).", "From 1283.0s to 1284.0s, [0.218, 0.0, 0.943, 1.0]->[0.383, 0.129, 0.732, 0.913]: stand, listen to (a person), watch (a person)."]}]}, {"video": "T26G6_AjJZ4.mkv", "clip": [1274, 1373], "conversations": [{"from": "human", "timestamps": 1304.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.36, 0.136, 0.793, 0.991]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1373.0, "value": "What actions is the person in bbox [0.627, 0.219, 0.995, 0.982] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.627, 0.219, 0.995, 0.982]: stand, watch (a person).", "At 1372.0s, [0.628, 0.171, 0.994, 0.982]: stand, watch (a person)."]}]}, {"video": "T26G6_AjJZ4.mkv", "clip": [1384, 1483], "conversations": [{"from": "human", "timestamps": 1414.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.001, 0.223, 0.286, 0.988]: sit, talk to (e.g., self, a person, a group); [0.755, 0.082, 0.891, 0.983]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1483.0, "value": "What actions is the person in bbox [0.235, 0.277, 0.884, 0.973] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.235, 0.277, 0.884, 0.973]: bend/bow (at the waist), carry/hold (an object).", "From 1481.0s to 1482.0s, [0.07, 0.003, 0.764, 0.97]->[0.178, 0.095, 0.868, 0.985]: stand, carry/hold (an object).", "At 1480.0s, [0.121, 0.05, 1.0, 0.973]: bend/bow (at the waist), carry/hold (an object)."]}]}, {"video": "T26G6_AjJZ4.mkv", "clip": [1457, 1490], "conversations": [{"from": "human", 
"timestamps": 1487.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.221, 0.148, 0.779, 0.982]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1490.0, "value": "What actions did the person at bbox [0.248, 0.065, 0.778, 0.999] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.248, 0.065, 0.778, 0.999]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1488.0s to 1489.0s, [0.266, 0.065, 0.86, 0.983]->[0.245, 0.069, 0.814, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1485.0s to 1487.0s, [0.254, 0.201, 0.776, 1.0]->[0.248, 0.237, 0.726, 1.0]->[0.221, 0.148, 0.779, 0.982]: stand, carry/hold (an object).", "From 1483.0s to 1484.0s, [0.235, 0.277, 0.884, 0.973]->[0.183, 0.149, 0.838, 0.984]: bend/bow (at the waist), carry/hold (an object).", "From 1481.0s to 1482.0s, [0.07, 0.003, 0.764, 0.97]->[0.178, 0.095, 0.868, 0.985]: stand, carry/hold (an object).", "At 1480.0s, [0.121, 0.05, 1.0, 0.973]: bend/bow (at the waist), carry/hold (an object)."]}]}, {"video": "T26G6_AjJZ4.mkv", "clip": [1476, 1525], "conversations": [{"from": "human", "timestamps": 1506.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.034, 0.305, 0.38, 0.991]: listen to (a person), stand, watch (a person); [0.527, 0.015, 0.994, 0.97]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1525.0, "value": "For the person at bbox [0.033, 0.031, 0.958, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.033, 0.031, 0.958, 1.0]: stand, watch (a person).", "From 1523.0s to 1524.0s, [0.006, 0.025, 0.953, 1.0]->[0.024, 0.018, 0.897, 0.995]: stand."]}]}, {"video": "T26G6_AjJZ4.mkv", "clip": [1498, 1617], "conversations": [{"from": "human", "timestamps": 1528.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.002, 0.184, 0.294, 0.966]: listen to (a person), stand, watch (a person); [0.369, 0.103, 0.958, 0.98]: stand."}, {"from": "human", "timestamps": 1617.0, "value": "Describe the actions of the person at bbox [0.111, 0.0, 0.926, 0.982] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.111, 0.0, 0.926, 0.982]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1616.0s, [0.106, 0.014, 0.942, 0.976]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1615.0s, [0.095, 0.0, 0.952, 0.984]: sit, carry/hold (an object), touch (an object).", "From 1613.0s to 1614.0s, [0.101, 0.008, 0.91, 0.978]->[0.094, 0.001, 0.944, 0.991]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1612.0s, [0.093, 0.003, 1.0, 0.983]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1607.0s to 1611.0s, [0.079, 0.0, 0.902, 0.978]->[0.062, 0.003, 0.943, 0.976]->[0.083, 0.004, 0.925, 0.979]->[0.072, 0.003, 0.904, 0.978]->[0.09, 0.009, 0.939, 0.977]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1606.0s, [0.055, 0.001, 0.909, 0.972]: sit, carry/hold (an object), drink.", "At 1605.0s, [0.08, 0.017, 0.935, 0.962]: sit, drink."]}]}, {"video": "T26G6_AjJZ4.mkv", "clip": [1533, 
1566], "conversations": [{"from": "human", "timestamps": 1563.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.011, 0.008, 0.177, 0.954]: listen to (a person), sit; [0.373, 0.068, 0.99, 0.994]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1566.0, "value": "What actions is the person in bbox [0.66, 0.133, 0.988, 0.987] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.66, 0.133, 0.988, 0.987]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1565.0s, [0.656, 0.141, 0.983, 0.982]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1563.0s to 1564.0s, [0.373, 0.068, 0.99, 0.994]->[0.654, 0.142, 0.989, 0.969]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1562.0s, [0.39, 0.054, 0.998, 0.983]: sit, stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1561.0s, [0.376, 0.051, 0.985, 0.98]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1560.0s, [0.817, 0.164, 0.996, 0.979]: stand, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1559.0s, [0.787, 0.158, 0.998, 0.984]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1558.0s, [0.821, 0.159, 0.995, 0.938]: sit, stand, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1557.0s, [0.812, 0.147, 0.996, 0.986]: stand, touch (an object), listen to (a person), watch (a person).", "At 1556.0s, [0.815, 0.166, 0.994, 0.961]: stand, listen to (a person), watch (a person).", "At 1555.0s, [0.795, 0.158, 0.999, 0.97]: stand, touch (an object), listen to (a person), watch (a person).", "At 1554.0s, [0.832, 0.152, 0.992, 0.986]: sit, stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1553.0s, [0.822, 0.169, 0.998, 0.998]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1552.0s, [0.746, 0.171, 0.995, 0.963]: stand, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1550.0s to 1551.0s, [0.732, 0.157, 0.995, 0.972]->[0.747, 0.159, 0.998, 0.98]: stand, touch (an object), listen to (a person).", "At 1549.0s, [0.492, 0.189, 0.805, 0.825]: walk, listen to (a person).", "At 1548.0s, [0.007, 0.008, 0.485, 1.0]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1547.0s, [0.014, 0.168, 0.695, 0.974]: bend/bow (at the waist), push (an object), talk to (e.g., self, a person, a group).", "At 1546.0s, [0.004, 0.119, 0.541, 0.959]: carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "T26G6_AjJZ4.mkv", "clip": [1670, 1708], "conversations": [{"from": "human", "timestamps": 1700.0, "value": "What actions is the person in bbox [0.138, 0.177, 0.703, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.138, 0.177, 0.703, 1.0]: stand, fight/hit (a person), listen to (a person), watch (a person).", "At 1699.0s, [0.28, 0.102, 0.876, 0.971]: stand, fight/hit (a person).", "At 1698.0s, [0.046, 0.104, 0.528, 0.996]: stand, fight/hit (a person), 
watch (a person)."]}, {"from": "human", "timestamps": 1708.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.357, 0.034, 0.799, 0.979]: watch (a person), sit."}]}, {"video": "TCmNvNLRWrc.mkv", "clip": [909, 959], "conversations": [{"from": "human", "timestamps": 939.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.551, 0.167, 0.886, 0.991]: smoke, walk."}, {"from": "human", "timestamps": 959.0, "value": "Describe the actions of the person at bbox [0.79, 0.306, 0.997, 0.747] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.79, 0.306, 0.997, 0.747]: sit, touch (an object), listen to (a person), watch (a person).", "From 954.0s to 958.0s, [0.429, 0.392, 0.586, 0.675]->[0.558, 0.407, 0.716, 0.718]->[0.67, 0.384, 0.874, 0.74]->[0.782, 0.316, 0.998, 0.733]->[0.789, 0.305, 1.0, 0.736]: sit, touch (an object), listen to (a person), watch (a person).", "At 953.0s, [0.381, 0.413, 0.548, 0.68]: sit, carry/hold (an object), touch (an object), listen to (a person), watch (a person)."]}]}, {"video": "TCmNvNLRWrc.mkv", "clip": [884, 972], "conversations": [{"from": "human", "timestamps": 914.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.764, 0.22, 0.912, 0.789]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 972.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.15, 0.286, 0.366, 0.633]?"}, {"from": "gpt", "value": ["In the current frame, [0.15, 0.286, 0.366, 0.633]: sit, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "At 971.0s, [0.165, 0.287, 0.369, 0.616]: sit, carry/hold (an object), touch (an object), listen to (a person)."]}]}, {"video": "TCmNvNLRWrc.mkv", "clip": [953, 1003], "conversations": [{"from": "human", "timestamps": 983.0, "value": "What actions is the person in bbox [0.751, 0.203, 0.876, 0.673] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.751, 0.203, 0.876, 0.673]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 982.0s, [0.761, 0.209, 0.902, 0.683]: stand, carry/hold (an object), listen to (a person)."]}, {"from": "human", "timestamps": 1003.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.258, 0.066, 0.831, 0.978]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "TCmNvNLRWrc.mkv", "clip": [1132, 1177], "conversations": [{"from": "human", "timestamps": 1162.0, "value": "What actions did the person at bbox [0.532, 0.002, 0.965, 0.521] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.532, 0.002, 0.965, 0.521]: sit, touch (an object), watch (a person).", "At 1161.0s, [0.554, 0.0, 0.952, 0.541]: sit, touch (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1177.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.197, 0.084, 0.603, 0.865]: read, sit, talk to (e.g., self, a person, a group)."}]}, {"video": "TCmNvNLRWrc.mkv", "clip": [1202, 1249], "conversations": [{"from": 
"human", "timestamps": 1232.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.174, 0.11, 0.659, 0.875]: watch (a person), sit, touch (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1249.0, "value": "Describe the actions of the person at bbox [0.182, 0.092, 0.629, 0.879] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.182, 0.092, 0.629, 0.879]: sit, read, talk to (e.g., self, a person, a group).", "At 1248.0s, [0.181, 0.098, 0.633, 0.886]: sit, listen to (a person), talk to (e.g., self, a person, a group)."]}]}, {"video": "TCmNvNLRWrc.mkv", "clip": [1269, 1306], "conversations": [{"from": "human", "timestamps": 1299.0, "value": "For the person at bbox [0.684, 0.222, 0.815, 0.778], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.684, 0.222, 0.815, 0.778]: walk, carry/hold (an object).", "At 1298.0s, [0.68, 0.247, 0.806, 0.741]: walk, carry/hold (an object).", "From 1296.0s to 1297.0s, [0.707, 0.244, 0.811, 0.647]->[0.707, 0.239, 0.82, 0.689]: walk, carry/hold (an object), watch (a person).", "From 1293.0s to 1295.0s, [0.73, 0.252, 0.817, 0.585]->[0.733, 0.243, 0.825, 0.604]->[0.715, 0.255, 0.807, 0.625]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1291.0s to 1292.0s, [0.752, 0.237, 0.827, 0.547]->[0.74, 0.247, 0.817, 0.556]: walk, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1306.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.384, 0.118, 0.769, 0.984]: carry/hold (an object), walk."}]}, {"video": "TCmNvNLRWrc.mkv", "clip": [1245, 1352], "conversations": [{"from": "human", "timestamps": 1275.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.138, 0.218, 0.404, 0.989]: carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), walk; [0.42, 0.25, 0.701, 0.973]: carry/hold (an object), watch (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1352.0, "value": "What actions did the person at bbox [0.37, 0.234, 0.583, 0.985] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.37, 0.234, 0.583, 0.985]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 1350.0s to 1351.0s, [0.343, 0.215, 0.547, 0.952]->[0.37, 0.216, 0.574, 0.969]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1349.0s, [0.367, 0.193, 0.57, 0.954]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 1345.0s to 1348.0s, [0.329, 0.196, 0.597, 0.97]->[0.311, 0.175, 0.556, 0.976]->[0.391, 0.182, 0.62, 0.884]->[0.389, 0.183, 0.607, 0.971]: walk, carry/hold (an object), listen to (a person), watch (a person).", "From 1343.0s to 1344.0s, [0.234, 0.195, 0.544, 0.979]->[0.291, 0.187, 0.574, 0.98]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1342.0s, [0.298, 0.156, 0.582, 0.963]: stand, walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", 
"At 1341.0s, [0.476, 0.142, 0.729, 0.95]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1339.0s to 1340.0s, [0.633, 0.154, 0.9, 0.987]->[0.628, 0.156, 0.907, 0.997]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1338.0s, [0.642, 0.166, 0.906, 0.98]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1337.0s, [0.632, 0.18, 0.906, 0.981]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1336.0s, [0.616, 0.169, 0.897, 0.979]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1334.0s to 1335.0s, [0.629, 0.153, 0.908, 0.987]->[0.636, 0.157, 0.923, 0.988]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1333.0s, [0.656, 0.16, 0.913, 0.996]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1332.0s, [0.648, 0.153, 0.914, 0.992]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1330.0s to 1331.0s, [0.632, 0.156, 0.898, 0.983]->[0.62, 0.157, 0.887, 0.997]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1328.0s to 1329.0s, [0.639, 0.148, 0.901, 0.989]->[0.635, 0.139, 0.894, 0.988]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1327.0s, [0.604, 0.137, 0.89, 0.98]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1326.0s, [0.63, 0.151, 0.855, 0.976]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "TCmNvNLRWrc.mkv", "clip": [1294, 1356], "conversations": [{"from": "human", "timestamps": 1324.0, "value": "For the person at bbox [0.191, 0.058, 0.684, 0.986], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.191, 0.058, 0.684, 0.986]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1323.0s, [0.266, 0.088, 0.674, 0.94]: walk, carry/hold (an object), watch (a person)."]}, {"from": "human", "timestamps": 1356.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.443, 0.247, 0.651, 0.921]: carry/hold (an object), watch (a person), talk to (e.g., self, a person, a group), walk; [0.618, 0.155, 0.902, 0.961]: carry/hold (an object), listen to (a person), watch (a person), walk."}]}, {"video": "TCmNvNLRWrc.mkv", "clip": [1481, 1577], "conversations": [{"from": "human", "timestamps": 1511.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.278, 0.431, 0.997]: ride (e.g., a bike, a car, a horse), sit, watch (a person), listen to (a person), talk to (e.g., self, a person, a group); [0.379, 0.072, 0.689, 0.654]: ride (e.g., a bike, a car, a horse), sit, listen to (a person); [0.463, 0.082, 1.0, 1.0]: ride (e.g., a bike, a car, a horse), sit, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1577.0, "value": "For the person at bbox [0.501, 0.03, 0.953, 0.997], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.501, 0.03, 0.953, 0.997]: bend/bow (at the waist), carry/hold (an 
object), talk to (e.g., self, a person, a group).", "At 1576.0s, [0.596, 0.0, 0.974, 0.825]: stand, carry/hold (an object)."]}]}, {"video": "TCmNvNLRWrc.mkv", "clip": [1539, 1616], "conversations": [{"from": "human", "timestamps": 1569.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.0, 0.258, 0.979]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1616.0, "value": "For the person at bbox [0.521, 0.567, 0.706, 0.972], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.521, 0.567, 0.706, 0.972]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1615.0s, [0.444, 0.594, 0.711, 0.993]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "TCmNvNLRWrc.mkv", "clip": [1654, 1737], "conversations": [{"from": "human", "timestamps": 1684.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.249, 0.376, 0.986]: watch (a person), walk; [0.34, 0.123, 0.891, 0.984]: carry/hold (an object), listen to (a person), watch (a person), walk."}, {"from": "human", "timestamps": 1737.0, "value": "For the person at bbox [0.391, 0.209, 0.836, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.391, 0.209, 0.836, 1.0]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1736.0s, [0.508, 0.251, 0.892, 0.977]: walk, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "TCmNvNLRWrc.mkv", "clip": [1721, 1796], "conversations": [{"from": "human", "timestamps": 1751.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.089, 0.073, 0.535, 1.0]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.545, 0.236, 0.906, 0.998]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1796.0, "value": "What actions did the person at bbox [0.0, 0.044, 0.996, 0.996] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.044, 0.996, 0.996]: lie/sleep.", "At 1795.0s, [0.0, 0.048, 0.997, 0.994]: lie/sleep."]}]}, {"video": "TM5MPJIq1Is.mkv", "clip": [890, 928], "conversations": [{"from": "human", "timestamps": 920.0, "value": "For the person at bbox [0.544, 0.17, 0.842, 0.989], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.544, 0.17, 0.842, 0.989]: walk, watch (a person).", "At 919.0s, [0.15, 0.151, 0.485, 0.983]: walk.", "At 918.0s, [0.016, 0.126, 0.324, 0.976]: stand.", "At 917.0s, [0.022, 0.13, 0.328, 0.967]: stand, carry/hold (an object).", "At 916.0s, [0.01, 0.176, 0.286, 0.984]: stand, lift/pick up.", "From 914.0s to 915.0s, [0.045, 0.148, 0.329, 1.0]->[0.053, 0.159, 0.343, 0.98]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 928.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.117, 0.141, 0.638, 0.982]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "TM5MPJIq1Is.mkv", "clip": [943, 1015], "conversations": [{"from": 
"human", "timestamps": 973.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.136, 0.159, 0.551, 0.989]: listen to (a person), stand, watch (a person); [0.447, 0.047, 0.885, 0.992]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1015.0, "value": "Describe the actions of the person at bbox [0.015, 0.481, 0.261, 0.925] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.015, 0.481, 0.261, 0.925]: sit, eat, touch (an object), listen to (a person), watch (a person).", "At 1014.0s, [0.008, 0.424, 0.233, 0.809]: sit, eat, touch (an object), listen to (a person).", "At 1013.0s, [0.007, 0.426, 0.233, 0.807]: sit, drink, touch (an object), listen to (a person).", "At 1012.0s, [0.009, 0.434, 0.225, 0.948]: sit, drink, eat, touch (an object), listen to (a person).", "At 1011.0s, [0.01, 0.431, 0.153, 0.836]: sit, eat, touch (an object), listen to (a person).", "At 1010.0s, [0.006, 0.451, 0.159, 0.984]: sit, eat, touch (an object), listen to (a person), watch (a person)."]}]}, {"video": "TM5MPJIq1Is.mkv", "clip": [995, 1078], "conversations": [{"from": "human", "timestamps": 1025.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.014, 0.525, 0.266, 0.969]: listen to (a person), sit, eat; [0.197, 0.384, 0.416, 0.771]: listen to (a person), sit, eat, touch (an object); [0.268, 0.578, 0.651, 0.992]: sit, touch (an object), listen to (a person), watch (a person), eat; [0.413, 0.194, 0.661, 0.643]: carry/hold (an object), stand, give/serve (an object) to (a person), talk to (e.g., self, a person, a group); [0.545, 0.371, 0.725, 0.715]: carry/hold (an object), sit, eat, listen to (a person); [0.704, 0.361, 0.956, 0.71]: listen to (a person), sit, eat, touch (an object)."}, {"from": "human", "timestamps": 1078.0, "value": "Describe the actions of the person at bbox [0.763, 0.178, 0.897, 0.954] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.763, 0.178, 0.897, 0.954]: stand, watch (a person).", "At 1077.0s, [0.701, 0.185, 0.869, 0.958]: stand, carry/hold (an object).", "At 1076.0s, [0.629, 0.199, 0.853, 0.948]: walk, carry/hold (an object).", "At 1075.0s, [0.526, 0.227, 0.705, 0.923]: walk, carry/hold (an object), smoke.", "At 1074.0s, [0.365, 0.247, 0.515, 0.58]: walk.", "From 1072.0s to 1073.0s, [0.14, 0.223, 0.3, 0.601]->[0.254, 0.231, 0.377, 0.563]: walk, watch (a person).", "At 1071.0s, [0.205, 0.274, 0.315, 0.602]: stand, push (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "TM5MPJIq1Is.mkv", "clip": [1044, 1132], "conversations": [{"from": "human", "timestamps": 1074.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.365, 0.247, 0.515, 0.58]: walk; [0.672, 0.258, 0.765, 0.682]: stand, touch (an object)."}, {"from": "human", "timestamps": 1132.0, "value": "What actions did the person at bbox [0.631, 0.055, 0.983, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.631, 0.055, 0.983, 1.0]: stand, carry/hold (an object), smoke, watch (a person).", "At 1131.0s, [0.629, 0.079, 0.978, 1.0]: stand, watch (a person).", "At 1130.0s, [0.625, 0.067, 0.985, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1128.0s to 1129.0s, 
[0.631, 0.067, 0.981, 0.996]->[0.626, 0.075, 0.983, 0.999]: stand, listen to (a person), watch (a person).", "From 1124.0s to 1127.0s, [0.647, 0.07, 0.981, 0.996]->[0.639, 0.058, 0.984, 1.0]->[0.638, 0.068, 0.984, 0.988]->[0.638, 0.068, 0.985, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1123.0s, [0.645, 0.069, 0.984, 1.0]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1122.0s, [0.649, 0.066, 0.984, 0.992]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1121.0s, [0.636, 0.06, 0.98, 0.992]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1120.0s, [0.631, 0.063, 0.983, 0.996]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1118.0s to 1119.0s, [0.624, 0.058, 0.988, 1.0]->[0.628, 0.063, 0.984, 0.997]: stand, listen to (a person), watch (a person).", "From 1116.0s to 1117.0s, [0.775, 0.053, 0.983, 0.983]->[0.623, 0.057, 0.976, 0.99]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "TM5MPJIq1Is.mkv", "clip": [1081, 1124], "conversations": [{"from": "human", "timestamps": 1111.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.345, 0.156, 0.571, 0.931]: walk; [0.511, 0.156, 0.632, 0.897]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1124.0, "value": "For the person at bbox [0.242, 0.058, 0.597, 0.994], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.242, 0.058, 0.597, 0.994]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1123.0s, [0.245, 0.076, 0.608, 1.0]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1122.0s, [0.233, 0.066, 0.587, 0.992]: stand, listen to (a person), watch (a person).", "At 1121.0s, [0.216, 0.073, 0.612, 0.992]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1120.0s, [0.216, 0.05, 0.563, 0.988]: stand, listen to (a person), watch (a person).", "From 1118.0s to 1119.0s, [0.167, 0.069, 0.566, 1.0]->[0.199, 0.08, 0.562, 0.992]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1116.0s to 1117.0s, [0.101, 0.05, 0.504, 1.0]->[0.134, 0.088, 0.501, 0.98]: stand, listen to (a person), watch (a person).", "At 1115.0s, [0.171, 0.099, 0.489, 0.992]: walk, listen to (a person), watch (a person).", "From 1111.0s to 1114.0s, [0.345, 0.156, 0.571, 0.931]->[0.479, 0.154, 0.698, 0.985]->[0.508, 0.087, 0.814, 0.994]->[0.317, 0.094, 0.648, 0.984]: walk.", "At 1110.0s, [0.168, 0.123, 0.574, 0.987]: stand.", "From 1107.0s to 1109.0s, [0.122, 0.104, 0.532, 0.999]->[0.127, 0.101, 0.533, 1.0]->[0.155, 0.1, 0.554, 0.991]: stand, listen to (a person).", "From 1105.0s to 1106.0s, [0.123, 0.102, 0.523, 0.988]->[0.131, 0.089, 0.524, 0.992]: stand, talk to (e.g., self, a person, a group).", "At 1104.0s, [0.116, 0.102, 0.518, 0.994]: stand, listen to (a person).", "From 1102.0s to 1103.0s, [0.105, 0.095, 0.499, 0.989]->[0.108, 0.095, 0.506, 1.0]: stand, listen to (a person), watch (a person).", "At 1101.0s, [0.104, 0.093, 0.495, 0.999]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1100.0s, [0.103, 0.088, 0.494, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a 
person).", "At 1099.0s, [0.1, 0.1, 0.511, 1.0]: stand, listen to (a person), watch (a person).", "At 1098.0s, [0.098, 0.09, 0.505, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1097.0s, [0.094, 0.091, 0.496, 0.997]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1095.0s to 1096.0s, [0.314, 0.282, 0.4, 0.652]->[0.082, 0.086, 0.489, 0.994]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1093.0s to 1094.0s, [0.271, 0.258, 0.401, 0.612]->[0.266, 0.283, 0.383, 0.652]: stand, carry/hold (an object), watch (a person)."]}]}, {"video": "TM5MPJIq1Is.mkv", "clip": [1121, 1163], "conversations": [{"from": "human", "timestamps": 1151.0, "value": "Describe the actions of the person at bbox [0.259, 0.343, 0.537, 0.98] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.259, 0.343, 0.537, 0.98]: walk, carry/hold (an object), listen to (a person), watch (a person).", "From 1149.0s to 1150.0s, [0.181, 0.35, 0.495, 0.945]->[0.288, 0.28, 0.556, 0.993]: walk, carry/hold (an object).", "At 1148.0s, [0.186, 0.444, 0.511, 0.997]: stand, carry/hold (an object), touch (an object).", "From 1145.0s to 1147.0s, [0.257, 0.416, 0.48, 0.994]->[0.302, 0.428, 0.492, 0.986]->[0.316, 0.441, 0.515, 0.991]: stand, carry/hold (an object).", "At 1144.0s, [0.362, 0.448, 0.584, 1.0]: walk, carry/hold (an object)."]}, {"from": "human", "timestamps": 1163.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.281, 0.525, 0.346, 0.777]: walk; [0.343, 0.507, 0.418, 0.769]: carry/hold (an object), walk."}]}, {"video": "TM5MPJIq1Is.mkv", "clip": [1110, 1243], "conversations": [{"from": "human", "timestamps": 1140.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.395, 0.152, 0.58, 0.829]: carry/hold (an object), sit; [0.729, 0.176, 0.892, 0.78]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1243.0, "value": "For the person at bbox [0.061, 0.181, 0.747, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.061, 0.181, 0.747, 1.0]: stand, carry/hold (an object), watch (a person).", "At 1242.0s, [0.14, 0.142, 0.82, 1.0]: stand, carry/hold (an object), watch (a person).", "At 1241.0s, [0.018, 0.247, 0.441, 0.991]: walk.", "At 1240.0s, [0.008, 0.294, 0.196, 0.989]: walk, watch (a person).", "From 1238.0s to 1239.0s, [0.017, 0.28, 0.321, 0.994]->[0.015, 0.288, 0.294, 0.992]: stand, touch (an object), watch (a person).", "From 1235.0s to 1237.0s, [0.015, 0.29, 0.34, 0.993]->[0.014, 0.289, 0.335, 0.995]->[0.015, 0.286, 0.336, 0.992]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1234.0s, [0.052, 0.061, 0.719, 0.985]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1232.0s to 1233.0s, [0.044, 0.062, 0.739, 0.999]->[0.031, 0.052, 0.719, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1231.0s, [0.07, 0.07, 0.713, 0.99]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "TM5MPJIq1Is.mkv", "clip": [1176, 1231], "conversations": [{"from": "human", "timestamps": 1206.0, "value": "What actions is the person in bbox [0.051, 0.282, 0.346, 0.993] doing in the current frame, and what were 
the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.051, 0.282, 0.346, 0.993]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1205.0s, [0.159, 0.288, 0.411, 0.986]: stand, close (e.g., a door, a box)."]}, {"from": "human", "timestamps": 1231.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.07, 0.07, 0.713, 0.99]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "TM5MPJIq1Is.mkv", "clip": [1255, 1288], "conversations": [{"from": "human", "timestamps": 1285.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.013, 0.214, 0.326, 0.663]: watch (a person), stand, touch (an object); [0.326, 0.269, 0.587, 0.689]: watch (a person), stand, touch (an object); [0.633, 0.249, 0.918, 0.708]: watch (a person), stand."}, {"from": "human", "timestamps": 1288.0, "value": "For the person at bbox [0.202, 0.245, 0.651, 0.993], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.202, 0.245, 0.651, 0.993]: sit, carry/hold (an object), ride (e.g., a bike, a car, a horse).", "At 1287.0s, [0.198, 0.094, 0.669, 0.978]: sit, carry/hold (an object), ride (e.g., a bike, a car, a horse).", "At 1286.0s, [0.235, 0.199, 0.765, 0.934]: sit, ride (e.g., a bike, a car, a horse).", "At 1285.0s, [0.633, 0.249, 0.918, 0.708]: stand, watch (a person).", "At 1284.0s, [0.631, 0.239, 0.918, 0.699]: stand, touch (an object), watch (a person)."]}]}, {"video": "TM5MPJIq1Is.mkv", "clip": [1356, 1388], "conversations": [{"from": "human", "timestamps": 1386.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.228, 0.123, 0.637, 1.0]: carry/hold (an object), sit; [0.612, 0.189, 0.981, 0.992]: watch (a person), sit."}, {"from": "human", "timestamps": 1388.0, "value": "What actions is the person in bbox [0.119, 0.083, 0.757, 0.984] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.119, 0.083, 0.757, 0.984]: carry/hold (an object), touch (an object), jump/leap.", "At 1387.0s, [0.188, 0.123, 0.602, 0.99]: stand, carry/hold (an object).", "At 1386.0s, [0.228, 0.123, 0.637, 1.0]: sit, carry/hold (an object).", "At 1385.0s, [0.217, 0.12, 0.625, 1.0]: sit, watch (a person)."]}]}, {"video": "TM5MPJIq1Is.mkv", "clip": [1374, 1464], "conversations": [{"from": "human", "timestamps": 1404.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.229, 0.465, 0.481, 0.863]: talk to (e.g., self, a person, a group), walk; [0.738, 0.016, 0.984, 0.542]: carry/hold (an object), jump/leap, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1464.0, "value": "What actions is the person in bbox [0.498, 0.347, 0.932, 0.996] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.498, 0.347, 0.932, 0.996]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1461.0s to 1463.0s, [0.473, 0.359, 0.927, 0.995]->[0.485, 0.357, 0.936, 0.981]->[0.482, 0.37, 0.932, 0.982]: sit, carry/hold (an object), listen to (a person), watch (a person).", 
"At 1460.0s, [0.116, 0.068, 0.769, 0.977]: sit, stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1458.0s to 1459.0s, [0.076, 0.064, 0.806, 0.973]->[0.097, 0.062, 0.791, 0.986]: sit, carry/hold (an object), watch (a person).", "At 1457.0s, [0.098, 0.103, 0.778, 0.976]: stand, carry/hold (an object), listen to (a person)."]}]}, {"video": "TM5MPJIq1Is.mkv", "clip": [1446, 1483], "conversations": [{"from": "human", "timestamps": 1476.0, "value": "What actions is the person in bbox [0.18, 0.107, 0.788, 0.985] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.18, 0.107, 0.788, 0.985]: sit, carry/hold (an object).", "From 1472.0s to 1475.0s, [0.171, 0.039, 0.808, 0.986]->[0.172, 0.053, 0.768, 0.989]->[0.162, 0.059, 0.769, 0.981]->[0.16, 0.08, 0.759, 0.986]: sit, carry/hold (an object).", "At 1471.0s, [0.145, 0.064, 0.734, 0.987]: sit, stand, carry/hold (an object).", "From 1468.0s to 1470.0s, [0.454, 0.348, 0.939, 0.977]->[0.453, 0.337, 0.948, 0.995]->[0.156, 0.051, 0.788, 0.98]: sit, carry/hold (an object), watch (a person).", "From 1461.0s to 1467.0s, [0.473, 0.359, 0.927, 0.995]->[0.485, 0.357, 0.936, 0.981]->[0.482, 0.37, 0.932, 0.982]->[0.498, 0.347, 0.932, 0.996]->[0.53, 0.316, 0.927, 0.976]->[0.464, 0.347, 0.937, 0.99]->[0.488, 0.352, 0.932, 0.99]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1460.0s, [0.116, 0.068, 0.769, 0.977]: sit, stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1458.0s to 1459.0s, [0.076, 0.064, 0.806, 0.973]->[0.097, 0.062, 0.791, 0.986]: sit, carry/hold (an object), watch (a person).", "At 1457.0s, [0.098, 0.103, 0.778, 0.976]: stand, carry/hold (an object), listen to (a person)."]}, {"from": "human", "timestamps": 1483.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.048, 0.169, 0.323, 0.887]: carry/hold (an object), stand, stir, listen to (a person); [0.602, 0.318, 0.966, 0.981]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "TM5MPJIq1Is.mkv", "clip": [1475, 1546], "conversations": [{"from": "human", "timestamps": 1505.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.137, 0.314, 0.518, 0.957]: touch (an object), bend/bow (at the waist); [0.535, 0.031, 0.937, 0.978]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1546.0, "value": "For the person at bbox [0.659, 0.052, 0.997, 0.994], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.659, 0.052, 0.997, 0.994]: sit, listen to (a person), watch (a person).", "From 1542.0s to 1545.0s, [0.648, 0.073, 0.993, 0.988]->[0.611, 0.056, 0.994, 1.0]->[0.499, 0.055, 0.99, 1.0]->[0.648, 0.06, 0.992, 1.0]: sit, listen to (a person), watch (a person).", "At 1541.0s, [0.635, 0.052, 0.983, 0.973]: sit, touch (an object), listen to (a person), watch (a person)."]}]}, {"video": "TM5MPJIq1Is.mkv", "clip": [1533, 1569], "conversations": [{"from": "human", "timestamps": 1563.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.07, 0.31, 0.341, 0.825]?"}, {"from": "gpt", "value": ["In the current frame, [0.07, 0.31, 0.341, 0.825]: sit, touch (an object), talk to (e.g., self, a person, a 
group), watch (a person).", "From 1557.0s to 1562.0s, [0.007, 0.064, 0.71, 0.988]->[0.019, 0.067, 0.695, 0.98]->[0.012, 0.068, 0.709, 0.992]->[0.016, 0.078, 0.7, 0.981]->[0.01, 0.075, 0.699, 0.986]->[0.063, 0.314, 0.358, 0.832]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1556.0s, [0.017, 0.088, 0.705, 0.977]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1555.0s, [0.012, 0.075, 0.704, 0.979]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1554.0s, [0.018, 0.071, 0.699, 0.978]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1551.0s to 1553.0s, [0.003, 0.075, 0.704, 0.99]->[0.01, 0.068, 0.705, 0.989]->[0.016, 0.056, 0.701, 0.98]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1550.0s, [0.011, 0.083, 0.697, 0.982]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1549.0s, [0.018, 0.072, 0.691, 0.983]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1547.0s to 1548.0s, [0.013, 0.095, 0.693, 0.984]->[0.019, 0.083, 0.696, 0.977]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1546.0s, [0.014, 0.101, 0.693, 0.982]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1545.0s, [0.021, 0.113, 0.687, 0.987]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1544.0s, [0.007, 0.083, 0.695, 0.986]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1541.0s to 1543.0s, [0.019, 0.12, 0.719, 0.997]->[0.011, 0.099, 0.708, 0.987]->[0.014, 0.096, 0.683, 0.984]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1569.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.127, 0.411, 0.478, 0.995]: carry/hold (an object), sit, smoke, watch (a person); [0.385, 0.194, 0.549, 0.672]: ride (e.g., a bike, a car, a horse), sit, watch (a person), talk to (e.g., self, a person, a group); [0.588, 0.203, 0.736, 0.697]: carry/hold (an object), sit, ride (e.g., a bike, a car, a horse)."}]}, {"video": "TM5MPJIq1Is.mkv", "clip": [1600, 1641], "conversations": [{"from": "human", "timestamps": 1630.0, "value": "What actions did the person at bbox [0.703, 0.109, 0.896, 0.951] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.703, 0.109, 0.896, 0.951]: stand, carry/hold (an object), watch (a person).", "At 1629.0s, [0.705, 0.107, 0.895, 0.97]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1641.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.019, 0.103, 0.393, 0.98]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.618, 0.063, 0.924, 0.985]: listen to (a person), stand, watch (a person)."}]}, {"video": "TM5MPJIq1Is.mkv", "clip": [1609, 1690], "conversations": [{"from": "human", "timestamps": 1639.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.054, 0.049, 0.423, 0.988]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.631, 0.05, 0.99, 1.0]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1690.0, 
"value": "What actions is the person in bbox [0.782, 0.111, 0.986, 0.728] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.782, 0.111, 0.986, 0.728]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1689.0s, [0.741, 0.109, 0.955, 0.766]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1688.0s, [0.088, 0.103, 0.401, 0.965]: stand, listen to (a person), watch (a person)."]}]}, {"video": "TM5MPJIq1Is.mkv", "clip": [1686, 1732], "conversations": [{"from": "human", "timestamps": 1716.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.21, 0.323, 0.326, 0.752]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.238, 0.588, 0.426, 0.896]: listen to (a person), fall down, watch (a person); [0.375, 0.324, 0.501, 0.807]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.461, 0.326, 0.863, 0.979]: listen to (a person), sit, watch (a person); [0.671, 0.097, 0.839, 0.659]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1732.0, "value": "What actions did the person at bbox [0.332, 0.202, 0.524, 0.527] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.332, 0.202, 0.524, 0.527]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1731.0s, [0.644, 0.251, 0.863, 0.983]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1730.0s, [0.657, 0.26, 0.882, 0.979]: stand, carry/hold (an object), listen to (a person)."]}]}, {"video": "TM5MPJIq1Is.mkv", "clip": [1715, 1763], "conversations": [{"from": "human", "timestamps": 1745.0, "value": "For the person at bbox [0.013, 0.096, 0.297, 0.996], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.013, 0.096, 0.297, 0.996]: stand, listen to (a person), watch (a person).", "At 1744.0s, [0.009, 0.114, 0.284, 0.985]: stand, listen to (a person), watch (a person).", "From 1742.0s to 1743.0s, [0.009, 0.111, 0.281, 0.968]->[0.009, 0.109, 0.283, 0.982]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1739.0s to 1741.0s, [0.014, 0.108, 0.358, 0.989]->[0.009, 0.108, 0.36, 1.0]->[0.014, 0.109, 0.336, 0.991]: stand, listen to (a person), watch (a person).", "At 1738.0s, [0.011, 0.102, 0.348, 0.993]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1733.0s to 1737.0s, [0.068, 0.058, 0.415, 0.992]->[0.037, 0.077, 0.4, 0.976]->[0.014, 0.08, 0.389, 0.982]->[0.017, 0.087, 0.363, 0.987]->[0.016, 0.092, 0.363, 0.997]: stand, listen to (a person), watch (a person).", "At 1732.0s, [0.198, 0.365, 0.611, 0.993]: listen to (a person), watch (a person).", "From 1730.0s to 1731.0s, [0.06, 0.083, 0.442, 1.0]->[0.073, 0.089, 0.456, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1763.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.025, 0.301, 0.347, 0.936]: listen to (a person), sit, eat; [0.193, 0.404, 0.487, 0.742]: sit, touch (an object), talk to (e.g., self, a person, a group), drink; [0.678, 0.346, 0.966, 0.975]: listen to (a person), sit, eat, touch (an object)."}]}, {"video": 
"TcB0IFBwk-k.mkv", "clip": [947, 993], "conversations": [{"from": "human", "timestamps": 977.0, "value": "What actions did the person at bbox [0.026, 0.033, 0.728, 0.985] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.026, 0.033, 0.728, 0.985]: sit.", "At 976.0s, [0.004, 0.025, 0.812, 0.972]: sit, listen to (a person)."]}, {"from": "human", "timestamps": 993.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.18, 0.044, 0.766, 0.973]: listen to (a person), sit, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "TcB0IFBwk-k.mkv", "clip": [1012, 1055], "conversations": [{"from": "human", "timestamps": 1042.0, "value": "For the person at bbox [0.042, 0.49, 0.22, 0.981], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.042, 0.49, 0.22, 0.981]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1041.0s, [0.042, 0.488, 0.236, 0.969]: sit, carry/hold (an object), listen to (a person).", "At 1040.0s, [0.045, 0.486, 0.235, 0.973]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1032.0s to 1039.0s, [0.044, 0.478, 0.232, 0.955]->[0.042, 0.479, 0.224, 0.942]->[0.043, 0.476, 0.232, 0.95]->[0.045, 0.484, 0.232, 0.964]->[0.044, 0.484, 0.23, 0.97]->[0.047, 0.487, 0.225, 0.988]->[0.044, 0.489, 0.236, 0.957]->[0.041, 0.486, 0.235, 0.964]: sit, carry/hold (an object), listen to (a person).", "At 1031.0s, [0.073, 0.512, 0.235, 0.969]: sit, carry/hold (an object), touch (an object), listen to (a person).", "From 1029.0s to 1030.0s, [0.076, 0.516, 0.254, 0.973]->[0.073, 0.507, 0.254, 0.981]: sit, carry/hold (an object), touch (an object).", "At 1028.0s, [0.078, 0.509, 0.253, 0.982]: sit, touch (an object).", "At 1027.0s, [0.073, 0.508, 0.246, 0.967]: sit, eat, touch (an object)."]}, {"from": "human", "timestamps": 1055.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.058, 0.173, 0.342, 0.698]: sit, touch (an object), talk to (e.g., self, a person, a group); [0.25, 0.321, 0.858, 0.96]: carry/hold (an object), sit, watch (a person), listen to (a person); [0.616, 0.196, 0.998, 0.875]: listen to (a person), sit, touch (an object), watch (a person)."}]}, {"video": "TcB0IFBwk-k.mkv", "clip": [1042, 1136], "conversations": [{"from": "human", "timestamps": 1072.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.011, 0.178, 0.348, 0.953]: listen to (a person), sit, touch (an object), watch (a person); [0.268, 0.317, 0.744, 0.998]: listen to (a person), sit, watch (a person); [0.67, 0.131, 1.0, 0.682]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1136.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.226, 0.093, 0.905, 1.0]?"}, {"from": "gpt", "value": ["At Now, [0.226, 0.093, 0.905, 1.0]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "From 1134.0s to 1135.0s, [0.237, 0.085, 0.917, 0.972]->[0.178, 0.084, 0.901, 0.977]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1133.0s, [0.233, 0.093, 0.9, 1.0]: sit, carry/hold (an object), drink, touch (an object), talk to (e.g., self, a person, a group).", "From 1129.0s to 1132.0s, [0.214, 0.069, 0.901, 
0.975]->[0.189, 0.125, 0.946, 0.993]->[0.228, 0.089, 0.898, 0.979]->[0.245, 0.084, 0.917, 0.995]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "From 1127.0s to 1128.0s, [0.185, 0.101, 0.947, 0.997]->[0.181, 0.082, 0.911, 1.0]: sit, carry/hold (an object), drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1126.0s, [0.202, 0.094, 0.923, 0.992]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1125.0s, [0.193, 0.084, 0.906, 0.997]: sit, drink, talk to (e.g., self, a person, a group).", "At 1124.0s, [0.154, 0.083, 0.941, 0.991]: sit, carry/hold (an object), drink, talk to (e.g., self, a person, a group).", "From 1121.0s to 1123.0s, [0.213, 0.082, 0.931, 0.993]->[0.196, 0.087, 0.919, 0.989]->[0.175, 0.089, 0.934, 0.992]: sit, carry/hold (an object), drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1120.0s, [0.192, 0.079, 0.919, 0.993]: sit, drink, talk to (e.g., self, a person, a group).", "At 1119.0s, [0.217, 0.077, 0.971, 0.994]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1118.0s, [0.204, 0.082, 0.92, 0.991]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1117.0s, [0.208, 0.077, 0.939, 1.0]: sit, carry/hold (an object), drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1116.0s, [0.188, 0.091, 0.915, 0.986]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1115.0s, [0.195, 0.097, 0.895, 0.982]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1113.0s to 1114.0s, [0.178, 0.097, 0.915, 0.99]->[0.185, 0.089, 0.91, 0.986]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1112.0s, [0.209, 0.084, 0.93, 1.0]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1111.0s, [0.169, 0.098, 0.91, 0.994]: sit, drink, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1107.0s to 1110.0s, [0.243, 0.088, 0.93, 0.998]->[0.148, 0.103, 0.931, 0.99]->[0.192, 0.079, 0.903, 0.994]->[0.195, 0.094, 0.928, 1.0]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1106.0s, [0.158, 0.095, 0.902, 0.985]: sit, carry/hold (an object), drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1105.0s, [0.179, 0.091, 0.911, 0.995]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1104.0s, [0.143, 0.102, 0.929, 0.986]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "From 1102.0s to 1103.0s, [0.204, 0.086, 0.887, 0.992]->[0.252, 0.077, 0.944, 1.0]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1101.0s, [0.21, 0.071, 0.909, 0.999]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1100.0s, [0.18, 0.087, 0.911, 1.0]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "From 1098.0s to 1099.0s, [0.175, 0.1, 0.89, 0.996]->[0.187, 0.093, 0.922, 0.995]: sit, carry/hold (an object), drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1097.0s, [0.185, 0.096, 0.918, 1.0]: sit, drink, talk to (e.g., self, a person, a group).", "At 1096.0s, [0.169, 0.105, 0.953, 1.0]: sit, carry/hold (an object), drink, talk to (e.g., self, a person, a group)."]}]}, {"video": "TcB0IFBwk-k.mkv", "clip": [1052, 1115], "conversations": [{"from": "human", "timestamps": 1082.0, "value": "What 
actions is the person in bbox [0.567, 0.179, 0.999, 0.836] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.567, 0.179, 0.999, 0.836]: sit, touch (an object), listen to (a person), watch (a person).", "From 1078.0s to 1081.0s, [0.567, 0.184, 0.996, 0.862]->[0.581, 0.184, 0.998, 0.835]->[0.561, 0.188, 0.999, 0.857]->[0.571, 0.18, 1.0, 0.807]: sit, touch (an object), listen to (a person), watch (a person).", "From 1073.0s to 1077.0s, [0.663, 0.137, 1.0, 0.675]->[0.667, 0.136, 1.0, 0.681]->[0.657, 0.132, 1.0, 0.667]->[0.655, 0.138, 0.998, 0.673]->[0.657, 0.131, 1.0, 0.672]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1072.0s, [0.67, 0.131, 1.0, 0.682]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1071.0s, [0.663, 0.138, 1.0, 0.696]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1070.0s, [0.664, 0.132, 1.0, 0.682]: sit, eat, talk to (e.g., self, a person, a group), watch (a person).", "From 1068.0s to 1069.0s, [0.678, 0.115, 1.0, 0.644]->[0.669, 0.135, 1.0, 0.683]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1115.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.195, 0.097, 0.895, 0.982]: sit, touch (an object), talk to (e.g., self, a person, a group)."}]}, {"video": "TcB0IFBwk-k.mkv", "clip": [1112, 1188], "conversations": [{"from": "human", "timestamps": 1142.0, "value": "Describe the actions of the person at bbox [0.191, 0.082, 0.708, 0.945] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.191, 0.082, 0.708, 0.945]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1141.0s, [0.12, 0.083, 0.719, 0.967]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1140.0s, [0.157, 0.076, 0.707, 0.953]: sit, drink, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1138.0s to 1139.0s, [0.318, 0.116, 0.939, 1.0]->[0.376, 0.118, 0.912, 1.0]: sit, drink, touch (an object).", "From 1134.0s to 1137.0s, [0.237, 0.085, 0.917, 0.972]->[0.178, 0.084, 0.901, 0.977]->[0.226, 0.093, 0.905, 1.0]->[0.309, 0.092, 0.936, 1.0]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1133.0s, [0.233, 0.093, 0.9, 1.0]: sit, carry/hold (an object), drink, touch (an object), talk to (e.g., self, a person, a group).", "From 1129.0s to 1132.0s, [0.214, 0.069, 0.901, 0.975]->[0.189, 0.125, 0.946, 0.993]->[0.228, 0.089, 0.898, 0.979]->[0.245, 0.084, 0.917, 0.995]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "From 1127.0s to 1128.0s, [0.185, 0.101, 0.947, 0.997]->[0.181, 0.082, 0.911, 1.0]: sit, carry/hold (an object), drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1126.0s, [0.202, 0.094, 0.923, 0.992]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1125.0s, [0.193, 0.084, 0.906, 0.997]: sit, drink, talk to (e.g., self, a person, a group).", "At 1124.0s, [0.154, 0.083, 0.941, 0.991]: sit, carry/hold (an object), drink, talk to (e.g., self, a person, a group).", "From 1121.0s to 1123.0s, [0.213, 0.082, 0.931, 0.993]->[0.196, 0.087, 0.919, 0.989]->[0.175, 0.089, 0.934, 0.992]: sit, carry/hold (an object), drink, 
touch (an object), talk to (e.g., self, a person, a group).", "At 1120.0s, [0.192, 0.079, 0.919, 0.993]: sit, drink, talk to (e.g., self, a person, a group).", "At 1119.0s, [0.217, 0.077, 0.971, 0.994]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1118.0s, [0.204, 0.082, 0.92, 0.991]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1117.0s, [0.208, 0.077, 0.939, 1.0]: sit, carry/hold (an object), drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1116.0s, [0.188, 0.091, 0.915, 0.986]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1115.0s, [0.195, 0.097, 0.895, 0.982]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1113.0s to 1114.0s, [0.178, 0.097, 0.915, 0.99]->[0.185, 0.089, 0.91, 0.986]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1112.0s, [0.209, 0.084, 0.93, 1.0]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1111.0s, [0.169, 0.098, 0.91, 0.994]: sit, drink, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1107.0s to 1110.0s, [0.243, 0.088, 0.93, 0.998]->[0.148, 0.103, 0.931, 0.99]->[0.192, 0.079, 0.903, 0.994]->[0.195, 0.094, 0.928, 1.0]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1106.0s, [0.158, 0.095, 0.902, 0.985]: sit, carry/hold (an object), drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1105.0s, [0.179, 0.091, 0.911, 0.995]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1104.0s, [0.143, 0.102, 0.929, 0.986]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "From 1102.0s to 1103.0s, [0.204, 0.086, 0.887, 0.992]->[0.252, 0.077, 0.944, 1.0]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1101.0s, [0.21, 0.071, 0.909, 0.999]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1100.0s, [0.18, 0.087, 0.911, 1.0]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "From 1098.0s to 1099.0s, [0.175, 0.1, 0.89, 0.996]->[0.187, 0.093, 0.922, 0.995]: sit, carry/hold (an object), drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1097.0s, [0.185, 0.096, 0.918, 1.0]: sit, drink, talk to (e.g., self, a person, a group).", "At 1096.0s, [0.169, 0.105, 0.953, 1.0]: sit, carry/hold (an object), drink, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1188.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.204, 0.09, 0.859, 1.0]: sit, touch (an object), talk to (e.g., self, a person, a group), drink."}]}, {"video": "TcB0IFBwk-k.mkv", "clip": [1137, 1256], "conversations": [{"from": "human", "timestamps": 1167.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.23, 0.083, 0.84, 1.0]: sit, touch (an object), talk to (e.g., self, a person, a group), drink."}, {"from": "human", "timestamps": 1256.0, "value": "What actions did the person at bbox [0.918, 0.19, 0.996, 0.473] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.918, 0.19, 0.996, 0.473]: stand.", "At 1255.0s, [0.917, 0.19, 0.997, 0.474]: stand.", "At 1254.0s, [0.918, 0.192, 0.996, 0.474]: stand, watch (a 
person).", "From 1251.0s to 1253.0s, [0.919, 0.195, 0.997, 0.47]->[0.919, 0.193, 0.997, 0.474]->[0.916, 0.193, 0.996, 0.476]: stand.", "From 1249.0s to 1250.0s, [0.921, 0.196, 0.997, 0.473]->[0.918, 0.191, 0.996, 0.474]: stand, watch (a person)."]}]}, {"video": "TcB0IFBwk-k.mkv", "clip": [1177, 1258], "conversations": [{"from": "human", "timestamps": 1207.0, "value": "What actions did the person at bbox [0.179, 0.133, 0.901, 0.98] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.179, 0.133, 0.901, 0.98]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1206.0s, [0.134, 0.101, 0.766, 0.947]: sit, drink, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1205.0s, [0.136, 0.095, 0.717, 0.957]: sit, carry/hold (an object), drink, touch (an object), talk to (e.g., self, a person, a group).", "From 1203.0s to 1204.0s, [0.11, 0.099, 0.719, 0.938]->[0.097, 0.1, 0.732, 0.955]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1202.0s, [0.115, 0.099, 0.718, 0.947]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1201.0s, [0.094, 0.11, 0.738, 0.934]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1199.0s to 1200.0s, [0.121, 0.084, 0.71, 0.934]->[0.126, 0.09, 0.717, 0.931]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "From 1197.0s to 1198.0s, [0.141, 0.085, 0.755, 0.977]->[0.108, 0.092, 0.716, 0.924]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1196.0s, [0.133, 0.094, 0.706, 0.927]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1195.0s, [0.205, 0.083, 0.868, 0.984]: sit, drink, touch (an object), listen to (a person), watch (a person).", "From 1192.0s to 1194.0s, [0.203, 0.09, 0.871, 0.991]->[0.187, 0.089, 0.875, 0.975]->[0.173, 0.089, 0.896, 0.995]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1191.0s, [0.2, 0.096, 0.876, 1.0]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1190.0s, [0.21, 0.102, 0.864, 1.0]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1186.0s to 1189.0s, [0.195, 0.085, 0.887, 0.995]->[0.189, 0.096, 0.88, 0.991]->[0.204, 0.09, 0.859, 1.0]->[0.191, 0.086, 0.878, 0.993]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1185.0s, [0.187, 0.095, 0.871, 0.992]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1184.0s, [0.198, 0.096, 0.885, 0.994]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "From 1181.0s to 1183.0s, [0.207, 0.112, 0.874, 0.987]->[0.157, 0.102, 0.882, 0.988]->[0.218, 0.104, 0.86, 0.982]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1180.0s, [0.19, 0.099, 0.879, 0.994]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1179.0s, [0.17, 0.101, 0.907, 0.995]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "From 1177.0s to 1178.0s, [0.218, 0.063, 0.871, 0.991]->[0.189, 0.1, 0.88, 0.996]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "From 1175.0s to 1176.0s, [0.238, 0.066, 0.85, 0.992]->[0.217, 0.067, 0.895, 0.991]: sit, carry/hold (an object), drink, touch (an object), talk to (e.g., self, a person, a group).", "From 
1171.0s to 1174.0s, [0.219, 0.069, 0.759, 0.981]->[0.205, 0.084, 0.888, 0.997]->[0.222, 0.078, 0.895, 1.0]->[0.233, 0.071, 0.843, 1.0]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1170.0s, [0.267, 0.095, 0.868, 0.995]: sit, carry/hold (an object), drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1169.0s, [0.196, 0.078, 0.811, 0.993]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1168.0s, [0.203, 0.078, 0.866, 1.0]: sit, carry/hold (an object), drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1167.0s, [0.23, 0.083, 0.84, 1.0]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1166.0s, [0.213, 0.08, 0.86, 0.987]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1165.0s, [0.227, 0.071, 0.893, 0.985]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1164.0s, [0.201, 0.115, 0.912, 0.987]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1162.0s to 1163.0s, [0.185, 0.074, 0.883, 0.99]->[0.223, 0.094, 0.873, 0.994]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "From 1160.0s to 1161.0s, [0.197, 0.089, 0.864, 0.994]->[0.213, 0.093, 0.844, 0.99]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1159.0s, [0.217, 0.068, 0.871, 0.987]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1158.0s, [0.198, 0.076, 0.838, 0.988]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "From 1155.0s to 1157.0s, [0.195, 0.12, 0.926, 0.991]->[0.224, 0.095, 0.896, 1.0]->[0.213, 0.086, 0.903, 0.991]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1154.0s, [0.205, 0.106, 0.913, 0.991]: sit, drink, touch (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1153.0s, [0.107, 0.088, 0.746, 0.977]: sit, drink, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1152.0s, [0.12, 0.084, 0.735, 0.969]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1151.0s, [0.226, 0.084, 0.891, 0.989]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1150.0s, [0.229, 0.088, 0.898, 0.991]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1148.0s to 1149.0s, [0.211, 0.069, 0.892, 0.998]->[0.209, 0.087, 0.911, 1.0]: sit, drink, talk to (e.g., self, a person, a group).", "At 1147.0s, [0.185, 0.087, 0.975, 1.0]: sit, drink, touch (an object), talk to (e.g., self, a person, a group).", "At 1146.0s, [0.223, 0.101, 0.907, 0.992]: sit, drink, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1258.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.188, 0.338, 0.212, 0.434]: watch (a person), stand; [0.231, 0.358, 0.258, 0.45]: stand; [0.375, 0.418, 0.404, 0.519]: stand; [0.762, 0.135, 0.877, 0.509]: stand; [0.917, 0.191, 0.998, 0.474]: stand."}]}, {"video": "TcB0IFBwk-k.mkv", "clip": [1254, 1288], "conversations": [{"from": "human", "timestamps": 1284.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.066, 0.403, 0.383, 0.98]: 
carry/hold (an object), stand, watch (a person), listen to (a person); [0.401, 0.681, 0.549, 0.984]: listen to (a person), stand, watch (a person); [0.49, 0.06, 0.949, 0.947]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1288.0, "value": "Describe the actions of the person at bbox [0.511, 0.055, 0.969, 0.97] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.511, 0.055, 0.969, 0.97]: sit, carry/hold (an object), watch (a person).", "At 1287.0s, [0.497, 0.041, 0.971, 0.988]: sit, carry/hold (an object).", "From 1283.0s to 1286.0s, [0.529, 0.051, 0.959, 0.966]->[0.49, 0.06, 0.949, 0.947]->[0.496, 0.088, 0.958, 0.915]->[0.508, 0.075, 0.962, 0.956]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1280.0s to 1282.0s, [0.539, 0.041, 0.96, 0.964]->[0.493, 0.05, 0.962, 0.992]->[0.49, 0.072, 0.963, 0.961]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1273.0s to 1279.0s, [0.549, 0.04, 0.963, 0.957]->[0.513, 0.041, 0.951, 0.966]->[0.516, 0.036, 0.965, 0.941]->[0.541, 0.099, 0.956, 0.942]->[0.525, 0.044, 0.966, 0.95]->[0.532, 0.062, 0.967, 0.936]->[0.543, 0.09, 0.969, 0.944]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1271.0s to 1272.0s, [0.523, 0.082, 0.968, 0.901]->[0.516, 0.041, 0.975, 0.948]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "TcB0IFBwk-k.mkv", "clip": [1250, 1374], "conversations": [{"from": "human", "timestamps": 1280.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.067, 0.386, 0.394, 0.982]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.404, 0.679, 0.512, 0.984]: listen to (a person), stand, watch (a person); [0.539, 0.041, 0.96, 0.964]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1374.0, "value": "What actions is the person in bbox [0.024, 0.618, 0.27, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.024, 0.618, 0.27, 1.0]: stand, watch (a person).", "At 1373.0s, [0.027, 0.619, 0.272, 1.0]: stand, watch (a person)."]}]}, {"video": "TcB0IFBwk-k.mkv", "clip": [1360, 1474], "conversations": [{"from": "human", "timestamps": 1390.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.225, 0.511, 0.423, 0.999]: carry/hold (an object), walk; [0.675, 0.423, 0.72, 0.571]: watch (a person), stand; [0.707, 0.394, 0.762, 0.566]: watch (a person), stand; [0.753, 0.236, 0.895, 0.533]: carry/hold (an object), walk; [0.925, 0.248, 1.0, 0.523]: carry/hold (an object), stand, watch (a person)."}, {"from": "human", "timestamps": 1474.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.467, 0.306, 0.677, 0.961]?"}, {"from": "gpt", "value": ["At Now, [0.467, 0.306, 0.677, 0.961]: sit, ride (e.g., a bike, a car, a horse).", "From 1471.0s to 1473.0s, [0.036, 0.24, 0.786, 0.956]->[0.336, 0.177, 0.753, 0.973]->[0.409, 0.312, 0.679, 0.945]: sit, ride (e.g., a bike, a car, a horse)."]}]}, {"video": "TcB0IFBwk-k.mkv", "clip": [1451, 1494], "conversations": [{"from": "human", "timestamps": 1481.0, "value": "For the current frame, show each 
individual's position and actions."}, {"from": "gpt", "value": "At now, [0.444, 0.623, 0.527, 0.85]: carry/hold (an object), listen to (a person), watch (a person), walk; [0.623, 0.63, 0.668, 0.81]: watch (a person), close (e.g., a door, a box), walk; [0.754, 0.641, 0.805, 0.818]: carry/hold (an object), walk; [0.772, 0.562, 0.864, 0.84]: ride (e.g., a bike, a car, a horse), sit, watch (a person), talk to (e.g., self, a person, a group); [0.885, 0.625, 0.958, 0.843]: walk."}, {"from": "human", "timestamps": 1494.0, "value": "Describe the actions of the person at bbox [0.299, 0.311, 0.407, 0.605] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.299, 0.311, 0.407, 0.605]: stand, play with pets, listen to (a person), watch (a person).", "From 1490.0s to 1493.0s, [0.79, 0.24, 1.0, 0.768]->[0.764, 0.249, 0.996, 0.765]->[0.805, 0.249, 0.992, 0.708]->[0.297, 0.311, 0.446, 0.621]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1489.0s, [0.76, 0.239, 0.99, 0.757]: stand, listen to (a person), watch (a person).", "From 1487.0s to 1488.0s, [0.703, 0.271, 1.0, 0.94]->[0.751, 0.242, 0.998, 0.832]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1486.0s, [0.776, 0.327, 1.0, 0.979]: walk, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "TcB0IFBwk-k.mkv", "clip": [1488, 1548], "conversations": [{"from": "human", "timestamps": 1518.0, "value": "What actions is the person in bbox [0.119, 0.075, 0.789, 0.982] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.119, 0.075, 0.789, 0.982]: stand, talk to (e.g., self, a person, a group).", "From 1516.0s to 1517.0s, [0.111, 0.059, 0.705, 0.989]->[0.134, 0.067, 0.838, 0.98]: stand, carry/hold (an object).", "At 1515.0s, [0.002, 0.077, 0.577, 0.999]: stand, carry/hold (an object), watch (a person).", "At 1514.0s, [0.0, 0.062, 0.385, 0.993]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1548.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.297, 0.077, 0.891, 0.988]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "TcB0IFBwk-k.mkv", "clip": [1525, 1604], "conversations": [{"from": "human", "timestamps": 1555.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.271, 0.085, 0.895, 0.985]: watch (a person), stand."}, {"from": "human", "timestamps": 1604.0, "value": "For the person at bbox [0.176, 0.462, 0.276, 0.846], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.176, 0.462, 0.276, 0.846]: stand, watch (a person).", "From 1601.0s to 1603.0s, [0.173, 0.463, 0.279, 0.846]->[0.172, 0.473, 0.276, 0.859]->[0.175, 0.471, 0.279, 0.847]: stand, watch (a person).", "At 1600.0s, [0.182, 0.458, 0.271, 0.832]: stand, listen to (a person), watch (a person).", "At 1599.0s, [0.174, 0.466, 0.283, 0.843]: stand, touch (an object), listen to (a person), watch (a person)."]}]}, {"video": "TcB0IFBwk-k.mkv", "clip": [1596, 1645], "conversations": [{"from": "human", "timestamps": 1626.0, "value": "Describe the actions of the person at bbox [0.574, 0.146, 0.808, 0.721] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.574, 0.146, 0.808, 0.721]: sit, carry/hold (an object), sing to (e.g., self, a 
person, a group).", "From 1624.0s to 1625.0s, [0.571, 0.107, 0.936, 0.737]->[0.561, 0.158, 0.783, 0.755]: sit, carry/hold (an object), sing to (e.g., self, a person, a group).", "At 1623.0s, [0.552, 0.103, 0.899, 0.783]: sit, carry/hold (an object), touch (an object), sing to (e.g., self, a person, a group).", "At 1622.0s, [0.589, 0.135, 0.809, 0.762]: sit, carry/hold (an object), sing to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1645.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.059, 0.264, 0.316, 0.763]: carry/hold (an object), sit, smoke, watch (a person); [0.242, 0.265, 0.516, 0.64]: carry/hold (an object), sit, watch (a person); [0.417, 0.171, 0.659, 0.611]: watch (a person), sit, drink."}]}, {"video": "TcB0IFBwk-k.mkv", "clip": [1607, 1723], "conversations": [{"from": "human", "timestamps": 1637.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.562, 0.08, 0.933, 0.747]: carry/hold (an object), sit, touch (an object), sing to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1723.0, "value": "Describe the actions of the person at bbox [0.518, 0.046, 0.994, 0.998] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.518, 0.046, 0.994, 0.998]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1722.0s, [0.489, 0.02, 0.977, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1720.0s to 1721.0s, [0.5, 0.004, 0.978, 1.0]->[0.497, 0.02, 0.989, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1718.0s to 1719.0s, [0.497, 0.029, 0.998, 1.0]->[0.506, 0.029, 0.996, 1.0]: stand, listen to (a person), watch (a person).", "From 1715.0s to 1717.0s, [0.439, 0.032, 1.0, 1.0]->[0.452, 0.041, 1.0, 0.997]->[0.47, 0.018, 0.994, 1.0]: stand, listen to (a person).", "At 1714.0s, [0.45, 0.005, 1.0, 1.0]: stand, carry/hold (an object), listen to (a person).", "At 1713.0s, [0.414, 0.028, 1.0, 1.0]: stand, listen to (a person).", "At 1712.0s, [0.43, 0.032, 1.0, 1.0]: stand, listen to (a person), watch (a person)."]}]}, {"video": "TzaVHtLXOzY.mkv", "clip": [995, 1065], "conversations": [{"from": "human", "timestamps": 1025.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.095, 0.104, 0.781, 0.988]?"}, {"from": "gpt", "value": ["In the current frame, [0.095, 0.104, 0.781, 0.988]: sit.", "At 1024.0s, [0.127, 0.09, 0.767, 0.968]: stand.", "At 1023.0s, [0.16, 0.102, 0.859, 0.998]: stand, talk to (e.g., self, a person, a group).", "At 1022.0s, [0.156, 0.114, 0.862, 0.99]: stand.", "At 1021.0s, [0.1, 0.12, 0.869, 1.0]: sit, carry/hold (an object).", "At 1020.0s, [0.069, 0.117, 0.806, 0.983]: sit, stand, answer phone.", "At 1019.0s, [0.041, 0.16, 0.873, 0.981]: sit, answer phone.", "From 1015.0s to 1018.0s, [0.071, 0.16, 0.729, 0.993]->[0.025, 0.138, 0.781, 1.0]->[0.054, 0.088, 0.829, 1.0]->[0.107, 0.144, 0.845, 0.996]: stand, answer phone.", "From 1012.0s to 1014.0s, [0.13, 0.16, 0.723, 1.0]->[0.031, 0.152, 0.717, 1.0]->[0.042, 0.138, 0.698, 0.997]: sit, answer phone."]}, {"from": "human", "timestamps": 1065.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.035, 0.01, 0.759, 0.98]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "TzaVHtLXOzY.mkv", 
"clip": [1272, 1315], "conversations": [{"from": "human", "timestamps": 1302.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.252, 0.25, 0.852, 0.989]: carry/hold (an object), bend/bow (at the waist)."}, {"from": "human", "timestamps": 1315.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.169, 0.046, 0.878, 0.95]?"}, {"from": "gpt", "value": ["At Now, [0.169, 0.046, 0.878, 0.95]: stand, talk to (e.g., self, a person, a group).", "At 1314.0s, [0.223, 0.069, 0.921, 0.958]: stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "TzaVHtLXOzY.mkv", "clip": [1322, 1377], "conversations": [{"from": "human", "timestamps": 1352.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.121, 0.249, 0.607, 0.96]: carry/hold (an object), bend/bow (at the waist)."}, {"from": "human", "timestamps": 1377.0, "value": "What actions did the person at bbox [0.285, 0.289, 0.926, 0.98] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.285, 0.289, 0.926, 0.98]: stand, drink.", "From 1373.0s to 1376.0s, [0.318, 0.169, 0.889, 1.0]->[0.359, 0.273, 0.942, 0.993]->[0.357, 0.203, 0.968, 0.995]->[0.311, 0.248, 0.926, 0.988]: stand, drink."]}]}, {"video": "TzaVHtLXOzY.mkv", "clip": [1493, 1595], "conversations": [{"from": "human", "timestamps": 1523.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.443, 0.071, 0.916, 0.989]: stand."}, {"from": "human", "timestamps": 1595.0, "value": "Describe the actions of the person at bbox [0.235, 0.064, 0.981, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.235, 0.064, 0.981, 1.0]: sit, talk to (e.g., self, a person, a group).", "At 1594.0s, [0.221, 0.059, 1.0, 0.998]: sit, listen to (a person)."]}]}, {"video": "TzaVHtLXOzY.mkv", "clip": [1549, 1583], "conversations": [{"from": "human", "timestamps": 1579.0, "value": "For the person at bbox [0.19, 0.059, 1.0, 0.983], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.19, 0.059, 1.0, 0.983]: sit.", "At 1578.0s, [0.206, 0.053, 1.0, 0.984]: sit.", "From 1576.0s to 1577.0s, [0.208, 0.041, 1.0, 1.0]->[0.182, 0.06, 1.0, 0.994]: sit, talk to (e.g., self, a person, a group).", "From 1571.0s to 1575.0s, [0.228, 0.046, 1.0, 1.0]->[0.235, 0.066, 1.0, 1.0]->[0.228, 0.064, 1.0, 0.999]->[0.213, 0.052, 1.0, 0.997]->[0.233, 0.061, 1.0, 0.997]: sit.", "At 1570.0s, [0.22, 0.047, 1.0, 0.996]: sit, talk to (e.g., self, a person, a group).", "From 1568.0s to 1569.0s, [0.229, 0.057, 0.983, 1.0]->[0.231, 0.062, 1.0, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1583.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.234, 0.057, 0.993, 0.998]: listen to (a person), sit."}]}, {"video": "TzaVHtLXOzY.mkv", "clip": [1654, 1722], "conversations": [{"from": "human", "timestamps": 1684.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.629, 0.044, 1.0, 0.973]: walk."}, {"from": "human", "timestamps": 1722.0, "value": "What actions is the person 
in bbox [0.375, 0.008, 0.994, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.375, 0.008, 0.994, 1.0]: stand, carry/hold (an object).", "From 1715.0s to 1721.0s, [0.296, 0.032, 0.974, 1.0]->[0.183, 0.04, 0.968, 0.963]->[0.213, 0.045, 0.911, 0.99]->[0.235, 0.038, 0.965, 1.0]->[0.226, 0.052, 0.957, 0.975]->[0.186, 0.051, 0.98, 0.985]->[0.281, 0.0, 0.964, 0.982]: stand, carry/hold (an object)."]}]}, {"video": "TzaVHtLXOzY.mkv", "clip": [1721, 1781], "conversations": [{"from": "human", "timestamps": 1751.0, "value": "What actions is the person in bbox [0.054, 0.0, 0.949, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.054, 0.0, 0.949, 1.0]: sit, talk to (e.g., self, a person, a group).", "From 1749.0s to 1750.0s, [0.011, 0.004, 0.907, 0.986]->[0.01, 0.0, 0.953, 0.98]: stand."]}, {"from": "human", "timestamps": 1781.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.139, 0.119, 0.386, 0.912]: carry/hold (an object), walk; [0.267, 0.055, 0.998, 0.988]: watch (a person), sit, touch (an object)."}]}, {"video": "UOfuzrwkclM.mkv", "clip": [893, 939], "conversations": [{"from": "human", "timestamps": 923.0, "value": "Describe the actions of the person at bbox [0.349, 0.327, 0.616, 0.744] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.349, 0.327, 0.616, 0.744]: sit, carry/hold (an object), touch (an object).", "At 922.0s, [0.348, 0.32, 0.612, 0.729]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 921.0s, [0.347, 0.324, 0.614, 0.736]: sit, carry/hold (an object), touch (an object).", "At 920.0s, [0.347, 0.351, 0.615, 0.796]: sit, carry/hold (an object).", "From 916.0s to 919.0s, [0.345, 0.34, 0.636, 0.74]->[0.346, 0.361, 0.638, 0.742]->[0.345, 0.361, 0.643, 0.735]->[0.338, 0.358, 0.635, 0.744]: sit, touch (an object), write.", "At 915.0s, [0.365, 0.306, 0.641, 0.738]: sit, carry/hold (an object), listen to (a person).", "At 914.0s, [0.425, 0.327, 0.646, 0.735]: sit, carry/hold (an object), touch (an object).", "At 913.0s, [0.479, 0.308, 0.609, 0.588]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 912.0s, [0.491, 0.306, 0.615, 0.582]: sit, carry/hold (an object), touch (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 939.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.021, 0.204, 0.454, 0.949]: watch (a person), listen to (a person), carry/hold (an object), stand, take (an object) from (a person); [0.452, 0.426, 0.704, 0.854]: sit, touch (an object), watch (a person), give/serve (an object) to (a person), carry/hold (an object), talk to (e.g., self, a person, a group); [0.772, 0.484, 0.983, 0.792]: sit, touch (an object), drink."}]}, {"video": "UOfuzrwkclM.mkv", "clip": [1021, 1054], "conversations": [{"from": "human", "timestamps": 1051.0, "value": "What actions did the person at bbox [0.225, 0.254, 0.895, 0.883] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.225, 0.254, 0.895, 0.883]: sit, drive (e.g., a car, a truck).", "At 1050.0s, [0.427, 0.263, 0.789, 0.808]: sit, drive (e.g., a car, a truck)."]}, {"from": "human", "timestamps": 1054.0, "value": "Where is each person 
positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.386, 0.263, 0.904, 0.802]: sit, drive (e.g., a car, a truck)."}]}, {"video": "UOfuzrwkclM.mkv", "clip": [1114, 1181], "conversations": [{"from": "human", "timestamps": 1144.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.05, 0.401, 0.154, 0.889]: sit; [0.136, 0.053, 0.412, 0.936]: carry/hold (an object), run/jog."}, {"from": "human", "timestamps": 1181.0, "value": "Describe the actions of the person at bbox [0.172, 0.685, 0.787, 0.903] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.172, 0.685, 0.787, 0.903]: read, lie/sleep.", "From 1176.0s to 1180.0s, [0.231, 0.661, 0.746, 0.9]->[0.179, 0.682, 0.74, 0.904]->[0.18, 0.663, 0.762, 0.901]->[0.168, 0.654, 0.78, 0.901]->[0.221, 0.674, 0.749, 0.898]: read, lie/sleep.", "At 1175.0s, [0.291, 0.652, 0.822, 0.903]: read, touch (an object), lie/sleep.", "From 1172.0s to 1174.0s, [0.639, 0.696, 0.985, 0.899]->[0.561, 0.657, 0.984, 0.908]->[0.426, 0.642, 0.929, 0.906]: read, lie/sleep."]}]}, {"video": "UOfuzrwkclM.mkv", "clip": [1162, 1219], "conversations": [{"from": "human", "timestamps": 1192.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.007, 0.64, 0.368, 0.831]: lie/sleep; [0.215, 0.394, 0.38, 0.559]: watch (a person), sit; [0.221, 0.651, 0.815, 0.889]: read, lie/sleep."}, {"from": "human", "timestamps": 1219.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.527, 0.531, 0.977, 0.757]?"}, {"from": "gpt", "value": ["In the current frame, [0.527, 0.531, 0.977, 0.757]: lie/sleep.", "From 1213.0s to 1218.0s, [0.52, 0.539, 0.983, 0.757]->[0.535, 0.503, 0.968, 0.754]->[0.522, 0.517, 0.977, 0.749]->[0.513, 0.524, 0.984, 0.742]->[0.517, 0.526, 0.978, 0.762]->[0.524, 0.532, 0.971, 0.748]: lie/sleep.", "At 1212.0s, [0.522, 0.543, 0.977, 0.759]: touch (an object), lie/sleep.", "From 1210.0s to 1211.0s, [0.535, 0.538, 0.97, 0.749]->[0.527, 0.534, 0.99, 0.752]: lie/sleep.", "At 1209.0s, [0.532, 0.544, 0.979, 0.802]: touch (an object), lie/sleep.", "From 1206.0s to 1208.0s, [0.523, 0.54, 0.962, 0.746]->[0.527, 0.538, 0.986, 0.756]->[0.518, 0.538, 0.987, 0.769]: lie/sleep.", "From 1201.0s to 1205.0s, [0.521, 0.311, 0.997, 0.798]->[0.517, 0.375, 0.928, 0.784]->[0.526, 0.374, 0.975, 0.785]->[0.527, 0.506, 0.968, 0.76]->[0.526, 0.535, 0.969, 0.776]: touch (an object), lie/sleep.", "From 1198.0s to 1200.0s, [0.542, 0.514, 0.973, 0.754]->[0.554, 0.436, 0.978, 0.791]->[0.582, 0.278, 0.992, 0.765]: lie/sleep."]}]}, {"video": "UOfuzrwkclM.mkv", "clip": [1249, 1287], "conversations": [{"from": "human", "timestamps": 1279.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.293, 0.76, 0.957, 0.936]: lie/sleep; [0.578, 0.084, 0.827, 0.834]: watch (a person), walk."}, {"from": "human", "timestamps": 1287.0, "value": "For the person at bbox [0.394, 0.175, 0.507, 0.562], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.394, 0.175, 0.507, 0.562]: walk.", "At 1286.0s, [0.75, 0.207, 0.858, 0.683]: walk."]}]}, {"video": "UOfuzrwkclM.mkv", "clip": [1272, 1354], "conversations": [{"from": "human", "timestamps": 1302.0, "value": "What actions is each 
person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.069, 0.377, 0.107, 0.517]: carry/hold (an object), watch (a person), listen to (a person); [0.176, 0.132, 0.331, 0.751]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.341, 0.39, 0.454, 0.588]: watch (a person), sit, touch (an object); [0.474, 0.374, 0.517, 0.54]: walk; [0.647, 0.385, 0.895, 0.879]: sit, watch (a person), listen to (a person), carry/hold (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1354.0, "value": "Describe the actions of the person at bbox [0.614, 0.136, 0.818, 0.61] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.614, 0.136, 0.818, 0.61]: stand, smoke.", "At 1353.0s, [0.623, 0.115, 0.817, 0.609]: stand, carry/hold (an object), smoke."]}]}, {"video": "UOfuzrwkclM.mkv", "clip": [1302, 1340], "conversations": [{"from": "human", "timestamps": 1332.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.063, 0.172, 0.485, 0.81]?"}, {"from": "gpt", "value": ["At Now, [0.063, 0.172, 0.485, 0.81]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1327.0s to 1331.0s, [0.007, 0.119, 0.303, 0.96]->[0.007, 0.118, 0.306, 0.952]->[0.01, 0.102, 0.291, 0.946]->[0.003, 0.113, 0.3, 0.943]->[0.086, 0.178, 0.498, 0.793]: sit, listen to (a person), watch (a person).", "From 1325.0s to 1326.0s, [0.068, 0.156, 0.502, 0.775]->[0.067, 0.161, 0.509, 0.785]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1324.0s, [0.072, 0.152, 0.499, 0.77]: sit, talk to (e.g., self, a person, a group).", "From 1322.0s to 1323.0s, [0.049, 0.27, 0.345, 0.8]->[0.048, 0.299, 0.336, 0.797]: sit, listen to (a person), watch (a person).", "At 1321.0s, [0.051, 0.327, 0.305, 0.823]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1319.0s to 1320.0s, [0.023, 0.362, 0.297, 0.796]->[0.05, 0.306, 0.36, 0.753]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1316.0s to 1318.0s, [0.014, 0.399, 0.294, 0.812]->[0.003, 0.401, 0.245, 0.896]->[0.007, 0.423, 0.307, 0.89]: sit, listen to (a person), watch (a person).", "At 1315.0s, [0.016, 0.288, 0.287, 0.819]: sit, listen to (a person).", "At 1314.0s, [0.014, 0.294, 0.316, 0.782]: sit, carry/hold (an object), listen to (a person).", "From 1312.0s to 1313.0s, [0.02, 0.322, 0.292, 0.787]->[0.008, 0.324, 0.31, 0.83]: sit, listen to (a person), watch (a person).", "At 1311.0s, [0.05, 0.291, 0.287, 0.852]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1310.0s, [0.067, 0.325, 0.309, 0.889]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1308.0s to 1309.0s, [0.142, 0.336, 0.336, 0.78]->[0.1, 0.363, 0.347, 0.825]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1340.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.208, 0.29, 0.496, 0.934]: carry/hold (an object), stand; [0.625, 0.123, 0.946, 0.952]: watch (a person), stand."}]}, {"video": "UOfuzrwkclM.mkv", "clip": [1374, 1405], "conversations": [{"from": "human", "timestamps": 1404.0, "value": "Describe the actions of the person at bbox [0.508, 0.488, 0.977, 0.948] in the current frame and the 
previous frames."}, {"from": "gpt", "value": ["At Now, [0.508, 0.488, 0.977, 0.948]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1403.0s, [0.49, 0.483, 0.982, 0.957]: sit, listen to (a person).", "At 1402.0s, [0.474, 0.496, 0.969, 0.961]: sit, drink, listen to (a person), watch (a person).", "From 1393.0s to 1401.0s, [0.394, 0.256, 0.951, 0.942]->[0.286, 0.258, 0.928, 0.959]->[0.372, 0.265, 0.944, 0.954]->[0.372, 0.241, 0.946, 0.962]->[0.399, 0.246, 0.945, 0.956]->[0.417, 0.245, 0.942, 0.96]->[0.308, 0.243, 0.931, 0.952]->[0.35, 0.254, 0.938, 0.936]->[0.485, 0.49, 0.962, 0.939]: sit, listen to (a person), watch (a person).", "At 1392.0s, [0.399, 0.261, 0.948, 0.946]: sit, watch (a person).", "From 1386.0s to 1391.0s, [0.463, 0.296, 0.928, 0.938]->[0.529, 0.305, 0.896, 0.962]->[0.441, 0.301, 0.845, 0.956]->[0.478, 0.3, 0.826, 0.955]->[0.451, 0.29, 0.815, 0.952]->[0.44, 0.297, 0.811, 0.957]: sit, listen to (a person), watch (a person).", "From 1383.0s to 1385.0s, [0.457, 0.301, 0.944, 0.939]->[0.467, 0.314, 0.892, 0.956]->[0.406, 0.308, 0.91, 0.946]: sit, watch (a person).", "From 1376.0s to 1382.0s, [0.678, 0.072, 0.995, 0.79]->[0.673, 0.062, 0.99, 0.946]->[0.677, 0.063, 0.993, 0.78]->[0.719, 0.069, 0.992, 0.934]->[0.718, 0.075, 0.989, 0.936]->[0.662, 0.048, 0.985, 0.919]->[0.718, 0.068, 0.986, 0.936]: sit, listen to (a person), watch (a person).", "At 1375.0s, [0.349, 0.074, 0.974, 0.973]: sit, watch (a person).", "At 1374.0s, [0.514, 0.203, 0.977, 0.942]: sit, drink, watch (a person).", "At 1373.0s, [0.485, 0.203, 0.968, 0.943]: sit, listen to (a person), watch (a person).", "From 1367.0s to 1372.0s, [0.457, 0.507, 0.618, 0.881]->[0.478, 0.469, 0.722, 0.918]->[0.458, 0.379, 0.767, 0.857]->[0.456, 0.29, 0.829, 0.807]->[0.453, 0.265, 0.883, 0.858]->[0.481, 0.243, 0.99, 0.938]: sit, watch (a person)."]}, {"from": "human", "timestamps": 1405.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.008, 0.122, 0.227, 0.934]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.595, 0.477, 0.983, 0.953]: watch (a person), sit, talk to (e.g., self, a person, a group), drink."}]}, {"video": "UOfuzrwkclM.mkv", "clip": [1438, 1471], "conversations": [{"from": "human", "timestamps": 1468.0, "value": "What actions did the person at bbox [0.337, 0.157, 0.51, 0.786] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.337, 0.157, 0.51, 0.786]: walk, watch (a person).", "At 1467.0s, [0.283, 0.163, 0.435, 0.741]: walk, watch (a person).", "At 1466.0s, [0.234, 0.175, 0.369, 0.662]: walk, listen to (a person), watch (a person).", "From 1463.0s to 1465.0s, [0.005, 0.057, 0.077, 0.568]->[0.051, 0.121, 0.14, 0.525]->[0.144, 0.175, 0.222, 0.55]: walk, watch (a person)."]}, {"from": "human", "timestamps": 1471.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.017, 0.42, 0.406, 0.932]: watch (a person), sit; [0.37, 0.09, 0.566, 0.904]: watch (a person), walk; [0.67, 0.458, 0.87, 0.939]: watch (a person), sit."}]}, {"video": "UOfuzrwkclM.mkv", "clip": [1511, 1547], "conversations": [{"from": "human", "timestamps": 1541.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.105, 0.459, 0.522, 0.94]: watch (a person), stand, fight/hit (a person); [0.555, 0.372, 0.884, 0.975]: carry/hold (an object), fight/hit (a person), watch 
(a person), walk."}, {"from": "human", "timestamps": 1547.0, "value": "For the person at bbox [0.027, 0.433, 0.465, 0.926], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.027, 0.433, 0.465, 0.926]: bend/bow (at the waist), fight/hit (a person).", "From 1545.0s to 1546.0s, [0.766, 0.067, 0.986, 0.922]->[0.057, 0.191, 0.362, 0.928]: walk."]}]}, {"video": "UOfuzrwkclM.mkv", "clip": [1628, 1663], "conversations": [{"from": "human", "timestamps": 1658.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.038, 0.237, 0.259, 0.943]: carry/hold (an object), stand, watch (a person); [0.79, 0.255, 0.974, 0.907]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1663.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.251, 0.256, 0.457, 0.944]?"}, {"from": "gpt", "value": ["At Now, [0.251, 0.256, 0.457, 0.944]: walk, carry/hold (an object).", "At 1662.0s, [0.229, 0.256, 0.447, 0.936]: walk, carry/hold (an object).", "At 1661.0s, [0.061, 0.238, 0.265, 0.934]: carry/hold (an object), watch (a person).", "From 1658.0s to 1660.0s, [0.038, 0.237, 0.259, 0.943]->[0.022, 0.234, 0.209, 0.939]->[0.012, 0.236, 0.207, 0.949]: stand, carry/hold (an object), watch (a person).", "At 1657.0s, [0.037, 0.235, 0.266, 0.937]: stand, carry/hold (an object).", "At 1656.0s, [0.113, 0.234, 0.332, 0.93]: walk, carry/hold (an object).", "From 1654.0s to 1655.0s, [0.264, 0.232, 0.416, 0.938]->[0.174, 0.229, 0.379, 0.944]: walk, carry/hold (an object), listen to (a person).", "From 1652.0s to 1653.0s, [0.377, 0.269, 0.54, 0.903]->[0.313, 0.249, 0.493, 0.927]: walk, carry/hold (an object)."]}]}, {"video": "UOfuzrwkclM.mkv", "clip": [1621, 1733], "conversations": [{"from": "human", "timestamps": 1651.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.473, 0.262, 0.573, 0.781]: carry/hold (an object), walk; [0.508, 0.243, 0.667, 0.894]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1733.0, "value": "What actions is the person in bbox [0.552, 0.167, 0.668, 0.53] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.552, 0.167, 0.668, 0.53]: walk.", "At 1732.0s, [0.58, 0.156, 0.694, 0.524]: walk, sail boat.", "At 1731.0s, [0.614, 0.169, 0.701, 0.328]: stand, close (e.g., a door, a box)."]}]}, {"video": "UOfuzrwkclM.mkv", "clip": [1659, 1714], "conversations": [{"from": "human", "timestamps": 1689.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.092, 0.141, 0.456, 0.96]: carry/hold (an object), walk; [0.438, 0.065, 0.817, 0.94]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1714.0, "value": "Describe the actions of the person at bbox [0.781, 0.268, 0.931, 0.853] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.781, 0.268, 0.931, 0.853]: walk, carry/hold (an object).", "From 1711.0s to 1713.0s, [0.616, 0.276, 0.733, 0.605]->[0.683, 0.268, 0.821, 0.785]->[0.702, 0.265, 0.86, 0.814]: walk, carry/hold (an object).", "At 1710.0s, [0.58, 0.278, 0.696, 0.455]: walk, touch (an object).", "At 1709.0s, [0.542, 0.261, 0.669, 0.444]: walk, carry/hold (an object), touch (an object).", "From 1707.0s to 1708.0s, [0.374, 0.259, 0.454, 0.414]->[0.468, 0.266, 
0.555, 0.421]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1705.0s to 1706.0s, [0.314, 0.261, 0.438, 0.495]->[0.337, 0.256, 0.43, 0.454]: stand, carry/hold (an object).", "At 1704.0s, [0.258, 0.21, 0.337, 0.501]: walk, carry/hold (an object)."]}]}, {"video": "U_WzY2k8IBM.mkv", "clip": [899, 988], "conversations": [{"from": "human", "timestamps": 929.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.015, 0.223, 0.631, 0.872]: listen to (a person), sit; [0.509, 0.208, 0.954, 0.89]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 988.0, "value": "What actions is the person in bbox [0.019, 0.139, 0.457, 0.883] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.019, 0.139, 0.457, 0.883]: stand.", "From 982.0s to 987.0s, [0.337, 0.188, 0.801, 0.886]->[0.103, 0.18, 0.563, 0.883]->[0.026, 0.176, 0.404, 0.88]->[0.023, 0.154, 0.423, 0.884]->[0.016, 0.148, 0.454, 0.88]->[0.027, 0.129, 0.463, 0.88]: stand.", "At 981.0s, [0.511, 0.192, 0.965, 0.885]: stand, listen to (a person)."]}]}, {"video": "U_WzY2k8IBM.mkv", "clip": [972, 1018], "conversations": [{"from": "human", "timestamps": 1002.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.512, 0.152, 0.991, 0.887]: watch (a person), sit."}, {"from": "human", "timestamps": 1018.0, "value": "What actions did the person at bbox [0.023, 0.158, 0.473, 0.899] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.023, 0.158, 0.473, 0.899]: stand.", "At 1017.0s, [0.02, 0.157, 0.486, 0.9]: stand."]}]}, {"video": "U_WzY2k8IBM.mkv", "clip": [993, 1033], "conversations": [{"from": "human", "timestamps": 1023.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.026, 0.088, 0.799, 0.892]: stand; [0.61, 0.382, 0.902, 0.883]: stand."}, {"from": "human", "timestamps": 1033.0, "value": "What actions did the person at bbox [0.011, 0.104, 0.612, 0.895] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.011, 0.104, 0.612, 0.895]: stand.", "At 1032.0s, [0.019, 0.096, 0.562, 0.889]: stand."]}]}, {"video": "U_WzY2k8IBM.mkv", "clip": [1001, 1098], "conversations": [{"from": "human", "timestamps": 1031.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.329, 0.168, 0.953, 0.88]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1098.0, "value": "What actions did the person at bbox [0.534, 0.36, 0.617, 0.782] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.534, 0.36, 0.617, 0.782]: stand.", "At 1097.0s, [0.53, 0.356, 0.615, 0.763]: stand, watch (a person)."]}]}, {"video": "U_WzY2k8IBM.mkv", "clip": [1052, 1089], "conversations": [{"from": "human", "timestamps": 1082.0, "value": "For the person at bbox [0.28, 0.111, 0.996, 0.837], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.28, 0.111, 0.996, 0.837]: stand, carry/hold (an object).", "At 1081.0s, [0.297, 0.12, 0.991, 0.818]: sit, touch (an object)."]}, {"from": "human", "timestamps": 1089.0, "value": "For the current frame, show each individual's position and 
actions."}, {"from": "gpt", "value": "At now, [0.363, 0.313, 0.41, 0.54]: walk."}]}, {"video": "U_WzY2k8IBM.mkv", "clip": [1321, 1366], "conversations": [{"from": "human", "timestamps": 1351.0, "value": "Describe the actions of the person at bbox [0.165, 0.305, 0.258, 0.764] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.165, 0.305, 0.258, 0.764]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1350.0s, [0.163, 0.307, 0.255, 0.763]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1349.0s, [0.174, 0.319, 0.244, 0.766]: stand, carry/hold (an object)."]}, {"from": "human", "timestamps": 1366.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.377, 0.359, 0.46, 0.843]: watch (a person), stand; [0.462, 0.39, 0.516, 0.553]: stand; [0.62, 0.292, 0.757, 0.842]: talk to (e.g., self, a person, a group), walk; [0.701, 0.155, 0.995, 0.87]: stand."}]}, {"video": "U_WzY2k8IBM.mkv", "clip": [1350, 1429], "conversations": [{"from": "human", "timestamps": 1380.0, "value": "What actions did the person at bbox [0.299, 0.098, 0.958, 0.882] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.299, 0.098, 0.958, 0.882]: stand.", "At 1379.0s, [0.322, 0.121, 0.972, 0.853]: stand.", "From 1377.0s to 1378.0s, [0.32, 0.102, 0.958, 0.855]->[0.331, 0.112, 0.968, 0.87]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1429.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.067, 0.13, 0.517, 0.903]: stand, kiss (a person); [0.164, 0.131, 0.954, 0.917]: stand."}]}, {"video": "U_WzY2k8IBM.mkv", "clip": [1489, 1537], "conversations": [{"from": "human", "timestamps": 1519.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.39, 0.179, 0.525, 0.698]: watch (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group), take (an object) from (a person); [0.518, 0.124, 0.895, 0.872]: carry/hold (an object), watch (a person), walk."}, {"from": "human", "timestamps": 1537.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.005, 0.155, 0.438, 0.89]?"}, {"from": "gpt", "value": ["At Now, [0.005, 0.155, 0.438, 0.89]: stand, listen to (a person).", "At 1536.0s, [0.005, 0.157, 0.436, 0.896]: stand, listen to (a person).", "From 1533.0s to 1535.0s, [0.012, 0.149, 0.528, 0.894]->[0.009, 0.152, 0.437, 0.889]->[0.001, 0.15, 0.432, 0.893]: stand, listen to (a person), watch (a person).", "From 1531.0s to 1532.0s, [0.003, 0.177, 0.445, 0.874]->[0.004, 0.178, 0.463, 0.904]: stand, listen to (a person).", "At 1530.0s, [0.008, 0.168, 0.456, 0.901]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1529.0s, [0.003, 0.16, 0.437, 0.891]: stand, listen to (a person), watch (a person).", "From 1527.0s to 1528.0s, [0.0, 0.173, 0.438, 0.902]->[0.002, 0.165, 0.439, 0.888]: stand, listen to (a person).", "At 1526.0s, [0.007, 0.174, 0.431, 0.899]: stand, listen to (a person), watch (a person).", "At 1525.0s, [0.004, 0.159, 0.388, 0.901]: sit, listen to (a person), watch (a person).", "At 1524.0s, [0.057, 0.302, 0.487, 0.886]: sit, watch (a person).", "At 1523.0s, [0.028, 0.163, 0.464, 0.905]: stand, watch (a person).", "At 1522.0s, [0.039, 0.124, 0.504, 0.904]: 
stand, carry/hold (an object), give/serve (an object) to (a person), watch (a person).", "At 1521.0s, [0.076, 0.163, 0.507, 0.895]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1520.0s, [0.228, 0.169, 0.521, 0.813]: walk, carry/hold (an object), watch (a person)."]}]}, {"video": "U_WzY2k8IBM.mkv", "clip": [1614, 1675], "conversations": [{"from": "human", "timestamps": 1644.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.428, 0.11, 1.0, 0.903]: watch (a person), sit."}, {"from": "human", "timestamps": 1675.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.32, 0.249, 0.426, 0.627]?"}, {"from": "gpt", "value": ["At Now, [0.32, 0.249, 0.426, 0.627]: walk, carry/hold (an object).", "At 1674.0s, [0.371, 0.257, 0.459, 0.605]: walk, carry/hold (an object)."]}]}, {"video": "U_WzY2k8IBM.mkv", "clip": [1611, 1720], "conversations": [{"from": "human", "timestamps": 1641.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.336, 0.076, 0.995, 0.941]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1720.0, "value": "Describe the actions of the person at bbox [0.257, 0.172, 0.726, 0.885] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.257, 0.172, 0.726, 0.885]: walk, carry/hold (an object), watch (a person).", "At 1719.0s, [0.295, 0.172, 0.659, 0.893]: walk, carry/hold (an object), watch (a person)."]}]}, {"video": "UgZFdrNT6W0.mkv", "clip": [909, 964], "conversations": [{"from": "human", "timestamps": 939.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.043, 0.072, 0.924, 0.986]: carry/hold (an object), sit, drink."}, {"from": "human", "timestamps": 964.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.032, 0.081, 0.813, 0.987]?"}, {"from": "gpt", "value": ["At Now, [0.032, 0.081, 0.813, 0.987]: stand, talk to (e.g., self, a person, a group).", "At 963.0s, [0.014, 0.086, 0.785, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "UgZFdrNT6W0.mkv", "clip": [1013, 1064], "conversations": [{"from": "human", "timestamps": 1043.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.394, 0.124, 0.717, 0.976]: walk."}, {"from": "human", "timestamps": 1064.0, "value": "What actions is the person in bbox [0.403, 0.12, 0.679, 0.967] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.403, 0.12, 0.679, 0.967]: stand, open (e.g., a window, a car door).", "At 1063.0s, [0.403, 0.185, 0.668, 0.975]: stand, carry/hold (an object), push (an object).", "From 1059.0s to 1062.0s, [0.299, 0.116, 0.663, 0.96]->[0.35, 0.118, 0.612, 0.989]->[0.327, 0.125, 0.658, 0.953]->[0.378, 0.122, 0.653, 0.963]: stand, carry/hold (an object).", "At 1058.0s, [0.299, 0.161, 0.63, 0.982]: stand, carry/hold (an object), touch (an object).", "At 1057.0s, [0.24, 0.142, 0.565, 0.975]: stand, carry/hold (an object), open (e.g., a window, a car door).", "From 1054.0s to 1056.0s, [0.364, 0.097, 0.724, 0.975]->[0.34, 0.077, 0.755, 0.973]->[0.335, 0.14, 0.769, 0.984]: 
walk, carry/hold (an object).", "From 1052.0s to 1053.0s, [0.394, 0.1, 0.894, 0.984]->[0.432, 0.0, 0.852, 0.975]: bend/bow (at the waist), carry/hold (an object).", "At 1051.0s, [0.307, 0.045, 0.861, 0.99]: bend/bow (at the waist).", "At 1050.0s, [0.242, 0.127, 0.882, 0.973]: walk.", "At 1049.0s, [0.404, 0.159, 0.666, 0.983]: walk, carry/hold (an object).", "From 1047.0s to 1048.0s, [0.357, 0.173, 0.715, 0.988]->[0.424, 0.175, 0.716, 0.966]: stand, carry/hold (an object).", "At 1046.0s, [0.33, 0.197, 0.654, 0.988]: stand, touch (an object).", "From 1041.0s to 1045.0s, [0.43, 0.121, 0.789, 0.99]->[0.403, 0.111, 0.777, 0.994]->[0.394, 0.124, 0.717, 0.976]->[0.407, 0.138, 0.757, 1.0]->[0.351, 0.176, 0.857, 0.985]: walk.", "From 1039.0s to 1040.0s, [0.398, 0.135, 0.821, 0.974]->[0.451, 0.133, 0.853, 1.0]: stand.", "At 1038.0s, [0.385, 0.155, 0.815, 0.991]: stand, talk to (e.g., self, a person, a group).", "At 1037.0s, [0.358, 0.161, 0.783, 0.99]: stand, push (an object).", "At 1036.0s, [0.409, 0.144, 0.785, 0.961]: stand, carry/hold (an object), push (an object).", "From 1034.0s to 1035.0s, [0.428, 0.142, 0.801, 0.981]->[0.449, 0.149, 0.808, 0.965]: stand, carry/hold (an object).", "At 1033.0s, [0.416, 0.148, 0.8, 0.981]: stand, carry/hold (an object), listen (e.g., to music).", "From 1030.0s to 1032.0s, [0.362, 0.109, 0.733, 0.979]->[0.414, 0.145, 0.75, 0.978]->[0.415, 0.147, 0.806, 0.965]: stand, carry/hold (an object).", "At 1029.0s, [0.378, 0.114, 0.739, 0.98]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1028.0s, [0.416, 0.092, 0.738, 0.989]: stand.", "At 1027.0s, [0.502, 0.116, 0.878, 0.982]: walk.", "From 1022.0s to 1026.0s, [0.481, 0.193, 0.622, 0.819]->[0.471, 0.165, 0.609, 0.767]->[0.458, 0.167, 0.612, 0.745]->[0.496, 0.168, 0.63, 0.77]->[0.518, 0.173, 0.64, 0.755]: stand.", "From 1019.0s to 1021.0s, [0.445, 0.306, 0.626, 0.754]->[0.446, 0.309, 0.632, 0.764]->[0.443, 0.293, 0.631, 0.78]: bend/bow (at the waist), carry/hold (an object).", "From 1015.0s to 1018.0s, [0.439, 0.178, 0.589, 0.689]->[0.446, 0.172, 0.599, 0.806]->[0.454, 0.175, 0.6, 0.684]->[0.471, 0.195, 0.608, 0.704]: stand, carry/hold (an object).", "From 1009.0s to 1014.0s, [0.514, 0.221, 0.607, 0.522]->[0.491, 0.202, 0.593, 0.585]->[0.497, 0.201, 0.611, 0.618]->[0.503, 0.197, 0.618, 0.65]->[0.453, 0.188, 0.601, 0.667]->[0.425, 0.176, 0.585, 0.669]: walk, carry/hold (an object)."]}]}, {"video": "UgZFdrNT6W0.mkv", "clip": [997, 1099], "conversations": [{"from": "human", "timestamps": 1027.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.502, 0.116, 0.878, 0.982]: walk."}, {"from": "human", "timestamps": 1099.0, "value": "Describe the actions of the person at bbox [0.455, 0.26, 0.674, 0.744] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.455, 0.26, 0.674, 0.744]: bend/bow (at the waist), carry/hold (an object).", "From 1095.0s to 1098.0s, [0.307, 0.18, 0.52, 0.881]->[0.26, 0.2, 0.472, 0.943]->[0.296, 0.224, 0.475, 0.934]->[0.374, 0.248, 0.538, 0.747]: walk, carry/hold (an object).", "At 1094.0s, [0.346, 0.255, 0.542, 0.822]: bend/bow (at the waist), carry/hold (an object).", "At 1093.0s, [0.266, 0.202, 0.479, 0.826]: bend/bow (at the waist), carry/hold (an object), lift/pick up.", "At 1092.0s, [0.258, 0.372, 0.446, 0.963]: bend/bow (at the waist), carry/hold (an object), touch (an object).", "From 1090.0s to 1091.0s, [0.39, 
0.139, 0.649, 0.751]->[0.233, 0.193, 0.484, 0.968]: walk, carry/hold (an object).", "At 1089.0s, [0.404, 0.038, 0.703, 0.987]: stand, carry/hold (an object).", "At 1088.0s, [0.387, 0.183, 0.708, 0.955]: bend/bow (at the waist), carry/hold (an object), lift/pick up.", "From 1086.0s to 1087.0s, [0.417, 0.131, 0.628, 0.835]->[0.383, 0.08, 0.647, 0.973]: walk, carry/hold (an object).", "At 1085.0s, [0.409, 0.347, 0.677, 0.797]: bend/bow (at the waist), carry/hold (an object), put down.", "At 1084.0s, [0.378, 0.327, 0.64, 0.797]: bend/bow (at the waist), put down.", "At 1083.0s, [0.381, 0.148, 0.577, 0.787]: stand, carry/hold (an object).", "From 1076.0s to 1082.0s, [0.593, 0.085, 0.896, 0.94]->[0.403, 0.107, 0.842, 0.975]->[0.438, 0.12, 0.772, 0.973]->[0.288, 0.089, 0.628, 0.759]->[0.243, 0.12, 0.539, 0.882]->[0.293, 0.166, 0.586, 0.676]->[0.304, 0.159, 0.588, 0.777]: walk, carry/hold (an object).", "At 1075.0s, [0.505, 0.17, 0.904, 0.985]: bend/bow (at the waist), carry/hold (an object), lift/pick up.", "From 1073.0s to 1074.0s, [0.467, 0.226, 0.817, 0.984]->[0.476, 0.283, 0.926, 0.987]: bend/bow (at the waist), carry/hold (an object).", "From 1071.0s to 1072.0s, [0.48, 0.077, 0.744, 0.985]->[0.549, 0.101, 0.85, 0.946]: stand, carry/hold (an object).", "At 1070.0s, [0.506, 0.093, 0.799, 0.98]: stand, carry/hold (an object), close (e.g., a door, a box).", "At 1069.0s, [0.493, 0.092, 0.773, 0.976]: stand, carry/hold (an object).", "At 1068.0s, [0.481, 0.154, 0.805, 0.974]: bend/bow (at the waist), carry/hold (an object)."]}]}, {"video": "UgZFdrNT6W0.mkv", "clip": [1076, 1113], "conversations": [{"from": "human", "timestamps": 1106.0, "value": "What actions is the person in bbox [0.521, 0.056, 0.757, 0.743] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.521, 0.056, 0.757, 0.743]: walk, carry/hold (an object).", "At 1105.0s, [0.448, 0.119, 0.668, 0.692]: walk, carry/hold (an object).", "At 1104.0s, [0.53, 0.185, 0.682, 0.468]: bend/bow (at the waist)."]}, {"from": "human", "timestamps": 1113.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.085, 0.342, 0.315, 0.929]: carry/hold (an object), talk to (e.g., self, a person, a group), walk."}]}, {"video": "UgZFdrNT6W0.mkv", "clip": [1138, 1181], "conversations": [{"from": "human", "timestamps": 1168.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.256, 0.21, 0.397, 0.688]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1181.0, "value": "Describe the actions of the person at bbox [0.11, 0.363, 0.42, 0.962] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.11, 0.363, 0.42, 0.962]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1180.0s, [0.1, 0.367, 0.394, 0.95]: sit, carry/hold (an object).", "At 1179.0s, [0.116, 0.359, 0.397, 0.979]: sit, carry/hold (an object), watch (a person).", "At 1178.0s, [0.134, 0.36, 0.334, 0.842]: sit, touch (an object), watch (a person)."]}]}, {"video": "UgZFdrNT6W0.mkv", "clip": [1123, 1214], "conversations": [{"from": "human", "timestamps": 1153.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.142, 0.236, 0.272, 0.608]: carry/hold (an object), pull (an object), walk."}, {"from": "human", "timestamps": 1214.0, "value": "Can you tell me 
the current and past actions of the person located in bbox [0.2, 0.101, 0.711, 0.989]?"}, {"from": "gpt", "value": ["In the current frame, [0.2, 0.101, 0.711, 0.989]: stand, carry/hold (an object).", "At 1213.0s, [0.148, 0.071, 0.631, 0.991]: walk, carry/hold (an object).", "At 1212.0s, [0.055, 0.135, 0.534, 0.992]: stand, carry/hold (an object), hand shake."]}]}, {"video": "UgZFdrNT6W0.mkv", "clip": [1256, 1360], "conversations": [{"from": "human", "timestamps": 1286.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.035, 0.028, 0.24, 0.529]: listen to (a person), stand, watch (a person); [0.258, 0.213, 0.426, 0.872]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.465, 0.232, 0.665, 0.856]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1360.0, "value": "What actions is the person in bbox [0.492, 0.145, 0.878, 0.987] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.492, 0.145, 0.878, 0.987]: walk.", "From 1357.0s to 1359.0s, [0.318, 0.101, 0.765, 0.995]->[0.411, 0.115, 0.782, 0.988]->[0.248, 0.176, 0.763, 0.996]: walk.", "At 1356.0s, [0.221, 0.123, 0.761, 0.977]: stand, hand clap.", "From 1354.0s to 1355.0s, [0.302, 0.106, 0.717, 0.986]->[0.4, 0.085, 0.843, 0.985]: stand, carry/hold (an object)."]}]}, {"video": "UgZFdrNT6W0.mkv", "clip": [1308, 1367], "conversations": [{"from": "human", "timestamps": 1338.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.249, 0.34, 0.419, 0.575]: get up."}, {"from": "human", "timestamps": 1367.0, "value": "For the person at bbox [0.129, 0.1, 0.53, 0.658], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.129, 0.1, 0.53, 0.658]: sit.", "At 1366.0s, [0.237, 0.123, 0.709, 0.972]: sit.", "At 1365.0s, [0.205, 0.152, 0.668, 1.0]: bend/bow (at the waist).", "At 1364.0s, [0.451, 0.006, 0.808, 0.978]: walk, talk to (e.g., self, a person, a group).", "From 1357.0s to 1363.0s, [0.318, 0.101, 0.765, 0.995]->[0.411, 0.115, 0.782, 0.988]->[0.248, 0.176, 0.763, 0.996]->[0.492, 0.145, 0.878, 0.987]->[0.571, 0.037, 0.882, 0.742]->[0.521, 0.06, 0.804, 0.72]->[0.471, 0.055, 0.734, 1.0]: walk.", "At 1356.0s, [0.221, 0.123, 0.761, 0.977]: stand, hand clap.", "From 1354.0s to 1355.0s, [0.302, 0.106, 0.717, 0.986]->[0.4, 0.085, 0.843, 0.985]: stand, carry/hold (an object)."]}]}, {"video": "UgZFdrNT6W0.mkv", "clip": [1345, 1393], "conversations": [{"from": "human", "timestamps": 1375.0, "value": "Describe the actions of the person at bbox [0.289, 0.218, 0.746, 0.988] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.289, 0.218, 0.746, 0.988]: walk.", "At 1374.0s, [0.016, 0.144, 0.506, 0.999]: walk.", "At 1373.0s, [0.062, 0.155, 0.743, 0.956]: touch (an object), get up.", "From 1369.0s to 1372.0s, [0.116, 0.135, 0.507, 0.679]->[0.093, 0.137, 0.461, 0.917]->[0.124, 0.137, 0.46, 0.712]->[0.107, 0.129, 0.461, 0.947]: sit.", "At 1368.0s, [0.112, 0.124, 0.508, 0.705]: sit, watch (e.g., TV).", "From 1366.0s to 1367.0s, [0.237, 0.123, 0.709, 0.972]->[0.129, 0.1, 0.53, 0.658]: sit.", "At 1365.0s, [0.205, 0.152, 0.668, 1.0]: bend/bow (at the waist).", "At 1364.0s, [0.451, 0.006, 0.808, 0.978]: walk, talk to (e.g., self, a person, a group).", "From 1357.0s to 
1363.0s, [0.318, 0.101, 0.765, 0.995]->[0.411, 0.115, 0.782, 0.988]->[0.248, 0.176, 0.763, 0.996]->[0.492, 0.145, 0.878, 0.987]->[0.571, 0.037, 0.882, 0.742]->[0.521, 0.06, 0.804, 0.72]->[0.471, 0.055, 0.734, 1.0]: walk.", "At 1356.0s, [0.221, 0.123, 0.761, 0.977]: stand, hand clap.", "From 1354.0s to 1355.0s, [0.302, 0.106, 0.717, 0.986]->[0.4, 0.085, 0.843, 0.985]: stand, carry/hold (an object)."]}, {"from": "human", "timestamps": 1393.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.282, 0.192, 0.442, 0.76]: walk; [0.544, 0.071, 0.747, 0.867]: listen to (a person), stand, watch (a person)."}]}, {"video": "UgZFdrNT6W0.mkv", "clip": [1405, 1471], "conversations": [{"from": "human", "timestamps": 1435.0, "value": "Describe the actions of the person at bbox [0.018, 0.077, 0.824, 0.994] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.018, 0.077, 0.824, 0.994]: sit, talk to (e.g., self, a person, a group).", "At 1434.0s, [0.0, 0.065, 0.879, 0.995]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1433.0s, [0.007, 0.065, 0.919, 1.0]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1471.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.32, 0.0, 0.968, 0.966]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "UgZFdrNT6W0.mkv", "clip": [1509, 1545], "conversations": [{"from": "human", "timestamps": 1539.0, "value": "What actions is the person in bbox [0.057, 0.018, 0.846, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.057, 0.018, 0.846, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1538.0s, [0.193, 0.054, 0.888, 0.992]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1545.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.211, 0.423, 0.361, 0.643]: sit, drink."}]}, {"video": "UgZFdrNT6W0.mkv", "clip": [1504, 1602], "conversations": [{"from": "human", "timestamps": 1534.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.078, 0.101, 0.804, 0.983]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1602.0, "value": "For the person at bbox [0.06, 0.172, 0.84, 0.956], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.06, 0.172, 0.84, 0.956]: sit, carry/hold (an object).", "At 1601.0s, [0.017, 0.182, 0.846, 0.992]: sit, carry/hold (an object), drink.", "From 1599.0s to 1600.0s, [0.037, 0.152, 0.9, 1.0]->[0.046, 0.181, 0.857, 0.979]: sit, carry/hold (an object)."]}]}, {"video": "UgZFdrNT6W0.mkv", "clip": [1524, 1595], "conversations": [{"from": "human", "timestamps": 1554.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.133, 0.517, 0.469, 0.823]: bend/bow (at the waist)."}, {"from": "human", "timestamps": 1595.0, "value": "What actions did the person at bbox [0.422, 0.197, 0.605, 0.952] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.422, 
0.197, 0.605, 0.952]: stand.", "At 1594.0s, [0.416, 0.196, 0.596, 0.949]: stand.", "At 1593.0s, [0.363, 0.196, 0.615, 0.955]: stand, open (e.g., a window, a car door).", "At 1592.0s, [0.386, 0.148, 0.795, 0.997]: stand, open (e.g., a window, a car door), talk to (e.g., self, a person, a group).", "From 1583.0s to 1591.0s, [0.223, 0.083, 0.571, 0.985]->[0.273, 0.106, 0.627, 0.987]->[0.303, 0.199, 0.536, 0.979]->[0.367, 0.121, 0.601, 0.976]->[0.409, 0.139, 0.674, 0.988]->[0.411, 0.148, 0.832, 0.982]->[0.295, 0.115, 0.701, 0.981]->[0.286, 0.094, 0.718, 0.98]->[0.421, 0.139, 0.659, 0.965]: stand, talk to (e.g., self, a person, a group).", "From 1580.0s to 1582.0s, [0.255, 0.074, 0.58, 0.995]->[0.347, 0.091, 0.661, 0.988]->[0.394, 0.126, 0.626, 0.987]: stand.", "From 1575.0s to 1579.0s, [0.217, 0.146, 0.508, 0.778]->[0.276, 0.18, 0.59, 0.783]->[0.268, 0.172, 0.603, 0.983]->[0.351, 0.112, 0.62, 0.937]->[0.3, 0.124, 0.573, 0.977]: walk, talk to (e.g., self, a person, a group).", "At 1574.0s, [0.231, 0.144, 0.559, 0.753]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1573.0s, [0.344, 0.227, 0.749, 0.662]: crouch/kneel, touch (an object), get up, talk to (e.g., self, a person, a group).", "At 1572.0s, [0.434, 0.153, 0.771, 0.462]: bend/bow (at the waist), touch (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "UgZFdrNT6W0.mkv", "clip": [1630, 1668], "conversations": [{"from": "human", "timestamps": 1660.0, "value": "For the person at bbox [0.572, 0.168, 0.978, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.572, 0.168, 0.978, 1.0]: stand.", "At 1659.0s, [0.612, 0.176, 0.984, 0.999]: stand, listen to (a person).", "From 1650.0s to 1658.0s, [0.709, 0.191, 0.982, 0.992]->[0.689, 0.185, 0.988, 0.987]->[0.678, 0.171, 0.985, 0.988]->[0.675, 0.173, 0.986, 0.985]->[0.671, 0.153, 0.987, 0.986]->[0.674, 0.17, 0.988, 0.989]->[0.663, 0.177, 0.986, 0.979]->[0.672, 0.142, 0.987, 0.984]->[0.66, 0.138, 0.985, 0.987]: stand, listen to (a person), watch (a person).", "From 1648.0s to 1649.0s, [0.717, 0.208, 0.984, 0.992]->[0.732, 0.188, 0.987, 0.988]: stand.", "From 1641.0s to 1647.0s, [0.748, 0.114, 0.991, 1.0]->[0.708, 0.103, 0.985, 0.981]->[0.718, 0.111, 0.991, 0.995]->[0.716, 0.122, 0.984, 0.991]->[0.759, 0.142, 0.988, 0.984]->[0.757, 0.161, 0.985, 0.989]->[0.743, 0.257, 0.986, 0.995]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1668.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.243, 0.104, 0.984, 0.999]: stand."}]}, {"video": "UgZFdrNT6W0.mkv", "clip": [1622, 1684], "conversations": [{"from": "human", "timestamps": 1652.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.156, 0.285, 0.308, 0.915]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.678, 0.171, 0.985, 0.988]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1684.0, "value": "What actions is the person in bbox [0.113, 0.104, 0.883, 0.984] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.113, 0.104, 0.883, 0.984]: sit, carry/hold (an object), eat.", "At 1683.0s, [0.097, 0.104, 0.832, 0.983]: sit, carry/hold (an object), eat, watch (e.g., TV)."]}]}, {"video": 
"UgZFdrNT6W0.mkv", "clip": [1717, 1752], "conversations": [{"from": "human", "timestamps": 1747.0, "value": "What actions is the person in bbox [0.519, 0.066, 0.932, 0.979] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.519, 0.066, 0.932, 0.979]: stand, listen to (a person), watch (a person).", "At 1746.0s, [0.567, 0.124, 0.947, 0.996]: stand, listen to (a person), watch (a person).", "At 1745.0s, [0.58, 0.181, 0.962, 0.997]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1743.0s to 1744.0s, [0.574, 0.209, 0.92, 0.985]->[0.579, 0.187, 0.952, 0.999]: stand, listen to (a person), watch (a person).", "At 1742.0s, [0.58, 0.227, 0.911, 0.993]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1741.0s, [0.588, 0.233, 0.936, 0.988]: stand, carry/hold (an object), listen to (a person)."]}, {"from": "human", "timestamps": 1752.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.146, 0.086, 0.847, 0.995]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "UsLnxI_zGpY.mkv", "clip": [884, 927], "conversations": [{"from": "human", "timestamps": 914.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.168, 0.128, 0.938, 0.994]?"}, {"from": "gpt", "value": ["At Now, [0.168, 0.128, 0.938, 0.994]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 912.0s to 913.0s, [0.685, 0.231, 0.964, 0.831]->[0.615, 0.206, 0.97, 0.883]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 927.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.147, 0.096, 0.954, 1.0]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "UsLnxI_zGpY.mkv", "clip": [1151, 1248], "conversations": [{"from": "human", "timestamps": 1181.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.132, 0.398, 0.333, 0.712]: listen to (a person), sit; [0.337, 0.385, 0.57, 0.682]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.614, 0.393, 0.773, 0.643]: listen to (a person), sit."}, {"from": "human", "timestamps": 1248.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.219, 0.144, 0.461, 0.796]?"}, {"from": "gpt", "value": ["At Now, [0.219, 0.144, 0.461, 0.796]: walk, listen to (a person).", "At 1247.0s, [0.295, 0.14, 0.504, 0.726]: walk, listen to (a person).", "At 1246.0s, [0.407, 0.139, 0.626, 0.734]: stand, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1245.0s, [0.407, 0.144, 0.647, 0.707]: stand, touch (an object), listen to (a person).", "At 1244.0s, [0.398, 0.108, 0.673, 0.729]: touch (an object), get up, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1243.0s, [0.363, 0.249, 0.591, 0.617]: sit, drink, listen to (a person), talk to (e.g., self, a person, a group).", "At 1242.0s, [0.285, 0.24, 0.573, 0.649]: sit, touch (an object), listen to (a person)."]}]}, {"video": "UsLnxI_zGpY.mkv", "clip": [1178, 1254], "conversations": [{"from": "human", "timestamps": 1208.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At 
now, [0.077, 0.064, 0.911, 0.985]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1254.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.155, 0.123, 0.93, 0.98]?"}, {"from": "gpt", "value": ["In the current frame, [0.155, 0.123, 0.93, 0.98]: sit, carry/hold (an object).", "At 1253.0s, [0.128, 0.077, 0.955, 1.0]: sit."]}]}, {"video": "UsLnxI_zGpY.mkv", "clip": [1256, 1370], "conversations": [{"from": "human", "timestamps": 1286.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.249, 0.25, 0.803, 0.97]: watch (e.g., TV), sit; [0.677, 0.192, 0.999, 0.755]: watch (e.g., TV), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1370.0, "value": "For the person at bbox [0.159, 0.378, 0.469, 0.981], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.159, 0.378, 0.469, 0.981]: sit, watch (e.g., TV).", "From 1368.0s to 1369.0s, [0.168, 0.417, 0.415, 0.985]->[0.173, 0.385, 0.458, 0.972]: sit, watch (e.g., TV), listen to (a person).", "At 1367.0s, [0.17, 0.441, 0.47, 1.0]: sit, watch (e.g., TV), listen to (a person), talk to (e.g., self, a person, a group)."]}]}, {"video": "UsLnxI_zGpY.mkv", "clip": [1396, 1447], "conversations": [{"from": "human", "timestamps": 1426.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.138, 0.276, 0.447, 0.877]: watch (e.g., TV), sit, listen to (a person); [0.281, 0.209, 0.517, 0.789]: watch (e.g., TV), sit, touch (an object), listen to (a person), talk to (e.g., self, a person, a group); [0.496, 0.236, 0.699, 0.649]: watch (e.g., TV), sit, touch (an object), listen to (a person), carry/hold (an object); [0.645, 0.228, 0.897, 0.763]: watch (e.g., TV), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1447.0, "value": "For the person at bbox [0.479, 0.231, 0.794, 0.816], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.479, 0.231, 0.794, 0.816]: sit, watch (e.g., TV).", "At 1446.0s, [0.452, 0.214, 0.754, 0.84]: sit, watch (e.g., TV).", "From 1444.0s to 1445.0s, [0.467, 0.297, 0.772, 0.811]->[0.411, 0.283, 0.763, 0.879]: sit."]}]}, {"video": "UsLnxI_zGpY.mkv", "clip": [1547, 1610], "conversations": [{"from": "human", "timestamps": 1577.0, "value": "Describe the actions of the person at bbox [0.088, 0.457, 0.357, 0.981] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.088, 0.457, 0.357, 0.981]: sit.", "At 1576.0s, [0.063, 0.487, 0.274, 0.974]: sit."]}, {"from": "human", "timestamps": 1610.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.201, 0.215, 0.274, 0.48]: listen to (a person), run/jog; [0.503, 0.167, 0.58, 0.262]: ride (e.g., a bike, a car, a horse), stand, hit (an object), watch (a person); [0.928, 0.168, 0.98, 0.341]: walk."}]}, {"video": "UsLnxI_zGpY.mkv", "clip": [1614, 1701], "conversations": [{"from": "human", "timestamps": 1644.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.27, 0.37, 0.349, 0.734]: ride (e.g., a bike, a car, a horse), carry/hold (an object), stand, hit (an object), 
talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1701.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.057, 0.128, 0.808, 0.968]?"}, {"from": "gpt", "value": ["At Now, [0.057, 0.128, 0.808, 0.968]: sit, listen to (a person), watch (a person).", "At 1700.0s, [0.043, 0.005, 1.0, 0.98]: sit, listen to (a person), watch (a person)."]}]}, {"video": "UsLnxI_zGpY.mkv", "clip": [1732, 1794], "conversations": [{"from": "human", "timestamps": 1762.0, "value": "What actions did the person at bbox [0.024, 0.006, 0.895, 0.982] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.024, 0.006, 0.895, 0.982]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1761.0s, [0.009, 0.014, 0.913, 0.988]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1760.0s, [0.01, 0.032, 0.803, 0.986]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1794.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.169, 0.09, 0.948, 0.981]: watch (a person), talk to (e.g., self, a person, a group), bend/bow (at the waist)."}]}, {"video": "VRlpH1MbWUw.mp4", "clip": [930, 1020], "conversations": [{"from": "human", "timestamps": 960.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.01, 0.037, 0.796, 0.979]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1020.0, "value": "What actions is the person in bbox [0.307, 0.096, 0.988, 0.978] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.307, 0.096, 0.988, 0.978]: sit.", "At 1019.0s, [0.342, 0.133, 0.973, 0.986]: sit.", "At 1018.0s, [0.331, 0.059, 0.872, 0.967]: sit, carry/hold (an object), touch (an object).", "At 1017.0s, [0.324, 0.113, 0.883, 0.993]: sit, touch (an object).", "From 1011.0s to 1016.0s, [0.578, 0.303, 0.876, 0.903]->[0.577, 0.291, 0.87, 0.901]->[0.563, 0.25, 0.868, 0.894]->[0.563, 0.188, 0.872, 0.854]->[0.606, 0.173, 0.893, 0.842]->[0.572, 0.177, 0.898, 0.791]: sit, drink."]}]}, {"video": "VRlpH1MbWUw.mp4", "clip": [960, 1013], "conversations": [{"from": "human", "timestamps": 990.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.225, 0.035, 0.978, 0.972]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1013.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.563, 0.25, 0.868, 0.894]?"}, {"from": "gpt", "value": ["In the current frame, [0.563, 0.25, 0.868, 0.894]: sit, drink.", "From 1011.0s to 1012.0s, [0.578, 0.303, 0.876, 0.903]->[0.577, 0.291, 0.87, 0.901]: sit, drink."]}]}, {"video": "VRlpH1MbWUw.mp4", "clip": [1022, 1064], "conversations": [{"from": "human", "timestamps": 1052.0, "value": "For the person at bbox [0.102, 0.036, 0.922, 0.998], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.102, 0.036, 0.922, 0.998]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1051.0s, [0.139, 0.027, 0.991, 0.991]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1064.0, "value": "For the current frame, show each 
individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.026, 0.058, 0.754, 0.993]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "VRlpH1MbWUw.mp4", "clip": [998, 1095], "conversations": [{"from": "human", "timestamps": 1028.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.137, 0.121, 0.57, 1.0]: carry/hold (an object), sit, answer phone."}, {"from": "human", "timestamps": 1095.0, "value": "For the person at bbox [0.137, 0.126, 0.63, 0.97], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.137, 0.126, 0.63, 0.97]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1090.0s to 1094.0s, [0.116, 0.088, 0.61, 0.971]->[0.102, 0.085, 0.59, 0.971]->[0.167, 0.139, 0.62, 0.967]->[0.15, 0.116, 0.619, 0.978]->[0.151, 0.115, 0.628, 0.963]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1087.0s to 1089.0s, [0.095, 0.08, 0.59, 0.984]->[0.096, 0.087, 0.591, 0.983]->[0.112, 0.093, 0.612, 0.975]: sit, listen to (a person), watch (a person)."]}]}, {"video": "VRlpH1MbWUw.mp4", "clip": [1064, 1126], "conversations": [{"from": "human", "timestamps": 1094.0, "value": "What actions did the person at bbox [0.151, 0.115, 0.628, 0.963] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.151, 0.115, 0.628, 0.963]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1090.0s to 1093.0s, [0.116, 0.088, 0.61, 0.971]->[0.102, 0.085, 0.59, 0.971]->[0.167, 0.139, 0.62, 0.967]->[0.15, 0.116, 0.619, 0.978]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1087.0s to 1089.0s, [0.095, 0.08, 0.59, 0.984]->[0.096, 0.087, 0.591, 0.983]->[0.112, 0.093, 0.612, 0.975]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1126.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.144, 0.035, 1.0, 0.973]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "VRlpH1MbWUw.mp4", "clip": [1122, 1195], "conversations": [{"from": "human", "timestamps": 1152.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.188, 0.034, 1.0, 0.991]: sit."}, {"from": "human", "timestamps": 1195.0, "value": "What actions is the person in bbox [0.312, 0.098, 0.728, 0.939] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.312, 0.098, 0.728, 0.939]: stand, talk to (e.g., self, a person, a group).", "From 1185.0s to 1194.0s, [0.334, 0.115, 0.714, 0.939]->[0.309, 0.118, 0.716, 0.908]->[0.315, 0.117, 0.709, 0.943]->[0.331, 0.11, 0.712, 0.946]->[0.313, 0.108, 0.716, 0.947]->[0.313, 0.111, 0.726, 0.932]->[0.316, 0.115, 0.727, 0.942]->[0.312, 0.108, 0.714, 0.955]->[0.299, 0.106, 0.718, 0.938]->[0.317, 0.117, 0.721, 0.91]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "VRlpH1MbWUw.mp4", "clip": [1162, 1250], "conversations": [{"from": "human", "timestamps": 1192.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.312, 0.108, 0.714, 0.955]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1250.0, "value": "What actions did the 
person at bbox [0.18, 0.601, 0.225, 0.745] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.18, 0.601, 0.225, 0.745]: walk, carry/hold (an object).", "From 1248.0s to 1249.0s, [0.354, 0.601, 0.4, 0.761]->[0.241, 0.602, 0.288, 0.751]: walk, carry/hold (an object)."]}]}, {"video": "VRlpH1MbWUw.mp4", "clip": [1254, 1328], "conversations": [{"from": "human", "timestamps": 1284.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.384, 0.137, 0.886, 0.765]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1328.0, "value": "Describe the actions of the person at bbox [0.158, 0.131, 0.996, 0.929] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.158, 0.131, 0.996, 0.929]: sit, touch (an object), listen to (a person), watch (a person).", "From 1326.0s to 1327.0s, [0.325, 0.335, 0.8, 0.859]->[0.32, 0.162, 0.96, 0.872]: sit, touch (an object).", "From 1323.0s to 1325.0s, [0.174, 0.302, 0.457, 0.905]->[0.362, 0.287, 0.635, 0.904]->[0.537, 0.255, 0.827, 0.906]: sit, carry/hold (an object)."]}]}, {"video": "VRlpH1MbWUw.mp4", "clip": [1362, 1417], "conversations": [{"from": "human", "timestamps": 1392.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.211, 0.516, 0.287, 0.823]: carry/hold (an object), walk; [0.224, 0.365, 0.412, 0.849]: carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), walk; [0.46, 0.407, 0.585, 0.846]: talk to (e.g., self, a person, a group), walk; [0.645, 0.515, 0.759, 0.822]: walk; [0.743, 0.342, 0.911, 0.852]: push (an object), walk."}, {"from": "human", "timestamps": 1417.0, "value": "For the person at bbox [0.347, 0.116, 0.712, 0.952], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.347, 0.116, 0.712, 0.952]: stand, talk to (e.g., self, a person, a group).", "From 1411.0s to 1416.0s, [0.329, 0.113, 0.717, 0.95]->[0.345, 0.114, 0.711, 0.953]->[0.333, 0.122, 0.738, 0.945]->[0.338, 0.111, 0.729, 0.973]->[0.341, 0.112, 0.726, 0.966]->[0.336, 0.126, 0.719, 0.972]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "VRlpH1MbWUw.mp4", "clip": [1401, 1457], "conversations": [{"from": "human", "timestamps": 1431.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.03, 0.233, 0.261, 0.841]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.604, 0.203, 0.967, 0.836]: listen to (a person), sit, touch (an object), watch (a person)."}, {"from": "human", "timestamps": 1457.0, "value": "What actions did the person at bbox [0.179, 0.03, 0.99, 0.975] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.179, 0.03, 0.99, 0.975]: sit, stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1456.0s, [0.188, 0.033, 1.0, 0.983]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1455.0s, [0.174, 0.033, 0.992, 0.98]: sit, stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1454.0s, [0.167, 0.031, 0.998, 0.995]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "VRlpH1MbWUw.mp4", "clip": [1452, 1485], "conversations": [{"from": "human", "timestamps": 1482.0, "value": "What actions is the person in bbox [0.032, 
0.052, 0.723, 0.991] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.032, 0.052, 0.723, 0.991]: sit, drink, talk to (e.g., self, a person, a group).", "From 1479.0s to 1481.0s, [0.013, 0.038, 0.763, 0.994]->[0.017, 0.052, 0.723, 0.99]->[0.001, 0.062, 0.752, 0.991]: sit, drink, talk to (e.g., self, a person, a group).", "At 1478.0s, [0.03, 0.045, 0.684, 0.971]: sit, drink, listen to (a person)."]}, {"from": "human", "timestamps": 1485.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.114, 0.046, 1.0, 0.999]: sit, drink, listen to (a person), watch (a person), stand."}]}, {"video": "VRlpH1MbWUw.mp4", "clip": [1481, 1525], "conversations": [{"from": "human", "timestamps": 1511.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.038, 0.561, 0.981]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1525.0, "value": "Describe the actions of the person at bbox [0.021, 0.059, 0.621, 0.984] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.021, 0.059, 0.621, 0.984]: stand, talk to (e.g., self, a person, a group).", "At 1524.0s, [0.071, 0.059, 0.626, 0.977]: stand, talk to (e.g., self, a person, a group).", "At 1523.0s, [0.017, 0.059, 0.613, 0.976]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "VRlpH1MbWUw.mp4", "clip": [1480, 1585], "conversations": [{"from": "human", "timestamps": 1510.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.051, 0.039, 0.552, 0.985]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1585.0, "value": "What actions did the person at bbox [0.023, 0.099, 0.972, 0.996] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.023, 0.099, 0.972, 0.996]: lie/sleep.", "From 1583.0s to 1584.0s, [0.009, 0.083, 0.992, 0.997]->[0.022, 0.079, 0.99, 1.0]: lie/sleep."]}]}, {"video": "VRlpH1MbWUw.mp4", "clip": [1538, 1609], "conversations": [{"from": "human", "timestamps": 1568.0, "value": "Describe the actions of the person at bbox [0.103, 0.029, 0.855, 0.979] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.103, 0.029, 0.855, 0.979]: sit.", "From 1561.0s to 1567.0s, [0.133, 0.027, 0.883, 0.981]->[0.132, 0.034, 0.843, 0.98]->[0.113, 0.037, 0.845, 0.984]->[0.108, 0.032, 0.863, 0.987]->[0.084, 0.031, 0.854, 0.979]->[0.106, 0.031, 0.866, 0.985]->[0.113, 0.026, 0.867, 0.981]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1609.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.016, 0.018, 0.938, 0.989]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "VRlpH1MbWUw.mp4", "clip": [1589, 1636], "conversations": [{"from": "human", "timestamps": 1619.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.007, 0.009, 0.718, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1636.0, "value": "Describe the actions of the 
person at bbox [0.058, 0.025, 0.754, 0.983] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.058, 0.025, 0.754, 0.983]: sit, hand clap, watch (a person).", "From 1634.0s to 1635.0s, [0.012, 0.213, 0.301, 0.93]->[0.029, 0.058, 0.747, 0.984]: sit, hand clap, sing to (e.g., self, a person, a group), watch (a person).", "At 1633.0s, [0.014, 0.201, 0.299, 0.927]: sit, carry/hold (an object), hand clap, talk to (e.g., self, a person, a group), watch (a person).", "From 1631.0s to 1632.0s, [0.017, 0.228, 0.301, 0.949]->[0.018, 0.262, 0.299, 0.936]: sit, hand clap, sing to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "VRlpH1MbWUw.mp4", "clip": [1604, 1693], "conversations": [{"from": "human", "timestamps": 1634.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.012, 0.213, 0.301, 0.93]: watch (a person), sit, hand clap, sing to (e.g., self, a person, a group); [0.366, 0.333, 0.495, 0.597]: listen to (a person), sit, hand clap, watch (a person); [0.566, 0.227, 0.814, 0.961]: listen to (a person), sit, watch (a person); [0.762, 0.225, 0.96, 0.687]: watch (a person), sit, hand clap, sing to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1693.0, "value": "What actions did the person at bbox [0.267, 0.007, 1.0, 0.976] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.267, 0.007, 1.0, 0.976]: stand, carry/hold (an object).", "From 1688.0s to 1692.0s, [0.302, 0.014, 1.0, 0.975]->[0.305, 0.011, 0.996, 0.98]->[0.275, 0.018, 0.999, 0.998]->[0.259, 0.025, 1.0, 0.978]->[0.27, 0.015, 1.0, 0.967]: stand, carry/hold (an object).", "From 1684.0s to 1687.0s, [0.281, 0.081, 0.98, 0.949]->[0.437, 0.093, 0.982, 0.953]->[0.354, 0.126, 0.963, 0.957]->[0.288, 0.017, 1.0, 0.983]: sit, carry/hold (an object)."]}]}, {"video": "VRlpH1MbWUw.mp4", "clip": [1676, 1717], "conversations": [{"from": "human", "timestamps": 1706.0, "value": "What actions did the person at bbox [0.128, 0.001, 1.0, 0.98] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.128, 0.001, 1.0, 0.98]: stand.", "From 1704.0s to 1705.0s, [0.141, 0.008, 1.0, 0.975]->[0.142, 0.002, 1.0, 0.987]: stand.", "At 1703.0s, [0.169, 0.0, 1.0, 0.98]: sit, stand.", "From 1695.0s to 1702.0s, [0.275, 0.003, 1.0, 0.989]->[0.266, 0.0, 1.0, 0.98]->[0.24, 0.0, 0.997, 0.985]->[0.242, 0.0, 0.992, 0.986]->[0.215, 0.0, 0.997, 0.98]->[0.187, 0.008, 1.0, 0.995]->[0.18, 0.007, 1.0, 0.997]->[0.172, 0.001, 1.0, 0.984]: stand.", "At 1694.0s, [0.281, 0.004, 1.0, 0.985]: sit, stand, carry/hold (an object).", "From 1688.0s to 1693.0s, [0.302, 0.014, 1.0, 0.975]->[0.305, 0.011, 0.996, 0.98]->[0.275, 0.018, 0.999, 0.998]->[0.259, 0.025, 1.0, 0.978]->[0.27, 0.015, 1.0, 0.967]->[0.267, 0.007, 1.0, 0.976]: stand, carry/hold (an object).", "From 1684.0s to 1687.0s, [0.281, 0.081, 0.98, 0.949]->[0.437, 0.093, 0.982, 0.953]->[0.354, 0.126, 0.963, 0.957]->[0.288, 0.017, 1.0, 0.983]: sit, carry/hold (an object)."]}, {"from": "human", "timestamps": 1717.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.207, 0.021, 0.995, 0.982]: carry/hold (an object), sit."}]}, {"video": "VRlpH1MbWUw.mp4", "clip": [1723, 1796], "conversations": [{"from": "human", "timestamps": 1753.0, "value": "Describe the actions of the person at bbox [0.179, 0.124, 0.335, 0.698] in the current frame and the 
previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.179, 0.124, 0.335, 0.698]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1752.0s, [0.189, 0.12, 0.333, 0.689]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1751.0s, [0.052, 0.18, 0.325, 0.737]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1796.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.065, 0.04, 0.667, 0.983]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "Vmef_8MY46w.mkv", "clip": [882, 934], "conversations": [{"from": "human", "timestamps": 912.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.001, 0.127, 0.135, 0.892]: carry/hold (an object), walk; [0.162, 0.311, 0.517, 0.913]: watch (a person), sit, smoke, talk to (e.g., self, a person, a group); [0.378, 0.357, 0.557, 0.703]: listen to (a person), sit; [0.555, 0.119, 0.851, 0.908]: smoke, walk."}, {"from": "human", "timestamps": 934.0, "value": "For the person at bbox [0.383, 0.147, 0.945, 0.919], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.383, 0.147, 0.945, 0.919]: stand, carry/hold (an object), smoke.", "At 933.0s, [0.243, 0.137, 0.893, 0.912]: stand, carry/hold (an object), smoke."]}]}, {"video": "Vmef_8MY46w.mkv", "clip": [903, 1020], "conversations": [{"from": "human", "timestamps": 933.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.243, 0.137, 0.893, 0.912]: carry/hold (an object), stand, smoke."}, {"from": "human", "timestamps": 1020.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.111, 0.112, 0.453, 0.969]?"}, {"from": "gpt", "value": ["In the current frame, [0.111, 0.112, 0.453, 0.969]: stand, listen to (a person), watch (a person).", "From 1015.0s to 1019.0s, [0.0, 0.223, 0.372, 0.931]->[0.0, 0.239, 0.385, 0.941]->[0.0, 0.235, 0.382, 0.927]->[0.005, 0.249, 0.369, 0.932]->[0.153, 0.152, 0.44, 0.913]: stand, listen to (a person), watch (a person).", "At 1014.0s, [0.0, 0.241, 0.379, 0.936]: stand, listen to (a person)."]}]}, {"video": "Vmef_8MY46w.mkv", "clip": [952, 1017], "conversations": [{"from": "human", "timestamps": 982.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.006, 0.254, 0.199, 0.874]: listen to (a person), sit, watch (a person); [0.188, 0.256, 0.345, 0.569]: listen to (a person), sit, watch (a person); [0.216, 0.106, 0.723, 0.906]: sit."}, {"from": "human", "timestamps": 1017.0, "value": "What actions is the person in bbox [0.0, 0.235, 0.382, 0.927] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.235, 0.382, 0.927]: stand, listen to (a person), watch (a person).", "From 1015.0s to 1016.0s, [0.0, 0.223, 0.372, 0.931]->[0.0, 0.239, 0.385, 0.941]: stand, listen to (a person), watch (a person).", "At 1014.0s, [0.0, 0.241, 0.379, 0.936]: stand, listen to (a person)."]}]}, {"video": "Vmef_8MY46w.mkv", "clip": [996, 1101], "conversations": [{"from": "human", "timestamps": 1026.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.068, 0.09, 0.483, 0.919]: 
listen to (a person), stand, watch (a person); [0.409, 0.324, 0.912, 0.914]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1101.0, "value": "For the person at bbox [0.502, 0.454, 0.662, 0.907], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.502, 0.454, 0.662, 0.907]: stand, grab (a person), watch (a person).", "At 1100.0s, [0.499, 0.43, 0.604, 0.89]: walk, grab (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1099.0s, [0.437, 0.46, 0.542, 0.894]: walk, grab (a person), listen to (a person), watch (a person).", "At 1098.0s, [0.431, 0.485, 0.54, 0.88]: walk, listen to (a person), watch (a person).", "At 1097.0s, [0.431, 0.489, 0.544, 0.863]: walk, grab (a person), listen to (a person).", "At 1096.0s, [0.397, 0.499, 0.483, 0.85]: walk, grab (a person)."]}]}, {"video": "Vmef_8MY46w.mkv", "clip": [1119, 1167], "conversations": [{"from": "human", "timestamps": 1149.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.524, 0.112, 0.903, 0.945]?"}, {"from": "gpt", "value": ["At Now, [0.524, 0.112, 0.903, 0.945]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1146.0s to 1148.0s, [0.581, 0.088, 0.994, 0.931]->[0.541, 0.073, 0.999, 0.945]->[0.521, 0.13, 0.92, 0.933]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1145.0s, [0.557, 0.202, 0.906, 0.906]: stand, close (e.g., a door, a box), talk to (e.g., self, a person, a group), watch (a person).", "At 1144.0s, [0.423, 0.234, 0.7, 0.911]: stand, carry/hold (an object), close (e.g., a door, a box)."]}, {"from": "human", "timestamps": 1167.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.213, 0.374, 0.437, 0.921]: watch (a person), stand, close (e.g., a door, a box); [0.361, 0.235, 0.762, 0.912]: listen to (a person), stand; [0.619, 0.121, 0.897, 0.782]: stand."}]}, {"video": "Vmef_8MY46w.mkv", "clip": [1126, 1207], "conversations": [{"from": "human", "timestamps": 1156.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.006, 0.086, 0.583, 0.946]: stand."}, {"from": "human", "timestamps": 1207.0, "value": "For the person at bbox [0.355, 0.142, 0.989, 0.913], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.355, 0.142, 0.989, 0.913]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1205.0s to 1206.0s, [0.389, 0.142, 0.968, 0.922]->[0.337, 0.142, 0.96, 0.928]: stand, grab (a person), talk to (e.g., self, a person, a group).", "At 1204.0s, [0.364, 0.124, 0.968, 0.921]: stand.", "At 1203.0s, [0.36, 0.133, 0.955, 0.936]: stand, listen to (a person).", "At 1202.0s, [0.337, 0.131, 0.959, 0.918]: stand, listen to (a person), watch (a person)."]}]}, {"video": "Vmef_8MY46w.mkv", "clip": [1285, 1373], "conversations": [{"from": "human", "timestamps": 1315.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.226, 0.238, 0.53, 0.911]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1373.0, "value": "For the person at bbox [0.6, 0.136, 0.989, 0.934], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, 
[0.6, 0.136, 0.989, 0.934]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1372.0s, [0.581, 0.119, 0.993, 0.936]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1369.0s to 1371.0s, [0.132, 0.057, 0.888, 0.951]->[0.174, 0.034, 0.94, 0.939]->[0.204, 0.046, 0.916, 0.933]: stand, talk to (e.g., self, a person, a group).", "At 1368.0s, [0.109, 0.092, 0.9, 0.944]: stand.", "At 1367.0s, [0.122, 0.063, 0.914, 0.965]: stand, watch (a person).", "At 1366.0s, [0.155, 0.06, 0.93, 0.949]: stand, listen to (a person), watch (a person)."]}]}, {"video": "Vmef_8MY46w.mkv", "clip": [1307, 1367], "conversations": [{"from": "human", "timestamps": 1337.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.134, 0.52, 0.94]: listen to (a person), stand, watch (a person); [0.383, 0.359, 0.601, 0.92]: listen to (a person), stand, watch (a person); [0.506, 0.165, 0.995, 0.928]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1367.0, "value": "What actions did the person at bbox [0.122, 0.063, 0.914, 0.965] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.122, 0.063, 0.914, 0.965]: stand, watch (a person).", "At 1366.0s, [0.155, 0.06, 0.93, 0.949]: stand, listen to (a person), watch (a person)."]}]}, {"video": "Vmef_8MY46w.mkv", "clip": [1372, 1438], "conversations": [{"from": "human", "timestamps": 1402.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.225, 0.083, 1.0, 0.924]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1438.0, "value": "For the person at bbox [0.406, 0.07, 0.953, 0.936], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.406, 0.07, 0.953, 0.936]: stand, talk to (e.g., self, a person, a group).", "At 1437.0s, [0.421, 0.083, 0.983, 0.939]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1436.0s, [0.428, 0.087, 0.958, 0.926]: stand."]}]}, {"video": "Vmef_8MY46w.mkv", "clip": [1376, 1457], "conversations": [{"from": "human", "timestamps": 1406.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.092, 0.439, 0.925]: stand, talk to (e.g., self, a person, a group); [0.348, 0.294, 0.634, 0.916]: listen to (a person), stand, watch (a person); [0.59, 0.196, 0.993, 0.954]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1457.0, "value": "What actions is the person in bbox [0.661, 0.346, 0.778, 0.486] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.661, 0.346, 0.778, 0.486]: sit, listen to (a person), watch (a person).", "At 1456.0s, [0.701, 0.329, 0.822, 0.479]: sit, play musical instrument, listen to (a person).", "From 1454.0s to 1455.0s, [0.862, 0.244, 0.999, 0.435]->[0.759, 0.301, 0.89, 0.472]: sit, play musical instrument."]}]}, {"video": "Vmef_8MY46w.mkv", "clip": [1522, 1607], "conversations": [{"from": "human", "timestamps": 1552.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.12, 0.293, 0.409, 0.915]: sit; [0.343, 0.296, 0.776, 0.921]: 
stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1607.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.392, 0.176, 0.917, 0.922]?"}, {"from": "gpt", "value": ["In the current frame, [0.392, 0.176, 0.917, 0.922]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1606.0s, [0.475, 0.23, 0.956, 0.938]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1605.0s, [0.505, 0.33, 0.891, 0.932]: sit, carry/hold (an object), listen to (a person).", "At 1604.0s, [0.486, 0.28, 0.977, 0.916]: sit, carry/hold (an object), touch (an object), listen to (a person).", "From 1601.0s to 1603.0s, [0.501, 0.27, 0.857, 0.84]->[0.423, 0.268, 0.843, 0.843]->[0.442, 0.287, 0.91, 0.896]: sit, carry/hold (an object), listen to (a person).", "At 1600.0s, [0.477, 0.267, 0.867, 0.859]: sit, carry/hold (an object), write, listen to (a person), talk to (e.g., self, a person, a group).", "At 1599.0s, [0.466, 0.257, 0.869, 0.868]: sit, carry/hold (an object), write, talk to (e.g., self, a person, a group).", "From 1597.0s to 1598.0s, [0.424, 0.242, 0.875, 0.847]->[0.502, 0.256, 0.864, 0.846]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1596.0s, [0.5, 0.245, 0.872, 0.856]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1595.0s, [0.518, 0.257, 0.865, 0.845]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "From 1592.0s to 1594.0s, [0.449, 0.257, 0.873, 0.856]->[0.448, 0.256, 0.861, 0.853]->[0.484, 0.232, 0.864, 0.841]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1591.0s, [0.457, 0.271, 0.861, 0.853]: sit, carry/hold (an object), write, talk to (e.g., self, a person, a group).", "At 1590.0s, [0.461, 0.275, 0.865, 0.855]: sit, write, talk to (e.g., self, a person, a group)."]}]}, {"video": "Vmef_8MY46w.mkv", "clip": [1545, 1592], "conversations": [{"from": "human", "timestamps": 1575.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.556, 0.056, 0.919, 0.929]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1592.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.346, 0.132, 0.577, 0.735]?"}, {"from": "gpt", "value": ["At Now, [0.346, 0.132, 0.577, 0.735]: stand, listen to (a person), watch (a person).", "From 1590.0s to 1591.0s, [0.313, 0.13, 0.574, 0.718]->[0.348, 0.136, 0.577, 0.72]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1588.0s to 1589.0s, [0.335, 0.177, 0.591, 0.708]->[0.339, 0.132, 0.58, 0.714]: stand, watch (a person)."]}]}, {"video": "Vmef_8MY46w.mkv", "clip": [1608, 1645], "conversations": [{"from": "human", "timestamps": 1638.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.008, 0.101, 0.571, 0.933]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1645.0, "value": "What actions did the person at bbox [0.0, 0.103, 0.607, 0.957] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.103, 0.607, 0.957]: stand.", "At 1644.0s, [0.0, 0.102, 0.617, 0.944]: stand."]}]}, {"video": "Vmef_8MY46w.mkv", "clip": [1645, 1688], "conversations": [{"from": "human", "timestamps": 1675.0, "value": "For the 
current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.175, 0.543, 0.243, 0.859]: carry/hold (an object), stand, listen to (a person); [0.246, 0.54, 0.31, 0.84]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1688.0, "value": "What actions did the person at bbox [0.314, 0.581, 0.362, 0.748] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.314, 0.581, 0.362, 0.748]: walk, carry/hold (an object).", "At 1687.0s, [0.316, 0.577, 0.361, 0.76]: walk, carry/hold (an object)."]}]}, {"video": "Vmef_8MY46w.mkv", "clip": [1668, 1725], "conversations": [{"from": "human", "timestamps": 1698.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.0, 0.285, 0.468, 0.922]: smoke, bend/bow (at the waist); [0.347, 0.319, 0.626, 0.909]: watch (a person), stand; [0.6, 0.128, 0.997, 0.906]: watch (a person), stand, smoke."}, {"from": "human", "timestamps": 1725.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.405, 0.101, 0.967, 0.911]?"}, {"from": "gpt", "value": ["In the current frame, [0.405, 0.101, 0.967, 0.911]: stand, carry/hold (an object), smoke.", "At 1724.0s, [0.068, 0.109, 0.995, 0.944]: stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "Vmef_8MY46w.mkv", "clip": [1739, 1786], "conversations": [{"from": "human", "timestamps": 1769.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.008, 0.108, 0.583, 0.915]: carry/hold (an object), stand, listen to (a person); [0.489, 0.215, 0.81, 0.921]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1786.0, "value": "What actions is the person in bbox [0.0, 0.511, 0.206, 0.917] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.511, 0.206, 0.917]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1785.0s, [0.0, 0.504, 0.205, 0.91]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1783.0s to 1784.0s, [0.448, 0.112, 0.884, 0.894]->[0.388, 0.09, 0.907, 0.917]: stand, carry/hold (an object), listen to (a person).", "From 1780.0s to 1782.0s, [0.365, 0.079, 0.87, 0.928]->[0.431, 0.089, 0.854, 0.903]->[0.348, 0.075, 0.871, 0.909]: stand, carry/hold (an object)."]}]}, {"video": "VsYPP2I0aUQ.mkv", "clip": [908, 972], "conversations": [{"from": "human", "timestamps": 938.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.304, 0.317, 0.476, 0.981]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.449, 0.259, 0.62, 0.979]: watch (a person), stand."}, {"from": "human", "timestamps": 972.0, "value": "What actions is the person in bbox [0.26, 0.141, 0.692, 0.978] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.26, 0.141, 0.692, 0.978]: stand, talk to (e.g., self, a person, a group).", "From 970.0s to 971.0s, [0.193, 0.128, 0.592, 0.987]->[0.244, 0.103, 0.693, 0.979]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 969.0s, [0.0, 0.098, 0.201, 0.981]: stand, listen to (a person), watch (a person).", "From 967.0s to 968.0s, 
[0.003, 0.146, 0.264, 1.0]->[0.001, 0.124, 0.229, 0.996]: stand, listen to (a person)."]}]}, {"video": "VsYPP2I0aUQ.mkv", "clip": [957, 1019], "conversations": [{"from": "human", "timestamps": 987.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.116, 0.141, 0.424, 0.982]: listen to (a person), stand, watch (a person); [0.471, 0.18, 0.826, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.814, 0.032, 0.998, 0.98]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1019.0, "value": "Describe the actions of the person at bbox [0.68, 0.144, 0.857, 0.988] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.68, 0.144, 0.857, 0.988]: stand, listen to (a person), watch (a person).", "From 1015.0s to 1018.0s, [0.677, 0.136, 0.863, 0.978]->[0.666, 0.147, 0.851, 0.981]->[0.655, 0.14, 0.829, 0.987]->[0.675, 0.148, 0.862, 0.984]: stand, listen to (a person).", "From 1012.0s to 1014.0s, [0.684, 0.14, 0.871, 0.984]->[0.681, 0.137, 0.868, 0.984]->[0.679, 0.138, 0.868, 0.982]: stand, listen to (a person), watch (a person)."]}]}, {"video": "VsYPP2I0aUQ.mkv", "clip": [995, 1036], "conversations": [{"from": "human", "timestamps": 1025.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.002, 0.142, 0.201, 0.975]: listen to (a person), stand; [0.174, 0.088, 0.377, 0.98]: listen to (a person), stand, watch (a person); [0.372, 0.149, 0.578, 0.972]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.701, 0.125, 0.883, 0.987]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1036.0, "value": "What actions is the person in bbox [0.255, 0.202, 0.494, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.255, 0.202, 0.494, 1.0]: stand, listen to (a person), watch (a person).", "From 1034.0s to 1035.0s, [0.336, 0.123, 0.554, 0.978]->[0.307, 0.132, 0.553, 0.988]: stand, listen to (a person), watch (a person).", "At 1033.0s, [0.318, 0.112, 0.594, 0.983]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1032.0s, [0.331, 0.126, 0.59, 0.98]: stand, listen to (a person).", "From 1029.0s to 1031.0s, [0.317, 0.134, 0.588, 0.981]->[0.327, 0.105, 0.596, 0.987]->[0.312, 0.109, 0.592, 0.989]: stand, listen to (a person), watch (a person)."]}]}, {"video": "VsYPP2I0aUQ.mkv", "clip": [1057, 1124], "conversations": [{"from": "human", "timestamps": 1087.0, "value": "What actions is the person in bbox [0.128, 0.067, 0.651, 0.997] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.128, 0.067, 0.651, 0.997]: stand, listen to (a person), watch (a person).", "At 1086.0s, [0.412, 0.23, 0.668, 0.974]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1124.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.342, 0.069, 0.768, 0.985]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "VsYPP2I0aUQ.mkv", "clip": [1131, 1196], "conversations": [{"from": "human", "timestamps": 1161.0, "value": "Describe the actions of the person at bbox [0.257, 0.029, 0.573, 0.984] in the current frame and the 
previous frames."}, {"from": "gpt", "value": ["At Now, [0.257, 0.029, 0.573, 0.984]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1160.0s, [0.451, 0.044, 0.71, 0.828]: stand, carry/hold (an object)."]}, {"from": "human", "timestamps": 1196.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.256, 0.428, 0.473, 0.994]: carry/hold (an object), sit, listen to (a person); [0.632, 0.068, 0.879, 0.908]: watch (a person), listen to (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "VsYPP2I0aUQ.mkv", "clip": [1111, 1218], "conversations": [{"from": "human", "timestamps": 1141.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.133, 0.129, 0.427, 0.992]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.555, 0.042, 0.824, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1218.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.492, 0.236, 0.576, 0.568]?"}, {"from": "gpt", "value": ["At Now, [0.492, 0.236, 0.576, 0.568]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1215.0s to 1217.0s, [0.811, 0.26, 0.936, 0.894]->[0.763, 0.237, 0.885, 0.864]->[0.621, 0.236, 0.729, 0.828]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1214.0s, [0.802, 0.245, 0.919, 0.907]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1213.0s, [0.796, 0.249, 0.907, 0.898]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "VsYPP2I0aUQ.mkv", "clip": [1177, 1243], "conversations": [{"from": "human", "timestamps": 1207.0, "value": "What actions is the person in bbox [0.308, 0.027, 0.644, 0.978] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.308, 0.027, 0.644, 0.978]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1206.0s, [0.285, 0.035, 0.645, 0.995]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1243.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.156, 0.345, 0.408, 1.0]: listen to (a person), sit, watch (a person); [0.376, 0.13, 0.628, 0.986]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "VsYPP2I0aUQ.mkv", "clip": [1253, 1285], "conversations": [{"from": "human", "timestamps": 1283.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.352, 0.377, 0.49, 0.897]?"}, {"from": "gpt", "value": ["At Now, [0.352, 0.377, 0.49, 0.897]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1280.0s to 1282.0s, [0.55, 0.176, 0.849, 0.973]->[0.534, 0.18, 0.835, 0.966]->[0.563, 0.182, 0.851, 0.99]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1279.0s, [0.534, 0.204, 0.834, 0.991]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1278.0s, [0.455, 0.172, 
0.854, 0.976]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1277.0s, [0.43, 0.177, 0.828, 0.978]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1275.0s to 1276.0s, [0.484, 0.16, 0.86, 0.982]->[0.459, 0.178, 0.838, 0.972]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1274.0s, [0.482, 0.17, 0.842, 0.971]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1273.0s, [0.459, 0.164, 0.854, 0.98]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1272.0s, [0.549, 0.252, 0.874, 0.986]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1271.0s, [0.557, 0.152, 0.91, 0.993]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1270.0s, [0.677, 0.064, 0.983, 0.984]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1268.0s to 1269.0s, [0.377, 0.0, 0.787, 1.0]->[0.416, 0.004, 0.794, 0.996]: bend/bow (at the waist), carry/hold (an object), listen to (a person), watch (a person).", "At 1267.0s, [0.378, 0.002, 0.791, 0.994]: bend/bow (at the waist), carry/hold (an object), watch (a person)."]}, {"from": "human", "timestamps": 1285.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.2, 0.375, 0.322, 0.91]: listen to (a person), sit, watch (a person); [0.488, 0.25, 0.602, 0.897]: carry/hold (an object), talk to (e.g., self, a person, a group), walk."}]}, {"video": "VsYPP2I0aUQ.mkv", "clip": [1267, 1326], "conversations": [{"from": "human", "timestamps": 1297.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.244, 0.205, 0.636, 0.989]: listen to (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1326.0, "value": "For the person at bbox [0.352, 0.059, 0.744, 0.965], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.352, 0.059, 0.744, 0.965]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1325.0s, [0.347, 0.045, 0.717, 0.973]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1322.0s to 1324.0s, [0.463, 0.205, 0.75, 0.991]->[0.478, 0.177, 0.761, 0.989]->[0.475, 0.19, 0.807, 0.973]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1320.0s to 1321.0s, [0.464, 0.234, 0.747, 0.972]->[0.487, 0.183, 0.785, 0.997]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1319.0s, [0.454, 0.238, 0.778, 1.0]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1317.0s to 1318.0s, [0.442, 0.2, 0.81, 0.984]->[0.437, 0.237, 0.83, 0.989]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1315.0s to 1316.0s, [0.49, 0.218, 0.761, 1.0]->[0.48, 0.213, 0.802, 0.998]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1310.0s to 1314.0s, [0.48, 0.194, 0.807, 0.975]->[0.457, 0.191, 0.788, 0.981]->[0.474, 0.214, 0.769, 0.991]->[0.471, 0.22, 0.766, 0.982]->[0.495, 0.25, 0.77, 0.981]: sit, carry/hold (an object), listen to (a person), watch (a person)."]}]}, 
{"video": "VsYPP2I0aUQ.mkv", "clip": [1310, 1373], "conversations": [{"from": "human", "timestamps": 1340.0, "value": "Describe the actions of the person at bbox [0.238, 0.209, 0.637, 0.981] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.238, 0.209, 0.637, 0.981]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1339.0s, [0.208, 0.205, 0.643, 0.99]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1373.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.311, 0.143, 0.528, 1.0]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.462, 0.101, 0.768, 0.989]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "VsYPP2I0aUQ.mkv", "clip": [1350, 1388], "conversations": [{"from": "human", "timestamps": 1380.0, "value": "What actions is the person in bbox [0.325, 0.141, 0.563, 0.998] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.325, 0.141, 0.563, 0.998]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1379.0s, [0.324, 0.143, 0.558, 0.987]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1378.0s, [0.332, 0.146, 0.556, 1.0]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1377.0s, [0.324, 0.158, 0.558, 0.965]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1376.0s, [0.322, 0.15, 0.537, 0.971]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1375.0s, [0.31, 0.161, 0.525, 1.0]: stand, carry/hold (an object), listen to (a person).", "At 1374.0s, [0.314, 0.149, 0.535, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1373.0s, [0.311, 0.143, 0.528, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1372.0s, [0.308, 0.16, 0.558, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1371.0s, [0.305, 0.173, 0.53, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1362.0s to 1370.0s, [0.291, 0.095, 0.433, 0.989]->[0.285, 0.08, 0.433, 0.98]->[0.29, 0.096, 0.439, 0.982]->[0.296, 0.11, 0.455, 0.975]->[0.272, 0.131, 0.462, 0.981]->[0.231, 0.066, 0.469, 0.969]->[0.201, 0.063, 0.446, 0.982]->[0.198, 0.073, 0.431, 0.972]->[0.186, 0.07, 0.431, 0.985]: stand, listen to (a person), watch (a person).", "At 1361.0s, [0.303, 0.096, 0.439, 0.982]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1358.0s to 1360.0s, [0.314, 0.13, 0.476, 0.974]->[0.306, 0.115, 0.438, 0.972]->[0.313, 0.096, 0.442, 0.984]: stand, listen to (a person), watch (a person).", "At 1357.0s, [0.269, 0.184, 0.522, 0.981]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1356.0s, [0.173, 0.195, 0.45, 0.983]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1355.0s, [0.167, 0.165, 0.485, 0.993]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1354.0s, [0.128, 0.093, 0.491, 0.99]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1353.0s, [0.178, 
0.066, 0.587, 0.979]: stand, listen to (a person), watch (a person).", "From 1348.0s to 1352.0s, [0.281, 0.1, 0.679, 0.979]->[0.247, 0.094, 0.657, 0.982]->[0.262, 0.124, 0.677, 0.983]->[0.312, 0.108, 0.732, 0.992]->[0.248, 0.09, 0.677, 0.98]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1388.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.162, 0.094, 0.415, 0.974]: listen to (a person), stand, watch (a person); [0.41, 0.111, 0.681, 0.999]: watch (a person), listen to (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "VsYPP2I0aUQ.mkv", "clip": [1354, 1468], "conversations": [{"from": "human", "timestamps": 1384.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.297, 0.168, 0.553, 0.987]: carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.471, 0.1, 0.797, 0.99]: listen to (a person), stand, drink."}, {"from": "human", "timestamps": 1468.0, "value": "For the person at bbox [0.465, 0.166, 0.727, 0.879], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.465, 0.166, 0.727, 0.879]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1465.0s to 1467.0s, [0.496, 0.174, 0.728, 0.922]->[0.503, 0.18, 0.721, 0.911]->[0.488, 0.158, 0.724, 0.909]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1464.0s, [0.556, 0.091, 0.973, 0.957]: sit, carry/hold (an object), listen to (a person).", "At 1463.0s, [0.554, 0.086, 0.986, 0.976]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1462.0s, [0.565, 0.063, 0.932, 0.971]: sit, carry/hold (an object), watch (a person).", "At 1461.0s, [0.407, 0.105, 0.817, 0.973]: sit, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "VsYPP2I0aUQ.mkv", "clip": [1454, 1492], "conversations": [{"from": "human", "timestamps": 1484.0, "value": "Describe the actions of the person at bbox [0.648, 0.068, 0.995, 0.995] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.648, 0.068, 0.995, 0.995]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1482.0s to 1483.0s, [0.647, 0.068, 1.0, 0.995]->[0.635, 0.073, 0.998, 1.0]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1481.0s, [0.601, 0.092, 0.997, 0.971]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1480.0s, [0.564, 0.055, 0.982, 0.957]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1479.0s, [0.588, 0.051, 1.0, 0.964]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1478.0s, [0.626, 0.037, 0.992, 0.973]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1477.0s, [0.258, 0.112, 0.695, 0.996]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1475.0s to 1476.0s, [0.232, 0.141, 0.695, 0.989]->[0.229, 0.116, 0.704, 1.0]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1474.0s, [0.309, 0.117, 0.732, 0.995]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", 
"From 1472.0s to 1473.0s, [0.278, 0.117, 0.741, 0.995]->[0.276, 0.135, 0.765, 0.994]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1492.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.229, 0.348, 0.341, 0.933]: carry/hold (an object), sit, watch (a person), listen to (a person); [0.524, 0.211, 0.73, 0.961]: carry/hold (an object), sing to (e.g., self, a person, a group), dance."}]}, {"video": "VsYPP2I0aUQ.mkv", "clip": [1499, 1541], "conversations": [{"from": "human", "timestamps": 1529.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.378, 0.05, 0.748, 0.975]?"}, {"from": "gpt", "value": ["At Now, [0.378, 0.05, 0.748, 0.975]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1528.0s, [0.443, 0.143, 0.847, 0.992]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1527.0s, [0.484, 0.129, 0.832, 0.968]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1526.0s, [0.405, 0.023, 0.78, 0.949]: sit, listen to (a person), watch (a person).", "At 1525.0s, [0.436, 0.051, 0.756, 0.961]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1524.0s, [0.497, 0.048, 0.762, 0.911]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1541.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.054, 0.14, 0.554, 0.976]: carry/hold (an object), sit, talk to (e.g., self, a person, a group), listen to (a person); [0.647, 0.179, 0.952, 0.841]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "VsYPP2I0aUQ.mkv", "clip": [1546, 1597], "conversations": [{"from": "human", "timestamps": 1576.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.14, 0.184, 0.255, 0.846]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.241, 0.197, 0.331, 0.864]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1597.0, "value": "What actions did the person at bbox [0.217, 0.031, 0.574, 0.99] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.217, 0.031, 0.574, 0.99]: stand, listen to (a person).", "At 1596.0s, [0.239, 0.048, 0.636, 0.991]: stand, touch (an object), listen to (a person).", "At 1595.0s, [0.157, 0.032, 0.5, 0.981]: stand, listen to (a person), watch (a person).", "At 1594.0s, [0.39, 0.12, 0.757, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1593.0s, [0.307, 0.105, 0.676, 1.0]: walk, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "VsYPP2I0aUQ.mkv", "clip": [1588, 1660], "conversations": [{"from": "human", "timestamps": 1618.0, "value": "For the person at bbox [0.268, 0.092, 0.631, 0.975], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.268, 0.092, 0.631, 0.975]: stand, talk to (e.g., self, a person, a group).", "At 1617.0s, [0.212, 0.151, 0.607, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1614.0s to 1616.0s, [0.21, 0.127, 0.595, 0.958]->[0.147, 0.107, 0.591, 0.983]->[0.213, 0.192, 
0.608, 0.992]: stand, listen to (a person).", "At 1613.0s, [0.229, 0.097, 0.603, 0.96]: stand, dress/put on clothing, listen to (a person).", "At 1612.0s, [0.239, 0.063, 0.596, 0.979]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1611.0s, [0.206, 0.093, 0.575, 0.982]: stand, listen to (a person).", "From 1608.0s to 1610.0s, [0.057, 0.074, 0.518, 0.968]->[0.021, 0.012, 0.579, 0.983]->[0.004, 0.003, 0.472, 0.972]: stand, listen to (a person), watch (a person).", "At 1607.0s, [0.285, 0.186, 0.692, 0.993]: stand, hug (a person), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1660.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.072, 0.0, 0.512, 0.981]: stand, talk to (e.g., self, a person, a group); [0.392, 0.136, 0.713, 0.979]: listen to (a person), stand, watch (a person)."}]}, {"video": "VsYPP2I0aUQ.mkv", "clip": [1609, 1680], "conversations": [{"from": "human", "timestamps": 1639.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.253, 0.196, 0.636, 0.981]: listen to (a person), sit; [0.579, 0.291, 0.812, 0.963]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1680.0, "value": "For the person at bbox [0.793, 0.129, 0.995, 0.974], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.793, 0.129, 0.995, 0.974]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "From 1678.0s to 1679.0s, [0.654, 0.186, 0.998, 0.996]->[0.807, 0.15, 1.0, 0.981]: stand, talk to (e.g., self, a person, a group).", "At 1677.0s, [0.543, 0.114, 0.929, 0.993]: stand, listen to (a person).", "From 1674.0s to 1676.0s, [0.323, 0.184, 0.717, 0.983]->[0.551, 0.188, 0.845, 1.0]->[0.481, 0.186, 0.888, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1673.0s, [0.062, 0.024, 0.572, 0.982]: stand, listen to (a person), watch (a person).", "At 1672.0s, [0.053, 0.028, 0.579, 0.977]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1669.0s to 1671.0s, [0.321, 0.034, 0.658, 0.968]->[0.286, 0.068, 0.687, 0.984]->[0.324, 0.082, 0.706, 0.972]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1667.0s to 1668.0s, [0.359, 0.071, 0.688, 0.975]->[0.274, 0.057, 0.635, 0.992]: stand, talk to (e.g., self, a person, a group).", "At 1666.0s, [0.32, 0.064, 0.672, 0.975]: stand, listen to (a person), watch (a person).", "At 1665.0s, [0.065, 0.001, 0.504, 0.977]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1663.0s to 1664.0s, [0.09, 0.0, 0.522, 0.981]->[0.073, 0.0, 0.506, 0.983]: stand, listen to (a person), watch (a person).", "At 1662.0s, [0.072, 0.0, 0.501, 0.974]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1661.0s, [0.0, 0.006, 0.647, 0.97]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1660.0s, [0.072, 0.0, 0.512, 0.981]: stand, talk to (e.g., self, a person, a group).", "From 1658.0s to 1659.0s, [0.068, 0.004, 0.546, 1.0]->[0.073, 0.005, 0.548, 0.977]: stand, listen to (a person), watch (a person).", "At 1657.0s, [0.087, 0.0, 0.514, 0.972]: stand, listen to (a person).", "At 1656.0s, [0.087, 0.0, 0.53, 0.986]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a 
person).", "At 1655.0s, [0.025, 0.0, 0.467, 0.966]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1652.0s to 1654.0s, [0.009, 0.0, 0.439, 0.979]->[0.04, 0.0, 0.466, 0.995]->[0.09, 0.01, 0.582, 0.995]: stand, listen to (a person).", "At 1651.0s, [0.0, 0.0, 0.457, 0.961]: stand, listen to (a person), watch (a person).", "At 1650.0s, [0.126, 0.077, 0.604, 0.986]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1649.0s, [0.004, 0.006, 0.233, 0.985]: walk, listen to (a person), watch (a person)."]}]}, {"video": "VsYPP2I0aUQ.mkv", "clip": [1687, 1719], "conversations": [{"from": "human", "timestamps": 1717.0, "value": "Describe the actions of the person at bbox [0.341, 0.213, 0.79, 0.983] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.341, 0.213, 0.79, 0.983]: stand, talk to (e.g., self, a person, a group).", "At 1716.0s, [0.308, 0.193, 0.713, 0.991]: stand, talk to (e.g., self, a person, a group).", "At 1715.0s, [0.8, 0.38, 0.89, 0.899]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1719.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.369, 0.104, 0.85, 1.0]: talk to (e.g., self, a person, a group), walk."}]}, {"video": "VsYPP2I0aUQ.mkv", "clip": [1728, 1766], "conversations": [{"from": "human", "timestamps": 1758.0, "value": "What actions did the person at bbox [0.181, 0.0, 0.525, 0.999] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.181, 0.0, 0.525, 0.999]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1757.0s, [0.111, 0.006, 0.404, 0.979]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1756.0s, [0.004, 0.0, 0.302, 0.934]: walk, listen to (a person)."]}, {"from": "human", "timestamps": 1766.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.336, 0.044, 0.666, 0.976]: talk to (e.g., self, a person, a group), walk."}]}, {"video": "WKqbLbU68wU.mkv", "clip": [954, 1012], "conversations": [{"from": "human", "timestamps": 984.0, "value": "What actions is the person in bbox [0.559, 0.482, 0.787, 0.994] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.559, 0.482, 0.787, 0.994]: bend/bow (at the waist), carry/hold (an object).", "At 983.0s, [0.566, 0.284, 0.769, 0.993]: bend/bow (at the waist), carry/hold (an object), listen to (a person).", "At 982.0s, [0.609, 0.228, 0.792, 0.988]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 981.0s, [0.57, 0.249, 0.715, 0.978]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 980.0s, [0.525, 0.263, 0.683, 0.974]: walk, carry/hold (an object)."]}, {"from": "human", "timestamps": 1012.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.295, 0.171, 0.495, 0.967]: carry/hold (an object), stand, listen to (a person); [0.596, 0.098, 0.751, 0.892]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "WKqbLbU68wU.mkv", "clip": [1002, 1060], "conversations": [{"from": "human", "timestamps": 1032.0, "value": "What actions did the person at bbox [0.471, 0.365, 0.564, 
0.764] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.471, 0.365, 0.564, 0.764]: walk, carry/hold (an object).", "From 1019.0s to 1031.0s, [0.329, 0.056, 0.459, 0.96]->[0.313, 0.007, 0.506, 0.98]->[0.395, 0.117, 0.559, 0.981]->[0.455, 0.191, 0.607, 0.981]->[0.448, 0.222, 0.632, 0.95]->[0.499, 0.234, 0.663, 0.888]->[0.486, 0.233, 0.635, 0.85]->[0.51, 0.271, 0.647, 0.83]->[0.501, 0.294, 0.632, 0.81]->[0.511, 0.329, 0.63, 0.794]->[0.49, 0.331, 0.594, 0.781]->[0.491, 0.36, 0.582, 0.765]->[0.467, 0.36, 0.557, 0.764]: walk, carry/hold (an object).", "At 1018.0s, [0.362, 0.019, 0.495, 0.95]: stand, carry/hold (an object).", "At 1016.0s, [0.343, 0.019, 0.487, 0.959]: stand, carry/hold (an object).", "At 1015.0s, [0.336, 0.154, 0.499, 0.943]: stand, carry/hold (an object), lift/pick up.", "At 1014.0s, [0.347, 0.15, 0.51, 0.958]: bend/bow (at the waist), carry/hold (an object), lift/pick up, touch (an object).", "At 1013.0s, [0.34, 0.304, 0.507, 0.981]: bend/bow (at the waist), carry/hold (an object), lift/pick up.", "From 1008.0s to 1012.0s, [0.32, 0.198, 0.48, 0.974]->[0.3, 0.188, 0.48, 0.972]->[0.305, 0.191, 0.483, 1.0]->[0.308, 0.194, 0.485, 0.966]->[0.295, 0.171, 0.495, 0.967]: stand, carry/hold (an object), listen to (a person).", "At 1007.0s, [0.293, 0.219, 0.485, 0.993]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1006.0s, [0.287, 0.252, 0.471, 0.972]: bend/bow (at the waist), carry/hold (an object), listen to (a person).", "At 1005.0s, [0.256, 0.325, 0.478, 0.985]: bend/bow (at the waist), carry/hold (an object).", "At 1004.0s, [0.323, 0.406, 0.495, 0.947]: bend/bow (at the waist), carry/hold (an object), touch (an object), listen to (a person).", "At 1003.0s, [0.306, 0.426, 0.506, 0.996]: bend/bow (at the waist), carry/hold (an object), listen to (a person).", "From 1001.0s to 1002.0s, [0.293, 0.413, 0.505, 0.941]->[0.287, 0.415, 0.507, 0.962]: bend/bow (at the waist), carry/hold (an object), turn (e.g., a screwdriver), listen to (a person).", "At 1000.0s, [0.309, 0.417, 0.507, 0.957]: bend/bow (at the waist), carry/hold (an object), listen to (a person).", "At 999.0s, [0.348, 0.427, 0.515, 0.967]: bend/bow (at the waist), carry/hold (an object).", "At 998.0s, [0.343, 0.444, 0.511, 0.943]: bend/bow (at the waist), carry/hold (an object), listen to (a person)."]}, {"from": "human", "timestamps": 1060.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.161, 0.31, 0.404, 0.991]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "WKqbLbU68wU.mkv", "clip": [1112, 1172], "conversations": [{"from": "human", "timestamps": 1142.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.002, 0.006, 0.725, 0.982]: walk; [0.005, 0.223, 0.209, 0.961]: run/jog."}, {"from": "human", "timestamps": 1172.0, "value": "For the person at bbox [0.163, 0.034, 0.756, 0.992], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.163, 0.034, 0.756, 0.992]: stand, listen to (a person), watch (a person).", "At 1171.0s, [0.175, 0.038, 0.753, 0.975]: stand, listen to (a person), watch (a person)."]}]}, {"video": "WKqbLbU68wU.mkv", "clip": [1131, 1229], "conversations": [{"from": "human", "timestamps": 1161.0, "value": "Describe the locations and actions of each 
person in the current frame."}, {"from": "gpt", "value": "At now, [0.223, 0.092, 0.73, 0.981]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1229.0, "value": "Describe the actions of the person at bbox [0.0, 0.106, 0.857, 0.987] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.106, 0.857, 0.987]: stand, talk to (e.g., self, a person, a group).", "At 1228.0s, [0.0, 0.0, 0.916, 0.996]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "WKqbLbU68wU.mkv", "clip": [1275, 1329], "conversations": [{"from": "human", "timestamps": 1305.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.034, 0.338, 0.213, 0.996]: watch (a person), talk to (e.g., self, a person, a group), bend/bow (at the waist); [0.449, 0.426, 0.569, 0.849]: listen to (a person), sit, watch (a person), talk to (e.g., self, a person, a group); [0.79, 0.227, 0.927, 0.985]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1329.0, "value": "What actions did the person at bbox [0.444, 0.28, 0.622, 0.995] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.444, 0.28, 0.622, 0.995]: walk, talk to (e.g., self, a person, a group).", "From 1327.0s to 1328.0s, [0.574, 0.305, 0.718, 0.984]->[0.509, 0.27, 0.706, 0.984]: walk, talk to (e.g., self, a person, a group)."]}]}, {"video": "WKqbLbU68wU.mkv", "clip": [1427, 1468], "conversations": [{"from": "human", "timestamps": 1457.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.146, 0.098, 0.928, 0.977]: sit."}, {"from": "human", "timestamps": 1468.0, "value": "What actions is the person in bbox [0.087, 0.429, 0.256, 0.826] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.087, 0.429, 0.256, 0.826]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1467.0s, [0.086, 0.439, 0.261, 0.823]: sit, talk to (e.g., self, a person, a group).", "At 1466.0s, [0.092, 0.441, 0.249, 0.822]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1464.0s, [0.12, 0.086, 0.826, 0.976]: sit, talk to (e.g., self, a person, a group).", "From 1461.0s to 1462.0s, [0.118, 0.126, 0.823, 0.979]->[0.132, 0.103, 0.832, 0.98]: sit, talk to (e.g., self, a person, a group).", "At 1460.0s, [0.142, 0.114, 0.826, 0.978]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "WKqbLbU68wU.mkv", "clip": [1474, 1526], "conversations": [{"from": "human", "timestamps": 1504.0, "value": "For the person at bbox [0.305, 0.201, 0.724, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.305, 0.201, 0.724, 1.0]: bend/bow (at the waist), carry/hold (an object).", "From 1501.0s to 1503.0s, [0.263, 0.005, 0.708, 1.0]->[0.325, 0.089, 0.778, 1.0]->[0.271, 0.161, 0.7, 1.0]: bend/bow (at the waist), carry/hold (an object).", "At 1500.0s, [0.354, 0.106, 0.803, 0.976]: bend/bow (at the waist), carry/hold (an object), push (an object).", "At 1499.0s, [0.167, 0.201, 0.609, 0.983]: bend/bow (at the waist), carry/hold (an object).", "At 1498.0s, [0.188, 0.285, 0.567, 1.0]: bend/bow (at the waist).", "From 1496.0s to 1497.0s, [0.289, 0.282, 0.683, 0.994]->[0.384, 0.23, 0.734, 1.0]: bend/bow (at 
the waist), carry/hold (an object).", "At 1495.0s, [0.549, 0.314, 0.822, 0.985]: walk, carry/hold (an object).", "At 1494.0s, [0.353, 0.336, 0.647, 0.99]: bend/bow (at the waist), carry/hold (an object)."]}, {"from": "human", "timestamps": 1526.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.143, 0.399, 0.992]: listen to (a person), stand, watch (a person); [0.625, 0.034, 1.0, 0.98]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "WKqbLbU68wU.mkv", "clip": [1527, 1610], "conversations": [{"from": "human", "timestamps": 1557.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.546, 0.034, 1.0, 0.994]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1610.0, "value": "Describe the actions of the person at bbox [0.0, 0.274, 0.345, 0.994] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.0, 0.274, 0.345, 0.994]: stand, talk to (e.g., self, a person, a group).", "From 1607.0s to 1609.0s, [0.074, 0.208, 0.398, 1.0]->[0.004, 0.212, 0.331, 1.0]->[0.0, 0.258, 0.312, 0.998]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "WKqbLbU68wU.mkv", "clip": [1548, 1589], "conversations": [{"from": "human", "timestamps": 1578.0, "value": "For the person at bbox [0.125, 0.055, 0.581, 0.998], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.125, 0.055, 0.581, 0.998]: stand, carry/hold (an object), put down, take (an object) from (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1577.0s, [0.193, 0.021, 0.629, 0.983]: stand, carry/hold (an object), pull (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1589.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.397, 0.446, 0.435, 0.654]: listen to (a person), watch (a person), walk; [0.426, 0.457, 0.457, 0.649]: carry/hold (an object), watch (a person), walk; [0.466, 0.46, 0.502, 0.649]: carry/hold (an object), bend/bow (at the waist); [0.515, 0.441, 0.567, 0.661]: carry/hold (an object), stand."}]}, {"video": "WKqbLbU68wU.mkv", "clip": [1631, 1735], "conversations": [{"from": "human", "timestamps": 1661.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.181, 0.169, 0.497, 0.947]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1735.0, "value": "Describe the actions of the person at bbox [0.459, 0.483, 0.499, 0.707] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.459, 0.483, 0.499, 0.707]: stand, listen to (a person).", "From 1730.0s to 1734.0s, [0.504, 0.0, 0.996, 0.995]->[0.472, 0.002, 0.997, 0.995]->[0.518, 0.03, 1.0, 0.97]->[0.497, 0.0, 0.997, 0.995]->[0.459, 0.48, 0.498, 0.708]: stand, listen to (a person)."]}]}, {"video": "WVde9pyaHg4.mkv", "clip": [877, 913], "conversations": [{"from": "human", "timestamps": 907.0, "value": "What actions is the person in bbox [0.452, 0.606, 0.537, 0.717] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.452, 0.606, 0.537, 0.717]: stand.", "From 903.0s to 906.0s, [0.457, 0.607, 0.536, 
0.717]->[0.462, 0.608, 0.537, 0.715]->[0.456, 0.608, 0.541, 0.716]->[0.453, 0.606, 0.541, 0.718]: stand."]}, {"from": "human", "timestamps": 913.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.669, 0.384, 0.747, 0.694]: stand; [0.744, 0.315, 0.856, 0.714]: stand, talk to (e.g., self, a person, a group); [0.836, 0.244, 0.993, 0.744]: listen to (a person), stand."}]}, {"video": "WVde9pyaHg4.mkv", "clip": [997, 1064], "conversations": [{"from": "human", "timestamps": 1027.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.001, 0.002, 0.554, 0.998]?"}, {"from": "gpt", "value": ["In the current frame, [0.001, 0.002, 0.554, 0.998]: sit, stand, eat.", "At 1026.0s, [0.006, 0.004, 0.556, 0.994]: stand, carry/hold (an object), eat."]}, {"from": "human", "timestamps": 1064.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.012, 0.013, 0.983, 0.976]: lie/sleep."}]}, {"video": "WVde9pyaHg4.mkv", "clip": [1200, 1241], "conversations": [{"from": "human", "timestamps": 1230.0, "value": "For the person at bbox [0.748, 0.206, 0.993, 0.942], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.748, 0.206, 0.993, 0.942]: stand, touch (an object).", "At 1229.0s, [0.0, 0.239, 0.167, 0.87]: stand, carry/hold (an object).", "At 1228.0s, [0.003, 0.234, 0.177, 0.869]: stand, smoke."]}, {"from": "human", "timestamps": 1241.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.49, 0.163, 1.0, 1.0]: carry/hold (an object), stand."}]}, {"video": "WVde9pyaHg4.mkv", "clip": [1259, 1304], "conversations": [{"from": "human", "timestamps": 1289.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.002, 0.233, 0.18, 0.899]: stand, eat, touch (an object)."}, {"from": "human", "timestamps": 1304.0, "value": "For the person at bbox [0.0, 0.064, 0.187, 0.845], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.064, 0.187, 0.845]: stand, carry/hold (an object).", "From 1302.0s to 1303.0s, [0.0, 0.061, 0.239, 0.966]->[0.0, 0.041, 0.195, 0.987]: stand, carry/hold (an object).", "From 1299.0s to 1301.0s, [0.092, 0.139, 0.462, 0.975]->[0.154, 0.001, 0.504, 1.0]->[0.0, 0.1, 0.358, 0.994]: walk, carry/hold (an object).", "At 1298.0s, [0.0, 0.149, 0.192, 1.0]: walk."]}]}, {"video": "WVde9pyaHg4.mkv", "clip": [1387, 1425], "conversations": [{"from": "human", "timestamps": 1417.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.012, 0.0, 0.737, 0.969]: sit, eat, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1425.0, "value": "For the person at bbox [0.0, 0.0, 1.0, 0.99], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.0, 1.0, 0.99]: sit, stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1423.0s to 1424.0s, [0.002, 0.0, 0.931, 0.986]->[0.0, 0.0, 0.957, 0.978]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "WVde9pyaHg4.mkv", "clip": [1380, 1458], "conversations": [{"from": "human", "timestamps": 1410.0, "value": "What actions is each person 
performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.196, 0.008, 0.991, 1.0]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1458.0, "value": "For the person at bbox [0.061, 0.093, 0.645, 0.994], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.061, 0.093, 0.645, 0.994]: stand.", "At 1457.0s, [0.056, 0.04, 0.688, 0.991]: stand.", "At 1455.0s, [0.06, 0.064, 0.688, 0.999]: stand.", "At 1454.0s, [0.031, 0.046, 0.667, 0.989]: crouch/kneel.", "At 1453.0s, [0.018, 0.016, 0.653, 0.993]: stand."]}]}, {"video": "WVde9pyaHg4.mkv", "clip": [1492, 1538], "conversations": [{"from": "human", "timestamps": 1522.0, "value": "What actions did the person at bbox [0.248, 0.007, 0.999, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.248, 0.007, 0.999, 1.0]: walk, open (e.g., a window, a car door).", "From 1520.0s to 1521.0s, [0.13, 0.02, 0.978, 1.0]->[0.299, 0.07, 0.977, 1.0]: stand, open (e.g., a window, a car door).", "At 1519.0s, [0.028, 0.077, 0.724, 1.0]: walk, open (e.g., a window, a car door)."]}, {"from": "human", "timestamps": 1538.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.112, 0.406, 0.226, 0.804]: walk."}]}, {"video": "WVde9pyaHg4.mkv", "clip": [1513, 1602], "conversations": [{"from": "human", "timestamps": 1543.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.467, 0.413, 0.597, 0.88]: walk."}, {"from": "human", "timestamps": 1602.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.003, 0.0, 0.583, 0.982]?"}, {"from": "gpt", "value": ["In the current frame, [0.003, 0.0, 0.583, 0.982]: stand, talk to (e.g., self, a person, a group).", "At 1601.0s, [0.0, 0.0, 0.647, 0.996]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "WVde9pyaHg4.mkv", "clip": [1593, 1686], "conversations": [{"from": "human", "timestamps": 1623.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.486, 0.023, 0.993, 0.983]: stand."}, {"from": "human", "timestamps": 1686.0, "value": "What actions is the person in bbox [0.0, 0.077, 0.146, 0.979] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.077, 0.146, 0.979]: stand, listen to (a person), watch (a person).", "At 1684.0s, [0.0, 0.077, 0.156, 1.0]: stand, listen to (a person), watch (a person).", "At 1683.0s, [0.0, 0.059, 0.263, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1682.0s, [0.0, 0.077, 0.24, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1680.0s to 1681.0s, [0.019, 0.043, 0.58, 1.0]->[0.0, 0.106, 0.121, 0.889]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "WVde9pyaHg4.mkv", "clip": [1648, 1690], "conversations": [{"from": "human", "timestamps": 1678.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.123, 0.089, 0.812]: talk to (e.g., self, a person, a group); [0.362, 0.022, 0.994, 0.987]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1690.0, 
"value": "What actions is the person in bbox [0.026, 0.0, 0.969, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.026, 0.0, 0.969, 1.0]: sit, stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1687.0s to 1689.0s, [0.016, 0.018, 0.686, 0.993]->[0.0, 0.0, 0.63, 1.0]->[0.0, 0.012, 0.767, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1686.0s, [0.0, 0.077, 0.146, 0.979]: stand, listen to (a person), watch (a person).", "At 1684.0s, [0.0, 0.077, 0.156, 1.0]: stand, listen to (a person), watch (a person).", "At 1683.0s, [0.0, 0.059, 0.263, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1682.0s, [0.0, 0.077, 0.24, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1680.0s to 1681.0s, [0.019, 0.043, 0.58, 1.0]->[0.0, 0.106, 0.121, 0.889]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "WlgxRNCHQzw.mkv", "clip": [878, 942], "conversations": [{"from": "human", "timestamps": 908.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.0, 0.043, 0.522, 0.991]: get up."}, {"from": "human", "timestamps": 942.0, "value": "Describe the actions of the person at bbox [0.206, 0.098, 0.768, 0.986] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.206, 0.098, 0.768, 0.986]: stand, carry/hold (an object), dress/put on clothing, talk to (e.g., self, a person, a group).", "From 939.0s to 941.0s, [0.232, 0.12, 0.762, 0.998]->[0.192, 0.109, 0.675, 0.977]->[0.191, 0.109, 0.705, 0.979]: stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "WlgxRNCHQzw.mkv", "clip": [957, 988], "conversations": [{"from": "human", "timestamps": 987.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.014, 0.706, 0.982]?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.014, 0.706, 0.982]: stand.", "At 986.0s, [0.003, 0.051, 0.699, 0.978]: stand.", "At 985.0s, [0.051, 0.141, 0.822, 0.994]: get up.", "At 984.0s, [0.277, 0.097, 0.985, 0.987]: sit."]}, {"from": "human", "timestamps": 988.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.019, 0.704, 1.0]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "WlgxRNCHQzw.mkv", "clip": [1021, 1074], "conversations": [{"from": "human", "timestamps": 1051.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.002, 0.019, 0.806, 0.985]?"}, {"from": "gpt", "value": ["In the current frame, [0.002, 0.019, 0.806, 0.985]: sit, talk to (e.g., self, a person, a group).", "At 1050.0s, [0.002, 0.0, 0.795, 0.972]: stand, talk to (e.g., self, a person, a group).", "At 1049.0s, [0.012, 0.015, 0.827, 0.983]: stand."]}, {"from": "human", "timestamps": 1074.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.168, 0.356, 0.936]: listen to (a person), hand shake, watch (a person), walk; [0.14, 0.136, 0.972, 0.99]: carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person), bend/bow (at the waist)."}]}, {"video": "WlgxRNCHQzw.mkv", "clip": [1127, 1174], "conversations": [{"from": "human", "timestamps": 1157.0, "value": "Can you tell me the current and past 
actions of the person located in bbox [0.397, 0.073, 0.961, 0.994]?"}, {"from": "gpt", "value": ["In the current frame, [0.397, 0.073, 0.961, 0.994]: stand, talk to (e.g., self, a person, a group).", "At 1156.0s, [0.41, 0.074, 0.964, 0.993]: stand, talk to (e.g., self, a person, a group).", "From 1154.0s to 1155.0s, [0.407, 0.083, 0.957, 0.996]->[0.406, 0.074, 0.954, 0.997]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1153.0s, [0.39, 0.072, 0.943, 0.999]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1174.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.036, 0.043, 0.53, 1.0]: listen to (a person), stand; [0.507, 0.125, 0.937, 0.995]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "WlgxRNCHQzw.mkv", "clip": [1111, 1229], "conversations": [{"from": "human", "timestamps": 1141.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.001, 0.011, 0.507, 0.987]: stand, talk to (e.g., self, a person, a group); [0.51, 0.147, 0.976, 0.993]: watch (a person), stand."}, {"from": "human", "timestamps": 1229.0, "value": "Describe the actions of the person at bbox [0.057, 0.208, 0.932, 0.976] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.057, 0.208, 0.932, 0.976]: bend/bow (at the waist), lift/pick up.", "At 1228.0s, [0.127, 0.158, 0.807, 0.992]: stand.", "From 1226.0s to 1227.0s, [0.119, 0.178, 0.421, 0.879]->[0.351, 0.19, 0.716, 1.0]: walk.", "At 1225.0s, [0.124, 0.187, 0.469, 0.898]: stand, carry/hold (an object), touch (an object)."]}]}, {"video": "WlgxRNCHQzw.mkv", "clip": [1193, 1230], "conversations": [{"from": "human", "timestamps": 1223.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.19, 0.138, 0.809, 0.975]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1230.0, "value": "For the person at bbox [0.33, 0.173, 0.785, 0.986], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.33, 0.173, 0.785, 0.986]: bend/bow (at the waist), stand, carry/hold (an object).", "At 1229.0s, [0.057, 0.208, 0.932, 0.976]: bend/bow (at the waist), lift/pick up.", "At 1228.0s, [0.127, 0.158, 0.807, 0.992]: stand.", "From 1226.0s to 1227.0s, [0.119, 0.178, 0.421, 0.879]->[0.351, 0.19, 0.716, 1.0]: walk.", "At 1225.0s, [0.124, 0.187, 0.469, 0.898]: stand, carry/hold (an object), touch (an object)."]}]}, {"video": "WlgxRNCHQzw.mkv", "clip": [1266, 1375], "conversations": [{"from": "human", "timestamps": 1296.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.181, 0.487, 0.981]: watch (a person), talk to (e.g., self, a person, a group), bend/bow (at the waist); [0.366, 0.165, 0.895, 0.993]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1375.0, "value": "For the person at bbox [0.008, 0.015, 0.644, 0.995], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.008, 0.015, 0.644, 0.995]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1374.0s, [0.004, 0.066, 0.644, 0.984]: sit, talk to (e.g., self, a person, a 
group), watch (a person).", "At 1373.0s, [0.002, 0.058, 0.658, 0.99]: sit, stand, listen to (a person), watch (a person)."]}]}, {"video": "WlgxRNCHQzw.mkv", "clip": [1377, 1449], "conversations": [{"from": "human", "timestamps": 1407.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.834, 0.154, 0.925, 0.363]: ride (e.g., a bike, a car, a horse), sit."}, {"from": "human", "timestamps": 1449.0, "value": "Describe the actions of the person at bbox [0.561, 0.055, 0.995, 0.964] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.561, 0.055, 0.995, 0.964]: sit, ride (e.g., a bike, a car, a horse), listen to (a person).", "From 1447.0s to 1448.0s, [0.541, 0.089, 0.993, 0.992]->[0.507, 0.075, 0.99, 0.987]: sit, ride (e.g., a bike, a car, a horse), listen to (a person).", "At 1446.0s, [0.635, 0.138, 1.0, 0.987]: sit, ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group).", "At 1445.0s, [0.608, 0.1, 0.989, 0.981]: sit, ride (e.g., a bike, a car, a horse)."]}]}, {"video": "WlgxRNCHQzw.mkv", "clip": [1408, 1480], "conversations": [{"from": "human", "timestamps": 1438.0, "value": "For the person at bbox [0.377, 0.073, 0.955, 0.96], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.377, 0.073, 0.955, 0.96]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1437.0s, [0.443, 0.049, 0.986, 0.977]: sit, talk to (e.g., self, a person, a group).", "From 1435.0s to 1436.0s, [0.414, 0.07, 0.98, 0.982]->[0.43, 0.048, 0.981, 0.979]: sit, ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1480.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.024, 0.624, 0.96]: sit."}]}, {"video": "WlgxRNCHQzw.mkv", "clip": [1465, 1527], "conversations": [{"from": "human", "timestamps": 1495.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.015, 0.801, 0.967]?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.015, 0.801, 0.967]: sit, ride (e.g., a bike, a car, a horse).", "At 1494.0s, [0.0, 0.0, 0.841, 1.0]: sit, ride (e.g., a bike, a car, a horse)."]}, {"from": "human", "timestamps": 1527.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.442, 0.339, 0.925, 0.845]: watch (a person), lie/sleep, talk to (e.g., self, a person, a group)."}]}, {"video": "WlgxRNCHQzw.mkv", "clip": [1491, 1589], "conversations": [{"from": "human", "timestamps": 1521.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.056, 0.641, 0.975]: watch (a person), lift (a person), bend/bow (at the waist)."}, {"from": "human", "timestamps": 1589.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.006, 0.918, 0.946]?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.006, 0.918, 0.946]: sit, ride (e.g., a bike, a car, a horse).", "From 1587.0s to 1588.0s, [0.026, 0.033, 0.914, 0.997]->[0.001, 0.006, 0.897, 1.0]: sit, ride (e.g., a bike, a car, a horse)."]}]}, {"video": "WlgxRNCHQzw.mkv", "clip": [1543, 1588], "conversations": [{"from": "human", "timestamps": 1573.0, "value": "Describe the locations and actions of each person in 
the current frame."}, {"from": "gpt", "value": "In the current frame, [0.167, 0.056, 0.914, 0.995]: watch (a person), stand."}, {"from": "human", "timestamps": 1588.0, "value": "For the person at bbox [0.001, 0.006, 0.897, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.001, 0.006, 0.897, 1.0]: sit, ride (e.g., a bike, a car, a horse).", "At 1587.0s, [0.026, 0.033, 0.914, 0.997]: sit, ride (e.g., a bike, a car, a horse)."]}]}, {"video": "X5wWhZ2r9kc.mp4", "clip": [873, 924], "conversations": [{"from": "human", "timestamps": 903.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.021, 0.087, 0.633, 0.877]: watch (a person), stand."}, {"from": "human", "timestamps": 924.0, "value": "For the person at bbox [0.332, 0.103, 0.946, 0.874], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.332, 0.103, 0.946, 0.874]: stand, watch (a person).", "From 918.0s to 923.0s, [0.061, 0.148, 0.567, 0.875]->[0.033, 0.144, 0.522, 0.892]->[0.038, 0.143, 0.491, 0.879]->[0.038, 0.138, 0.518, 0.887]->[0.432, 0.123, 0.938, 0.866]->[0.288, 0.106, 0.945, 0.875]: stand.", "At 917.0s, [0.037, 0.131, 0.5, 0.887]: stand, watch (a person).", "At 916.0s, [0.01, 0.133, 0.487, 0.893]: stand, listen to (a person), watch (a person)."]}]}, {"video": "X5wWhZ2r9kc.mp4", "clip": [928, 977], "conversations": [{"from": "human", "timestamps": 958.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.106, 0.143, 0.531, 0.876]: watch (a person), stand; [0.631, 0.123, 0.991, 0.862]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 977.0, "value": "What actions is the person in bbox [0.159, 0.116, 0.876, 0.862] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.159, 0.116, 0.876, 0.862]: stand, talk to (e.g., self, a person, a group).", "At 976.0s, [0.239, 0.099, 0.899, 0.847]: stand, listen to (a person), watch (a person).", "From 973.0s to 975.0s, [0.459, 0.369, 0.819, 0.887]->[0.489, 0.372, 0.826, 0.88]->[0.495, 0.368, 0.823, 0.883]: stand, carry/hold (an object), listen to (a person).", "At 972.0s, [0.483, 0.371, 0.818, 0.888]: stand, carry/hold (an object), watch (a person).", "At 971.0s, [0.482, 0.374, 0.815, 0.889]: stand, carry/hold (an object).", "At 970.0s, [0.075, 0.141, 0.505, 0.871]: stand, carry/hold (an object), watch (a person).", "From 967.0s to 969.0s, [0.087, 0.149, 0.497, 0.913]->[0.067, 0.153, 0.5, 0.913]->[0.062, 0.154, 0.492, 0.873]: stand, watch (a person)."]}]}, {"video": "X5wWhZ2r9kc.mp4", "clip": [953, 1004], "conversations": [{"from": "human", "timestamps": 983.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.011, 0.107, 0.587, 0.89]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1004.0, "value": "What actions did the person at bbox [0.301, 0.365, 0.384, 0.599] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.301, 0.365, 0.384, 0.599]: stand, touch (an object), watch (a person).", "From 1002.0s to 1003.0s, [0.004, 0.133, 0.635, 0.88]->[0.013, 0.134, 0.622, 0.881]: stand, watch (a person).", "At 1001.0s, [0.009, 0.126, 0.622, 0.884]: 
stand, listen to (a person), watch (a person)."]}]}, {"video": "X5wWhZ2r9kc.mp4", "clip": [1001, 1032], "conversations": [{"from": "human", "timestamps": 1031.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.324, 0.294, 0.563, 0.728]: carry/hold (an object), sit, touch (an object)."}, {"from": "human", "timestamps": 1032.0, "value": "What actions did the person at bbox [0.327, 0.312, 0.552, 0.731] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.327, 0.312, 0.552, 0.731]: sit, carry/hold (an object).", "At 1031.0s, [0.324, 0.294, 0.563, 0.728]: sit, carry/hold (an object), touch (an object).", "At 1030.0s, [0.323, 0.247, 0.521, 0.723]: sit, carry/hold (an object), crouch/kneel, touch (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1029.0s, [0.316, 0.227, 0.553, 0.717]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group)."]}]}, {"video": "X5wWhZ2r9kc.mp4", "clip": [1043, 1136], "conversations": [{"from": "human", "timestamps": 1073.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.064, 0.2, 0.493, 0.872]: watch (a person), stand; [0.494, 0.222, 0.924, 0.88]: watch (a person), stand."}, {"from": "human", "timestamps": 1136.0, "value": "What actions did the person at bbox [0.111, 0.207, 0.383, 0.872] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.111, 0.207, 0.383, 0.872]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1134.0s to 1135.0s, [0.145, 0.208, 0.373, 0.879]->[0.108, 0.209, 0.431, 0.881]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1133.0s, [0.137, 0.216, 0.397, 0.868]: stand, listen to (a person)."]}]}, {"video": "X5wWhZ2r9kc.mp4", "clip": [1083, 1138], "conversations": [{"from": "human", "timestamps": 1113.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.407, 0.122, 0.997, 0.87]?"}, {"from": "gpt", "value": ["At Now, [0.407, 0.122, 0.997, 0.87]: stand, watch (a person).", "At 1112.0s, [0.345, 0.133, 1.0, 0.878]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1138.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.222, 0.116, 0.944, 0.853]: listen to (a person), stand, watch (a person)."}]}, {"video": "X5wWhZ2r9kc.mp4", "clip": [1112, 1191], "conversations": [{"from": "human", "timestamps": 1142.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.007, 0.133, 0.528, 0.894]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.514, 0.316, 0.955, 0.866]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1191.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.231, 0.126, 0.929, 0.872]?"}, {"from": "gpt", "value": ["At Now, [0.231, 0.126, 0.929, 0.872]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1188.0s to 1190.0s, [0.237, 0.127, 0.961, 0.878]->[0.222, 0.13, 0.923, 0.866]->[0.223, 0.104, 0.919, 0.872]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "X5wWhZ2r9kc.mp4", 
"clip": [1114, 1221], "conversations": [{"from": "human", "timestamps": 1144.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.015, 0.109, 0.632, 0.881]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1221.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.592, 0.343, 0.899, 0.872]?"}, {"from": "gpt", "value": ["At Now, [0.592, 0.343, 0.899, 0.872]: stand, listen to (a person), watch (a person).", "From 1218.0s to 1220.0s, [0.573, 0.334, 0.856, 0.876]->[0.579, 0.336, 0.865, 0.875]->[0.584, 0.339, 0.883, 0.873]: stand, listen to (a person), watch (a person).", "At 1217.0s, [0.563, 0.335, 0.858, 0.875]: stand, listen to (a person)."]}]}, {"video": "X5wWhZ2r9kc.mp4", "clip": [1177, 1227], "conversations": [{"from": "human", "timestamps": 1207.0, "value": "For the person at bbox [0.611, 0.37, 0.928, 0.876], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.611, 0.37, 0.928, 0.876]: stand.", "At 1206.0s, [0.648, 0.337, 0.915, 0.882]: stand."]}, {"from": "human", "timestamps": 1227.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.027, 0.115, 0.846, 0.865]: watch (a person), stand."}]}, {"video": "X5wWhZ2r9kc.mp4", "clip": [1244, 1299], "conversations": [{"from": "human", "timestamps": 1274.0, "value": "For the person at bbox [0.142, 0.18, 0.744, 0.842], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.142, 0.18, 0.744, 0.842]: bend/bow (at the waist), touch (an object).", "From 1271.0s to 1273.0s, [0.117, 0.163, 0.61, 0.88]->[0.261, 0.427, 0.715, 0.877]->[0.02, 0.109, 0.647, 0.89]: bend/bow (at the waist), touch (an object).", "At 1270.0s, [0.0, 0.124, 0.104, 0.878]: walk.", "At 1269.0s, [0.488, 0.256, 0.725, 0.743]: stand, touch (an object).", "At 1268.0s, [0.513, 0.27, 0.741, 0.742]: stand, open (e.g., a window, a car door).", "From 1266.0s to 1267.0s, [0.072, 0.103, 0.977, 0.881]->[0.094, 0.118, 0.907, 0.891]: sit."]}, {"from": "human", "timestamps": 1299.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.102, 0.131, 0.685, 0.864]: watch (a person), stand."}]}, {"video": "X5wWhZ2r9kc.mp4", "clip": [1237, 1355], "conversations": [{"from": "human", "timestamps": 1267.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.094, 0.118, 0.907, 0.891]: sit."}, {"from": "human", "timestamps": 1355.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.05, 0.082, 0.669, 0.895]?"}, {"from": "gpt", "value": ["At Now, [0.05, 0.082, 0.669, 0.895]: stand.", "From 1352.0s to 1354.0s, [0.014, 0.092, 0.654, 0.894]->[0.014, 0.083, 0.621, 0.884]->[0.032, 0.099, 0.652, 0.888]: stand."]}]}, {"video": "X5wWhZ2r9kc.mp4", "clip": [1365, 1400], "conversations": [{"from": "human", "timestamps": 1395.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.024, 0.077, 0.581, 0.917]: watch (a person), stand."}, {"from": "human", "timestamps": 1400.0, "value": "What actions is the person in bbox [0.013, 0.088, 0.536, 0.907] doing in the current frame, and what were the person 
previous actions?"}, {"from": "gpt", "value": ["At Now, [0.013, 0.088, 0.536, 0.907]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1399.0s, [0.026, 0.08, 0.669, 0.908]: stand, talk to (e.g., self, a person, a group).", "At 1398.0s, [0.017, 0.086, 0.641, 0.899]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "X5wWhZ2r9kc.mp4", "clip": [1399, 1461], "conversations": [{"from": "human", "timestamps": 1429.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.23, 0.076, 0.901, 0.9]: watch (a person), stand."}, {"from": "human", "timestamps": 1461.0, "value": "For the person at bbox [0.429, 0.117, 0.939, 0.865], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.429, 0.117, 0.939, 0.865]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1457.0s to 1460.0s, [0.572, 0.344, 0.81, 0.868]->[0.379, 0.124, 0.915, 0.882]->[0.429, 0.125, 0.894, 0.885]->[0.413, 0.124, 0.909, 0.883]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1454.0s to 1456.0s, [0.0, 0.111, 0.49, 0.883]->[0.003, 0.109, 0.479, 0.883]->[0.0, 0.12, 0.481, 0.88]: stand, watch (a person)."]}]}, {"video": "X5wWhZ2r9kc.mp4", "clip": [1411, 1488], "conversations": [{"from": "human", "timestamps": 1441.0, "value": "What actions is the person in bbox [0.004, 0.09, 0.384, 0.908] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.004, 0.09, 0.384, 0.908]: stand, listen to (a person), watch (a person).", "At 1440.0s, [0.087, 0.14, 0.627, 0.897]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1488.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.312, 0.113, 0.899, 0.878]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "X5wWhZ2r9kc.mp4", "clip": [1518, 1555], "conversations": [{"from": "human", "timestamps": 1548.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.041, 0.102, 0.737, 0.859]: stand, answer phone."}, {"from": "human", "timestamps": 1555.0, "value": "For the person at bbox [0.04, 0.112, 0.783, 0.848], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.04, 0.112, 0.783, 0.848]: stand, answer phone.", "At 1554.0s, [0.033, 0.117, 0.761, 0.853]: stand, answer phone."]}]}, {"video": "X5wWhZ2r9kc.mp4", "clip": [1643, 1710], "conversations": [{"from": "human", "timestamps": 1673.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.204, 0.141, 0.964, 0.842]: watch (a person), stand."}, {"from": "human", "timestamps": 1710.0, "value": "Describe the actions of the person at bbox [0.049, 0.258, 0.562, 0.873] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.049, 0.258, 0.562, 0.873]: sit, listen to (a person).", "At 1709.0s, [0.05, 0.252, 0.555, 0.857]: sit, listen to (a person)."]}]}, {"video": "X5wWhZ2r9kc.mp4", "clip": [1686, 1720], "conversations": [{"from": "human", "timestamps": 1716.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the 
current frame, [0.204, 0.151, 0.989, 0.869]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1720.0, "value": "For the person at bbox [0.555, 0.415, 0.691, 0.682], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.555, 0.415, 0.691, 0.682]: sit, touch (an object), listen to (a person).", "From 1718.0s to 1719.0s, [0.557, 0.415, 0.684, 0.707]->[0.557, 0.413, 0.684, 0.695]: sit, touch (an object), listen to (a person), watch (a person)."]}]}, {"video": "XF87VL5T0aA.mkv", "clip": [882, 951], "conversations": [{"from": "human", "timestamps": 912.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.162, 0.339, 0.412, 0.818]: watch (a person), sit, touch (an object), talk to (e.g., self, a person, a group); [0.574, 0.361, 0.781, 0.791]: listen to (a person), sit, touch (an object), watch (a person)."}, {"from": "human", "timestamps": 951.0, "value": "Describe the actions of the person at bbox [0.033, 0.166, 0.65, 0.831] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.033, 0.166, 0.65, 0.831]: sit, talk to (e.g., self, a person, a group).", "From 943.0s to 950.0s, [0.033, 0.165, 0.654, 0.828]->[0.034, 0.164, 0.651, 0.826]->[0.037, 0.17, 0.652, 0.829]->[0.031, 0.168, 0.642, 0.823]->[0.029, 0.16, 0.642, 0.837]->[0.017, 0.159, 0.611, 0.832]->[0.027, 0.165, 0.621, 0.831]->[0.024, 0.164, 0.662, 0.827]: sit, talk to (e.g., self, a person, a group).", "From 941.0s to 942.0s, [0.041, 0.169, 0.623, 0.823]->[0.04, 0.156, 0.631, 0.83]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 939.0s to 940.0s, [0.197, 0.175, 0.796, 0.829]->[0.189, 0.166, 0.789, 0.83]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 930.0s to 938.0s, [0.193, 0.167, 0.808, 0.823]->[0.183, 0.168, 0.814, 0.824]->[0.194, 0.169, 0.806, 0.829]->[0.192, 0.169, 0.785, 0.827]->[0.188, 0.168, 0.791, 0.828]->[0.185, 0.164, 0.818, 0.848]->[0.184, 0.156, 0.797, 0.823]->[0.205, 0.161, 0.815, 0.817]->[0.199, 0.167, 0.805, 0.818]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 929.0s, [0.188, 0.165, 0.803, 0.833]: sit, stand, touch (an object), talk to (e.g., self, a person, a group).", "From 927.0s to 928.0s, [0.207, 0.166, 0.798, 0.814]->[0.187, 0.168, 0.785, 0.826]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 926.0s, [0.199, 0.173, 0.793, 0.829]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "XF87VL5T0aA.mkv", "clip": [946, 1012], "conversations": [{"from": "human", "timestamps": 976.0, "value": "Describe the actions of the person at bbox [0.479, 0.305, 0.719, 0.827] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.479, 0.305, 0.719, 0.827]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 973.0s to 975.0s, [0.548, 0.291, 0.743, 0.831]->[0.502, 0.297, 0.731, 0.832]->[0.523, 0.309, 0.727, 0.83]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 971.0s to 972.0s, [0.558, 0.289, 0.752, 0.836]->[0.566, 0.293, 0.752, 0.836]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 969.0s to 970.0s, [0.606, 0.314, 0.773, 0.823]->[0.581, 0.281, 0.798, 0.831]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": 
"human", "timestamps": 1012.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.077, 0.227, 0.605, 0.826]: sit, touch (an object), listen to (a person), watch (a person), carry/hold (an object), talk to (e.g., self, a person, a group); [0.596, 0.164, 1.0, 0.844]: listen to (a person), sit, watch (a person), drink."}]}, {"video": "XF87VL5T0aA.mkv", "clip": [1000, 1073], "conversations": [{"from": "human", "timestamps": 1030.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.056, 0.459, 0.157, 0.605]: carry/hold (an object), sit; [0.2, 0.488, 0.263, 0.61]: sit; [0.234, 0.435, 0.376, 0.667]: watch (a person), sit, eat, touch (an object); [0.589, 0.358, 0.769, 0.614]: carry/hold (an object), sit, talk to (e.g., self, a person, a group); [0.647, 0.288, 0.988, 0.825]: sit, touch (an object), listen to (a person), watch (a person), give/serve (an object) to (a person), eat."}, {"from": "human", "timestamps": 1073.0, "value": "Describe the actions of the person at bbox [0.051, 0.205, 0.408, 0.811] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.051, 0.205, 0.408, 0.811]: sit, touch (an object), listen to (a person), watch (a person).", "At 1072.0s, [0.074, 0.223, 0.4, 0.821]: sit, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1071.0s, [0.077, 0.233, 0.4, 0.825]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1070.0s, [0.073, 0.219, 0.395, 0.814]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1069.0s, [0.012, 0.163, 0.439, 0.814]: sit, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1068.0s, [0.013, 0.162, 0.425, 0.805]: sit, touch (an object), listen to (a person)."]}]}, {"video": "XF87VL5T0aA.mkv", "clip": [1109, 1140], "conversations": [{"from": "human", "timestamps": 1139.0, "value": "What actions did the person at bbox [0.105, 0.294, 0.204, 0.566] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.105, 0.294, 0.204, 0.566]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1137.0s to 1138.0s, [0.093, 0.304, 0.188, 0.552]->[0.098, 0.297, 0.182, 0.55]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1140.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.105, 0.296, 0.197, 0.551]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.172, 0.31, 0.306, 0.83]: carry/hold (an object), stand, listen to (a person); [0.56, 0.41, 0.7, 0.837]: watch (a person), talk to (e.g., self, a person, a group), bend/bow (at the waist); [0.68, 0.369, 0.81, 0.712]: listen to (a person), watch (a person), bend/bow (at the waist); [0.826, 0.414, 0.878, 0.553]: walk."}]}, {"video": "XF87VL5T0aA.mkv", "clip": [1194, 1244], "conversations": [{"from": "human", "timestamps": 1224.0, "value": "What actions did the person at bbox [0.566, 0.232, 0.981, 0.828] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.566, 0.232, 0.981, 0.828]: stand, listen to (a person), watch (a person).", "At 1223.0s, [0.239, 0.195, 0.771, 0.827]: stand, listen to (a person).", "At 1222.0s, [0.251, 0.195, 0.759, 0.824]: stand, 
listen to (a person), watch (a person).", "At 1221.0s, [0.258, 0.21, 0.742, 0.83]: stand, listen to (a person).", "At 1220.0s, [0.261, 0.177, 0.752, 0.828]: stand, listen to (a person), watch (a person).", "At 1219.0s, [0.252, 0.172, 0.755, 0.834]: stand, listen to (a person).", "From 1217.0s to 1218.0s, [0.265, 0.169, 0.753, 0.83]->[0.255, 0.168, 0.766, 0.831]: stand, listen to (a person), watch (a person).", "At 1216.0s, [0.044, 0.17, 0.457, 0.825]: stand, watch (a person).", "From 1212.0s to 1215.0s, [0.037, 0.152, 0.512, 0.819]->[0.025, 0.159, 0.531, 0.823]->[0.029, 0.18, 0.453, 0.829]->[0.042, 0.171, 0.461, 0.82]: stand, listen to (a person), watch (a person).", "At 1211.0s, [0.041, 0.159, 0.536, 0.816]: stand, watch (a person).", "At 1210.0s, [0.017, 0.159, 0.601, 0.828]: stand.", "From 1207.0s to 1209.0s, [0.0, 0.166, 0.592, 0.82]->[0.0, 0.155, 0.565, 0.828]->[0.027, 0.165, 0.459, 0.83]: stand, watch (a person).", "At 1206.0s, [0.001, 0.157, 0.482, 0.818]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1244.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.438, 0.37, 0.833, 0.839]: watch (a person), stand, fight/hit (a person), martial art; [0.686, 0.358, 0.942, 0.837]: watch (a person), stand, fight/hit (a person), martial art."}]}, {"video": "XF87VL5T0aA.mkv", "clip": [1232, 1289], "conversations": [{"from": "human", "timestamps": 1262.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.219, 0.303, 0.515, 0.824]: watch (a person), fight/hit (a person); [0.534, 0.218, 0.738, 0.827]: martial art, fight/hit (a person)."}, {"from": "human", "timestamps": 1289.0, "value": "What actions did the person at bbox [0.571, 0.163, 0.945, 0.816] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.571, 0.163, 0.945, 0.816]: carry/hold (an object), fight/hit (a person), martial art.", "At 1288.0s, [0.225, 0.212, 0.848, 0.787]: fight/hit (a person), watch (a person), martial art.", "At 1287.0s, [0.549, 0.448, 0.975, 0.842]: touch (an object), fight/hit (a person), martial art.", "At 1286.0s, [0.464, 0.454, 0.855, 0.826]: bend/bow (at the waist), touch (an object), fight/hit (a person).", "At 1285.0s, [0.537, 0.299, 0.889, 0.772]: watch (a person), martial art.", "At 1284.0s, [0.425, 0.243, 0.697, 0.828]: fight/hit (a person), martial art."]}]}, {"video": "XF87VL5T0aA.mkv", "clip": [1240, 1369], "conversations": [{"from": "human", "timestamps": 1270.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.231, 0.154, 0.61, 0.745]: stand, fight/hit (a person)."}, {"from": "human", "timestamps": 1369.0, "value": "Describe the actions of the person at bbox [0.002, 0.243, 0.247, 0.808] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.002, 0.243, 0.247, 0.808]: sit, watch (a person).", "At 1368.0s, [0.0, 0.24, 0.255, 0.759]: sit."]}]}, {"video": "XF87VL5T0aA.mkv", "clip": [1383, 1431], "conversations": [{"from": "human", "timestamps": 1413.0, "value": "What actions is the person in bbox [0.147, 0.18, 0.512, 0.835] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.147, 0.18, 0.512, 0.835]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1411.0s to 1412.0s, [0.131, 0.2, 0.505, 
0.829]->[0.133, 0.189, 0.512, 0.837]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1431.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.001, 0.325, 0.088, 0.621]: listen to (a person), sit, watch (a person); [0.17, 0.287, 0.34, 0.584]: listen to (a person), sit, touch (an object), watch (a person); [0.484, 0.3, 0.613, 0.564]: listen to (a person), sit, watch (a person), talk to (e.g., self, a person, a group); [0.755, 0.234, 0.846, 0.515]: martial art; [0.787, 0.348, 1.0, 0.841]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.834, 0.256, 0.89, 0.543]: martial art; [0.867, 0.234, 0.97, 0.545]: martial art."}]}, {"video": "XF87VL5T0aA.mkv", "clip": [1354, 1445], "conversations": [{"from": "human", "timestamps": 1384.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.15, 0.207, 0.758, 0.819]: sit; [0.589, 0.181, 0.809, 0.742]: watch (a person), martial art; [0.82, 0.202, 0.911, 0.667]: martial art."}, {"from": "human", "timestamps": 1445.0, "value": "What actions did the person at bbox [0.512, 0.424, 0.638, 0.81] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.512, 0.424, 0.638, 0.81]: stand, grab (a person), listen to (a person).", "At 1443.0s, [0.538, 0.159, 0.917, 0.836]: stand, watch (a person)."]}]}, {"video": "XF87VL5T0aA.mkv", "clip": [1430, 1461], "conversations": [{"from": "human", "timestamps": 1460.0, "value": "For the person at bbox [0.699, 0.153, 0.997, 0.828], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.699, 0.153, 0.997, 0.828]: stand, watch (a person).", "At 1459.0s, [0.171, 0.158, 0.936, 0.823]: stand, watch (a person).", "At 1458.0s, [0.662, 0.166, 1.0, 0.837]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1461.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.02, 0.342, 0.314, 0.837]: watch (a person), sit; [0.61, 0.196, 0.938, 0.835]: stand, lift/pick up."}]}, {"video": "XF87VL5T0aA.mkv", "clip": [1494, 1548], "conversations": [{"from": "human", "timestamps": 1524.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.149, 0.257, 0.335, 0.829]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.373, 0.258, 0.51, 0.835]: listen to (a person), stand, smoke, watch (a person); [0.678, 0.222, 0.842, 0.824]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.743, 0.157, 1.0, 0.832]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1548.0, "value": "What actions did the person at bbox [0.547, 0.336, 0.64, 0.534] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.547, 0.336, 0.64, 0.534]: stand, listen to (a person), watch (a person).", "From 1546.0s to 1547.0s, [0.546, 0.341, 0.659, 0.601]->[0.548, 0.335, 0.63, 0.541]: stand, listen to (a person), watch (a person)."]}]}, {"video": "XF87VL5T0aA.mkv", "clip": [1479, 1586], "conversations": [{"from": "human", "timestamps": 1509.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.009, 0.167, 0.446, 0.833]: stand; [0.548, 0.405, 0.66, 0.84]: stand; 
[0.625, 0.252, 0.997, 0.831]: watch (a person), stand."}, {"from": "human", "timestamps": 1586.0, "value": "Describe the actions of the person at bbox [0.502, 0.201, 0.646, 0.832] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.502, 0.201, 0.646, 0.832]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1583.0s to 1585.0s, [0.503, 0.196, 0.647, 0.831]->[0.506, 0.198, 0.646, 0.83]->[0.503, 0.201, 0.645, 0.834]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1582.0s, [0.484, 0.191, 0.648, 0.813]: talk to (e.g., self, a person, a group), watch (a person), martial art.", "From 1580.0s to 1581.0s, [0.515, 0.171, 0.907, 0.835]->[0.507, 0.171, 0.891, 0.836]: stand, listen to (a person), watch (a person).", "At 1579.0s, [0.497, 0.164, 0.893, 0.838]: stand, listen to (a person).", "From 1575.0s to 1578.0s, [0.525, 0.168, 0.92, 0.84]->[0.479, 0.181, 0.875, 0.839]->[0.486, 0.178, 0.882, 0.84]->[0.488, 0.179, 0.877, 0.833]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1574.0s, [0.533, 0.168, 0.925, 0.841]: stand, listen to (a person), watch (a person).", "From 1572.0s to 1573.0s, [0.623, 0.201, 1.0, 0.835]->[0.609, 0.161, 1.0, 0.837]: bend/bow (at the waist), watch (a person)."]}]}, {"video": "XF87VL5T0aA.mkv", "clip": [1532, 1584], "conversations": [{"from": "human", "timestamps": 1562.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.06, 0.172, 0.638, 0.823]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1584.0, "value": "What actions is the person in bbox [0.198, 0.29, 0.309, 0.728] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.198, 0.29, 0.309, 0.728]: stand, listen to (a person), watch (a person).", "At 1583.0s, [0.201, 0.281, 0.328, 0.732]: stand, listen to (a person), watch (a person)."]}]}, {"video": "XF87VL5T0aA.mkv", "clip": [1631, 1737], "conversations": [{"from": "human", "timestamps": 1661.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.123, 0.242, 0.24, 0.604]: listen to (a person), stand, watch (a person); [0.189, 0.259, 0.389, 0.838]: listen to (a person), stand, watch (a person); [0.359, 0.264, 0.514, 0.833]: listen to (a person), stand, watch (a person); [0.525, 0.23, 0.726, 0.819]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.72, 0.312, 0.799, 0.65]: listen to (a person), stand, watch (a person); [0.765, 0.21, 0.922, 0.589]: listen to (a person), stand."}, {"from": "human", "timestamps": 1737.0, "value": "What actions did the person at bbox [0.051, 0.202, 0.229, 0.827] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.051, 0.202, 0.229, 0.827]: stand, listen to (a person), watch (a person).", "At 1736.0s, [0.0, 0.167, 0.189, 0.834]: stand, listen to (a person), watch (a person).", "From 1733.0s to 1735.0s, [0.0, 0.167, 0.176, 0.84]->[0.0, 0.164, 0.194, 0.832]->[0.0, 0.173, 0.194, 0.842]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1732.0s, [0.0, 0.165, 0.212, 0.841]: stand, listen to (a person), watch (a person).", "From 1729.0s to 1731.0s, [0.0, 0.207, 0.323, 0.817]->[0.007, 0.183, 0.323, 0.813]->[0.002, 0.172, 0.253, 0.844]: walk, watch (a person).", "At 1728.0s, [0.0, 0.166, 0.245, 0.831]: stand, watch (a person).", "At 1727.0s, 
[0.0, 0.166, 0.269, 0.831]: stand."]}]}, {"video": "XF87VL5T0aA.mkv", "clip": [1657, 1719], "conversations": [{"from": "human", "timestamps": 1687.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.438, 0.257, 0.579, 0.834]?"}, {"from": "gpt", "value": ["In the current frame, [0.438, 0.257, 0.579, 0.834]: stand, listen to (a person), watch (a person).", "At 1686.0s, [0.441, 0.263, 0.573, 0.834]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1719.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.252, 0.331, 0.526, 0.836]: watch (a person), martial art, fight/hit (a person); [0.539, 0.252, 0.889, 0.825]: martial art, fight/hit (a person)."}]}, {"video": "XF87VL5T0aA.mkv", "clip": [1744, 1795], "conversations": [{"from": "human", "timestamps": 1774.0, "value": "What actions is the person in bbox [0.697, 0.301, 0.999, 0.833] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.697, 0.301, 0.999, 0.833]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1768.0s to 1773.0s, [0.702, 0.287, 1.0, 0.832]->[0.708, 0.298, 1.0, 0.834]->[0.702, 0.298, 1.0, 0.831]->[0.695, 0.295, 1.0, 0.84]->[0.696, 0.289, 1.0, 0.834]->[0.698, 0.295, 1.0, 0.836]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1766.0s to 1767.0s, [0.684, 0.278, 1.0, 0.837]->[0.693, 0.28, 1.0, 0.834]: stand, listen to (a person), watch (a person).", "At 1765.0s, [0.679, 0.279, 1.0, 0.838]: stand, watch (a person).", "From 1763.0s to 1764.0s, [0.676, 0.285, 1.0, 0.834]->[0.675, 0.278, 1.0, 0.832]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1762.0s, [0.692, 0.276, 1.0, 0.838]: stand, listen to (a person), watch (a person).", "At 1761.0s, [0.693, 0.277, 1.0, 0.834]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1759.0s to 1760.0s, [0.697, 0.287, 0.998, 0.832]->[0.691, 0.286, 1.0, 0.835]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1757.0s to 1758.0s, [0.682, 0.287, 1.0, 0.832]->[0.69, 0.284, 0.999, 0.838]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1754.0s to 1756.0s, [0.674, 0.276, 1.0, 0.831]->[0.682, 0.285, 1.0, 0.832]->[0.682, 0.282, 1.0, 0.835]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1753.0s, [0.675, 0.279, 1.0, 0.827]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1752.0s, [0.62, 0.242, 0.942, 0.845]: stand, listen to (a person), watch (a person).", "At 1751.0s, [0.62, 0.247, 0.935, 0.845]: listen to (a person), watch (a person).", "At 1750.0s, [0.622, 0.254, 0.939, 0.842]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1749.0s, [0.622, 0.253, 0.939, 0.844]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1795.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.014, 0.337, 0.104, 0.839]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.142, 0.369, 0.254, 0.822]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.558, 0.356, 0.628, 0.615]: walk."}]}, {"video": "XV_FF3WC7kA.mkv", 
"clip": [922, 958], "conversations": [{"from": "human", "timestamps": 952.0, "value": "Describe the actions of the person at bbox [0.538, 0.253, 0.604, 0.569] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.538, 0.253, 0.604, 0.569]: stand, grab (a person), hug (a person).", "From 950.0s to 951.0s, [0.556, 0.253, 0.614, 0.577]->[0.549, 0.25, 0.601, 0.571]: stand, watch (a person)."]}, {"from": "human", "timestamps": 958.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.002, 0.152, 0.228, 0.869]: stand, grab (a person); [0.167, 0.129, 0.396, 0.89]: watch (a person), stand; [0.399, 0.193, 0.644, 0.89]: watch (a person), stand, grab (a person); [0.584, 0.23, 0.755, 0.882]: watch (a person), stand, hug (a person); [0.731, 0.198, 0.978, 0.885]: stand."}]}, {"video": "XV_FF3WC7kA.mkv", "clip": [906, 998], "conversations": [{"from": "human", "timestamps": 936.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.14, 0.324, 0.447, 0.885]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 998.0, "value": "Describe the actions of the person at bbox [0.446, 0.24, 0.669, 0.784] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.446, 0.24, 0.669, 0.784]: sit, listen to (a person).", "From 995.0s to 997.0s, [0.448, 0.238, 0.681, 0.78]->[0.449, 0.238, 0.673, 0.757]->[0.451, 0.227, 0.657, 0.767]: sit, listen to (a person).", "At 994.0s, [0.447, 0.231, 0.667, 0.769]: sit, listen to (a person), watch (a person)."]}]}, {"video": "XV_FF3WC7kA.mkv", "clip": [946, 977], "conversations": [{"from": "human", "timestamps": 976.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.741, 0.189, 0.782, 0.499]?"}, {"from": "gpt", "value": ["At Now, [0.741, 0.189, 0.782, 0.499]: stand, text on/look at a cellphone, listen to (a person), talk to (e.g., self, a person, a group).", "At 975.0s, [0.737, 0.185, 0.784, 0.499]: stand, talk to (e.g., self, a person, a group).", "At 974.0s, [0.749, 0.191, 0.798, 0.482]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 973.0s, [0.729, 0.187, 0.784, 0.48]: stand, watch (a person).", "At 972.0s, [0.72, 0.19, 0.785, 0.484]: stand, take a photo, listen to (a person), watch (a person).", "At 971.0s, [0.727, 0.186, 0.788, 0.481]: stand, carry/hold (an object), take a photo, listen to (a person), watch (a person).", "At 970.0s, [0.727, 0.187, 0.785, 0.477]: stand, carry/hold (an object), take a photo."]}, {"from": "human", "timestamps": 977.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.499, 0.203, 0.544, 0.507]: watch (a person), stand; [0.535, 0.215, 0.572, 0.489]: watch (a person), stand; [0.691, 0.185, 0.749, 0.489]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.737, 0.191, 0.781, 0.499]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "XV_FF3WC7kA.mkv", "clip": [1186, 1223], "conversations": [{"from": "human", "timestamps": 1216.0, "value": "What actions is the person in bbox [0.276, 0.121, 0.628, 0.902] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.276, 0.121, 0.628, 0.902]: stand.", "At 1215.0s, [0.323, 0.137, 0.649, 
0.895]: stand.", "From 1210.0s to 1213.0s, [0.077, 0.215, 0.45, 0.885]->[0.081, 0.233, 0.46, 0.888]->[0.087, 0.204, 0.447, 0.892]->[0.089, 0.216, 0.451, 0.89]: stand.", "From 1204.0s to 1209.0s, [0.403, 0.158, 0.742, 0.918]->[0.266, 0.142, 0.737, 0.894]->[0.0, 0.104, 0.533, 0.895]->[0.065, 0.155, 0.328, 0.883]->[0.151, 0.166, 0.434, 0.893]->[0.17, 0.181, 0.477, 0.892]: walk.", "At 1203.0s, [0.484, 0.21, 0.677, 0.899]: walk, touch (an object).", "At 1202.0s, [0.472, 0.187, 0.661, 0.88]: walk.", "At 1201.0s, [0.465, 0.199, 0.672, 0.88]: stand, carry/hold (an object), listen to (a person).", "At 1200.0s, [0.464, 0.223, 0.685, 0.884]: stand, touch (an object), listen to (a person), watch (a person).", "At 1199.0s, [0.461, 0.241, 0.68, 0.881]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1198.0s, [0.473, 0.243, 0.689, 0.879]: stand, touch (an object), listen to (a person).", "At 1197.0s, [0.498, 0.233, 0.705, 0.884]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1196.0s, [0.484, 0.264, 0.676, 0.883]: stand, touch (an object).", "At 1195.0s, [0.454, 0.239, 0.632, 0.88]: stand."]}, {"from": "human", "timestamps": 1223.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.437, 0.123, 0.77, 0.895]: stand."}]}, {"video": "XV_FF3WC7kA.mkv", "clip": [1262, 1300], "conversations": [{"from": "human", "timestamps": 1292.0, "value": "For the person at bbox [0.346, 0.159, 0.814, 0.892], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.346, 0.159, 0.814, 0.892]: stand, answer phone, carry/hold (an object).", "At 1291.0s, [0.366, 0.174, 0.745, 0.889]: stand, answer phone, carry/hold (an object).", "At 1290.0s, [0.452, 0.201, 0.863, 0.896]: stand, carry/hold (an object).", "At 1289.0s, [0.459, 0.206, 0.872, 0.9]: stand, carry/hold (an object), text on/look at a cellphone.", "At 1288.0s, [0.405, 0.175, 0.769, 0.901]: stand, carry/hold (an object)."]}, {"from": "human", "timestamps": 1300.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.057, 0.364, 0.361, 0.895]: stand, touch (an object); [0.642, 0.333, 0.944, 0.899]: stand, hit (an object)."}]}, {"video": "XV_FF3WC7kA.mkv", "clip": [1293, 1368], "conversations": [{"from": "human", "timestamps": 1323.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.183, 0.096, 0.726, 0.875]: sit."}, {"from": "human", "timestamps": 1368.0, "value": "What actions is the person in bbox [0.234, 0.131, 0.91, 0.879] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.234, 0.131, 0.91, 0.879]: sit, talk to (e.g., self, a person, a group).", "At 1367.0s, [0.24, 0.151, 0.976, 0.904]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "XV_FF3WC7kA.mkv", "clip": [1490, 1574], "conversations": [{"from": "human", "timestamps": 1520.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.006, 0.096, 0.665, 0.909]: watch (a person), stand; [0.446, 0.119, 0.647, 0.765]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1574.0, "value": "What actions did the person at 
bbox [0.298, 0.162, 0.68, 0.872] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.298, 0.162, 0.68, 0.872]: stand.", "At 1573.0s, [0.297, 0.174, 0.65, 0.853]: bend/bow (at the waist).", "At 1572.0s, [0.281, 0.201, 0.666, 0.869]: talk to (e.g., self, a person, a group)."]}]}, {"video": "XV_FF3WC7kA.mkv", "clip": [1633, 1735], "conversations": [{"from": "human", "timestamps": 1663.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.05, 0.113, 0.195, 0.878]: dance; [0.319, 0.335, 0.432, 0.7]: carry/hold (an object), sit, listen to (a person); [0.483, 0.34, 0.574, 0.612]: sit; [0.523, 0.356, 0.661, 0.689]: watch (a person), sit; [0.683, 0.643, 0.861, 0.89]: sit."}, {"from": "human", "timestamps": 1735.0, "value": "For the person at bbox [0.592, 0.369, 0.699, 0.659], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.592, 0.369, 0.699, 0.659]: sit, talk to (e.g., self, a person, a group).", "From 1733.0s to 1734.0s, [0.59, 0.379, 0.692, 0.665]->[0.595, 0.377, 0.7, 0.668]: sit, listen to (a person), watch (a person).", "At 1732.0s, [0.595, 0.389, 0.695, 0.66]: sit, carry/hold (an object), listen to (a person).", "From 1729.0s to 1731.0s, [0.593, 0.385, 0.698, 0.67]->[0.592, 0.385, 0.702, 0.671]->[0.599, 0.387, 0.698, 0.666]: sit, listen to (a person).", "From 1727.0s to 1728.0s, [0.596, 0.38, 0.695, 0.66]->[0.599, 0.376, 0.697, 0.664]: sit, listen to (a person), watch (a person)."]}]}, {"video": "XV_FF3WC7kA.mkv", "clip": [1688, 1720], "conversations": [{"from": "human", "timestamps": 1718.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.422, 0.39, 0.503, 0.572]: sit; [0.509, 0.411, 0.592, 0.56]: watch (a person), sit, hand clap."}, {"from": "human", "timestamps": 1720.0, "value": "What actions did the person at bbox [0.497, 0.419, 0.592, 0.569] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.497, 0.419, 0.592, 0.569]: sit, watch (a person).", "From 1716.0s to 1719.0s, [0.495, 0.405, 0.589, 0.556]->[0.499, 0.406, 0.573, 0.545]->[0.509, 0.411, 0.592, 0.56]->[0.512, 0.416, 0.594, 0.567]: sit, hand clap, watch (a person).", "At 1715.0s, [0.487, 0.407, 0.584, 0.55]: sit, watch (a person).", "At 1714.0s, [0.484, 0.399, 0.582, 0.549]: sit, listen to (a person), watch (a person)."]}]}, {"video": "XglAvHaEtHY.mp4", "clip": [957, 1013], "conversations": [{"from": "human", "timestamps": 987.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.058, 0.284, 0.168, 0.785]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.199, 0.338, 0.303, 0.761]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.277, 0.437, 0.418, 0.751]: watch (a person), talk to (e.g., self, a person, a group), bend/bow (at the waist); [0.457, 0.419, 0.507, 0.606]: carry/hold (an object), listen to (a person), watch (a person), walk; [0.57, 0.421, 0.632, 0.632]: carry/hold (an object), listen to (a person), watch (a person), walk; [0.654, 0.387, 0.729, 0.66]: watch (a person), walk."}, {"from": "human", "timestamps": 1013.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.393, 0.29, 0.572, 0.815]?"}, {"from": "gpt", "value": ["At Now, [0.393, 0.29, 0.572, 0.815]: stand, carry/hold (an object), touch 
(an object), listen to (a person), watch (a person).", "From 1007.0s to 1012.0s, [0.365, 0.293, 0.592, 0.779]->[0.361, 0.313, 0.559, 0.748]->[0.391, 0.306, 0.583, 0.741]->[0.371, 0.304, 0.555, 0.722]->[0.406, 0.292, 0.576, 0.723]->[0.369, 0.299, 0.559, 0.721]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "XglAvHaEtHY.mp4", "clip": [1155, 1232], "conversations": [{"from": "human", "timestamps": 1185.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.034, 0.208, 0.107, 0.435]: carry/hold (an object), sit, ride (e.g., a bike, a car, a horse), watch (a person); [0.118, 0.185, 0.476, 0.865]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.531, 0.325, 0.82, 0.842]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1232.0, "value": "What actions is the person in bbox [0.338, 0.207, 0.784, 0.847] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.338, 0.207, 0.784, 0.847]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1229.0s to 1231.0s, [0.331, 0.203, 0.771, 0.85]->[0.317, 0.209, 0.776, 0.858]->[0.339, 0.202, 0.781, 0.866]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1226.0s to 1228.0s, [0.354, 0.204, 0.78, 0.839]->[0.319, 0.202, 0.779, 0.848]->[0.341, 0.219, 0.791, 0.84]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1224.0s to 1225.0s, [0.48, 0.18, 0.8, 0.837]->[0.394, 0.185, 0.801, 0.832]: stand, carry/hold (an object), watch (a person).", "From 1221.0s to 1223.0s, [0.324, 0.205, 0.775, 0.859]->[0.454, 0.175, 0.78, 0.837]->[0.474, 0.189, 0.801, 0.837]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1218.0s to 1220.0s, [0.597, 0.252, 0.742, 0.825]->[0.588, 0.251, 0.748, 0.823]->[0.494, 0.252, 0.741, 0.836]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1212.0s to 1217.0s, [0.313, 0.204, 0.688, 0.821]->[0.485, 0.22, 0.701, 0.843]->[0.562, 0.255, 0.733, 0.833]->[0.545, 0.254, 0.735, 0.829]->[0.584, 0.247, 0.739, 0.824]->[0.588, 0.253, 0.748, 0.835]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "XglAvHaEtHY.mp4", "clip": [1203, 1255], "conversations": [{"from": "human", "timestamps": 1233.0, "value": "Describe the actions of the person at bbox [0.332, 0.196, 0.789, 0.853] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.332, 0.196, 0.789, 0.853]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1229.0s to 1232.0s, [0.331, 0.203, 0.771, 0.85]->[0.317, 0.209, 0.776, 0.858]->[0.339, 0.202, 0.781, 0.866]->[0.338, 0.207, 0.784, 0.847]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1226.0s to 1228.0s, [0.354, 0.204, 0.78, 0.839]->[0.319, 0.202, 0.779, 0.848]->[0.341, 0.219, 0.791, 0.84]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1224.0s to 1225.0s, [0.48, 0.18, 0.8, 0.837]->[0.394, 0.185, 0.801, 0.832]: stand, carry/hold (an object), watch (a person).", "From 1221.0s to 1223.0s, [0.324, 0.205, 0.775, 0.859]->[0.454, 0.175, 0.78, 0.837]->[0.474, 0.189, 0.801, 0.837]: stand, carry/hold (an object), listen to (a person), watch (a person).", 
"From 1218.0s to 1220.0s, [0.597, 0.252, 0.742, 0.825]->[0.588, 0.251, 0.748, 0.823]->[0.494, 0.252, 0.741, 0.836]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1212.0s to 1217.0s, [0.313, 0.204, 0.688, 0.821]->[0.485, 0.22, 0.701, 0.843]->[0.562, 0.255, 0.733, 0.833]->[0.545, 0.254, 0.735, 0.829]->[0.584, 0.247, 0.739, 0.824]->[0.588, 0.253, 0.748, 0.835]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1255.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.463, 0.416, 0.603, 0.831]: carry/hold (an object), stand, watch (a person); [0.798, 0.353, 0.885, 0.732]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "XglAvHaEtHY.mp4", "clip": [1237, 1304], "conversations": [{"from": "human", "timestamps": 1267.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.353, 0.188, 0.83]: carry/hold (an object), stand, listen to (a person); [0.272, 0.201, 0.544, 0.833]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.61, 0.181, 0.875, 0.785]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1304.0, "value": "What actions is the person in bbox [0.858, 0.214, 0.93, 0.472] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.858, 0.214, 0.93, 0.472]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1303.0s, [0.861, 0.217, 0.928, 0.466]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1302.0s, [0.388, 0.275, 0.432, 0.456]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1300.0s to 1301.0s, [0.387, 0.277, 0.429, 0.459]->[0.387, 0.272, 0.429, 0.458]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1298.0s to 1299.0s, [0.389, 0.279, 0.428, 0.461]->[0.388, 0.275, 0.43, 0.456]: stand, carry/hold (an object), watch (a person).", "At 1294.0s, [0.389, 0.277, 0.43, 0.452]: stand, carry/hold (an object), watch (a person).", "At 1293.0s, [0.395, 0.247, 0.456, 0.464]: walk, carry/hold (an object), watch (a person)."]}]}, {"video": "XglAvHaEtHY.mp4", "clip": [1292, 1350], "conversations": [{"from": "human", "timestamps": 1322.0, "value": "Describe the actions of the person at bbox [0.0, 0.227, 0.101, 0.596] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.0, 0.227, 0.101, 0.596]: stand, carry/hold (an object).", "At 1321.0s, [0.0, 0.22, 0.095, 0.607]: stand, carry/hold (an object).", "From 1318.0s to 1320.0s, [0.001, 0.273, 0.07, 0.598]->[0.001, 0.234, 0.094, 0.607]->[0.0, 0.222, 0.096, 0.604]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1317.0s, [0.0, 0.279, 0.068, 0.595]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1316.0s, [0.0, 0.278, 0.067, 0.591]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1314.0s to 1315.0s, [0.0, 0.274, 0.069, 0.597]->[0.0, 0.275, 0.066, 0.595]: stand, carry/hold (an object), watch (a person).", "From 1312.0s to 1313.0s, [0.096, 0.207, 0.26, 0.64]->[0.097, 0.207, 0.251, 0.641]: sit, carry/hold (an object), listen to 
(a person).", "At 1311.0s, [0.095, 0.208, 0.263, 0.644]: sit, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1350.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.485, 0.497, 0.523, 0.622]: carry/hold (an object), sit, ride (e.g., a bike, a car, a horse)."}]}, {"video": "XglAvHaEtHY.mp4", "clip": [1359, 1426], "conversations": [{"from": "human", "timestamps": 1389.0, "value": "For the person at bbox [0.107, 0.473, 0.157, 0.69], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.107, 0.473, 0.157, 0.69]: stand, carry/hold (an object).", "At 1388.0s, [0.106, 0.475, 0.149, 0.692]: stand, carry/hold (an object), watch (a person).", "At 1387.0s, [0.109, 0.475, 0.16, 0.696]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1426.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.561, 0.37, 0.643, 0.71]: watch (a person), stand; [0.687, 0.426, 0.753, 0.657]: carry/hold (an object), stand, watch (a person); [0.837, 0.394, 0.913, 0.582]: watch (a person), stand."}]}, {"video": "XglAvHaEtHY.mp4", "clip": [1384, 1462], "conversations": [{"from": "human", "timestamps": 1414.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.008, 0.22, 0.359, 0.866]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.548, 0.178, 0.977, 0.835]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1462.0, "value": "For the person at bbox [0.516, 0.224, 0.576, 0.459], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.516, 0.224, 0.576, 0.459]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1461.0s, [0.518, 0.226, 0.575, 0.461]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 1459.0s to 1460.0s, [0.523, 0.222, 0.579, 0.458]->[0.525, 0.224, 0.583, 0.459]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "XglAvHaEtHY.mp4", "clip": [1416, 1497], "conversations": [{"from": "human", "timestamps": 1446.0, "value": "What actions did the person at bbox [0.71, 0.283, 0.873, 0.764] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.71, 0.283, 0.873, 0.764]: stand, listen to (a person), watch (a person).", "From 1444.0s to 1445.0s, [0.715, 0.283, 0.875, 0.773]->[0.713, 0.285, 0.871, 0.733]: stand, watch (a person).", "At 1443.0s, [0.7, 0.28, 0.871, 0.693]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1442.0s, [0.716, 0.281, 0.874, 0.738]: stand, watch (a person).", "At 1441.0s, [0.717, 0.277, 0.875, 0.759]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1497.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.3, 0.209, 0.685, 0.856]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.752, 0.198, 0.997, 0.845]: listen to (a person), stand, watch (a person)."}]}, {"video": "XglAvHaEtHY.mp4", "clip": [1507, 1553], "conversations": [{"from": "human", "timestamps": 1537.0, "value": "In the current frame, list each 
person's location and actions."}, {"from": "gpt", "value": "At now, [0.338, 0.246, 0.641, 0.835]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.699, 0.195, 1.0, 0.829]: listen to (a person), stand, touch (an object), watch (a person)."}, {"from": "human", "timestamps": 1553.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.312, 0.252, 0.6, 0.834]?"}, {"from": "gpt", "value": ["At Now, [0.312, 0.252, 0.6, 0.834]: stand, listen to (a person), watch (a person).", "At 1552.0s, [0.313, 0.252, 0.602, 0.839]: stand, listen to (a person), watch (a person).", "From 1549.0s to 1551.0s, [0.314, 0.256, 0.598, 0.838]->[0.286, 0.243, 0.606, 0.837]->[0.31, 0.252, 0.604, 0.839]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1548.0s, [0.313, 0.242, 0.588, 0.837]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1544.0s to 1547.0s, [0.316, 0.25, 0.616, 0.835]->[0.306, 0.238, 0.584, 0.833]->[0.315, 0.25, 0.601, 0.838]->[0.3, 0.246, 0.6, 0.838]: stand, listen to (a person), watch (a person).", "At 1543.0s, [0.316, 0.254, 0.601, 0.835]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1542.0s, [0.309, 0.262, 0.614, 0.832]: stand, talk to (e.g., self, a person, a group).", "From 1535.0s to 1541.0s, [0.356, 0.238, 0.662, 0.841]->[0.326, 0.253, 0.657, 0.838]->[0.338, 0.246, 0.641, 0.835]->[0.342, 0.246, 0.643, 0.838]->[0.323, 0.24, 0.641, 0.837]->[0.325, 0.236, 0.635, 0.843]->[0.313, 0.239, 0.585, 0.84]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1533.0s to 1534.0s, [0.337, 0.204, 0.639, 0.826]->[0.35, 0.215, 0.681, 0.833]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1531.0s to 1532.0s, [0.3, 0.288, 0.523, 0.805]->[0.393, 0.253, 0.63, 0.794]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1530.0s, [0.286, 0.238, 0.768, 0.828]: walk, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "From 1528.0s to 1529.0s, [0.0, 0.281, 0.289, 0.833]->[0.024, 0.275, 0.384, 0.833]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1526.0s to 1527.0s, [0.049, 0.307, 0.378, 0.828]->[0.026, 0.296, 0.341, 0.819]: stand, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "From 1524.0s to 1525.0s, [0.038, 0.3, 0.361, 0.819]->[0.051, 0.306, 0.37, 0.811]: stand, read, talk to (e.g., self, a person, a group).", "At 1523.0s, [0.045, 0.309, 0.381, 0.837]: stand, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "At 1522.0s, [0.049, 0.303, 0.383, 0.833]: stand, carry/hold (an object), read.", "From 1518.0s to 1521.0s, [0.046, 0.303, 0.4, 0.838]->[0.053, 0.291, 0.391, 0.835]->[0.043, 0.296, 0.39, 0.837]->[0.052, 0.306, 0.393, 0.828]: stand, read.", "At 1517.0s, [0.054, 0.289, 0.406, 0.823]: stand, carry/hold (an object), read, talk to (e.g., self, a person, a group), watch (a person).", "From 1515.0s to 1516.0s, [0.05, 0.301, 0.416, 0.829]->[0.052, 0.305, 0.387, 0.825]: stand, read, listen to (a person).", "From 1512.0s to 1514.0s, [0.071, 0.297, 0.43, 0.821]->[0.055, 0.306, 0.39, 0.823]->[0.049, 0.303, 0.389, 0.83]: stand, read.", "At 1511.0s, [0.143, 0.287, 0.435, 0.814]: walk, read, talk to (e.g., self, a person, a group).", "At 1510.0s, [0.224, 0.284, 0.527, 0.833]: walk, carry/hold (an object), 
read, talk to (e.g., self, a person, a group), watch (a person).", "From 1506.0s to 1509.0s, [0.082, 0.215, 0.397, 0.839]->[0.099, 0.253, 0.427, 0.841]->[0.144, 0.266, 0.461, 0.828]->[0.168, 0.267, 0.517, 0.826]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1505.0s, [0.095, 0.229, 0.426, 0.835]: walk, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "From 1502.0s to 1504.0s, [0.003, 0.19, 0.332, 0.858]->[0.073, 0.2, 0.389, 0.842]->[0.032, 0.216, 0.387, 0.844]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "XglAvHaEtHY.mp4", "clip": [1495, 1566], "conversations": [{"from": "human", "timestamps": 1525.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.051, 0.306, 0.37, 0.811]: read, stand, talk to (e.g., self, a person, a group); [0.592, 0.199, 0.942, 0.836]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1566.0, "value": "For the person at bbox [0.68, 0.195, 0.997, 0.84], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.68, 0.195, 0.997, 0.84]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1562.0s to 1565.0s, [0.679, 0.195, 0.987, 0.836]->[0.681, 0.19, 0.988, 0.836]->[0.682, 0.199, 0.988, 0.843]->[0.682, 0.183, 0.99, 0.836]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1557.0s to 1561.0s, [0.678, 0.187, 0.986, 0.838]->[0.678, 0.192, 0.985, 0.832]->[0.682, 0.182, 0.986, 0.837]->[0.682, 0.187, 0.99, 0.839]->[0.683, 0.188, 0.987, 0.845]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1556.0s, [0.676, 0.182, 0.986, 0.831]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1553.0s to 1555.0s, [0.681, 0.188, 0.984, 0.827]->[0.679, 0.193, 0.983, 0.833]->[0.68, 0.196, 0.985, 0.831]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1552.0s, [0.679, 0.185, 0.982, 0.837]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1549.0s to 1551.0s, [0.682, 0.196, 0.981, 0.84]->[0.679, 0.198, 0.979, 0.841]->[0.677, 0.199, 0.981, 0.84]: stand, carry/hold (an object), listen to (a person).", "At 1548.0s, [0.676, 0.188, 0.981, 0.835]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1547.0s, [0.678, 0.203, 0.975, 0.853]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1545.0s to 1546.0s, [0.679, 0.195, 0.971, 0.841]->[0.682, 0.188, 0.974, 0.834]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1544.0s, [0.677, 0.19, 0.968, 0.836]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1543.0s, [0.678, 0.2, 0.985, 0.838]: stand, touch (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 1541.0s to 1542.0s, [0.684, 0.198, 1.0, 0.829]->[0.689, 0.198, 1.0, 0.832]: stand, touch (an object), listen to (a person).", "From 1535.0s to 1540.0s, [0.719, 0.188, 0.997, 0.822]->[0.698, 0.191, 1.0, 0.83]->[0.699, 0.195, 1.0, 0.829]->[0.693, 0.192, 1.0, 0.825]->[0.694, 0.199, 1.0, 0.834]->[0.695, 0.193, 0.998, 0.829]: stand, touch (an object), listen to (a person), watch (a person).", 
"From 1533.0s to 1534.0s, [0.625, 0.215, 0.855, 0.798]->[0.637, 0.183, 0.915, 0.829]: walk, listen to (a person).", "At 1532.0s, [0.565, 0.23, 0.749, 0.793]: walk, listen to (a person), watch (a person).", "From 1530.0s to 1531.0s, [0.752, 0.21, 1.0, 0.817]->[0.49, 0.249, 0.681, 0.776]: walk, listen to (a person).", "From 1523.0s to 1529.0s, [0.584, 0.196, 0.925, 0.833]->[0.592, 0.198, 0.939, 0.831]->[0.592, 0.199, 0.942, 0.836]->[0.582, 0.189, 0.94, 0.827]->[0.584, 0.188, 0.929, 0.833]->[0.591, 0.199, 0.93, 0.816]->[0.567, 0.183, 0.958, 0.839]: stand, listen to (a person), watch (a person).", "At 1522.0s, [0.573, 0.185, 0.931, 0.825]: stand, listen to (a person).", "From 1518.0s to 1521.0s, [0.602, 0.215, 0.946, 0.836]->[0.602, 0.199, 0.948, 0.833]->[0.592, 0.196, 0.934, 0.829]->[0.592, 0.179, 0.941, 0.824]: stand, listen to (a person), watch (a person).", "At 1517.0s, [0.607, 0.213, 0.944, 0.828]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1515.0s to 1516.0s, [0.603, 0.203, 0.953, 0.835]->[0.607, 0.212, 0.94, 0.828]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1514.0s, [0.619, 0.204, 0.954, 0.837]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1512.0s to 1513.0s, [0.613, 0.194, 1.0, 0.823]->[0.617, 0.206, 0.955, 0.836]: stand, listen to (a person), watch (a person).", "From 1509.0s to 1511.0s, [0.533, 0.222, 0.791, 0.843]->[0.609, 0.219, 0.867, 0.842]->[0.621, 0.202, 0.94, 0.841]: walk, listen to (a person).", "From 1506.0s to 1508.0s, [0.348, 0.189, 0.665, 0.836]->[0.374, 0.203, 0.713, 0.839]->[0.446, 0.227, 0.701, 0.841]: walk, listen to (a person), watch (a person).", "From 1504.0s to 1505.0s, [0.342, 0.233, 0.592, 0.836]->[0.323, 0.201, 0.599, 0.836]: walk, listen to (a person).", "From 1502.0s to 1503.0s, [0.25, 0.183, 0.49, 0.839]->[0.319, 0.194, 0.556, 0.84]: walk, listen to (a person), watch (a person)."]}]}, {"video": "XglAvHaEtHY.mp4", "clip": [1552, 1595], "conversations": [{"from": "human", "timestamps": 1582.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.23, 0.354, 0.827]: listen to (a person), stand, watch (a person); [0.608, 0.203, 0.987, 0.847]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1595.0, "value": "For the person at bbox [0.155, 0.181, 0.467, 0.842], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.155, 0.181, 0.467, 0.842]: stand, listen to (a person), watch (a person).", "At 1594.0s, [0.145, 0.189, 0.46, 0.845]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1591.0s to 1593.0s, [0.15, 0.193, 0.401, 0.845]->[0.136, 0.193, 0.437, 0.829]->[0.13, 0.18, 0.426, 0.83]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1587.0s to 1590.0s, [0.197, 0.229, 0.312, 0.775]->[0.232, 0.233, 0.37, 0.828]->[0.236, 0.227, 0.397, 0.838]->[0.219, 0.204, 0.411, 0.838]: walk, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "XglAvHaEtHY.mp4", "clip": [1727, 1761], "conversations": [{"from": "human", "timestamps": 1757.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.134, 0.192, 0.467, 0.846]: carry/hold (an object), stand."}, {"from": 
"human", "timestamps": 1761.0, "value": "What actions is the person in bbox [0.356, 0.321, 0.422, 0.489] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.356, 0.321, 0.422, 0.489]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1760.0s, [0.375, 0.325, 0.445, 0.525]: sit, carry/hold (an object), watch (a person)."]}]}, {"video": "YYWdB7h1INo.mkv", "clip": [911, 953], "conversations": [{"from": "human", "timestamps": 941.0, "value": "What actions is the person in bbox [0.598, 0.086, 0.8, 0.969] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.598, 0.086, 0.8, 0.969]: walk, enter.", "At 940.0s, [0.594, 0.146, 0.691, 0.931]: walk."]}, {"from": "human", "timestamps": 953.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.607, 0.305, 0.876, 0.819]: sit."}]}, {"video": "YYWdB7h1INo.mkv", "clip": [914, 1000], "conversations": [{"from": "human", "timestamps": 944.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.04, 0.144, 0.623, 0.981]: turn (e.g., a screwdriver), bend/bow (at the waist)."}, {"from": "human", "timestamps": 1000.0, "value": "What actions did the person at bbox [0.429, 0.078, 0.592, 0.929] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.429, 0.078, 0.592, 0.929]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 999.0s, [0.427, 0.061, 0.593, 0.909]: stand, listen to (a person), watch (a person).", "At 998.0s, [0.403, 0.076, 0.588, 0.872]: stand, play with pets, listen to (a person).", "At 997.0s, [0.389, 0.135, 0.565, 0.92]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 996.0s, [0.396, 0.127, 0.568, 0.916]: bend/bow (at the waist), stand, talk to (e.g., self, a person, a group), watch (a person).", "At 995.0s, [0.38, 0.096, 0.553, 0.936]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 993.0s to 994.0s, [0.401, 0.076, 0.56, 0.921]->[0.412, 0.063, 0.565, 0.919]: stand, listen to (a person), watch (a person).", "At 992.0s, [0.406, 0.069, 0.566, 0.934]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 991.0s, [0.385, 0.073, 0.555, 0.936]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 990.0s, [0.385, 0.093, 0.553, 0.955]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 989.0s, [0.372, 0.11, 0.571, 0.937]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 988.0s, [0.415, 0.088, 0.576, 0.96]: stand, play with pets, talk to (e.g., self, a person, a group).", "From 986.0s to 987.0s, [0.397, 0.079, 0.572, 0.935]->[0.411, 0.085, 0.577, 0.938]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 982.0s to 985.0s, [0.41, 0.063, 0.567, 0.93]->[0.383, 0.074, 0.55, 0.907]->[0.41, 0.081, 0.568, 0.948]->[0.375, 0.081, 0.549, 0.932]: stand, carry/hold (an object).", "At 981.0s, [0.403, 0.073, 0.571, 0.936]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 979.0s to 980.0s, [0.385, 0.07, 0.56, 0.922]->[0.391, 0.065, 0.56, 0.902]: stand.", "From 975.0s to 978.0s, [0.406, 0.074, 0.557, 0.89]->[0.4, 0.054, 0.552, 0.899]->[0.418, 0.061, 
0.568, 0.9]->[0.417, 0.07, 0.563, 0.919]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 974.0s, [0.407, 0.066, 0.557, 0.925]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 971.0s to 973.0s, [0.407, 0.055, 0.56, 0.927]->[0.41, 0.056, 0.56, 0.929]->[0.409, 0.057, 0.56, 0.93]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 970.0s, [0.404, 0.05, 0.562, 0.928]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 968.0s to 969.0s, [0.38, 0.075, 0.548, 0.888]->[0.364, 0.066, 0.524, 0.884]: stand, carry/hold (an object).", "At 967.0s, [0.285, 0.092, 0.459, 0.895]: walk, carry/hold (an object).", "At 966.0s, [0.125, 0.119, 0.391, 0.986]: stand, carry/hold (an object), press.", "At 965.0s, [0.335, 0.089, 0.558, 0.79]: walk, carry/hold (an object).", "At 964.0s, [0.724, 0.057, 0.906, 0.934]: walk, open (e.g., a window, a car door)."]}]}, {"video": "YYWdB7h1INo.mkv", "clip": [955, 987], "conversations": [{"from": "human", "timestamps": 985.0, "value": "What actions is the person in bbox [0.375, 0.081, 0.549, 0.932] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.375, 0.081, 0.549, 0.932]: stand, carry/hold (an object).", "From 982.0s to 984.0s, [0.41, 0.063, 0.567, 0.93]->[0.383, 0.074, 0.55, 0.907]->[0.41, 0.081, 0.568, 0.948]: stand, carry/hold (an object).", "At 981.0s, [0.403, 0.073, 0.571, 0.936]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 979.0s to 980.0s, [0.385, 0.07, 0.56, 0.922]->[0.391, 0.065, 0.56, 0.902]: stand.", "From 975.0s to 978.0s, [0.406, 0.074, 0.557, 0.89]->[0.4, 0.054, 0.552, 0.899]->[0.418, 0.061, 0.568, 0.9]->[0.417, 0.07, 0.563, 0.919]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 974.0s, [0.407, 0.066, 0.557, 0.925]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 971.0s to 973.0s, [0.407, 0.055, 0.56, 0.927]->[0.41, 0.056, 0.56, 0.929]->[0.409, 0.057, 0.56, 0.93]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 970.0s, [0.404, 0.05, 0.562, 0.928]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 968.0s to 969.0s, [0.38, 0.075, 0.548, 0.888]->[0.364, 0.066, 0.524, 0.884]: stand, carry/hold (an object).", "At 967.0s, [0.285, 0.092, 0.459, 0.895]: walk, carry/hold (an object).", "At 966.0s, [0.125, 0.119, 0.391, 0.986]: stand, carry/hold (an object), press.", "At 965.0s, [0.335, 0.089, 0.558, 0.79]: walk, carry/hold (an object).", "At 964.0s, [0.724, 0.057, 0.906, 0.934]: walk, open (e.g., a window, a car door)."]}, {"from": "human", "timestamps": 987.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.411, 0.085, 0.577, 0.938]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "YYWdB7h1INo.mkv", "clip": [1005, 1078], "conversations": [{"from": "human", "timestamps": 1035.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.331, 0.186, 0.986]: watch (e.g., TV), sit."}, {"from": "human", "timestamps": 1078.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.001, 0.243, 0.317, 0.991]?"}, {"from": "gpt", "value": 
["In the current frame, [0.001, 0.243, 0.317, 0.991]: sit, talk to (e.g., self, a person, a group).", "From 1060.0s to 1077.0s, [0.0, 0.345, 0.197, 0.979]->[0.001, 0.343, 0.194, 0.982]->[0.002, 0.341, 0.194, 0.98]->[0.001, 0.336, 0.193, 0.988]->[0.001, 0.348, 0.189, 0.979]->[0.002, 0.345, 0.192, 0.983]->[0.001, 0.342, 0.192, 0.988]->[0.003, 0.344, 0.19, 0.981]->[0.001, 0.343, 0.186, 0.989]->[0.001, 0.34, 0.189, 0.993]->[0.001, 0.346, 0.191, 0.984]->[0.001, 0.348, 0.19, 0.985]->[0.002, 0.343, 0.184, 0.99]->[0.002, 0.338, 0.181, 0.986]->[0.001, 0.347, 0.183, 0.987]->[0.004, 0.345, 0.182, 0.989]->[0.001, 0.339, 0.185, 0.985]->[0.0, 0.339, 0.188, 0.985]: sit, watch (e.g., TV).", "At 1059.0s, [0.001, 0.345, 0.194, 0.982]: sit, stand, watch (e.g., TV).", "At 1058.0s, [0.0, 0.343, 0.195, 0.986]: sit, watch (e.g., TV)."]}]}, {"video": "YYWdB7h1INo.mkv", "clip": [1016, 1102], "conversations": [{"from": "human", "timestamps": 1046.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.354, 0.186, 0.98]: watch (e.g., TV), sit."}, {"from": "human", "timestamps": 1102.0, "value": "What actions did the person at bbox [0.001, 0.329, 0.173, 0.989] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.001, 0.329, 0.173, 0.989]: sit, watch (e.g., TV).", "From 1095.0s to 1101.0s, [0.0, 0.325, 0.172, 0.99]->[0.003, 0.324, 0.165, 0.991]->[0.002, 0.334, 0.168, 0.984]->[0.0, 0.321, 0.166, 0.991]->[0.001, 0.324, 0.165, 0.983]->[0.0, 0.325, 0.165, 0.979]->[0.0, 0.339, 0.165, 0.99]: sit, watch (e.g., TV).", "At 1094.0s, [0.0, 0.332, 0.165, 0.99]: sit.", "At 1093.0s, [0.001, 0.325, 0.165, 0.985]: sit, watch (e.g., TV)."]}]}, {"video": "YYWdB7h1INo.mkv", "clip": [1053, 1115], "conversations": [{"from": "human", "timestamps": 1083.0, "value": "What actions is the person in bbox [0.002, 0.332, 0.17, 0.985] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.002, 0.332, 0.17, 0.985]: sit, watch (e.g., TV).", "At 1082.0s, [0.003, 0.364, 0.151, 0.981]: stand, watch (e.g., TV).", "At 1081.0s, [0.001, 0.253, 0.445, 0.972]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1080.0s, [0.001, 0.167, 0.499, 0.989]: sit, stand, carry/hold (an object), press, talk to (e.g., self, a person, a group).", "At 1079.0s, [0.0, 0.257, 0.362, 0.986]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1078.0s, [0.001, 0.243, 0.317, 0.991]: sit, talk to (e.g., self, a person, a group).", "From 1060.0s to 1077.0s, [0.0, 0.345, 0.197, 0.979]->[0.001, 0.343, 0.194, 0.982]->[0.002, 0.341, 0.194, 0.98]->[0.001, 0.336, 0.193, 0.988]->[0.001, 0.348, 0.189, 0.979]->[0.002, 0.345, 0.192, 0.983]->[0.001, 0.342, 0.192, 0.988]->[0.003, 0.344, 0.19, 0.981]->[0.001, 0.343, 0.186, 0.989]->[0.001, 0.34, 0.189, 0.993]->[0.001, 0.346, 0.191, 0.984]->[0.001, 0.348, 0.19, 0.985]->[0.002, 0.343, 0.184, 0.99]->[0.002, 0.338, 0.181, 0.986]->[0.001, 0.347, 0.183, 0.987]->[0.004, 0.345, 0.182, 0.989]->[0.001, 0.339, 0.185, 0.985]->[0.0, 0.339, 0.188, 0.985]: sit, watch (e.g., TV).", "At 1059.0s, [0.001, 0.345, 0.194, 0.982]: sit, stand, watch (e.g., TV).", "At 1058.0s, [0.0, 0.343, 0.195, 0.986]: sit, watch (e.g., TV)."]}, {"from": "human", "timestamps": 1115.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.332, 0.17, 0.986]: watch (e.g., TV), sit."}]}, 
{"video": "YYWdB7h1INo.mkv", "clip": [1152, 1196], "conversations": [{"from": "human", "timestamps": 1182.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.0, 0.34, 0.208, 0.982]: watch (e.g., TV), sit; [0.736, 0.01, 0.996, 0.993]: watch (e.g., TV), sit."}, {"from": "human", "timestamps": 1196.0, "value": "What actions did the person at bbox [0.001, 0.341, 0.185, 0.983] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.001, 0.341, 0.185, 0.983]: sit, watch (e.g., TV).", "From 1191.0s to 1195.0s, [0.0, 0.342, 0.202, 0.983]->[0.0, 0.336, 0.203, 0.984]->[0.0, 0.333, 0.195, 0.986]->[0.0, 0.328, 0.183, 0.978]->[0.002, 0.337, 0.18, 0.978]: sit, watch (e.g., TV).", "At 1190.0s, [0.0, 0.338, 0.204, 0.985]: stand, watch (e.g., TV).", "From 1180.0s to 1189.0s, [0.0, 0.337, 0.203, 0.978]->[0.0, 0.342, 0.209, 0.975]->[0.0, 0.34, 0.208, 0.982]->[0.0, 0.327, 0.195, 0.988]->[0.0, 0.335, 0.201, 0.981]->[0.0, 0.34, 0.201, 0.98]->[0.0, 0.337, 0.2, 0.98]->[0.0, 0.335, 0.199, 0.986]->[0.0, 0.34, 0.203, 0.981]->[0.0, 0.334, 0.203, 0.986]: sit, watch (e.g., TV).", "At 1179.0s, [0.0, 0.336, 0.204, 0.982]: sit, watch (e.g., TV), watch (a person).", "From 1174.0s to 1178.0s, [0.001, 0.341, 0.14, 0.987]->[0.001, 0.346, 0.174, 0.979]->[0.0, 0.337, 0.207, 0.978]->[0.004, 0.337, 0.203, 0.973]->[0.001, 0.342, 0.202, 0.973]: sit, watch (e.g., TV).", "At 1173.0s, [0.004, 0.29, 0.162, 0.982]: sit, carry/hold (an object), watch (e.g., TV)."]}]}, {"video": "YYWdB7h1INo.mkv", "clip": [1138, 1247], "conversations": [{"from": "human", "timestamps": 1168.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.002, 0.319, 0.196, 0.982]: watch (e.g., TV), sit; [0.749, 0.005, 0.999, 0.988]: watch (e.g., TV), sit."}, {"from": "human", "timestamps": 1247.0, "value": "For the person at bbox [0.371, 0.061, 0.703, 0.974], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.371, 0.061, 0.703, 0.974]: stand, talk to (e.g., self, a person, a group).", "At 1246.0s, [0.322, 0.06, 0.736, 0.98]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "YYWdB7h1INo.mkv", "clip": [1235, 1315], "conversations": [{"from": "human", "timestamps": 1265.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.28, 0.105, 0.854, 0.976]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1315.0, "value": "What actions is the person in bbox [0.447, 0.569, 0.472, 0.682] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.447, 0.569, 0.472, 0.682]: sit, ride (e.g., a bike, a car, a horse).", "From 1313.0s to 1314.0s, [0.444, 0.598, 0.464, 0.686]->[0.447, 0.59, 0.469, 0.689]: sit, ride (e.g., a bike, a car, a horse)."]}]}, {"video": "YYWdB7h1INo.mkv", "clip": [1240, 1365], "conversations": [{"from": "human", "timestamps": 1270.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.225, 0.074, 0.722, 0.97]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1365.0, "value": "For the person at bbox [0.564, 0.113, 0.751, 0.946], what are their actions in the current frame 
and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.564, 0.113, 0.751, 0.946]: stand, carry/hold (an object).", "At 1364.0s, [0.536, 0.102, 0.733, 0.958]: stand, carry/hold (an object), listen to (a person).", "From 1362.0s to 1363.0s, [0.498, 0.059, 0.731, 0.954]->[0.539, 0.075, 0.737, 0.96]: stand, carry/hold (an object).", "At 1361.0s, [0.579, 0.088, 0.783, 0.945]: stand, carry/hold (an object), paint.", "From 1359.0s to 1360.0s, [0.403, 0.193, 0.538, 0.952]->[0.517, 0.144, 0.66, 0.953]: run/jog, paint.", "At 1358.0s, [0.409, 0.22, 0.542, 0.926]: walk, paint."]}]}, {"video": "YYWdB7h1INo.mkv", "clip": [1291, 1356], "conversations": [{"from": "human", "timestamps": 1321.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.366, 0.514, 0.421, 0.694]: ride (e.g., a bike, a car, a horse), sit."}, {"from": "human", "timestamps": 1356.0, "value": "For the person at bbox [0.536, 0.179, 0.705, 0.946], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.536, 0.179, 0.705, 0.946]: walk, listen to (a person), watch (a person).", "At 1355.0s, [0.588, 0.17, 0.776, 0.977]: walk, listen to (a person), watch (a person).", "At 1354.0s, [0.69, 0.143, 0.883, 0.981]: walk, watch (a person)."]}]}, {"video": "YYWdB7h1INo.mkv", "clip": [1389, 1429], "conversations": [{"from": "human", "timestamps": 1419.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.184, 0.275, 0.296, 0.884]: watch (a person), stand; [0.32, 0.355, 0.385, 0.57]: carry/hold (an object), walk; [0.339, 0.328, 0.457, 0.805]: carry/hold (an object), walk; [0.848, 0.252, 0.933, 0.782]: watch (a person), stand."}, {"from": "human", "timestamps": 1429.0, "value": "What actions did the person at bbox [0.619, 0.258, 0.75, 0.89] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.619, 0.258, 0.75, 0.89]: walk, carry/hold (an object), lift/pick up.", "At 1428.0s, [0.704, 0.255, 0.807, 0.863]: walk, carry/hold (an object).", "At 1427.0s, [0.774, 0.252, 0.878, 0.835]: walk, carry/hold (an object), lift/pick up.", "At 1426.0s, [0.811, 0.306, 0.933, 0.825]: bend/bow (at the waist), lift/pick up.", "At 1425.0s, [0.878, 0.354, 0.997, 0.992]: walk, carry/hold (an object).", "At 1424.0s, [0.658, 0.329, 0.786, 0.986]: walk, carry/hold (an object), lift/pick up."]}]}, {"video": "YYWdB7h1INo.mkv", "clip": [1398, 1484], "conversations": [{"from": "human", "timestamps": 1428.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.355, 0.264, 0.457, 0.795]: stand; [0.458, 0.311, 0.544, 0.708]: lift/pick up, listen to (a person), watch (a person), walk, carry/hold (an object); [0.704, 0.255, 0.807, 0.863]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1484.0, "value": "For the person at bbox [0.498, 0.121, 0.778, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.498, 0.121, 0.778, 1.0]: run/jog, carry/hold (an object).", "At 1483.0s, [0.534, 0.499, 0.726, 0.962]: carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1481.0s to 1482.0s, [0.522, 0.439, 0.72, 0.964]->[0.477, 0.441, 0.722, 0.957]: carry/hold (an object), crouch/kneel, listen to (a person), watch (a person).", "At 1480.0s, [0.521, 0.442, 0.724, 
0.956]: sit, carry/hold (an object), crouch/kneel, listen to (a person), watch (a person).", "At 1479.0s, [0.52, 0.443, 0.721, 0.963]: carry/hold (an object), crouch/kneel, listen to (a person), watch (a person).", "At 1478.0s, [0.521, 0.44, 0.721, 0.959]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1475.0s to 1477.0s, [0.466, 0.449, 0.723, 0.95]->[0.514, 0.448, 0.721, 0.957]->[0.523, 0.442, 0.72, 0.96]: carry/hold (an object), crouch/kneel, listen to (a person), watch (a person).", "From 1473.0s to 1474.0s, [0.476, 0.447, 0.723, 0.959]->[0.487, 0.449, 0.724, 0.958]: sit, carry/hold (an object), listen to (a person).", "At 1472.0s, [0.5, 0.451, 0.722, 0.962]: carry/hold (an object), crouch/kneel, listen to (a person).", "At 1471.0s, [0.468, 0.443, 0.72, 0.952]: sit, carry/hold (an object), crouch/kneel, listen to (a person).", "From 1468.0s to 1470.0s, [0.517, 0.414, 0.716, 0.955]->[0.519, 0.415, 0.714, 0.955]->[0.521, 0.42, 0.714, 0.957]: carry/hold (an object), crouch/kneel, listen to (a person), watch (a person).", "At 1467.0s, [0.524, 0.411, 0.716, 0.954]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1466.0s, [0.522, 0.408, 0.718, 0.962]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1465.0s, [0.521, 0.41, 0.716, 0.961]: carry/hold (an object), crouch/kneel, talk to (e.g., self, a person, a group), watch (a person).", "From 1463.0s to 1464.0s, [0.514, 0.407, 0.718, 0.96]->[0.481, 0.414, 0.722, 0.963]: carry/hold (an object), crouch/kneel, listen to (a person), watch (a person).", "At 1462.0s, [0.526, 0.417, 0.718, 0.955]: sit, carry/hold (an object), crouch/kneel, listen to (a person).", "From 1460.0s to 1461.0s, [0.523, 0.417, 0.718, 0.961]->[0.514, 0.418, 0.72, 0.962]: carry/hold (an object), crouch/kneel, listen to (a person).", "From 1458.0s to 1459.0s, [0.52, 0.438, 0.728, 0.961]->[0.538, 0.419, 0.725, 0.961]: sit, carry/hold (an object), crouch/kneel, watch (a person).", "At 1457.0s, [0.513, 0.43, 0.724, 0.956]: carry/hold (an object), crouch/kneel, watch (a person).", "At 1456.0s, [0.525, 0.411, 0.713, 0.955]: carry/hold (an object), crouch/kneel.", "At 1455.0s, [0.541, 0.406, 0.714, 0.958]: sit, carry/hold (an object).", "At 1454.0s, [0.537, 0.404, 0.715, 0.957]: sit, carry/hold (an object), listen to (a person).", "At 1453.0s, [0.54, 0.401, 0.716, 0.958]: carry/hold (an object), crouch/kneel.", "At 1452.0s, [0.529, 0.418, 0.718, 0.965]: carry/hold (an object), crouch/kneel, talk to (e.g., self, a person, a group).", "At 1451.0s, [0.527, 0.414, 0.717, 0.963]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1450.0s, [0.53, 0.419, 0.717, 0.96]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1448.0s to 1449.0s, [0.522, 0.425, 0.716, 0.962]->[0.523, 0.413, 0.718, 0.962]: carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1446.0s to 1447.0s, [0.527, 0.426, 0.721, 0.961]->[0.529, 0.429, 0.718, 0.96]: carry/hold (an object), crouch/kneel, talk to (e.g., self, a person, a group).", "From 1444.0s to 1445.0s, [0.524, 0.428, 0.718, 0.958]->[0.529, 0.427, 0.718, 0.962]: carry/hold (an object), crouch/kneel, talk to (e.g., self, a person, a group), watch (a person).", "At 1443.0s, [0.527, 0.434, 0.719, 0.964]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1442.0s, [0.513, 0.43, 0.716, 0.956]: sit, carry/hold (an object), 
crouch/kneel, talk to (e.g., self, a person, a group), watch (a person).", "At 1441.0s, [0.525, 0.428, 0.715, 0.964]: carry/hold (an object), listen to (a person).", "At 1440.0s, [0.525, 0.435, 0.719, 0.966]: carry/hold (an object), crouch/kneel, listen to (a person), watch (a person).", "At 1439.0s, [0.522, 0.436, 0.72, 0.965]: carry/hold (an object), crouch/kneel, watch (a person).", "At 1438.0s, [0.523, 0.432, 0.719, 0.965]: sit, carry/hold (an object), crouch/kneel, watch (a person).", "At 1437.0s, [0.513, 0.413, 0.716, 0.961]: carry/hold (an object), crouch/kneel, watch (a person).", "At 1436.0s, [0.522, 0.408, 0.718, 0.963]: carry/hold (an object), crouch/kneel.", "At 1435.0s, [0.52, 0.408, 0.718, 0.965]: sit, carry/hold (an object).", "At 1434.0s, [0.519, 0.412, 0.717, 0.965]: sit, carry/hold (an object), watch (a person).", "From 1431.0s to 1433.0s, [0.501, 0.421, 0.726, 0.959]->[0.518, 0.411, 0.719, 0.952]->[0.508, 0.411, 0.718, 0.956]: carry/hold (an object), crouch/kneel, talk to (e.g., self, a person, a group), watch (a person).", "At 1430.0s, [0.53, 0.428, 0.722, 0.964]: sit, carry/hold (an object), listen to (a person)."]}]}, {"video": "YYWdB7h1INo.mkv", "clip": [1428, 1474], "conversations": [{"from": "human", "timestamps": 1458.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.358, 0.388, 0.579, 0.944]: carry/hold (an object), crouch/kneel; [0.52, 0.438, 0.728, 0.961]: carry/hold (an object), sit, crouch/kneel, watch (a person)."}, {"from": "human", "timestamps": 1474.0, "value": "What actions is the person in bbox [0.487, 0.449, 0.724, 0.958] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.487, 0.449, 0.724, 0.958]: sit, carry/hold (an object), listen to (a person).", "At 1473.0s, [0.476, 0.447, 0.723, 0.959]: sit, carry/hold (an object), listen to (a person).", "At 1472.0s, [0.5, 0.451, 0.722, 0.962]: carry/hold (an object), crouch/kneel, listen to (a person).", "At 1471.0s, [0.468, 0.443, 0.72, 0.952]: sit, carry/hold (an object), crouch/kneel, listen to (a person).", "From 1468.0s to 1470.0s, [0.517, 0.414, 0.716, 0.955]->[0.519, 0.415, 0.714, 0.955]->[0.521, 0.42, 0.714, 0.957]: carry/hold (an object), crouch/kneel, listen to (a person), watch (a person).", "At 1467.0s, [0.524, 0.411, 0.716, 0.954]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1466.0s, [0.522, 0.408, 0.718, 0.962]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1465.0s, [0.521, 0.41, 0.716, 0.961]: carry/hold (an object), crouch/kneel, talk to (e.g., self, a person, a group), watch (a person).", "From 1463.0s to 1464.0s, [0.514, 0.407, 0.718, 0.96]->[0.481, 0.414, 0.722, 0.963]: carry/hold (an object), crouch/kneel, listen to (a person), watch (a person).", "At 1462.0s, [0.526, 0.417, 0.718, 0.955]: sit, carry/hold (an object), crouch/kneel, listen to (a person).", "From 1460.0s to 1461.0s, [0.523, 0.417, 0.718, 0.961]->[0.514, 0.418, 0.72, 0.962]: carry/hold (an object), crouch/kneel, listen to (a person).", "From 1458.0s to 1459.0s, [0.52, 0.438, 0.728, 0.961]->[0.538, 0.419, 0.725, 0.961]: sit, carry/hold (an object), crouch/kneel, watch (a person).", "At 1457.0s, [0.513, 0.43, 0.724, 0.956]: carry/hold (an object), crouch/kneel, watch (a person).", "At 1456.0s, [0.525, 0.411, 0.713, 0.955]: carry/hold (an object), crouch/kneel.", "At 1455.0s, [0.541, 0.406, 0.714, 0.958]: sit, 
carry/hold (an object).", "At 1454.0s, [0.537, 0.404, 0.715, 0.957]: sit, carry/hold (an object), listen to (a person).", "At 1453.0s, [0.54, 0.401, 0.716, 0.958]: carry/hold (an object), crouch/kneel.", "At 1452.0s, [0.529, 0.418, 0.718, 0.965]: carry/hold (an object), crouch/kneel, talk to (e.g., self, a person, a group).", "At 1451.0s, [0.527, 0.414, 0.717, 0.963]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1450.0s, [0.53, 0.419, 0.717, 0.96]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1448.0s to 1449.0s, [0.522, 0.425, 0.716, 0.962]->[0.523, 0.413, 0.718, 0.962]: carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1446.0s to 1447.0s, [0.527, 0.426, 0.721, 0.961]->[0.529, 0.429, 0.718, 0.96]: carry/hold (an object), crouch/kneel, talk to (e.g., self, a person, a group).", "From 1444.0s to 1445.0s, [0.524, 0.428, 0.718, 0.958]->[0.529, 0.427, 0.718, 0.962]: carry/hold (an object), crouch/kneel, talk to (e.g., self, a person, a group), watch (a person).", "At 1443.0s, [0.527, 0.434, 0.719, 0.964]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1442.0s, [0.513, 0.43, 0.716, 0.956]: sit, carry/hold (an object), crouch/kneel, talk to (e.g., self, a person, a group), watch (a person).", "At 1441.0s, [0.525, 0.428, 0.715, 0.964]: carry/hold (an object), listen to (a person).", "At 1440.0s, [0.525, 0.435, 0.719, 0.966]: carry/hold (an object), crouch/kneel, listen to (a person), watch (a person).", "At 1439.0s, [0.522, 0.436, 0.72, 0.965]: carry/hold (an object), crouch/kneel, watch (a person).", "At 1438.0s, [0.523, 0.432, 0.719, 0.965]: sit, carry/hold (an object), crouch/kneel, watch (a person).", "At 1437.0s, [0.513, 0.413, 0.716, 0.961]: carry/hold (an object), crouch/kneel, watch (a person).", "At 1436.0s, [0.522, 0.408, 0.718, 0.963]: carry/hold (an object), crouch/kneel.", "At 1435.0s, [0.52, 0.408, 0.718, 0.965]: sit, carry/hold (an object).", "At 1434.0s, [0.519, 0.412, 0.717, 0.965]: sit, carry/hold (an object), watch (a person).", "From 1431.0s to 1433.0s, [0.501, 0.421, 0.726, 0.959]->[0.518, 0.411, 0.719, 0.952]->[0.508, 0.411, 0.718, 0.956]: carry/hold (an object), crouch/kneel, talk to (e.g., self, a person, a group), watch (a person).", "At 1430.0s, [0.53, 0.428, 0.722, 0.964]: sit, carry/hold (an object), listen to (a person)."]}]}, {"video": "YYWdB7h1INo.mkv", "clip": [1476, 1540], "conversations": [{"from": "human", "timestamps": 1506.0, "value": "What actions is the person in bbox [0.641, 0.189, 0.714, 0.496] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.641, 0.189, 0.714, 0.496]: walk, lift/pick up, watch (a person).", "At 1505.0s, [0.603, 0.218, 0.657, 0.47]: walk, carry/hold (an object)."]}, {"from": "human", "timestamps": 1540.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.111, 0.14, 0.26, 0.775]: watch (a person), walk; [0.401, 0.198, 0.638, 0.981]: carry/hold (an object), bend/bow (at the waist); [0.631, 0.42, 0.98, 0.836]: watch (a person), sit, smoke."}]}, {"video": "YYWdB7h1INo.mkv", "clip": [1504, 1564], "conversations": [{"from": "human", "timestamps": 1534.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.173, 0.146, 0.342, 0.981]: carry/hold 
(an object), watch (a person), talk to (e.g., self, a person, a group), walk; [0.453, 0.096, 0.596, 0.92]: listen to (a person), stand, smoke, touch (an object); [0.773, 0.039, 0.99, 0.992]: listen to (a person), stand, smoke, watch (a person)."}, {"from": "human", "timestamps": 1564.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.479, 0.112, 0.93, 0.988]?"}, {"from": "gpt", "value": ["At Now, [0.479, 0.112, 0.93, 0.988]: sit, smoke, talk to (e.g., self, a person, a group).", "At 1563.0s, [0.374, 0.118, 0.938, 0.982]: sit, smoke, talk to (e.g., self, a person, a group).", "At 1562.0s, [0.513, 0.12, 0.925, 0.975]: sit, carry/hold (an object), smoke, talk to (e.g., self, a person, a group).", "At 1561.0s, [0.49, 0.125, 0.912, 0.971]: sit, smoke, talk to (e.g., self, a person, a group), watch (a person).", "At 1560.0s, [0.475, 0.119, 0.942, 0.979]: sit, carry/hold (an object), smoke, talk to (e.g., self, a person, a group)."]}]}, {"video": "YYWdB7h1INo.mkv", "clip": [1622, 1654], "conversations": [{"from": "human", "timestamps": 1652.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.081, 0.162, 0.58, 0.973]: carry/hold (an object), sit, listen to (a person); [0.39, 0.176, 0.918, 0.966]: carry/hold (an object), sit, smoke, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1654.0, "value": "Describe the actions of the person at bbox [0.077, 0.159, 0.58, 0.972] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.077, 0.159, 0.58, 0.972]: sit, carry/hold (an object), listen to (a person).", "At 1653.0s, [0.086, 0.165, 0.542, 0.961]: sit, smoke, listen to (a person).", "At 1652.0s, [0.081, 0.162, 0.58, 0.973]: sit, carry/hold (an object), listen to (a person).", "At 1651.0s, [0.087, 0.169, 0.547, 0.964]: sit, carry/hold (an object), smoke, listen to (a person).", "From 1649.0s to 1650.0s, [0.08, 0.168, 0.568, 0.966]->[0.088, 0.155, 0.611, 0.984]: sit, carry/hold (an object), smoke.", "At 1648.0s, [0.086, 0.17, 0.61, 0.978]: sit, carry/hold (an object), smoke, listen to (a person).", "At 1647.0s, [0.079, 0.16, 0.546, 0.975]: sit, carry/hold (an object)."]}]}, {"video": "YYWdB7h1INo.mkv", "clip": [1636, 1681], "conversations": [{"from": "human", "timestamps": 1666.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.023, 0.424, 0.488, 0.995]: sit, smoke, talk to (e.g., self, a person, a group); [0.469, 0.087, 0.706, 0.702]: carry/hold (an object), smoke, dance."}, {"from": "human", "timestamps": 1681.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.414, 0.253, 0.656, 0.831]?"}, {"from": "gpt", "value": ["At Now, [0.414, 0.253, 0.656, 0.831]: bend/bow (at the waist), smoke, talk to (e.g., self, a person, a group).", "At 1680.0s, [0.403, 0.272, 0.651, 0.8]: bend/bow (at the waist), smoke, talk to (e.g., self, a person, a group).", "From 1678.0s to 1679.0s, [0.437, 0.218, 0.665, 0.768]->[0.429, 0.224, 0.665, 0.694]: bend/bow (at the waist), carry/hold (an object), smoke, talk to (e.g., self, a person, a group).", "From 1676.0s to 1677.0s, [0.449, 0.032, 0.743, 0.704]->[0.343, 0.093, 0.688, 0.774]: walk, carry/hold (an object), smoke, talk to (e.g., self, a person, a group).", "At 1675.0s, [0.528, 0.03, 0.759, 0.691]: stand, smoke, talk to (e.g., self, a person, a 
group).", "At 1674.0s, [0.534, 0.079, 0.743, 0.692]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1673.0s, [0.562, 0.077, 0.751, 0.698]: stand, smoke, throw, listen to (a person).", "At 1672.0s, [0.579, 0.112, 0.742, 0.685]: stand, carry/hold (an object), smoke, listen to (a person).", "At 1671.0s, [0.534, 0.075, 0.737, 0.689]: stand, carry/hold (an object), throw, listen to (a person), watch (a person).", "At 1670.0s, [0.525, 0.077, 0.751, 0.692]: stand, carry/hold (an object), catch (an object), throw, listen to (a person), watch (a person).", "At 1669.0s, [0.586, 0.065, 0.761, 0.687]: stand, carry/hold (an object), listen to (a person).", "At 1668.0s, [0.556, 0.079, 0.747, 0.699]: stand, smoke, watch (a person).", "At 1667.0s, [0.43, 0.087, 0.7, 0.697]: stand, carry/hold (an object), listen to (a person).", "At 1666.0s, [0.469, 0.087, 0.706, 0.702]: carry/hold (an object), dance, smoke.", "At 1665.0s, [0.636, 0.045, 0.817, 0.743]: carry/hold (an object), dance, smoke, listen to (a person).", "At 1664.0s, [0.661, 0.06, 0.909, 0.927]: stand, carry/hold (an object), smoke, talk to (e.g., self, a person, a group).", "At 1663.0s, [0.505, 0.068, 0.877, 0.925]: stand, carry/hold (an object).", "At 1662.0s, [0.674, 0.022, 0.98, 0.931]: walk, carry/hold (an object), smoke, throw, listen to (a person).", "At 1661.0s, [0.799, 0.011, 1.0, 0.985]: walk, carry/hold (an object), listen to (a person)."]}]}, {"video": "YYWdB7h1INo.mkv", "clip": [1681, 1714], "conversations": [{"from": "human", "timestamps": 1711.0, "value": "For the person at bbox [0.499, 0.113, 0.994, 0.981], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.499, 0.113, 0.994, 0.981]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1710.0s, [0.491, 0.119, 0.995, 0.982]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1707.0s to 1709.0s, [0.492, 0.116, 0.981, 0.975]->[0.49, 0.125, 0.991, 1.0]->[0.492, 0.117, 0.99, 0.984]: sit, talk to (e.g., self, a person, a group).", "From 1705.0s to 1706.0s, [0.493, 0.113, 0.99, 0.982]->[0.495, 0.124, 0.997, 0.98]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1704.0s, [0.488, 0.115, 0.998, 0.974]: sit, talk to (e.g., self, a person, a group).", "From 1695.0s to 1703.0s, [0.505, 0.173, 0.978, 0.999]->[0.501, 0.165, 0.978, 0.994]->[0.494, 0.141, 0.978, 0.992]->[0.477, 0.132, 0.989, 0.98]->[0.503, 0.133, 0.999, 0.976]->[0.487, 0.129, 1.0, 0.979]->[0.498, 0.117, 0.998, 0.971]->[0.491, 0.127, 0.999, 1.0]->[0.497, 0.122, 1.0, 0.975]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1694.0s, [0.464, 0.15, 0.976, 0.99]: sit, carry/hold (an object)."]}, {"from": "human", "timestamps": 1714.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.144, 0.522, 0.994]: carry/hold (an object), sit, watch (a person), listen to (a person); [0.515, 0.123, 0.972, 1.0]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "YYWdB7h1INo.mkv", "clip": [1720, 1786], "conversations": [{"from": "human", "timestamps": 1750.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.492, 0.152, 0.645, 0.781]: carry/hold (an object), push (an object), walk."}, {"from": "human", "timestamps": 1786.0, "value": "What actions is 
the person in bbox [0.222, 0.067, 0.492, 0.983] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.222, 0.067, 0.492, 0.983]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1785.0s, [0.098, 0.092, 0.485, 0.974]: walk, watch (a person).", "At 1784.0s, [0.226, 0.016, 0.643, 0.987]: stand, walk, watch (a person).", "At 1783.0s, [0.716, 0.015, 0.995, 0.935]: walk."]}]}, {"video": "Ytga8ciKWJc.mkv", "clip": [886, 965], "conversations": [{"from": "human", "timestamps": 916.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.259, 0.632, 1.0, 0.956]: carry/hold (an object), lie/sleep, watch (a person)."}, {"from": "human", "timestamps": 965.0, "value": "What actions did the person at bbox [0.327, 0.144, 0.798, 0.707] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.327, 0.144, 0.798, 0.707]: sit, carry/hold (an object).", "At 964.0s, [0.202, 0.177, 0.81, 0.752]: sit, carry/hold (an object).", "At 963.0s, [0.432, 0.187, 0.732, 0.733]: sit.", "At 962.0s, [0.06, 0.26, 0.753, 0.892]: sit, touch (an object)."]}]}, {"video": "Ytga8ciKWJc.mkv", "clip": [1013, 1048], "conversations": [{"from": "human", "timestamps": 1043.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.055, 0.113, 0.229, 0.816]: watch (a person), talk to (e.g., self, a person, a group), walk; [0.203, 0.218, 0.338, 0.838]: listen to (a person), watch (a person), walk."}, {"from": "human", "timestamps": 1048.0, "value": "What actions did the person at bbox [0.239, 0.289, 0.515, 0.954] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.239, 0.289, 0.515, 0.954]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1047.0s, [0.211, 0.201, 0.423, 0.942]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 1045.0s to 1046.0s, [0.208, 0.263, 0.395, 0.935]->[0.227, 0.251, 0.428, 0.871]: walk, talk to (e.g., self, a person, a group).", "At 1044.0s, [0.223, 0.227, 0.355, 0.911]: walk, listen to (a person), talk to (e.g., self, a person, a group).", "At 1043.0s, [0.203, 0.218, 0.338, 0.838]: walk, listen to (a person), watch (a person).", "From 1041.0s to 1042.0s, [0.199, 0.234, 0.321, 0.764]->[0.21, 0.214, 0.347, 0.802]: walk, listen to (a person)."]}]}, {"video": "Ytga8ciKWJc.mkv", "clip": [1000, 1103], "conversations": [{"from": "human", "timestamps": 1030.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.035, 0.706, 0.955]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1103.0, "value": "Describe the actions of the person at bbox [0.544, 0.033, 0.997, 0.913] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.544, 0.033, 0.997, 0.913]: stand, listen to (a person), watch (a person).", "At 1102.0s, [0.381, 0.088, 0.918, 0.946]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1101.0s, [0.621, 0.128, 0.938, 0.854]: stand, carry/hold (an object), grab (a person), listen to (a person), watch (a person)."]}]}, {"video": "Ytga8ciKWJc.mkv", "clip": [1062, 1109], "conversations": [{"from": "human", "timestamps": 1092.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the 
current frame, [0.026, 0.026, 0.53, 0.963]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.426, 0.09, 1.0, 0.978]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1109.0, "value": "For the person at bbox [0.0, 0.095, 0.544, 0.954], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.095, 0.544, 0.954]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1108.0s, [0.001, 0.152, 0.635, 0.935]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1107.0s, [0.001, 0.089, 0.569, 0.938]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1106.0s, [0.0, 0.072, 0.563, 0.897]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1104.0s to 1105.0s, [0.004, 0.104, 0.465, 0.934]->[0.0, 0.106, 0.527, 0.946]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1101.0s to 1103.0s, [0.339, 0.221, 0.598, 0.884]->[0.041, 0.115, 0.579, 0.928]->[0.0, 0.078, 0.392, 0.949]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1100.0s, [0.306, 0.236, 0.955, 0.906]: run/jog, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Ytga8ciKWJc.mkv", "clip": [1133, 1200], "conversations": [{"from": "human", "timestamps": 1163.0, "value": "What actions is the person in bbox [0.007, 0.04, 0.759, 0.936] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.007, 0.04, 0.759, 0.936]: stand, watch (a person).", "At 1162.0s, [0.0, 0.034, 0.75, 0.936]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1200.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.182, 0.199, 0.432, 0.713]: watch (a person), stand; [0.34, 0.25, 0.664, 0.692]: walk."}]}, {"video": "Ytga8ciKWJc.mkv", "clip": [1129, 1226], "conversations": [{"from": "human", "timestamps": 1159.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.157, 0.065, 0.626, 0.934]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1226.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.072, 0.422, 0.957]?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.072, 0.422, 0.957]: stand, listen to (a person), watch (a person).", "From 1221.0s to 1225.0s, [0.0, 0.071, 0.387, 0.939]->[0.0, 0.074, 0.395, 0.929]->[0.0, 0.058, 0.398, 0.941]->[0.0, 0.075, 0.395, 0.939]->[0.0, 0.071, 0.355, 0.955]: stand, listen to (a person), watch (a person)."]}]}, {"video": "Ytga8ciKWJc.mkv", "clip": [1224, 1259], "conversations": [{"from": "human", "timestamps": 1254.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.007, 0.033, 0.844, 0.954]: stand."}, {"from": "human", "timestamps": 1259.0, "value": "Describe the actions of the person at bbox [0.0, 0.072, 0.4, 0.924] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.072, 0.4, 0.924]: sit, 
carry/hold (an object), touch (an object), watch (a person).", "At 1258.0s, [0.0, 0.137, 0.399, 0.934]: sit, carry/hold (an object), touch (an object), watch (a person).", "From 1256.0s to 1257.0s, [0.458, 0.263, 0.906, 0.946]->[0.46, 0.257, 0.921, 0.935]: sit, touch (an object), listen to (a person), watch (a person)."]}]}, {"video": "Ytga8ciKWJc.mkv", "clip": [1329, 1369], "conversations": [{"from": "human", "timestamps": 1359.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.072, 0.082, 0.686, 0.947]?"}, {"from": "gpt", "value": ["At Now, [0.072, 0.082, 0.686, 0.947]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1358.0s, [0.04, 0.073, 0.673, 0.962]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1369.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.062, 0.065, 0.655, 0.957]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "Ytga8ciKWJc.mkv", "clip": [1366, 1418], "conversations": [{"from": "human", "timestamps": 1396.0, "value": "What actions did the person at bbox [0.016, 0.133, 0.581, 0.954] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.016, 0.133, 0.581, 0.954]: stand.", "From 1394.0s to 1395.0s, [0.139, 0.127, 0.656, 0.96]->[0.022, 0.111, 0.569, 0.949]: stand, touch (an object).", "At 1393.0s, [0.02, 0.047, 0.643, 0.933]: stand, close (e.g., a door, a box), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1418.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.001, 0.048, 0.429, 0.95]: listen to (a person), stand, watch (a person); [0.062, 0.192, 0.781, 0.928]: watch (a person), talk to (e.g., self, a person, a group), walk."}]}, {"video": "Ytga8ciKWJc.mkv", "clip": [1382, 1472], "conversations": [{"from": "human", "timestamps": 1412.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.124, 0.152, 0.543, 0.948]: listen to (a person), walk; [0.475, 0.17, 0.845, 0.991]: watch (a person), talk to (e.g., self, a person, a group), walk; [0.75, 0.319, 0.998, 0.94]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1472.0, "value": "What actions did the person at bbox [0.427, 0.109, 0.73, 0.753] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.427, 0.109, 0.73, 0.753]: stand, talk to (e.g., self, a person, a group).", "From 1463.0s to 1471.0s, [0.398, 0.122, 0.732, 0.737]->[0.414, 0.107, 0.73, 0.747]->[0.415, 0.112, 0.738, 0.708]->[0.421, 0.115, 0.707, 0.792]->[0.425, 0.111, 0.726, 0.792]->[0.432, 0.107, 0.731, 0.804]->[0.406, 0.117, 0.733, 0.819]->[0.424, 0.101, 0.726, 0.798]->[0.414, 0.099, 0.733, 0.812]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "Ytga8ciKWJc.mkv", "clip": [1434, 1488], "conversations": [{"from": "human", "timestamps": 1464.0, "value": "What actions is the person in bbox [0.211, 0.209, 0.365, 0.783] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.211, 0.209, 0.365, 0.783]: stand, listen to (a person).", "At 1463.0s, [0.206, 0.213, 0.37, 0.763]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1488.0, 
"value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.051, 0.108, 0.756, 0.933]: listen to (a person), sit."}]}, {"video": "Ytga8ciKWJc.mkv", "clip": [1477, 1517], "conversations": [{"from": "human", "timestamps": 1507.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.032, 1.0, 0.958]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1517.0, "value": "For the person at bbox [0.581, 0.358, 0.835, 0.885], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.581, 0.358, 0.835, 0.885]: sit, listen to (a person), watch (a person).", "At 1516.0s, [0.587, 0.372, 0.851, 0.863]: sit, listen to (a person), watch (a person).", "At 1515.0s, [0.578, 0.388, 0.833, 0.876]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1514.0s, [0.566, 0.398, 0.844, 0.934]: sit, listen to (a person), watch (a person)."]}]}, {"video": "Ytga8ciKWJc.mkv", "clip": [1473, 1579], "conversations": [{"from": "human", "timestamps": 1503.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.012, 0.047, 0.809, 0.952]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.713, 0.198, 1.0, 0.987]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1579.0, "value": "Describe the actions of the person at bbox [0.402, 0.067, 1.0, 0.955] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.402, 0.067, 1.0, 0.955]: stand, answer phone.", "At 1578.0s, [0.099, 0.078, 0.503, 0.94]: stand, answer phone.", "At 1577.0s, [0.15, 0.083, 0.479, 0.938]: stand, carry/hold (an object), text on/look at a cellphone.", "From 1575.0s to 1576.0s, [0.165, 0.057, 0.596, 0.926]->[0.169, 0.095, 0.464, 0.917]: stand, text on/look at a cellphone.", "At 1574.0s, [0.242, 0.152, 0.738, 0.858]: run/jog.", "From 1571.0s to 1573.0s, [0.201, 0.053, 0.931, 0.963]->[0.223, 0.053, 0.939, 0.947]->[0.258, 0.044, 0.988, 0.947]: stand.", "At 1570.0s, [0.159, 0.057, 0.88, 0.949]: stand, watch (a person)."]}]}, {"video": "Ytga8ciKWJc.mkv", "clip": [1558, 1594], "conversations": [{"from": "human", "timestamps": 1588.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.047, 0.117, 0.794, 0.852]: listen to (a person), grab (a person), walk; [0.074, 0.12, 0.689, 0.944]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1594.0, "value": "What actions is the person in bbox [0.143, 0.061, 0.806, 0.941] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.143, 0.061, 0.806, 0.941]: stand, listen to (a person).", "From 1591.0s to 1593.0s, [0.209, 0.055, 0.713, 0.948]->[0.239, 0.048, 0.749, 0.942]->[0.289, 0.052, 0.819, 0.951]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1590.0s, [0.189, 0.089, 0.835, 0.947]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1588.0s, [0.047, 0.117, 0.794, 0.852]: walk, grab (a person), listen to (a person)."]}]}, {"video": "Ytga8ciKWJc.mkv", "clip": [1619, 1679], "conversations": [{"from": "human", "timestamps": 1649.0, "value": "For the current frame, show each 
individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.035, 0.465, 0.954]: stand, kiss (a person)."}, {"from": "human", "timestamps": 1679.0, "value": "What actions did the person at bbox [0.132, 0.094, 0.624, 0.87] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.132, 0.094, 0.624, 0.87]: bend/bow (at the waist), fight/hit (a person), watch (a person).", "At 1678.0s, [0.067, 0.099, 0.416, 0.913]: walk, watch (a person).", "At 1677.0s, [0.096, 0.143, 0.419, 0.938]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1676.0s, [0.019, 0.16, 0.367, 0.901]: walk, listen to (a person), watch (a person).", "At 1675.0s, [0.0, 0.141, 0.321, 0.944]: walk, close (e.g., a door, a box), talk to (e.g., self, a person, a group).", "At 1674.0s, [0.369, 0.164, 0.768, 0.766]: walk, close (e.g., a door, a box), talk to (e.g., self, a person, a group), watch (a person).", "At 1673.0s, [0.443, 0.16, 0.783, 0.719]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1672.0s, [0.614, 0.148, 0.81, 0.734]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person).", "At 1671.0s, [0.527, 0.117, 0.784, 0.528]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "Ytga8ciKWJc.mkv", "clip": [1674, 1718], "conversations": [{"from": "human", "timestamps": 1704.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.061, 0.037, 0.683, 0.911]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1718.0, "value": "For the person at bbox [0.039, 0.043, 0.525, 0.93], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.039, 0.043, 0.525, 0.93]: stand, talk to (e.g., self, a person, a group).", "From 1715.0s to 1717.0s, [0.073, 0.051, 0.656, 0.934]->[0.028, 0.035, 0.486, 0.943]->[0.23, 0.269, 0.864, 0.944]: stand, talk to (e.g., self, a person, a group).", "At 1714.0s, [0.143, 0.034, 0.682, 0.943]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1713.0s, [0.204, 0.054, 0.908, 0.934]: walk, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "ZxQn8HVmXsY.mkv", "clip": [909, 955], "conversations": [{"from": "human", "timestamps": 939.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.347, 0.063, 0.994, 0.994]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 955.0, "value": "What actions is the person in bbox [0.499, 0.433, 0.597, 0.653] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.499, 0.433, 0.597, 0.653]: sit, carry/hold (an object).", "At 954.0s, [0.499, 0.432, 0.597, 0.656]: sit, carry/hold (an object), listen to (a person).", "At 953.0s, [0.497, 0.435, 0.595, 0.654]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 945.0s to 952.0s, [0.16, 0.202, 1.0, 0.985]->[0.275, 0.062, 0.991, 0.969]->[0.363, 0.077, 1.0, 0.95]->[0.389, 0.078, 0.997, 0.977]->[0.384, 0.07, 0.993, 0.925]->[0.44, 0.06, 1.0, 0.979]->[0.39, 0.05, 0.986, 0.985]->[0.433, 0.073, 0.999, 0.961]: sit, carry/hold (an object), listen to (a person).", "From 935.0s to 944.0s, [0.272, 0.072, 0.992, 0.981]->[0.276, 0.055, 0.988, 0.979]->[0.278, 0.072, 0.993, 
0.985]->[0.335, 0.047, 0.996, 0.991]->[0.347, 0.063, 0.994, 0.994]->[0.346, 0.056, 1.0, 0.98]->[0.306, 0.085, 1.0, 0.993]->[0.316, 0.071, 0.983, 0.987]->[0.19, 0.128, 0.986, 0.956]->[0.214, 0.093, 0.951, 0.969]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 934.0s, [0.36, 0.066, 0.99, 0.987]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 932.0s to 933.0s, [0.351, 0.064, 0.987, 0.992]->[0.403, 0.07, 0.988, 0.975]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 929.0s to 931.0s, [0.323, 0.067, 0.984, 0.991]->[0.344, 0.079, 0.974, 0.996]->[0.358, 0.066, 0.991, 0.986]: sit, carry/hold (an object), listen to (a person).", "At 928.0s, [0.248, 0.051, 0.981, 0.957]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 924.0s to 927.0s, [0.342, 0.04, 0.998, 0.958]->[0.346, 0.049, 0.994, 0.951]->[0.299, 0.052, 0.984, 0.951]->[0.33, 0.065, 0.989, 0.943]: sit, carry/hold (an object), listen to (a person).", "From 922.0s to 923.0s, [0.311, 0.067, 1.0, 0.982]->[0.262, 0.066, 1.0, 0.964]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "ZxQn8HVmXsY.mkv", "clip": [904, 979], "conversations": [{"from": "human", "timestamps": 934.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.081, 0.432, 0.973]: sit, watch (a person), listen to (a person), carry/hold (an object), talk to (e.g., self, a person, a group); [0.36, 0.066, 0.99, 0.987]: carry/hold (an object), sit, talk to (e.g., self, a person, a group), listen to (a person)."}, {"from": "human", "timestamps": 979.0, "value": "For the person at bbox [0.357, 0.113, 0.765, 0.763], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.357, 0.113, 0.765, 0.763]: walk.", "At 978.0s, [0.375, 0.092, 0.618, 0.532]: walk."]}]}, {"video": "ZxQn8HVmXsY.mkv", "clip": [1025, 1068], "conversations": [{"from": "human", "timestamps": 1055.0, "value": "What actions is the person in bbox [0.041, 0.059, 0.878, 0.972] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.041, 0.059, 0.878, 0.972]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1054.0s, [0.099, 0.062, 0.958, 0.96]: sit, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1068.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.415, 0.188, 0.523, 0.634]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.513, 0.179, 0.658, 0.634]: carry/hold (an object), talk to (e.g., self, a person, a group), walk; [0.651, 0.177, 0.755, 0.632]: listen to (a person), stand, watch (a person)."}]}, {"video": "ZxQn8HVmXsY.mkv", "clip": [1056, 1100], "conversations": [{"from": "human", "timestamps": 1086.0, "value": "For the person at bbox [0.663, 0.077, 0.993, 0.981], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.663, 0.077, 0.993, 0.981]: stand, listen to (a person).", "At 1085.0s, [0.676, 0.051, 0.998, 0.971]: stand."]}, {"from": "human", "timestamps": 1100.0, "value": "Describe the locations and actions of each person in the current frame."}, 
{"from": "gpt", "value": "In the current frame, [0.037, 0.042, 0.357, 0.974]: listen to (a person), stand; [0.309, 0.181, 0.705, 0.977]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.679, 0.134, 0.998, 0.96]: listen to (a person), stand."}]}, {"video": "ZxQn8HVmXsY.mkv", "clip": [1135, 1184], "conversations": [{"from": "human", "timestamps": 1165.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.396, 0.062, 0.796, 0.998]: carry/hold (an object), watch (a person), walk."}, {"from": "human", "timestamps": 1184.0, "value": "What actions did the person at bbox [0.013, 0.044, 0.914, 0.978] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.013, 0.044, 0.914, 0.978]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1183.0s, [0.012, 0.056, 0.94, 0.977]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "ZxQn8HVmXsY.mkv", "clip": [1117, 1253], "conversations": [{"from": "human", "timestamps": 1147.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.381, 0.37, 0.492, 0.696]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.381, 0.537, 0.588, 0.77]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1253.0, "value": "What actions is the person in bbox [0.0, 0.067, 0.995, 0.971] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.067, 0.995, 0.971]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1252.0s, [0.0, 0.06, 1.0, 0.987]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1251.0s, [0.003, 0.084, 0.995, 0.991]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "ZxQn8HVmXsY.mkv", "clip": [1178, 1246], "conversations": [{"from": "human", "timestamps": 1208.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.008, 0.082, 0.988, 0.986]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1246.0, "value": "What actions is the person in bbox [0.005, 0.065, 0.892, 0.979] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.005, 0.065, 0.892, 0.979]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1244.0s to 1245.0s, [0.008, 0.057, 0.994, 0.981]->[0.0, 0.069, 0.962, 0.983]: sit, talk to (e.g., self, a person, a group).", "At 1243.0s, [0.0, 0.087, 1.0, 1.0]: sit, stand, talk to (e.g., self, a person, a group).", "From 1241.0s to 1242.0s, [0.0, 0.066, 0.987, 0.976]->[0.012, 0.071, 0.917, 0.971]: sit, talk to (e.g., self, a person, a group).", "At 1240.0s, [0.0, 0.08, 0.94, 0.973]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "ZxQn8HVmXsY.mkv", "clip": [1247, 1295], "conversations": [{"from": "human", "timestamps": 1277.0, "value": "What actions is the person in bbox [0.139, 0.341, 0.411, 0.868] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.139, 0.341, 0.411, 0.868]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1276.0s, [0.178, 0.351, 0.457, 0.873]: sit, carry/hold (an object), watch (a 
person)."]}, {"from": "human", "timestamps": 1295.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.073, 0.042, 0.993, 0.979]: listen to (a person), sit, watch (a person)."}]}, {"video": "ZxQn8HVmXsY.mkv", "clip": [1381, 1430], "conversations": [{"from": "human", "timestamps": 1411.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.045, 0.016, 0.891, 0.987]: sit, eat."}, {"from": "human", "timestamps": 1430.0, "value": "Describe the actions of the person at bbox [0.0, 0.09, 0.731, 0.987] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.09, 0.731, 0.987]: sit, listen to (a person).", "At 1429.0s, [0.005, 0.101, 0.739, 0.971]: sit, listen to (a person)."]}]}, {"video": "ZxQn8HVmXsY.mkv", "clip": [1369, 1495], "conversations": [{"from": "human", "timestamps": 1399.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.2, 0.505, 0.43, 0.738]: sit, eat; [0.35, 0.467, 0.502, 0.703]: sit, eat; [0.562, 0.36, 0.745, 0.737]: sit, eat."}, {"from": "human", "timestamps": 1495.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.205, 0.469, 0.388, 0.732]?"}, {"from": "gpt", "value": ["In the current frame, [0.205, 0.469, 0.388, 0.732]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1493.0s to 1494.0s, [0.208, 0.467, 0.395, 0.736]->[0.206, 0.467, 0.409, 0.739]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1491.0s to 1492.0s, [0.032, 0.07, 0.787, 0.975]->[0.044, 0.084, 0.778, 0.976]: sit, eat, listen to (a person)."]}]}, {"video": "ZxQn8HVmXsY.mkv", "clip": [1485, 1539], "conversations": [{"from": "human", "timestamps": 1515.0, "value": "Describe the actions of the person at bbox [0.064, 0.069, 0.806, 0.996] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.064, 0.069, 0.806, 0.996]: sit, eat.", "At 1514.0s, [0.034, 0.075, 0.799, 0.978]: sit, eat."]}, {"from": "human", "timestamps": 1539.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.018, 0.589, 0.918]: listen to (a person), sit, talk to (e.g., self, a person, a group); [0.573, 0.105, 0.993, 0.991]: sit."}]}, {"video": "ZxQn8HVmXsY.mkv", "clip": [1517, 1604], "conversations": [{"from": "human", "timestamps": 1547.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.06, 0.098, 0.97, 0.929]: lie/sleep, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1604.0, "value": "For the person at bbox [0.281, 0.089, 0.832, 0.968], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.281, 0.089, 0.832, 0.968]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1601.0s to 1603.0s, [0.4, 0.048, 0.855, 0.988]->[0.42, 0.108, 0.853, 0.959]->[0.439, 0.107, 0.849, 0.958]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "ZxQn8HVmXsY.mkv", "clip": [1546, 1587], "conversations": [{"from": "human", "timestamps": 1576.0, "value": "Can you tell me the current and past 
actions of the person located in bbox [0.586, 0.121, 0.994, 0.988]?"}, {"from": "gpt", "value": ["At Now, [0.586, 0.121, 0.994, 0.988]: sit, listen to (a person), watch (a person).", "From 1572.0s to 1575.0s, [0.586, 0.134, 0.985, 0.973]->[0.585, 0.123, 0.977, 0.973]->[0.563, 0.127, 0.983, 0.943]->[0.567, 0.105, 0.982, 0.959]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1587.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.194, 0.291, 0.295, 0.732]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.641, 0.417, 0.771, 0.6]: watch (a person), sit; [0.722, 0.449, 0.84, 0.732]: watch (a person), sit."}]}, {"video": "ZxQn8HVmXsY.mkv", "clip": [1603, 1660], "conversations": [{"from": "human", "timestamps": 1633.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.425, 0.279, 0.838, 0.932]: carry/hold (an object), sit."}, {"from": "human", "timestamps": 1660.0, "value": "Describe the actions of the person at bbox [0.211, 0.034, 0.482, 0.98] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.211, 0.034, 0.482, 0.98]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 1657.0s to 1658.0s, [0.077, 0.021, 1.0, 0.97]->[0.106, 0.045, 0.906, 0.989]: stand, talk to (e.g., self, a person, a group).", "From 1655.0s to 1656.0s, [0.092, 0.059, 1.0, 0.984]->[0.111, 0.048, 1.0, 0.976]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "ZxQn8HVmXsY.mkv", "clip": [1608, 1699], "conversations": [{"from": "human", "timestamps": 1638.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.481, 0.268, 0.893, 0.929]: carry/hold (an object), sit."}, {"from": "human", "timestamps": 1699.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.006, 0.124, 0.993, 0.982]?"}, {"from": "gpt", "value": ["In the current frame, [0.006, 0.124, 0.993, 0.982]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1698.0s, [0.0, 0.131, 0.988, 0.969]: stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "_-Z6wFjXtGQ.mkv", "clip": [886, 933], "conversations": [{"from": "human", "timestamps": 916.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.411, 0.068, 0.916, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 933.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.706, 0.19, 0.976, 0.979]?"}, {"from": "gpt", "value": ["In the current frame, [0.706, 0.19, 0.976, 0.979]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 930.0s to 932.0s, [0.364, 0.234, 0.853, 0.994]->[0.402, 0.257, 0.851, 0.998]->[0.397, 0.223, 0.857, 1.0]: stand, listen to (a person), watch (a person).", "At 929.0s, [0.375, 0.266, 0.844, 1.0]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 927.0s to 928.0s, [0.375, 0.241, 0.851, 0.994]->[0.342, 0.177, 0.805, 0.975]: stand, listen to (a person), watch (a person).", "At 926.0s, [0.357, 0.241, 0.809, 0.973]: stand, listen to 
(a person), talk to (e.g., self, a person, a group), watch (a person).", "From 921.0s to 925.0s, [0.379, 0.215, 0.834, 1.0]->[0.377, 0.226, 0.834, 1.0]->[0.199, 0.25, 0.826, 0.998]->[0.351, 0.164, 0.82, 0.981]->[0.37, 0.244, 0.811, 0.965]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 920.0s, [0.394, 0.071, 0.921, 0.995]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 918.0s to 919.0s, [0.349, 0.074, 0.923, 1.0]->[0.397, 0.072, 0.932, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 917.0s, [0.393, 0.08, 0.946, 0.994]: stand, talk to (e.g., self, a person, a group).", "From 914.0s to 916.0s, [0.426, 0.076, 0.944, 0.986]->[0.4, 0.071, 0.945, 0.995]->[0.411, 0.068, 0.916, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 912.0s to 913.0s, [0.404, 0.086, 0.927, 0.98]->[0.427, 0.083, 0.919, 0.973]: stand, talk to (e.g., self, a person, a group).", "From 909.0s to 911.0s, [0.418, 0.247, 0.912, 0.996]->[0.441, 0.242, 0.904, 0.994]->[0.279, 0.069, 0.987, 0.991]: stand, listen to (a person), watch (a person).", "At 908.0s, [0.411, 0.253, 0.924, 0.989]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 907.0s, [0.412, 0.231, 0.944, 0.989]: stand, listen to (a person), watch (a person).", "At 906.0s, [0.441, 0.214, 0.902, 0.994]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 903.0s to 905.0s, [0.333, 0.24, 0.902, 0.998]->[0.372, 0.229, 0.888, 0.996]->[0.37, 0.226, 0.914, 0.988]: stand, listen to (a person).", "At 902.0s, [0.392, 0.253, 0.916, 0.994]: stand, carry/hold (an object), listen to (a person)."]}]}, {"video": "_-Z6wFjXtGQ.mkv", "clip": [895, 963], "conversations": [{"from": "human", "timestamps": 925.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.025, 0.084, 0.474, 0.967]: listen to (a person), stand, watch (a person); [0.37, 0.244, 0.811, 0.965]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 963.0, "value": "What actions did the person at bbox [0.073, 0.047, 0.551, 0.976] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.073, 0.047, 0.551, 0.976]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 960.0s to 962.0s, [0.21, 0.049, 0.596, 0.979]->[0.161, 0.044, 0.615, 0.985]->[0.115, 0.034, 0.585, 0.992]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 959.0s, [0.305, 0.016, 0.701, 0.982]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 957.0s to 958.0s, [0.318, 0.007, 0.705, 0.993]->[0.289, 0.004, 0.707, 0.985]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 956.0s, [0.307, 0.042, 0.606, 0.962]: walk, carry/hold (an object)."]}]}, {"video": "_-Z6wFjXtGQ.mkv", "clip": [948, 992], "conversations": [{"from": "human", "timestamps": 978.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.005, 0.012, 0.42, 0.977]: listen to (a person), stand, watch (a person); [0.548, 0.485, 0.942, 0.908]: carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person), bend/bow (at the waist)."}, {"from": "human", "timestamps": 
992.0, "value": "Describe the actions of the person at bbox [0.205, 0.077, 0.439, 0.966] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.205, 0.077, 0.439, 0.966]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 991.0s, [0.093, 0.085, 0.352, 0.956]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 990.0s, [0.089, 0.067, 0.38, 0.983]: stand, carry/hold (an object), listen to (a person).", "From 988.0s to 989.0s, [0.08, 0.059, 0.427, 0.986]->[0.13, 0.041, 0.46, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 986.0s to 987.0s, [0.034, 0.069, 0.343, 1.0]->[0.069, 0.064, 0.395, 0.983]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 982.0s to 985.0s, [0.033, 0.042, 0.461, 0.993]->[0.123, 0.06, 0.503, 0.977]->[0.11, 0.054, 0.405, 0.995]->[0.116, 0.065, 0.483, 0.994]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 981.0s, [0.275, 0.125, 0.534, 0.945]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 980.0s, [0.538, 0.106, 0.777, 0.969]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 978.0s to 979.0s, [0.548, 0.485, 0.942, 0.908]->[0.559, 0.424, 0.938, 0.943]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 976.0s to 977.0s, [0.581, 0.464, 0.929, 0.935]->[0.624, 0.43, 0.947, 0.916]: bend/bow (at the waist), carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 975.0s, [0.601, 0.5, 0.939, 0.943]: bend/bow (at the waist), carry/hold (an object), watch (a person).", "At 974.0s, [0.569, 0.444, 0.875, 0.97]: bend/bow (at the waist), carry/hold (an object), listen to (a person).", "At 973.0s, [0.504, 0.153, 0.709, 0.982]: walk, carry/hold (an object), listen to (a person).", "At 972.0s, [0.247, 0.097, 0.58, 0.995]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 967.0s to 971.0s, [0.064, 0.045, 0.515, 0.977]->[0.061, 0.032, 0.511, 0.993]->[0.051, 0.031, 0.563, 0.988]->[0.042, 0.03, 0.58, 0.978]->[0.027, 0.022, 0.39, 0.989]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 964.0s to 966.0s, [0.056, 0.041, 0.489, 0.982]->[0.078, 0.034, 0.524, 0.984]->[0.062, 0.04, 0.507, 0.979]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 963.0s, [0.073, 0.047, 0.551, 0.976]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 960.0s to 962.0s, [0.21, 0.049, 0.596, 0.979]->[0.161, 0.044, 0.615, 0.985]->[0.115, 0.034, 0.585, 0.992]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 959.0s, [0.305, 0.016, 0.701, 0.982]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 957.0s to 958.0s, [0.318, 0.007, 0.705, 0.993]->[0.289, 0.004, 0.707, 0.985]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 956.0s, [0.307, 0.042, 0.606, 0.962]: walk, carry/hold (an object)."]}]}, {"video": "_-Z6wFjXtGQ.mkv", "clip": [1036, 1078], "conversations": [{"from": "human", "timestamps": 1066.0, "value": "For the person at bbox [0.7, 0.22, 0.944, 0.851], what are their actions 
in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.7, 0.22, 0.944, 0.851]: stand, touch (an object), listen to (a person), watch (a person).", "From 1059.0s to 1065.0s, [0.682, 0.18, 0.944, 0.81]->[0.704, 0.21, 0.968, 0.826]->[0.708, 0.207, 0.948, 0.837]->[0.692, 0.217, 0.939, 0.834]->[0.687, 0.22, 0.936, 0.838]->[0.701, 0.217, 0.944, 0.846]->[0.701, 0.221, 0.941, 0.847]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1058.0s, [0.677, 0.183, 0.982, 0.831]: stand, talk to (e.g., self, a person, a group).", "From 1055.0s to 1057.0s, [0.723, 0.181, 0.972, 0.855]->[0.763, 0.178, 0.987, 0.841]->[0.741, 0.17, 0.985, 0.851]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1054.0s, [0.684, 0.181, 0.973, 0.869]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1053.0s, [0.738, 0.167, 0.976, 0.862]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1078.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.235, 0.116, 0.954, 0.98]: carry/hold (an object), stand."}]}, {"video": "_-Z6wFjXtGQ.mkv", "clip": [1029, 1094], "conversations": [{"from": "human", "timestamps": 1059.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.197, 0.059, 0.48, 0.824]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.346, 0.047, 0.719, 0.995]: carry/hold (an object), stand, listen to (a person); [0.682, 0.18, 0.944, 0.81]: watch (a person), stand, touch (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1094.0, "value": "For the person at bbox [0.662, 0.244, 0.988, 0.991], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.662, 0.244, 0.988, 0.991]: stand, listen to (a person).", "At 1093.0s, [0.666, 0.245, 0.989, 0.986]: stand, listen to (a person).", "From 1090.0s to 1092.0s, [0.666, 0.239, 0.984, 0.992]->[0.659, 0.24, 0.987, 0.984]->[0.66, 0.256, 0.984, 0.987]: stand, listen to (a person), watch (a person)."]}]}, {"video": "_-Z6wFjXtGQ.mkv", "clip": [1051, 1093], "conversations": [{"from": "human", "timestamps": 1081.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.209, 0.057, 0.917, 0.994]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1093.0, "value": "Describe the actions of the person at bbox [0.666, 0.245, 0.989, 0.986] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.666, 0.245, 0.989, 0.986]: stand, listen to (a person).", "From 1090.0s to 1092.0s, [0.666, 0.239, 0.984, 0.992]->[0.659, 0.24, 0.987, 0.984]->[0.66, 0.256, 0.984, 0.987]: stand, listen to (a person), watch (a person)."]}]}, {"video": "_-Z6wFjXtGQ.mkv", "clip": [1150, 1192], "conversations": [{"from": "human", "timestamps": 1180.0, "value": "For the person at bbox [0.555, 0.222, 0.92, 0.981], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.555, 0.222, 0.92, 0.981]: stand, carry/hold (an object), give/serve (an object) to (a person), listen to (a person), watch (a person).", "At 1179.0s, [0.56, 
0.205, 0.983, 0.991]: stand, carry/hold (an object), give/serve (an object) to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1178.0s, [0.506, 0.249, 0.93, 1.0]: stand, carry/hold (an object), drink, give/serve (an object) to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1177.0s, [0.544, 0.265, 0.991, 0.999]: stand, drink, give/serve (an object) to (a person), listen to (a person), watch (a person).", "At 1176.0s, [0.692, 0.247, 0.979, 0.981]: stand, carry/hold (an object), drink, listen to (a person).", "At 1175.0s, [0.699, 0.234, 0.97, 0.98]: stand, carry/hold (an object), listen to (a person).", "At 1174.0s, [0.633, 0.215, 0.945, 0.993]: stand, drink, listen to (a person).", "At 1173.0s, [0.578, 0.192, 0.936, 0.992]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1172.0s, [0.68, 0.198, 0.979, 0.993]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1171.0s, [0.674, 0.217, 0.984, 0.991]: stand, drink, talk to (e.g., self, a person, a group), watch (a person).", "At 1170.0s, [0.801, 0.205, 0.993, 0.996]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1192.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.051, 0.195, 0.387, 1.0]: listen to (a person), stand, watch (a person); [0.357, 0.009, 0.75, 0.996]: listen to (a person), stand, watch (a person), drink; [0.611, 0.204, 0.952, 0.984]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "_-Z6wFjXtGQ.mkv", "clip": [1135, 1207], "conversations": [{"from": "human", "timestamps": 1165.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.041, 0.204, 0.38, 0.982]: listen to (a person), stand, watch (a person); [0.302, 0.072, 0.602, 0.994]: listen to (a person), stand, watch (a person); [0.533, 0.049, 0.925, 0.999]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1207.0, "value": "What actions is the person in bbox [0.596, 0.17, 0.988, 0.959] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.596, 0.17, 0.988, 0.959]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1205.0s to 1206.0s, [0.601, 0.178, 0.975, 0.954]->[0.575, 0.182, 0.988, 0.925]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1204.0s, [0.58, 0.189, 0.986, 0.969]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1203.0s, [0.598, 0.187, 0.979, 0.967]: stand, listen to (a person), watch (a person).", "At 1202.0s, [0.595, 0.16, 0.987, 0.965]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1201.0s, [0.575, 0.176, 0.982, 0.97]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1196.0s to 1200.0s, [0.613, 0.163, 0.982, 0.98]->[0.611, 0.171, 0.98, 0.938]->[0.604, 0.168, 0.984, 0.943]->[0.591, 0.16, 0.982, 0.968]->[0.596, 0.172, 0.982, 0.968]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1195.0s, [0.606, 0.159, 0.991, 0.954]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1192.0s to 1194.0s, [0.611, 0.204, 0.952, 0.984]->[0.614, 0.159, 
0.991, 0.941]->[0.607, 0.141, 0.989, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1191.0s, [0.515, 0.245, 0.87, 0.984]: stand, drink, talk to (e.g., self, a person, a group), watch (a person).", "From 1189.0s to 1190.0s, [0.537, 0.246, 0.842, 0.988]->[0.555, 0.307, 0.934, 0.979]: stand, carry/hold (an object), listen to (a person).", "At 1188.0s, [0.522, 0.199, 0.953, 0.979]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1187.0s, [0.546, 0.199, 0.928, 0.961]: stand, drink, talk to (e.g., self, a person, a group), watch (a person).", "At 1186.0s, [0.626, 0.204, 0.967, 0.983]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1185.0s, [0.472, 0.227, 0.874, 0.982]: stand, carry/hold (an object), give/serve (an object) to (a person), listen to (a person), watch (a person).", "From 1183.0s to 1184.0s, [0.49, 0.23, 0.876, 0.981]->[0.507, 0.234, 0.879, 0.985]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1182.0s, [0.536, 0.22, 0.884, 0.987]: stand, carry/hold (an object), give/serve (an object) to (a person), listen to (a person), watch (a person).", "At 1181.0s, [0.518, 0.221, 0.926, 0.974]: stand, carry/hold (an object), give/serve (an object) to (a person), listen to (a person).", "At 1180.0s, [0.555, 0.222, 0.92, 0.981]: stand, carry/hold (an object), give/serve (an object) to (a person), listen to (a person), watch (a person).", "At 1179.0s, [0.56, 0.205, 0.983, 0.991]: stand, carry/hold (an object), give/serve (an object) to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1178.0s, [0.506, 0.249, 0.93, 1.0]: stand, carry/hold (an object), drink, give/serve (an object) to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1177.0s, [0.544, 0.265, 0.991, 0.999]: stand, drink, give/serve (an object) to (a person), listen to (a person), watch (a person).", "At 1176.0s, [0.692, 0.247, 0.979, 0.981]: stand, carry/hold (an object), drink, listen to (a person).", "At 1175.0s, [0.699, 0.234, 0.97, 0.98]: stand, carry/hold (an object), listen to (a person).", "At 1174.0s, [0.633, 0.215, 0.945, 0.993]: stand, drink, listen to (a person).", "At 1173.0s, [0.578, 0.192, 0.936, 0.992]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1172.0s, [0.68, 0.198, 0.979, 0.993]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1171.0s, [0.674, 0.217, 0.984, 0.991]: stand, drink, talk to (e.g., self, a person, a group), watch (a person).", "At 1170.0s, [0.801, 0.205, 0.993, 0.996]: stand, listen to (a person), watch (a person)."]}]}, {"video": "_-Z6wFjXtGQ.mkv", "clip": [1302, 1355], "conversations": [{"from": "human", "timestamps": 1332.0, "value": "Describe the actions of the person at bbox [0.04, 0.185, 0.405, 0.98] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.04, 0.185, 0.405, 0.98]: sit, carry/hold (an object).", "At 1331.0s, [0.005, 0.174, 0.369, 0.978]: sit, carry/hold (an object).", "At 1330.0s, [0.012, 0.178, 0.373, 0.983]: sit, carry/hold (an object), ride (e.g., a bike, a car, a horse), watch (a person).", "At 1329.0s, [0.03, 0.168, 0.436, 0.984]: sit, carry/hold (an object).", "At 1328.0s, [0.073, 0.163, 0.448, 0.982]: sit, carry/hold (an object), ride (e.g., a bike, a car, a horse), watch (a person).", "At 1327.0s, [0.074, 0.132, 0.509, 
0.975]: sit, carry/hold (an object)."]}, {"from": "human", "timestamps": 1355.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.174, 0.093, 0.453, 0.989]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.507, 0.069, 0.819, 0.991]: carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.803, 0.114, 0.99, 0.972]: carry/hold (an object), stand, watch (a person), listen to (a person)."}]}, {"video": "_-Z6wFjXtGQ.mkv", "clip": [1391, 1426], "conversations": [{"from": "human", "timestamps": 1421.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.011, 0.474, 0.143, 0.969]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.259, 0.339, 0.443, 0.991]: listen to (a person), stand, watch (a person); [0.374, 0.264, 0.529, 0.933]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.594, 0.203, 0.731, 0.924]: listen to (a person), stand, watch (a person); [0.683, 0.183, 0.853, 0.982]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1426.0, "value": "What actions is the person in bbox [0.712, 0.155, 0.871, 0.805] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.712, 0.155, 0.871, 0.805]: stand, listen to (a person), watch (a person).", "At 1425.0s, [0.646, 0.149, 0.807, 0.823]: stand, listen to (a person), watch (a person)."]}]}, {"video": "_-Z6wFjXtGQ.mkv", "clip": [1473, 1510], "conversations": [{"from": "human", "timestamps": 1503.0, "value": "For the person at bbox [0.011, 0.07, 0.403, 0.995], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.011, 0.07, 0.403, 0.995]: stand, listen to (a person), watch (a person).", "At 1502.0s, [0.016, 0.073, 0.423, 1.0]: stand, listen to (a person), watch (a person).", "At 1501.0s, [0.018, 0.074, 0.423, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1510.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.165, 0.093, 0.482, 1.0]: listen to (a person), stand, watch (a person); [0.563, 0.112, 0.873, 0.977]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "_-Z6wFjXtGQ.mkv", "clip": [1533, 1606], "conversations": [{"from": "human", "timestamps": 1563.0, "value": "What actions is the person in bbox [0.476, 0.15, 0.61, 0.468] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.476, 0.15, 0.61, 0.468]: stand, listen to (a person), watch (a person).", "At 1562.0s, [0.43, 0.135, 0.608, 0.902]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1606.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.057, 0.358, 0.999]: listen to (a person), stand, watch (a person); [0.209, 0.312, 0.439, 0.969]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.403, 0.41, 0.523, 0.724]: carry/hold (an object), sit, watch (a person), listen to (a person); [0.416, 0.504, 0.773, 0.991]: watch (a person), stand, talk to (e.g., self, a person, a group); 
[0.51, 0.061, 0.739, 0.594]: ride (e.g., a bike, a car, a horse), sit, watch (a person), listen to (a person), carry/hold (an object); [0.713, 0.216, 0.977, 0.688]: carry/hold (an object), sit, watch (a person), listen to (a person)."}]}, {"video": "_-Z6wFjXtGQ.mkv", "clip": [1596, 1634], "conversations": [{"from": "human", "timestamps": 1626.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.3, 0.453, 0.534, 0.983]: watch (a person), stand; [0.469, 0.263, 0.619, 0.589]: ride (e.g., a bike, a car, a horse), sit; [0.486, 0.117, 0.973, 0.964]: ride (e.g., a bike, a car, a horse), sit, watch (a person)."}, {"from": "human", "timestamps": 1634.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.375, 0.506, 0.745, 1.0]?"}, {"from": "gpt", "value": ["At Now, [0.375, 0.506, 0.745, 1.0]: stand, listen to (a person), watch (a person).", "At 1633.0s, [0.326, 0.515, 0.616, 0.988]: stand, listen to (a person), watch (a person).", "From 1631.0s to 1632.0s, [0.345, 0.514, 0.59, 0.992]->[0.332, 0.498, 0.595, 0.996]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1629.0s to 1630.0s, [0.36, 0.48, 0.642, 0.996]->[0.342, 0.489, 0.626, 0.998]: stand, watch (a person)."]}]}, {"video": "_-Z6wFjXtGQ.mkv", "clip": [1647, 1714], "conversations": [{"from": "human", "timestamps": 1677.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.085, 0.307, 0.325, 0.978]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.6, 0.265, 0.939, 0.989]: listen to (a person), stand, touch (an object); [0.748, 0.176, 0.866, 0.41]: carry/hold (an object), sit, watch (a person)."}, {"from": "human", "timestamps": 1714.0, "value": "What actions is the person in bbox [0.661, 0.259, 0.933, 0.988] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.661, 0.259, 0.933, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1713.0s, [0.659, 0.248, 0.932, 0.994]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1712.0s, [0.477, 0.082, 0.991, 1.0]: stand, listen to (a person), watch (a person).", "At 1711.0s, [0.49, 0.081, 0.985, 1.0]: stand, listen to (a person).", "At 1710.0s, [0.44, 0.081, 0.999, 0.997]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "From 1708.0s to 1709.0s, [0.418, 0.082, 0.993, 0.998]->[0.412, 0.076, 0.997, 0.997]: stand, talk to (e.g., self, a person, a group).", "At 1707.0s, [0.647, 0.236, 0.951, 0.992]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1706.0s, [0.665, 0.252, 0.946, 0.987]: stand, listen to (a person), watch (a person).", "From 1698.0s to 1705.0s, [0.68, 0.268, 0.933, 0.984]->[0.673, 0.262, 0.922, 0.99]->[0.679, 0.261, 0.93, 0.995]->[0.66, 0.229, 0.974, 0.995]->[0.687, 0.253, 0.973, 0.995]->[0.631, 0.256, 0.927, 0.991]->[0.675, 0.237, 0.949, 0.995]->[0.671, 0.243, 0.962, 0.99]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "_-Z6wFjXtGQ.mkv", "clip": [1662, 1732], "conversations": [{"from": "human", "timestamps": 1692.0, "value": "What actions is the person in bbox [0.008, 0.128, 0.388, 0.998] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the 
current frame, [0.008, 0.128, 0.388, 0.998]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1690.0s to 1691.0s, [0.007, 0.121, 0.375, 0.986]->[0.007, 0.123, 0.387, 0.999]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1688.0s to 1689.0s, [0.003, 0.148, 0.25, 0.992]->[0.011, 0.109, 0.353, 1.0]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1687.0s, [0.021, 0.068, 0.404, 0.996]: walk, carry/hold (an object), watch (a person)."]}, {"from": "human", "timestamps": 1732.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.031, 0.091, 0.298, 1.0]: listen to (a person), stand, watch (a person); [0.256, 0.048, 0.542, 1.0]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.486, 0.072, 0.708, 1.0]: carry/hold (an object), stand, watch (a person); [0.612, 0.112, 0.941, 0.984]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "_145Aa_xkuE.mp4", "clip": [876, 960], "conversations": [{"from": "human", "timestamps": 906.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.062, 0.07, 0.627, 0.989]: stand, talk to (e.g., self, a person, a group); [0.577, 0.253, 0.894, 0.996]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 960.0, "value": "What actions is the person in bbox [0.016, 0.0, 0.749, 0.989] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.016, 0.0, 0.749, 0.989]: stand, watch (a person).", "From 958.0s to 959.0s, [0.288, 0.551, 0.341, 0.909]->[0.29, 0.551, 0.341, 0.91]: stand, watch (a person).", "At 957.0s, [0.289, 0.555, 0.341, 0.903]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 956.0s, [0.287, 0.558, 0.341, 0.912]: stand, watch (a person).", "From 953.0s to 955.0s, [0.285, 0.56, 0.341, 0.911]->[0.286, 0.561, 0.341, 0.905]->[0.285, 0.559, 0.341, 0.909]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 952.0s, [0.286, 0.559, 0.341, 0.908]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "_145Aa_xkuE.mp4", "clip": [887, 973], "conversations": [{"from": "human", "timestamps": 917.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.142, 0.124, 0.708, 0.975]: listen to (a person), stand; [0.598, 0.256, 0.885, 0.987]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 973.0, "value": "What actions is the person in bbox [0.32, 0.041, 0.98, 0.982] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.32, 0.041, 0.98, 0.982]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 970.0s to 972.0s, [0.686, 0.21, 1.0, 0.995]->[0.678, 0.179, 1.0, 1.0]->[0.681, 0.152, 1.0, 0.999]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "_145Aa_xkuE.mp4", "clip": [941, 982], "conversations": [{"from": "human", "timestamps": 971.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.141, 0.101, 0.496, 0.988]: listen to (a person), stand; [0.678, 0.179, 1.0, 1.0]: 
watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 982.0, "value": "Describe the actions of the person at bbox [0.028, 0.004, 0.624, 0.982] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.028, 0.004, 0.624, 0.982]: stand, watch (a person).", "From 980.0s to 981.0s, [0.029, 0.005, 0.844, 0.982]->[0.028, 0.008, 0.704, 0.988]: stand, watch (a person)."]}]}, {"video": "_145Aa_xkuE.mp4", "clip": [1031, 1121], "conversations": [{"from": "human", "timestamps": 1061.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.018, 0.054, 0.695, 0.985]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1121.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.228, 0.059, 0.933, 0.988]?"}, {"from": "gpt", "value": ["In the current frame, [0.228, 0.059, 0.933, 0.988]: sit, talk to (e.g., self, a person, a group).", "From 1119.0s to 1120.0s, [0.246, 0.055, 0.965, 0.98]->[0.24, 0.052, 0.956, 0.979]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "_145Aa_xkuE.mp4", "clip": [1052, 1087], "conversations": [{"from": "human", "timestamps": 1082.0, "value": "For the person at bbox [0.01, 0.055, 0.614, 0.968], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.01, 0.055, 0.614, 0.968]: sit, touch (an object).", "From 1078.0s to 1081.0s, [0.138, 0.498, 0.235, 0.765]->[0.137, 0.5, 0.23, 0.746]->[0.138, 0.501, 0.231, 0.75]->[0.023, 0.076, 0.683, 0.96]: sit, touch (an object).", "At 1077.0s, [0.14, 0.495, 0.227, 0.757]: sit.", "From 1075.0s to 1076.0s, [0.139, 0.499, 0.228, 0.754]->[0.14, 0.502, 0.228, 0.762]: sit, write.", "At 1074.0s, [0.263, 0.057, 0.972, 0.992]: sit, carry/hold (an object), write."]}, {"from": "human", "timestamps": 1087.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.409, 0.019, 0.86, 0.969]: sit, drink."}]}, {"video": "_145Aa_xkuE.mp4", "clip": [1137, 1170], "conversations": [{"from": "human", "timestamps": 1167.0, "value": "Describe the actions of the person at bbox [0.279, 0.012, 0.935, 0.991] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.279, 0.012, 0.935, 0.991]: sit.", "From 1164.0s to 1166.0s, [0.311, 0.15, 0.981, 1.0]->[0.461, 0.108, 0.97, 1.0]->[0.158, 0.029, 0.942, 0.993]: sit.", "At 1163.0s, [0.304, 0.067, 0.918, 0.998]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1170.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.13, 0.071, 0.915, 0.982]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "_145Aa_xkuE.mp4", "clip": [1207, 1251], "conversations": [{"from": "human", "timestamps": 1237.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.031, 0.0, 0.719, 0.986]: stand."}, {"from": "human", "timestamps": 1251.0, "value": "What actions is the person in bbox [0.003, 0.0, 0.569, 0.977] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In 
the current frame, [0.003, 0.0, 0.569, 0.977]: stand.", "At 1250.0s, [0.013, 0.0, 0.564, 0.993]: stand.", "From 1246.0s to 1249.0s, [0.025, 0.0, 0.741, 0.989]->[0.708, 0.292, 0.923, 0.942]->[0.704, 0.293, 0.921, 0.874]->[0.707, 0.293, 0.919, 0.933]: stand, listen to (a person), watch (a person).", "At 1245.0s, [0.046, 0.0, 0.709, 0.982]: stand, watch (a person).", "From 1240.0s to 1244.0s, [0.027, 0.002, 0.735, 1.0]->[0.026, 0.0, 0.712, 0.994]->[0.049, 0.0, 0.697, 0.99]->[0.015, 0.0, 0.767, 1.0]->[0.035, 0.0, 0.688, 0.978]: stand.", "At 1239.0s, [0.024, 0.002, 0.736, 0.994]: stand, talk to (e.g., self, a person, a group).", "From 1230.0s to 1238.0s, [0.044, 0.0, 0.755, 0.997]->[0.04, 0.0, 0.684, 0.998]->[0.056, 0.016, 0.66, 0.962]->[0.041, 0.0, 0.733, 1.0]->[0.065, 0.01, 0.706, 1.0]->[0.035, 0.0, 0.735, 1.0]->[0.032, 0.0, 0.754, 0.996]->[0.031, 0.0, 0.719, 0.986]->[0.035, 0.0, 0.705, 0.996]: stand.", "At 1229.0s, [0.0, 0.0, 0.757, 0.979]: stand, listen to (a person)."]}]}, {"video": "_145Aa_xkuE.mp4", "clip": [1270, 1319], "conversations": [{"from": "human", "timestamps": 1300.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.086, 0.248, 0.346, 0.999]: watch (a person), stand; [0.61, 0.227, 0.911, 0.982]: bend/bow (at the waist)."}, {"from": "human", "timestamps": 1319.0, "value": "What actions is the person in bbox [0.077, 0.055, 0.762, 0.983] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.077, 0.055, 0.762, 0.983]: sit.", "At 1318.0s, [0.07, 0.054, 0.808, 0.984]: sit."]}]}, {"video": "_145Aa_xkuE.mp4", "clip": [1251, 1373], "conversations": [{"from": "human", "timestamps": 1281.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.005, 0.675, 0.998]: stand."}, {"from": "human", "timestamps": 1373.0, "value": "What actions is the person in bbox [0.079, 0.058, 0.563, 0.983] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.079, 0.058, 0.563, 0.983]: stand.", "From 1366.0s to 1372.0s, [0.368, 0.0, 0.979, 0.995]->[0.427, 0.0, 0.983, 0.976]->[0.385, 0.0, 1.0, 0.983]->[0.383, 0.0, 0.978, 0.975]->[0.372, 0.0, 0.976, 0.99]->[0.336, 0.021, 0.968, 1.0]->[0.221, 0.0, 0.917, 1.0]: stand.", "At 1365.0s, [0.408, 0.0, 0.978, 1.0]: stand, listen to (a person)."]}]}, {"video": "_145Aa_xkuE.mp4", "clip": [1305, 1336], "conversations": [{"from": "human", "timestamps": 1335.0, "value": "For the person at bbox [0.184, 0.044, 0.968, 0.98], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.184, 0.044, 0.968, 0.98]: stand.", "At 1334.0s, [0.16, 0.009, 0.988, 0.991]: stand."]}, {"from": "human", "timestamps": 1336.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.128, 0.019, 0.974, 0.992]: stand."}]}, {"video": "_145Aa_xkuE.mp4", "clip": [1364, 1460], "conversations": [{"from": "human", "timestamps": 1394.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.0, 0.0, 0.642, 0.979]: stand."}, {"from": "human", "timestamps": 1460.0, "value": "For the person at bbox [0.466, 0.0, 0.999, 0.98], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At 
Now, [0.466, 0.0, 0.999, 0.98]: stand, listen to (a person), watch (a person).", "From 1450.0s to 1459.0s, [0.461, 0.0, 1.0, 0.994]->[0.467, 0.0, 1.0, 0.99]->[0.476, 0.0, 1.0, 0.992]->[0.475, 0.0, 1.0, 0.985]->[0.478, 0.0, 1.0, 0.981]->[0.471, 0.0, 0.997, 0.973]->[0.474, 0.0, 1.0, 0.988]->[0.475, 0.0, 1.0, 0.984]->[0.452, 0.0, 1.0, 0.98]->[0.477, 0.0, 1.0, 0.996]: stand, listen to (a person), watch (a person).", "From 1444.0s to 1449.0s, [0.517, 0.0, 1.0, 0.988]->[0.528, 0.0, 1.0, 0.99]->[0.521, 0.0, 1.0, 0.985]->[0.519, 0.004, 1.0, 0.979]->[0.509, 0.009, 1.0, 0.979]->[0.43, 0.0, 1.0, 0.986]: stand, listen to (a person).", "At 1443.0s, [0.371, 0.586, 0.434, 0.814]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "From 1440.0s to 1442.0s, [0.356, 0.563, 0.419, 0.751]->[0.364, 0.571, 0.425, 0.872]->[0.371, 0.572, 0.429, 0.885]: stand, talk to (e.g., self, a person, a group).", "From 1431.0s to 1439.0s, [0.404, 0.65, 0.465, 0.879]->[0.412, 0.64, 0.468, 0.887]->[0.418, 0.624, 0.47, 0.766]->[0.426, 0.615, 0.481, 0.858]->[0.417, 0.608, 0.471, 0.839]->[0.402, 0.601, 0.457, 0.753]->[0.388, 0.587, 0.446, 0.875]->[0.384, 0.574, 0.438, 0.75]->[0.363, 0.572, 0.427, 0.757]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "_145Aa_xkuE.mp4", "clip": [1490, 1551], "conversations": [{"from": "human", "timestamps": 1520.0, "value": "What actions did the person at bbox [0.357, 0.011, 0.944, 0.996] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.357, 0.011, 0.944, 0.996]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1517.0s to 1519.0s, [0.358, 0.012, 0.957, 0.996]->[0.372, 0.019, 0.945, 0.988]->[0.374, 0.012, 0.952, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1516.0s, [0.346, 0.022, 0.949, 0.987]: stand, watch (a person).", "At 1515.0s, [0.369, 0.025, 0.949, 0.996]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1551.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.004, 0.0, 0.295, 0.995]: listen to (a person), stand, watch (a person); [0.419, 0.021, 0.958, 0.995]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "_145Aa_xkuE.mp4", "clip": [1502, 1609], "conversations": [{"from": "human", "timestamps": 1532.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.0, 0.751, 0.994]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.74, 0.221, 1.0, 1.0]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1609.0, "value": "What actions is the person in bbox [0.363, 0.159, 0.681, 0.81] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.363, 0.159, 0.681, 0.81]: stand, watch (a person).", "At 1608.0s, [0.311, 0.159, 0.622, 0.802]: stand, watch (a person).", "From 1604.0s to 1607.0s, [0.286, 0.137, 0.45, 0.796]->[0.297, 0.137, 0.52, 0.8]->[0.304, 0.152, 0.613, 0.803]->[0.305, 0.121, 0.556, 0.795]: stand."]}]}, {"video": "_145Aa_xkuE.mp4", "clip": [1654, 1726], "conversations": [{"from": "human", "timestamps": 1684.0, "value": "For the person at bbox [0.403, 0.525, 0.494, 0.78], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.403, 0.525, 0.494, 0.78]: run/jog, carry/hold 
(an object), talk to (e.g., self, a person, a group).", "At 1683.0s, [0.416, 0.444, 0.499, 0.814]: stand, carry/hold (an object)."]}, {"from": "human", "timestamps": 1726.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.158, 0.287, 0.613, 0.995]: carry/hold (an object), sit, hug (a person), listen to (a person)."}]}, {"video": "_145Aa_xkuE.mp4", "clip": [1736, 1778], "conversations": [{"from": "human", "timestamps": 1766.0, "value": "What actions did the person at bbox [0.323, 0.327, 0.397, 0.554] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.323, 0.327, 0.397, 0.554]: stand, listen to (a person), watch (a person).", "From 1763.0s to 1765.0s, [0.328, 0.386, 0.393, 0.568]->[0.329, 0.366, 0.393, 0.567]->[0.315, 0.349, 0.391, 0.565]: stand, listen to (a person), watch (a person).", "At 1762.0s, [0.331, 0.387, 0.39, 0.567]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1778.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.174, 0.0, 0.903, 0.978]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "_Ca3gOdOHxU.mp4", "clip": [974, 1011], "conversations": [{"from": "human", "timestamps": 1004.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.184, 0.071, 0.583, 0.991]: carry/hold (an object), stand; [0.509, 0.106, 0.825, 0.986]: carry/hold (an object), stand, drink."}, {"from": "human", "timestamps": 1011.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.24, 0.053, 0.575, 0.986]?"}, {"from": "gpt", "value": ["At Now, [0.24, 0.053, 0.575, 0.986]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1010.0s, [0.224, 0.051, 0.548, 0.992]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1009.0s, [0.202, 0.054, 0.596, 0.979]: stand, carry/hold (an object), take (an object) from (a person), watch (a person).", "At 1008.0s, [0.197, 0.061, 0.596, 0.98]: stand, carry/hold (an object), touch (an object), take (an object) from (a person), watch (a person).", "From 1003.0s to 1007.0s, [0.181, 0.062, 0.576, 0.987]->[0.184, 0.071, 0.583, 0.991]->[0.182, 0.057, 0.585, 0.997]->[0.181, 0.057, 0.591, 0.986]->[0.162, 0.048, 0.531, 0.986]: stand, carry/hold (an object)."]}]}, {"video": "_Ca3gOdOHxU.mp4", "clip": [1110, 1146], "conversations": [{"from": "human", "timestamps": 1140.0, "value": "Describe the actions of the person at bbox [0.513, 0.146, 0.69, 0.983] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.513, 0.146, 0.69, 0.983]: stand, carry/hold (an object), kick (an object).", "At 1139.0s, [0.577, 0.197, 0.727, 0.986]: stand, carry/hold (an object), listen to (a person).", "At 1138.0s, [0.578, 0.19, 0.729, 0.988]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1146.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.009, 0.002, 0.606, 0.973]: carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.45, 0.227, 0.67, 0.94]: watch (a person), walk; [0.56, 0.142, 0.643, 0.481]: listen to (a person), stand, watch (a person); [0.625, 0.348, 0.822, 0.947]: watch (a person), walk; [0.628, 
0.102, 0.859, 0.561]: carry/hold (an object), give/serve (an object) to (a person), walk; [0.731, 0.355, 0.989, 0.983]: carry/hold (an object), walk; [0.84, 0.183, 0.901, 0.381]: watch (a person), stand."}]}, {"video": "_Ca3gOdOHxU.mp4", "clip": [1174, 1237], "conversations": [{"from": "human", "timestamps": 1204.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.374, 0.732, 0.663, 0.988]: listen to (a person), sit, touch (an object); [0.429, 0.001, 0.659, 0.814]: carry/hold (an object), watch (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1237.0, "value": "What actions did the person at bbox [0.158, 0.049, 0.963, 0.973] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.158, 0.049, 0.963, 0.973]: stand, carry/hold (an object), watch (e.g., TV).", "From 1235.0s to 1236.0s, [0.079, 0.041, 0.884, 0.987]->[0.13, 0.073, 0.912, 0.979]: stand, carry/hold (an object)."]}]}, {"video": "_Ca3gOdOHxU.mp4", "clip": [1242, 1377], "conversations": [{"from": "human", "timestamps": 1272.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.392, 0.029, 0.803, 0.917]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1377.0, "value": "What actions did the person at bbox [0.495, 0.094, 0.623, 0.699] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.495, 0.094, 0.623, 0.699]: stand, carry/hold (an object).", "At 1376.0s, [0.774, 0.104, 0.856, 0.553]: run/jog, carry/hold (an object)."]}]}, {"video": "_Ca3gOdOHxU.mp4", "clip": [1298, 1356], "conversations": [{"from": "human", "timestamps": 1328.0, "value": "What actions is the person in bbox [0.307, 0.008, 0.642, 0.999] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.307, 0.008, 0.642, 0.999]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 1326.0s to 1327.0s, [0.291, 0.0, 0.519, 1.0]->[0.326, 0.039, 0.615, 0.983]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1325.0s, [0.238, 0.0, 0.53, 0.993]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1324.0s, [0.228, 0.016, 0.439, 0.998]: walk, carry/hold (an object), listen to (a person).", "At 1323.0s, [0.271, 0.059, 0.411, 1.0]: walk, carry/hold (an object), listen to (a person), watch (a person).", "From 1321.0s to 1322.0s, [0.311, 0.031, 0.516, 0.974]->[0.333, 0.028, 0.575, 1.0]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1317.0s to 1320.0s, [0.318, 0.085, 0.45, 0.977]->[0.328, 0.077, 0.481, 0.983]->[0.299, 0.042, 0.447, 0.995]->[0.301, 0.033, 0.471, 0.974]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1316.0s, [0.286, 0.078, 0.424, 0.926]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1356.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.125, 0.172, 0.204, 0.581]: watch (a person), talk to (e.g., self, a person, a group), walk; [0.193, 0.182, 0.317, 0.716]: carry/hold (an object), stand, watch (a person); [0.379, 0.119, 0.504, 0.788]: listen to (a person), stand, 
watch (a person); [0.47, 0.16, 0.575, 0.447]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.81, 0.184, 0.995, 0.98]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "_Ca3gOdOHxU.mp4", "clip": [1381, 1419], "conversations": [{"from": "human", "timestamps": 1411.0, "value": "Describe the actions of the person at bbox [0.342, 0.09, 0.611, 0.995] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.342, 0.09, 0.611, 0.995]: stand.", "From 1406.0s to 1410.0s, [0.101, 0.041, 0.378, 0.977]->[0.2, 0.06, 0.444, 0.969]->[0.218, 0.063, 0.502, 0.992]->[0.244, 0.059, 0.532, 0.999]->[0.329, 0.069, 0.586, 0.991]: stand, carry/hold (an object).", "At 1405.0s, [0.238, 0.049, 0.466, 0.996]: walk, carry/hold (an object).", "At 1404.0s, [0.151, 0.006, 0.549, 0.973]: walk, carry/hold (an object), watch (a person).", "At 1403.0s, [0.203, 0.039, 0.449, 0.977]: walk, carry/hold (an object).", "At 1402.0s, [0.259, 0.025, 0.474, 0.989]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1401.0s, [0.322, 0.053, 0.503, 0.999]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1399.0s to 1400.0s, [0.321, 0.049, 0.5, 0.993]->[0.321, 0.057, 0.504, 0.998]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1397.0s to 1398.0s, [0.295, 0.053, 0.489, 1.0]->[0.315, 0.054, 0.497, 0.988]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1396.0s, [0.31, 0.07, 0.49, 1.0]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1395.0s, [0.32, 0.092, 0.49, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1393.0s to 1394.0s, [0.317, 0.105, 0.496, 0.991]->[0.319, 0.1, 0.492, 0.991]: stand, carry/hold (an object), listen to (a person).", "At 1392.0s, [0.316, 0.078, 0.491, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1391.0s, [0.311, 0.05, 0.484, 0.995]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1384.0s to 1390.0s, [0.32, 0.12, 0.491, 0.994]->[0.302, 0.079, 0.467, 0.982]->[0.268, 0.074, 0.443, 0.975]->[0.315, 0.079, 0.481, 1.0]->[0.282, 0.05, 0.467, 0.997]->[0.303, 0.051, 0.479, 0.996]->[0.307, 0.064, 0.481, 0.999]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1382.0s to 1383.0s, [0.179, 0.118, 0.339, 0.958]->[0.312, 0.096, 0.466, 0.978]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1381.0s, [0.181, 0.03, 0.403, 0.977]: run/jog, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1419.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.192, 0.115, 0.509, 0.996]: listen to (a person), stand, watch (a person); [0.57, 0.006, 0.948, 0.997]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "_Ca3gOdOHxU.mp4", "clip": [1497, 1570], "conversations": [{"from": "human", "timestamps": 1527.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": 
"gpt", "value": "At now, [0.136, 0.071, 0.855, 0.976]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.836, 0.468, 0.99, 0.98]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1570.0, "value": "For the person at bbox [0.054, 0.012, 0.76, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.054, 0.012, 0.76, 1.0]: walk, watch (a person).", "At 1569.0s, [0.253, 0.039, 0.957, 0.982]: stand, watch (a person).", "At 1568.0s, [0.192, 0.082, 0.945, 0.985]: stand.", "At 1567.0s, [0.243, 0.091, 0.946, 0.984]: stand, touch (an object).", "At 1566.0s, [0.132, 0.07, 0.943, 0.993]: stand, touch (an object), watch (a person)."]}]}, {"video": "_Ca3gOdOHxU.mp4", "clip": [1537, 1574], "conversations": [{"from": "human", "timestamps": 1567.0, "value": "Describe the actions of the person at bbox [0.243, 0.091, 0.946, 0.984] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.243, 0.091, 0.946, 0.984]: stand, touch (an object).", "At 1566.0s, [0.132, 0.07, 0.943, 0.993]: stand, touch (an object), watch (a person)."]}, {"from": "human", "timestamps": 1574.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.132, 0.045, 0.9, 0.986]: watch (a person), sit."}]}, {"video": "_Ca3gOdOHxU.mp4", "clip": [1590, 1669], "conversations": [{"from": "human", "timestamps": 1620.0, "value": "What actions did the person at bbox [0.466, 0.056, 0.875, 0.755] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.466, 0.056, 0.875, 0.755]: sit, carry/hold (an object).", "At 1619.0s, [0.434, 0.057, 0.861, 0.798]: sit, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1669.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.066, 0.079, 0.887, 0.994]: sit."}]}, {"video": "_Ca3gOdOHxU.mp4", "clip": [1667, 1715], "conversations": [{"from": "human", "timestamps": 1697.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.107, 0.176, 0.368, 0.749]: carry/hold (an object), sit, watch (a person); [0.462, 0.091, 0.869, 0.819]: listen to (a person), sit."}, {"from": "human", "timestamps": 1715.0, "value": "For the person at bbox [0.467, 0.067, 0.844, 0.847], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.467, 0.067, 0.844, 0.847]: sit, drive (e.g., a car, a truck), listen to (a person), watch (a person).", "From 1710.0s to 1714.0s, [0.453, 0.086, 0.902, 0.824]->[0.438, 0.099, 0.886, 0.85]->[0.42, 0.078, 0.895, 0.856]->[0.421, 0.091, 0.928, 0.87]->[0.452, 0.067, 0.88, 0.85]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1709.0s, [0.42, 0.129, 0.862, 0.83]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1708.0s, [0.473, 0.086, 0.875, 0.837]: sit, drive (e.g., a car, a truck), listen to (a person).", "At 1707.0s, [0.441, 0.075, 0.86, 0.829]: sit, carry/hold (an object), listen to (a person).", "At 1706.0s, [0.442, 0.078, 0.881, 0.846]: sit, talk to (e.g., self, a person, a group).", "At 1705.0s, [0.451, 0.085, 0.888, 0.87]: sit, drive (e.g., a car, a truck), talk to (e.g., self, a person, a group), watch (a person).", 
"At 1704.0s, [0.419, 0.082, 0.889, 0.857]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1702.0s to 1703.0s, [0.499, 0.097, 0.826, 0.829]->[0.435, 0.116, 0.839, 0.839]: sit, talk to (e.g., self, a person, a group).", "From 1700.0s to 1701.0s, [0.449, 0.091, 0.861, 0.831]->[0.445, 0.062, 0.856, 0.825]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1699.0s, [0.42, 0.069, 0.862, 0.828]: sit.", "At 1698.0s, [0.416, 0.076, 0.86, 0.84]: sit, touch (an object).", "From 1696.0s to 1697.0s, [0.458, 0.104, 0.896, 0.833]->[0.462, 0.091, 0.869, 0.819]: sit, listen to (a person).", "At 1695.0s, [0.409, 0.062, 0.869, 0.819]: sit, listen to (a person), watch (a person)."]}]}, {"video": "_Ca3gOdOHxU.mp4", "clip": [1735, 1770], "conversations": [{"from": "human", "timestamps": 1765.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.08, 0.169, 0.214, 0.621]: touch (an object), listen to (a person), watch (a person), walk, carry/hold (an object); [0.683, 0.386, 0.922, 0.861]: watch (a person), touch (an object), crouch/kneel, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1770.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.315, 0.261, 0.579, 0.947]?"}, {"from": "gpt", "value": ["In the current frame, [0.315, 0.261, 0.579, 0.947]: crouch/kneel, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1768.0s to 1769.0s, [0.31, 0.272, 0.578, 0.969]->[0.31, 0.273, 0.562, 0.94]: crouch/kneel, touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "_a9SWtcaNj8.mkv", "clip": [882, 935], "conversations": [{"from": "human", "timestamps": 912.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.555, 0.026, 0.987, 0.949]: sit."}, {"from": "human", "timestamps": 935.0, "value": "What actions is the person in bbox [0.0, 0.088, 0.825, 0.994] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.088, 0.825, 0.994]: lie/sleep.", "From 933.0s to 934.0s, [0.0, 0.101, 0.829, 0.994]->[0.0, 0.098, 0.84, 0.999]: lie/sleep."]}]}, {"video": "_a9SWtcaNj8.mkv", "clip": [932, 1015], "conversations": [{"from": "human", "timestamps": 962.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.219, 0.197, 0.378, 0.686]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1015.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.358, 0.034, 0.96, 0.986]?"}, {"from": "gpt", "value": ["In the current frame, [0.358, 0.034, 0.96, 0.986]: stand, touch (an object).", "At 1014.0s, [0.203, 0.006, 0.887, 0.954]: stand, touch (an object).", "At 1013.0s, [0.447, 0.149, 0.943, 0.961]: stand.", "From 1011.0s to 1012.0s, [0.267, 0.012, 0.653, 0.981]->[0.247, 0.055, 0.796, 0.983]: walk.", "At 1010.0s, [0.24, 0.069, 0.684, 0.978]: walk, carry/hold (an object).", "At 1009.0s, [0.436, 0.128, 0.776, 1.0]: walk."]}]}, {"video": "_a9SWtcaNj8.mkv", "clip": [1052, 1084], "conversations": [{"from": "human", "timestamps": 1082.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.177, 0.0, 0.998, 0.982]: 
watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1084.0, "value": "Describe the actions of the person at bbox [0.125, 0.0, 1.0, 0.988] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.125, 0.0, 1.0, 0.988]: stand, talk to (e.g., self, a person, a group).", "At 1083.0s, [0.129, 0.0, 1.0, 0.988]: stand, talk to (e.g., self, a person, a group).", "At 1082.0s, [0.177, 0.0, 0.998, 0.982]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "_a9SWtcaNj8.mkv", "clip": [1068, 1124], "conversations": [{"from": "human", "timestamps": 1098.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.0, 0.767, 0.994]?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.0, 0.767, 0.994]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1096.0s to 1097.0s, [0.002, 0.0, 0.754, 1.0]->[0.0, 0.0, 0.654, 0.999]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1095.0s, [0.0, 0.0, 0.92, 1.0]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1124.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.026, 0.223, 0.435, 0.989]: listen to (a person), stand, watch (a person); [0.242, 0.17, 0.895, 0.968]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "_a9SWtcaNj8.mkv", "clip": [1142, 1242], "conversations": [{"from": "human", "timestamps": 1172.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.008, 0.148, 0.353, 1.0]: watch (a person), crouch/kneel, talk to (e.g., self, a person, a group); [0.202, 0.104, 0.764, 0.968]: carry/hold (an object), sit, listen to (a person)."}, {"from": "human", "timestamps": 1242.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.163, 0.0, 0.997, 0.987]?"}, {"from": "gpt", "value": ["At Now, [0.163, 0.0, 0.997, 0.987]: stand, talk to (e.g., self, a person, a group).", "At 1241.0s, [0.193, 0.014, 0.981, 0.989]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "_a9SWtcaNj8.mkv", "clip": [1281, 1374], "conversations": [{"from": "human", "timestamps": 1311.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.079, 0.066, 0.902, 0.983]: stand, talk to (e.g., self, a person, a group), dress/put on clothing."}, {"from": "human", "timestamps": 1374.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.015, 0.02, 0.724, 0.981]?"}, {"from": "gpt", "value": ["At Now, [0.015, 0.02, 0.724, 0.981]: stand, listen to (a person), watch (a person).", "At 1373.0s, [0.021, 0.03, 0.721, 0.968]: stand."]}]}, {"video": "_a9SWtcaNj8.mkv", "clip": [1364, 1466], "conversations": [{"from": "human", "timestamps": 1394.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.003, 0.149, 0.37, 1.0]: listen to (a person), walk; [0.411, 0.08, 0.823, 0.996]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1466.0, "value": "What actions is the person in bbox [0.001, 0.0, 0.716, 0.993] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.001, 0.0, 0.716, 0.993]: stand, talk to (e.g., 
self, a person, a group).", "At 1465.0s, [0.015, 0.0, 0.748, 0.994]: talk to (e.g., self, a person, a group).", "From 1463.0s to 1464.0s, [0.001, 0.0, 0.761, 0.979]->[0.019, 0.0, 0.757, 0.986]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "_a9SWtcaNj8.mkv", "clip": [1474, 1582], "conversations": [{"from": "human", "timestamps": 1504.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.025, 0.023, 0.696, 0.995]: stand."}, {"from": "human", "timestamps": 1582.0, "value": "What actions is the person in bbox [0.065, 0.003, 1.0, 0.978] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.065, 0.003, 1.0, 0.978]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1580.0s to 1581.0s, [0.127, 0.0, 0.994, 1.0]->[0.097, 0.009, 1.0, 0.992]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1579.0s, [0.019, 0.0, 0.759, 0.997]: stand, listen to (a person), watch (a person)."]}]}, {"video": "_a9SWtcaNj8.mkv", "clip": [1531, 1595], "conversations": [{"from": "human", "timestamps": 1561.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.1, 0.081, 0.556, 0.997]: talk to (e.g., self, a person, a group), walk; [0.565, 0.127, 0.996, 0.988]: listen to (a person), walk."}, {"from": "human", "timestamps": 1595.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.016, 0.058, 0.541, 0.982]?"}, {"from": "gpt", "value": ["At Now, [0.016, 0.058, 0.541, 0.982]: walk.", "At 1594.0s, [0.109, 0.002, 0.581, 0.98]: walk, watch (a person).", "At 1593.0s, [0.387, 0.066, 0.772, 0.992]: stand."]}]}, {"video": "_a9SWtcaNj8.mkv", "clip": [1596, 1628], "conversations": [{"from": "human", "timestamps": 1626.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.275, 0.002, 0.976, 0.981]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1628.0, "value": "For the person at bbox [0.01, 0.028, 0.621, 0.989], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.01, 0.028, 0.621, 0.989]: stand, talk to (e.g., self, a person, a group).", "At 1627.0s, [0.005, 0.031, 0.637, 0.976]: sit, stand, listen to (a person)."]}]}, {"video": "_a9SWtcaNj8.mkv", "clip": [1607, 1707], "conversations": [{"from": "human", "timestamps": 1637.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.124, 0.0, 1.0, 0.996]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1707.0, "value": "Describe the actions of the person at bbox [0.104, 0.0, 0.95, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.104, 0.0, 0.95, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1706.0s, [0.014, 0.009, 0.795, 0.991]: stand, listen to (a person), watch (a person).", "At 1704.0s, [0.036, 0.007, 0.937, 1.0]: stand.", "From 1702.0s to 1703.0s, [0.0, 0.008, 0.942, 1.0]->[0.015, 0.009, 0.979, 1.0]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "_a9SWtcaNj8.mkv", "clip": [1661, 1719], "conversations": [{"from": "human", "timestamps": 1691.0, "value": "For the current frame, show each individual's 
position and actions."}, {"from": "gpt", "value": "In the current frame, [0.13, 0.0, 0.988, 1.0]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1719.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.045, 0.0, 1.0, 0.984]?"}, {"from": "gpt", "value": ["At Now, [0.045, 0.0, 1.0, 0.984]: stand, talk to (e.g., self, a person, a group).", "From 1716.0s to 1718.0s, [0.176, 0.0, 0.981, 1.0]->[0.059, 0.0, 0.972, 0.987]->[0.051, 0.0, 0.964, 0.991]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "_ithRWANKB0.mp4", "clip": [911, 1017], "conversations": [{"from": "human", "timestamps": 941.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.178, 0.45, 0.986]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.408, 0.037, 0.952, 0.987]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1017.0, "value": "For the person at bbox [0.573, 0.21, 0.759, 0.918], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.573, 0.21, 0.759, 0.918]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1016.0s, [0.508, 0.146, 0.681, 0.993]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1015.0s, [0.605, 0.131, 0.726, 0.908]: walk, watch (a person)."]}]}, {"video": "_ithRWANKB0.mp4", "clip": [958, 1006], "conversations": [{"from": "human", "timestamps": 988.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.186, 0.133, 0.347, 0.992]: listen to (a person), stand, watch (a person); [0.302, 0.137, 0.492, 0.981]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.42, 0.125, 0.517, 0.605]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.488, 0.125, 0.644, 0.984]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.626, 0.133, 0.772, 0.984]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.711, 0.209, 0.876, 0.991]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1006.0, "value": "What actions is the person in bbox [0.749, 0.184, 0.885, 0.959] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.749, 0.184, 0.885, 0.959]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1005.0s, [0.833, 0.192, 0.962, 0.918]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1004.0s, [0.885, 0.236, 1.0, 0.865]: walk, listen to (a person), watch (a person)."]}]}, {"video": "_ithRWANKB0.mp4", "clip": [1058, 1130], "conversations": [{"from": "human", "timestamps": 1088.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.412, 0.069, 0.536, 0.99]: sit, kiss (a person)."}, {"from": "human", "timestamps": 1130.0, "value": "What actions did the person at bbox [0.219, 0.153, 0.688, 0.986] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.219, 0.153, 0.688, 0.986]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1129.0s, [0.2, 0.099, 0.702, 0.987]: stand, talk to 
(e.g., self, a person, a group), watch (a person).", "From 1121.0s to 1128.0s, [0.213, 0.104, 0.733, 0.994]->[0.199, 0.113, 0.711, 0.994]->[0.204, 0.112, 0.708, 0.985]->[0.139, 0.106, 0.688, 0.984]->[0.133, 0.099, 0.701, 0.994]->[0.153, 0.114, 0.66, 0.99]->[0.18, 0.102, 0.713, 0.99]->[0.186, 0.103, 0.69, 0.997]: stand, talk to (e.g., self, a person, a group).", "At 1120.0s, [0.102, 0.109, 0.735, 0.995]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "_ithRWANKB0.mp4", "clip": [1189, 1222], "conversations": [{"from": "human", "timestamps": 1219.0, "value": "For the person at bbox [0.341, 0.269, 0.681, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.341, 0.269, 0.681, 1.0]: dance, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1218.0s, [0.298, 0.292, 0.74, 0.982]: dance.", "At 1217.0s, [0.166, 0.249, 0.489, 0.956]: dance, watch (a person)."]}, {"from": "human", "timestamps": 1222.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.308, 0.085, 0.887, 0.961]: lie/sleep, kiss (a person)."}]}, {"video": "_ithRWANKB0.mp4", "clip": [1255, 1296], "conversations": [{"from": "human", "timestamps": 1285.0, "value": "Describe the actions of the person at bbox [0.015, 0.201, 0.376, 0.979] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.015, 0.201, 0.376, 0.979]: stand, listen to (a person), watch (a person).", "From 1280.0s to 1284.0s, [0.005, 0.0, 0.466, 0.981]->[0.025, 0.084, 0.433, 1.0]->[0.019, 0.193, 0.405, 0.987]->[0.01, 0.227, 0.396, 1.0]->[0.02, 0.194, 0.388, 0.99]: stand, listen to (a person), watch (a person).", "At 1279.0s, [0.005, 0.0, 0.464, 0.984]: stand, watch (a person).", "From 1276.0s to 1278.0s, [0.0, 0.0, 0.47, 0.972]->[0.006, 0.005, 0.489, 0.971]->[0.008, 0.0, 0.488, 0.975]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1296.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.006, 0.057, 0.447, 0.989]: stand, talk to (e.g., self, a person, a group); [0.384, 0.208, 0.547, 0.999]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.511, 0.106, 0.723, 0.994]: listen to (a person), stand, watch (a person); [0.705, 0.341, 0.785, 0.985]: listen to (a person), stand, watch (a person); [0.781, 0.106, 0.998, 0.997]: listen to (a person), stand."}]}, {"video": "_ithRWANKB0.mp4", "clip": [1484, 1546], "conversations": [{"from": "human", "timestamps": 1514.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.662, 0.256, 0.782, 0.749]: touch (an object), walk."}, {"from": "human", "timestamps": 1546.0, "value": "What actions did the person at bbox [0.008, 0.326, 0.257, 0.633] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.008, 0.326, 0.257, 0.633]: sit, carry/hold (an object), watch (a person).", "From 1544.0s to 1545.0s, [0.073, 0.327, 0.362, 0.655]->[0.005, 0.331, 0.259, 0.643]: carry/hold (an object), lie/sleep, watch (a person)."]}]}, {"video": "_ithRWANKB0.mp4", "clip": [1652, 1728], "conversations": [{"from": "human", "timestamps": 1682.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.149, 0.0, 0.97, 1.0]: stand, 
fishing, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1728.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.557, 0.029, 0.993, 0.987]?"}, {"from": "gpt", "value": ["At Now, [0.557, 0.029, 0.993, 0.987]: sit, sail boat, listen to (a person), watch (a person).", "At 1727.0s, [0.568, 0.034, 0.996, 0.979]: sit, stand, sail boat, listen to (a person), watch (a person).", "At 1726.0s, [0.515, 0.035, 0.984, 0.984]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1725.0s, [0.566, 0.058, 0.99, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1724.0s, [0.56, 0.045, 0.989, 0.997]: sit, sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1723.0s, [0.575, 0.044, 0.991, 1.0]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1722.0s, [0.589, 0.067, 0.981, 0.983]: sit, sail boat, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1721.0s, [0.604, 0.05, 0.984, 0.993]: sit, sail boat, listen to (a person), watch (a person).", "At 1720.0s, [0.6, 0.058, 0.976, 0.988]: sit, sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1719.0s, [0.586, 0.121, 0.953, 0.993]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1718.0s, [0.561, 0.105, 0.885, 1.0]: sit, sail boat, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1717.0s, [0.625, 0.089, 0.916, 1.0]: sit, sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1716.0s, [0.567, 0.149, 0.907, 0.997]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "_mAfwH6i90E.mkv", "clip": [886, 959], "conversations": [{"from": "human", "timestamps": 916.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.131, 0.102, 0.983, 0.985]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 959.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.224, 0.004, 1.0, 0.991]?"}, {"from": "gpt", "value": ["At Now, [0.224, 0.004, 1.0, 0.991]: stand, talk to (e.g., self, a person, a group).", "From 954.0s to 958.0s, [0.21, 0.002, 1.0, 1.0]->[0.264, 0.003, 1.0, 0.991]->[0.205, 0.0, 1.0, 1.0]->[0.29, 0.007, 1.0, 0.992]->[0.233, 0.017, 1.0, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 950.0s to 953.0s, [0.243, 0.009, 1.0, 0.993]->[0.261, 0.0, 1.0, 0.994]->[0.29, 0.0, 1.0, 1.0]->[0.165, 0.011, 1.0, 0.974]: stand.", "At 949.0s, [0.254, 0.01, 1.0, 0.991]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "From 947.0s to 948.0s, [0.307, 0.014, 1.0, 0.994]->[0.091, 0.02, 1.0, 0.994]: stand.", "At 946.0s, [0.317, 0.002, 1.0, 1.0]: stand, listen to (a person), watch (a person)."]}]}, {"video": "_mAfwH6i90E.mkv", "clip": [946, 1016], "conversations": [{"from": "human", "timestamps": 976.0, "value": "Describe the actions of the person at bbox [0.325, 0.013, 1.0, 0.997] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.325, 0.013, 1.0, 0.997]: stand.", "At 975.0s, [0.316, 0.007, 1.0, 1.0]: stand, listen to (a person)."]}, {"from": "human", "timestamps": 1016.0, "value": "In the current frame, list each person's location and 
actions."}, {"from": "gpt", "value": "At now, [0.099, 0.174, 0.489, 0.979]: walk; [0.466, 0.348, 0.833, 0.938]: carry/hold (an object), listen to (a person), walk."}]}, {"video": "_mAfwH6i90E.mkv", "clip": [1004, 1046], "conversations": [{"from": "human", "timestamps": 1034.0, "value": "Describe the actions of the person at bbox [0.653, 0.098, 0.95, 0.977] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.653, 0.098, 0.95, 0.977]: stand, listen to (a person), watch (a person).", "At 1033.0s, [0.697, 0.061, 0.983, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1032.0s, [0.754, 0.028, 0.981, 0.937]: stand, watch (a person).", "At 1031.0s, [0.552, 0.053, 0.958, 0.978]: stand, touch (an object), watch (a person).", "From 1029.0s to 1030.0s, [0.599, 0.066, 0.993, 1.0]->[0.583, 0.074, 0.987, 0.994]: stand, carry/hold (an object), watch (a person).", "At 1028.0s, [0.541, 0.056, 0.979, 0.998]: stand, watch (a person).", "At 1027.0s, [0.57, 0.088, 0.972, 0.995]: walk, watch (a person).", "From 1025.0s to 1026.0s, [0.561, 0.171, 0.852, 0.964]->[0.405, 0.086, 0.911, 0.991]: walk."]}, {"from": "human", "timestamps": 1046.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.0, 0.471, 0.998]: listen to (a person), stand, watch (a person); [0.237, 0.033, 1.0, 0.971]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "_mAfwH6i90E.mkv", "clip": [1075, 1137], "conversations": [{"from": "human", "timestamps": 1105.0, "value": "What actions did the person at bbox [0.029, 0.235, 0.577, 0.99] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.029, 0.235, 0.577, 0.99]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1103.0s to 1104.0s, [0.083, 0.237, 0.918, 0.976]->[0.065, 0.22, 0.896, 0.989]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1100.0s to 1102.0s, [0.066, 0.244, 0.924, 0.987]->[0.086, 0.245, 0.942, 0.99]->[0.089, 0.168, 0.684, 0.954]: sit, carry/hold (an object), listen to (a person).", "At 1099.0s, [0.071, 0.229, 0.883, 1.0]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "From 1096.0s to 1098.0s, [0.055, 0.285, 0.566, 0.983]->[0.032, 0.256, 0.949, 0.996]->[0.054, 0.258, 0.55, 0.993]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1137.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.076, 0.106, 0.604, 0.985]: carry/hold (an object), sit, watch (a person)."}]}, {"video": "_mAfwH6i90E.mkv", "clip": [1120, 1176], "conversations": [{"from": "human", "timestamps": 1150.0, "value": "What actions is the person in bbox [0.371, 0.191, 0.939, 0.981] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.371, 0.191, 0.939, 0.981]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1149.0s, [0.359, 0.191, 0.955, 0.996]: talk to (e.g., self, a person, a group).", "From 1146.0s to 1148.0s, [0.356, 0.17, 0.962, 0.988]->[0.371, 0.186, 0.946, 0.987]->[0.355, 0.184, 0.954, 0.995]: sit, talk to (e.g., self, a person, a group).", "At 1145.0s, [0.362, 0.175, 0.946, 0.993]: sit, watch (a person)."]}, {"from": "human", "timestamps": 1176.0, "value": "Describe 
the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.481, 0.249, 0.636, 0.93]: carry/hold (an object), run/jog."}]}, {"video": "_mAfwH6i90E.mkv", "clip": [1186, 1223], "conversations": [{"from": "human", "timestamps": 1216.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.341, 0.063, 0.66, 0.971]?"}, {"from": "gpt", "value": ["At Now, [0.341, 0.063, 0.66, 0.971]: walk, smoke, talk to (e.g., self, a person, a group).", "From 1214.0s to 1215.0s, [0.359, 0.048, 0.616, 0.964]->[0.373, 0.072, 0.66, 0.981]: stand, smoke, talk to (e.g., self, a person, a group).", "From 1211.0s to 1213.0s, [0.336, 0.462, 0.465, 0.965]->[0.337, 0.461, 0.476, 0.941]->[0.339, 0.446, 0.487, 0.944]: stand, carry/hold (an object)."]}, {"from": "human", "timestamps": 1223.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.041, 0.068, 0.682, 0.987]: smoke, walk."}]}, {"video": "_mAfwH6i90E.mkv", "clip": [1371, 1453], "conversations": [{"from": "human", "timestamps": 1401.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.0, 0.24, 0.157, 0.739]: stand; [0.0, 0.262, 0.605, 0.997]: watch (a person), dance; [0.894, 0.249, 0.999, 0.825]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1453.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.452, 0.145, 0.814, 0.998]?"}, {"from": "gpt", "value": ["In the current frame, [0.452, 0.145, 0.814, 0.998]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1450.0s to 1452.0s, [0.439, 0.171, 0.817, 0.995]->[0.448, 0.153, 0.812, 0.997]->[0.465, 0.151, 0.785, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1449.0s, [0.414, 0.147, 0.777, 0.992]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1448.0s, [0.228, 0.002, 0.957, 0.976]: stand, carry/hold (an object).", "At 1447.0s, [0.187, 0.0, 0.933, 0.975]: stand."]}]}, {"video": "_mAfwH6i90E.mkv", "clip": [1411, 1476], "conversations": [{"from": "human", "timestamps": 1441.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.255, 0.0, 0.946, 0.988]?"}, {"from": "gpt", "value": ["At Now, [0.255, 0.0, 0.946, 0.988]: stand, talk to (e.g., self, a person, a group).", "At 1440.0s, [0.227, 0.0, 0.917, 0.971]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1476.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.454, 0.129, 0.61, 0.48]: listen to (a person), stand, watch (a person); [0.664, 0.074, 0.995, 0.981]: carry/hold (an object), stand, listen to (a person)."}]}, {"video": "_mAfwH6i90E.mkv", "clip": [1508, 1580], "conversations": [{"from": "human", "timestamps": 1538.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.042, 0.223, 0.997]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.088, 0.271, 0.235, 0.561]: watch (a person), stand; [0.119, 0.169, 0.454, 0.979]: stand, talk to (e.g., self, a person, a group); [0.403, 0.01, 0.965, 0.981]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 
1580.0, "value": "Describe the actions of the person at bbox [0.071, 0.08, 0.713, 0.984] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.071, 0.08, 0.713, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1579.0s, [0.05, 0.086, 0.795, 0.98]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1578.0s, [0.087, 0.093, 0.715, 0.972]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1569.0s to 1577.0s, [0.004, 0.059, 0.627, 1.0]->[0.01, 0.053, 0.629, 1.0]->[0.0, 0.045, 0.628, 1.0]->[0.023, 0.047, 0.625, 1.0]->[0.009, 0.045, 0.599, 1.0]->[0.0, 0.052, 0.61, 0.99]->[0.023, 0.066, 0.593, 0.991]->[0.043, 0.061, 0.596, 0.996]->[0.072, 0.074, 0.613, 0.97]: stand, listen to (a person), watch (a person).", "At 1568.0s, [0.008, 0.052, 0.562, 0.986]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1566.0s to 1567.0s, [0.024, 0.061, 0.636, 1.0]->[0.013, 0.055, 0.565, 0.993]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1563.0s to 1565.0s, [0.458, 0.258, 0.6, 0.918]->[0.458, 0.249, 0.601, 0.932]->[0.458, 0.248, 0.606, 0.914]: stand, listen to (a person), watch (a person).", "From 1556.0s to 1562.0s, [0.468, 0.259, 0.622, 0.925]->[0.467, 0.258, 0.612, 0.904]->[0.464, 0.246, 0.614, 0.91]->[0.467, 0.256, 0.612, 0.933]->[0.465, 0.241, 0.616, 0.929]->[0.461, 0.247, 0.622, 0.923]->[0.464, 0.255, 0.616, 0.928]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "_mAfwH6i90E.mkv", "clip": [1615, 1647], "conversations": [{"from": "human", "timestamps": 1645.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.058, 0.012, 0.846, 0.99]?"}, {"from": "gpt", "value": ["In the current frame, [0.058, 0.012, 0.846, 0.99]: stand, talk to (e.g., self, a person, a group).", "From 1642.0s to 1644.0s, [0.034, 0.002, 0.774, 0.963]->[0.012, 0.003, 0.916, 0.979]->[0.079, 0.0, 0.959, 0.978]: stand, talk to (e.g., self, a person, a group).", "From 1640.0s to 1641.0s, [0.032, 0.026, 0.913, 0.969]->[0.009, 0.011, 0.976, 0.969]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1647.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.057, 0.0, 0.931, 0.99]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "_mAfwH6i90E.mkv", "clip": [1725, 1758], "conversations": [{"from": "human", "timestamps": 1755.0, "value": "Describe the actions of the person at bbox [0.025, 0.057, 0.837, 0.987] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.025, 0.057, 0.837, 0.987]: stand, talk to (e.g., self, a person, a group).", "From 1752.0s to 1754.0s, [0.0, 0.102, 0.888, 0.987]->[0.003, 0.06, 0.795, 0.962]->[0.004, 0.047, 0.816, 0.983]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1758.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.452, 0.249, 0.592, 0.576]: stand, talk to (e.g., self, a person, a group); [0.616, 0.376, 0.755, 0.601]: listen to (a person), sit; [0.818, 0.102, 0.983, 0.911]: listen to (a person), stand, watch (a person)."}]}, {"video": "aDEYi1OG0vU.mkv", "clip": [922, 986], "conversations": [{"from": "human", "timestamps": 952.0, 
"value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.171, 0.096, 0.572, 0.966]: listen to (a person), stand, watch (a person); [0.5, 0.053, 1.0, 0.956]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 986.0, "value": "What actions did the person at bbox [0.462, 0.164, 0.624, 0.757] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.462, 0.164, 0.624, 0.757]: walk, carry/hold (an object).", "At 985.0s, [0.461, 0.202, 0.622, 0.827]: walk, carry/hold (an object)."]}]}, {"video": "aDEYi1OG0vU.mkv", "clip": [968, 1013], "conversations": [{"from": "human", "timestamps": 998.0, "value": "Describe the actions of the person at bbox [0.149, 0.072, 0.567, 0.979] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.149, 0.072, 0.567, 0.979]: stand.", "From 996.0s to 997.0s, [0.169, 0.137, 0.438, 0.999]->[0.177, 0.097, 0.432, 0.992]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1013.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.486, 0.098, 0.996, 0.994]: carry/hold (an object), talk to (e.g., self, a person, a group), bend/bow (at the waist)."}]}, {"video": "aDEYi1OG0vU.mkv", "clip": [991, 1111], "conversations": [{"from": "human", "timestamps": 1021.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.5, 0.046, 0.865, 1.0]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1111.0, "value": "Describe the actions of the person at bbox [0.122, 0.017, 0.441, 0.981] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.122, 0.017, 0.441, 0.981]: stand, carry/hold (an object).", "At 1110.0s, [0.694, 0.292, 0.977, 0.989]: stand, listen to (a person), watch (a person)."]}]}, {"video": "aDEYi1OG0vU.mkv", "clip": [1165, 1225], "conversations": [{"from": "human", "timestamps": 1195.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.552, 0.053, 1.0, 0.988]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1225.0, "value": "For the person at bbox [0.095, 0.094, 0.701, 0.972], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.095, 0.094, 0.701, 0.972]: stand, talk to (e.g., self, a person, a group).", "From 1220.0s to 1224.0s, [0.154, 0.072, 0.731, 0.971]->[0.182, 0.091, 0.706, 0.975]->[0.179, 0.093, 0.722, 0.976]->[0.13, 0.098, 0.723, 0.974]->[0.106, 0.1, 0.723, 0.976]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "aDEYi1OG0vU.mkv", "clip": [1184, 1246], "conversations": [{"from": "human", "timestamps": 1214.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.179, 0.043, 0.528, 0.967]: watch (a person), talk to (e.g., self, a person, a group), walk; [0.695, 0.202, 1.0, 0.967]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1246.0, "value": "What actions did the person at bbox [0.077, 0.089, 0.708, 0.947] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.077, 0.089, 0.708, 0.947]: stand, watch (a person).", 
"At 1245.0s, [0.202, 0.073, 0.654, 0.975]: stand, watch (a person)."]}]}, {"video": "aDEYi1OG0vU.mkv", "clip": [1260, 1334], "conversations": [{"from": "human", "timestamps": 1290.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.039, 0.017, 0.776, 0.99]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1334.0, "value": "What actions is the person in bbox [0.065, 0.011, 0.694, 0.988] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.065, 0.011, 0.694, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1333.0s, [0.071, 0.023, 0.707, 0.992]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "aDEYi1OG0vU.mkv", "clip": [1323, 1357], "conversations": [{"from": "human", "timestamps": 1353.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.339, 0.119, 0.553, 0.979]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1357.0, "value": "What actions is the person in bbox [0.254, 0.04, 0.941, 0.988] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.254, 0.04, 0.941, 0.988]: stand, watch (a person).", "At 1356.0s, [0.223, 0.049, 0.908, 0.988]: stand, watch (a person).", "At 1355.0s, [0.246, 0.039, 0.876, 0.978]: stand."]}]}, {"video": "aDEYi1OG0vU.mkv", "clip": [1397, 1468], "conversations": [{"from": "human", "timestamps": 1427.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.021, 0.265, 0.971]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.352, 0.0, 1.0, 1.0]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1468.0, "value": "What actions did the person at bbox [0.089, 0.299, 0.738, 0.948] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.089, 0.299, 0.738, 0.948]: stand, carry/hold (an object), put down.", "At 1467.0s, [0.218, 0.272, 0.543, 0.837]: stand, carry/hold (an object).", "At 1466.0s, [0.164, 0.232, 0.443, 0.85]: stand, carry/hold (an object), put down."]}]}, {"video": "aDEYi1OG0vU.mkv", "clip": [1455, 1491], "conversations": [{"from": "human", "timestamps": 1485.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.469, 0.026, 0.952, 0.991]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1491.0, "value": "What actions did the person at bbox [0.545, 0.002, 0.986, 0.986] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.545, 0.002, 0.986, 0.986]: stand, talk to (e.g., self, a person, a group).", "From 1483.0s to 1490.0s, [0.478, 0.013, 0.949, 0.986]->[0.464, 0.005, 0.933, 0.987]->[0.469, 0.026, 0.952, 0.991]->[0.446, 0.03, 0.964, 0.986]->[0.389, 0.032, 0.969, 0.99]->[0.459, 0.007, 0.941, 0.988]->[0.516, 0.007, 0.968, 0.98]->[0.534, 0.0, 0.975, 0.986]: stand, talk to (e.g., self, a person, a group).", "At 1482.0s, [0.476, 0.0, 0.95, 0.985]: stand.", "At 1481.0s, [0.467, 0.005, 0.965, 0.992]: sit, talk to (e.g., self, a person, a group).", "At 1480.0s, [0.45, 0.006, 0.962, 0.982]: talk to (e.g., self, a person, a group).", "At 1479.0s, [0.474, 0.0, 0.98, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1478.0s, 
[0.447, 0.002, 0.991, 0.993]: stand."]}]}, {"video": "aDEYi1OG0vU.mkv", "clip": [1519, 1564], "conversations": [{"from": "human", "timestamps": 1549.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.015, 0.0, 0.624, 1.0]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1564.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.346, 0.066, 0.96, 1.0]?"}, {"from": "gpt", "value": ["In the current frame, [0.346, 0.066, 0.96, 1.0]: sit, listen to (a person).", "At 1563.0s, [0.306, 0.069, 0.965, 1.0]: sit.", "At 1562.0s, [0.338, 0.074, 0.985, 0.999]: sit, listen to (a person)."]}]}, {"video": "aDEYi1OG0vU.mkv", "clip": [1549, 1613], "conversations": [{"from": "human", "timestamps": 1579.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.326, 0.028, 0.995, 0.969]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1613.0, "value": "What actions is the person in bbox [0.459, 0.0, 1.0, 1.0] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.459, 0.0, 1.0, 1.0]: bend/bow (at the waist), sit.", "At 1612.0s, [0.528, 0.002, 0.986, 0.97]: sit.", "At 1611.0s, [0.569, 0.183, 0.97, 0.994]: sit, listen to (a person)."]}]}, {"video": "aDEYi1OG0vU.mkv", "clip": [1600, 1645], "conversations": [{"from": "human", "timestamps": 1630.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.231, 0.163, 0.992]: walk; [0.222, 0.038, 0.97, 0.961]: push (another person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1645.0, "value": "For the person at bbox [0.456, 0.027, 0.83, 0.993], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.456, 0.027, 0.83, 0.993]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1644.0s, [0.456, 0.03, 0.826, 0.976]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1642.0s to 1643.0s, [0.467, 0.019, 0.83, 0.981]->[0.444, 0.019, 0.826, 0.986]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1641.0s, [0.491, 0.042, 0.85, 0.975]: stand, carry/hold (an object)."]}]}, {"video": "aDEYi1OG0vU.mkv", "clip": [1661, 1704], "conversations": [{"from": "human", "timestamps": 1691.0, "value": "For the person at bbox [0.446, 0.169, 0.777, 0.979], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.446, 0.169, 0.777, 0.979]: sit, ride (e.g., a bike, a car, a horse).", "From 1689.0s to 1690.0s, [0.525, 0.065, 0.859, 0.985]->[0.528, 0.134, 0.899, 0.991]: sit, ride (e.g., a bike, a car, a horse).", "At 1688.0s, [0.505, 0.093, 0.853, 0.99]: sit, carry/hold (an object), ride (e.g., a bike, a car, a horse).", "From 1677.0s to 1687.0s, [0.659, 0.581, 0.688, 0.689]->[0.64, 0.579, 0.695, 0.723]->[0.64, 0.517, 0.694, 0.687]->[0.624, 0.453, 0.672, 0.649]->[0.518, 0.42, 0.576, 0.657]->[0.513, 0.45, 0.568, 0.694]->[0.569, 0.089, 0.903, 1.0]->[0.576, 0.144, 0.938, 0.985]->[0.561, 0.123, 0.923, 0.995]->[0.6, 0.056, 0.963, 0.997]->[0.558, 0.096, 0.91, 0.992]: sit, ride (e.g., a 
bike, a car, a horse)."]}, {"from": "human", "timestamps": 1704.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.357, 0.272, 0.518, 0.865]: carry/hold (an object), walk; [0.53, 0.458, 0.571, 0.725]: watch (a person), walk."}]}, {"video": "aMYcLyh9OhU.mkv", "clip": [912, 1018], "conversations": [{"from": "human", "timestamps": 942.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.183, 0.153, 0.682, 0.868]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1018.0, "value": "For the person at bbox [0.229, 0.38, 0.843, 0.8], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.229, 0.38, 0.843, 0.8]: lie/sleep.", "From 1016.0s to 1017.0s, [0.308, 0.349, 0.84, 0.755]->[0.298, 0.355, 0.833, 0.769]: lie/sleep.", "At 1015.0s, [0.288, 0.336, 0.854, 0.787]: sit.", "At 1014.0s, [0.348, 0.364, 0.875, 0.769]: touch (an object), lie/sleep.", "At 1013.0s, [0.361, 0.359, 0.91, 0.755]: lie/sleep.", "At 1012.0s, [0.393, 0.362, 0.933, 0.761]: listen to (a person), talk to (e.g., self, a person, a group), lie/sleep.", "At 1011.0s, [0.437, 0.364, 0.918, 0.757]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1010.0s, [0.434, 0.39, 0.935, 0.783]: sit, touch (an object), listen to (a person), watch (a person).", "At 1009.0s, [0.464, 0.41, 0.975, 0.82]: listen to (a person), lie/sleep.", "At 1008.0s, [0.462, 0.42, 0.969, 0.828]: sit, listen to (a person).", "From 1005.0s to 1007.0s, [0.496, 0.41, 0.96, 0.815]->[0.511, 0.415, 0.957, 0.812]->[0.492, 0.432, 0.981, 0.832]: sit, listen to (a person), watch (a person).", "From 1003.0s to 1004.0s, [0.485, 0.408, 0.964, 0.793]->[0.463, 0.421, 0.953, 0.81]: listen to (a person), talk to (e.g., self, a person, a group), lie/sleep, watch (a person).", "At 1002.0s, [0.473, 0.407, 0.973, 0.818]: listen to (a person), lie/sleep, watch (a person).", "From 999.0s to 1001.0s, [0.513, 0.418, 0.972, 0.786]->[0.503, 0.421, 0.975, 0.782]->[0.486, 0.402, 0.981, 0.81]: sit, listen to (a person), watch (a person)."]}]}, {"video": "aMYcLyh9OhU.mkv", "clip": [997, 1031], "conversations": [{"from": "human", "timestamps": 1027.0, "value": "Describe the actions of the person at bbox [0.289, 0.322, 0.397, 0.624] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.289, 0.322, 0.397, 0.624]: sit, ride (e.g., a bike, a car, a horse).", "From 1025.0s to 1026.0s, [0.594, 0.409, 0.652, 0.575]->[0.459, 0.355, 0.546, 0.569]: sit, ride (e.g., a bike, a car, a horse)."]}, {"from": "human", "timestamps": 1031.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.182, 0.261, 0.269, 0.463]: carry/hold (an object), sit, ride (e.g., a bike, a car, a horse); [0.474, 0.306, 0.578, 0.703]: watch (a person), give/serve (an object) to (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.6, 0.315, 0.684, 0.71]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.678, 0.408, 0.74, 0.697]: listen to (a person), stand, watch (a person)."}]}, {"video": "aMYcLyh9OhU.mkv", "clip": [1034, 1116], "conversations": [{"from": "human", "timestamps": 1064.0, "value": "What actions is each person performing, and where are they positioned in the 
current frame?"}, {"from": "gpt", "value": "At now, [0.075, 0.257, 0.5, 0.922]: watch (a person), give/serve (an object) to (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.378, 0.159, 0.726, 0.796]: carry/hold (an object), listen to (a person), walk; [0.613, 0.347, 0.82, 0.772]: ride (e.g., a bike, a car, a horse), sit; [0.687, 0.141, 0.984, 0.878]: carry/hold (an object), read, listen to (a person), walk."}, {"from": "human", "timestamps": 1116.0, "value": "For the person at bbox [0.126, 0.175, 0.671, 0.863], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.126, 0.175, 0.671, 0.863]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1114.0s to 1115.0s, [0.483, 0.238, 0.647, 0.895]->[0.498, 0.261, 0.672, 0.864]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1113.0s, [0.471, 0.268, 0.672, 0.884]: stand, carry/hold (an object), touch (an object), listen to (a person)."]}]}, {"video": "aMYcLyh9OhU.mkv", "clip": [1053, 1092], "conversations": [{"from": "human", "timestamps": 1083.0, "value": "What actions did the person at bbox [0.357, 0.272, 0.851, 0.869] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.357, 0.272, 0.851, 0.869]: stand, talk to (e.g., self, a person, a group).", "At 1082.0s, [0.393, 0.294, 0.889, 0.871]: stand, talk to (e.g., self, a person, a group).", "At 1081.0s, [0.429, 0.262, 0.879, 0.89]: stand, sing to (e.g., self, a person, a group), talk to (e.g., self, a person, a group).", "At 1080.0s, [0.293, 0.261, 0.926, 0.894]: carry/hold (an object), dance, sing to (e.g., self, a person, a group), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1092.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.034, 0.236, 0.257, 0.869]: carry/hold (an object), stand, listen to (a person); [0.32, 0.277, 0.397, 0.652]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.336, 0.244, 0.512, 0.884]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.64, 0.394, 0.807, 0.656]: listen to (a person), sit, touch (an object); [0.764, 0.421, 0.826, 0.505]: sit, eat; [0.821, 0.335, 0.907, 0.479]: listen to (a person), sit, watch (a person); [0.846, 0.359, 0.978, 0.711]: watch (a person), sit, eat, touch (an object)."}]}, {"video": "aMYcLyh9OhU.mkv", "clip": [1154, 1192], "conversations": [{"from": "human", "timestamps": 1184.0, "value": "Describe the actions of the person at bbox [0.66, 0.129, 0.991, 0.875] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.66, 0.129, 0.991, 0.875]: stand, watch (a person).", "At 1183.0s, [0.296, 0.047, 0.949, 0.945]: walk, watch (a person).", "At 1182.0s, [0.367, 0.189, 0.641, 0.861]: walk, talk to (e.g., self, a person, a group).", "At 1181.0s, [0.335, 0.176, 0.552, 0.849]: stand, walk, talk to (e.g., self, a person, a group).", "At 1180.0s, [0.233, 0.181, 0.429, 0.878]: walk, talk to (e.g., self, a person, a group).", "At 1179.0s, [0.169, 0.177, 0.421, 0.883]: walk.", "From 1175.0s to 1178.0s, [0.153, 0.212, 0.34, 0.892]->[0.171, 0.212, 0.347, 0.886]->[0.185, 0.191, 0.365, 0.883]->[0.19, 0.187, 0.385, 0.888]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1173.0s to 1174.0s, [0.081, 0.165, 0.302, 0.875]->[0.124, 
0.182, 0.342, 0.896]: stand, watch (a person).", "At 1172.0s, [0.099, 0.173, 0.257, 0.817]: walk."]}, {"from": "human", "timestamps": 1192.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.279, 0.134, 0.988, 0.892]: watch (a person), sit."}]}, {"video": "aMYcLyh9OhU.mkv", "clip": [1171, 1211], "conversations": [{"from": "human", "timestamps": 1201.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.047, 0.153, 0.639, 0.872]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.749, 0.114, 0.99, 0.874]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1211.0, "value": "What actions did the person at bbox [0.278, 0.08, 0.95, 0.924] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.278, 0.08, 0.95, 0.924]: stand, carry/hold (an object).", "From 1208.0s to 1210.0s, [0.407, 0.218, 0.625, 0.884]->[0.446, 0.218, 0.62, 0.883]->[0.416, 0.231, 0.635, 0.89]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "aMYcLyh9OhU.mkv", "clip": [1234, 1272], "conversations": [{"from": "human", "timestamps": 1264.0, "value": "Describe the actions of the person at bbox [0.532, 0.15, 0.913, 0.873] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.532, 0.15, 0.913, 0.873]: stand.", "At 1263.0s, [0.537, 0.141, 0.909, 0.884]: stand.", "From 1261.0s to 1262.0s, [0.541, 0.152, 0.91, 0.888]->[0.489, 0.14, 0.911, 0.885]: stand, talk to (e.g., self, a person, a group).", "From 1259.0s to 1260.0s, [0.54, 0.134, 0.922, 0.891]->[0.551, 0.128, 0.943, 0.904]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1257.0s to 1258.0s, [0.517, 0.138, 0.934, 0.892]->[0.5, 0.148, 0.941, 0.896]: stand, talk to (e.g., self, a person, a group).", "From 1250.0s to 1256.0s, [0.584, 0.169, 0.993, 0.923]->[0.587, 0.188, 0.997, 0.907]->[0.539, 0.201, 0.989, 0.913]->[0.529, 0.176, 0.931, 0.9]->[0.498, 0.163, 0.907, 0.898]->[0.514, 0.147, 0.9, 0.893]->[0.525, 0.14, 0.93, 0.907]: stand, listen to (a person).", "At 1249.0s, [0.583, 0.139, 0.994, 0.935]: stand.", "From 1246.0s to 1248.0s, [0.551, 0.141, 0.985, 0.94]->[0.546, 0.146, 0.989, 0.942]->[0.577, 0.153, 0.988, 0.925]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1245.0s, [0.537, 0.214, 0.724, 0.888]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1241.0s to 1244.0s, [0.503, 0.275, 0.771, 0.898]->[0.551, 0.268, 0.738, 0.896]->[0.544, 0.286, 0.735, 0.893]->[0.537, 0.244, 0.728, 0.888]: stand, touch (an object), listen to (a person).", "From 1239.0s to 1240.0s, [0.479, 0.28, 0.735, 0.894]->[0.512, 0.27, 0.743, 0.894]: walk, touch (an object), listen to (a person).", "From 1237.0s to 1238.0s, [0.502, 0.312, 0.662, 0.878]->[0.475, 0.295, 0.641, 0.877]: walk, listen to (a person).", "At 1236.0s, [0.494, 0.285, 0.672, 0.88]: walk, touch (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 1233.0s to 1235.0s, [0.478, 0.26, 0.646, 0.77]->[0.487, 0.269, 0.64, 0.803]->[0.502, 0.297, 0.672, 0.861]: walk, touch (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1272.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": 
"gpt", "value": "In the current frame, [0.03, 0.174, 0.403, 0.883]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.562, 0.133, 0.95, 0.88]: listen to (a person), stand."}]}, {"video": "aMYcLyh9OhU.mkv", "clip": [1288, 1344], "conversations": [{"from": "human", "timestamps": 1318.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.211, 0.129, 0.877, 0.887]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.77, 0.07, 0.994, 0.894]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1344.0, "value": "What actions is the person in bbox [0.407, 0.216, 0.565, 0.629] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.407, 0.216, 0.565, 0.629]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1335.0s to 1343.0s, [0.409, 0.279, 0.557, 0.615]->[0.407, 0.253, 0.557, 0.615]->[0.401, 0.241, 0.56, 0.621]->[0.404, 0.225, 0.561, 0.626]->[0.407, 0.243, 0.56, 0.636]->[0.405, 0.259, 0.56, 0.648]->[0.407, 0.259, 0.562, 0.66]->[0.411, 0.261, 0.555, 0.655]->[0.416, 0.242, 0.566, 0.65]: stand, talk to (e.g., self, a person, a group).", "At 1334.0s, [0.409, 0.303, 0.557, 0.608]: stand.", "At 1333.0s, [0.408, 0.295, 0.552, 0.6]: sit.", "From 1330.0s to 1332.0s, [0.419, 0.399, 0.559, 0.599]->[0.412, 0.309, 0.559, 0.597]->[0.409, 0.298, 0.552, 0.63]: stand."]}]}, {"video": "aMYcLyh9OhU.mkv", "clip": [1303, 1338], "conversations": [{"from": "human", "timestamps": 1333.0, "value": "For the person at bbox [0.408, 0.295, 0.552, 0.6], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.408, 0.295, 0.552, 0.6]: sit.", "From 1330.0s to 1332.0s, [0.419, 0.399, 0.559, 0.599]->[0.412, 0.309, 0.559, 0.597]->[0.409, 0.298, 0.552, 0.63]: stand."]}, {"from": "human", "timestamps": 1338.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.404, 0.225, 0.561, 0.626]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "aMYcLyh9OhU.mkv", "clip": [1391, 1472], "conversations": [{"from": "human", "timestamps": 1421.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.705, 0.319, 0.949, 0.79]: carry/hold (an object), bend/bow (at the waist)."}, {"from": "human", "timestamps": 1472.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.282, 0.441, 0.512, 0.738]?"}, {"from": "gpt", "value": ["In the current frame, [0.282, 0.441, 0.512, 0.738]: lie/sleep.", "From 1461.0s to 1471.0s, [0.262, 0.291, 0.523, 0.613]->[0.227, 0.301, 0.48, 0.61]->[0.231, 0.298, 0.479, 0.602]->[0.275, 0.287, 0.51, 0.581]->[0.311, 0.284, 0.528, 0.57]->[0.31, 0.284, 0.544, 0.572]->[0.304, 0.291, 0.534, 0.57]->[0.29, 0.288, 0.533, 0.575]->[0.269, 0.288, 0.515, 0.576]->[0.27, 0.321, 0.514, 0.614]->[0.264, 0.375, 0.491, 0.667]: lie/sleep."]}]}, {"video": "aMYcLyh9OhU.mkv", "clip": [1413, 1468], "conversations": [{"from": "human", "timestamps": 1443.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.349, 0.101, 0.967, 0.93]: carry/hold (an object), talk to (e.g., self, a person, a group), bend/bow (at the waist)."}, {"from": "human", "timestamps": 1468.0, "value": "Describe the actions of the person 
at bbox [0.29, 0.288, 0.533, 0.575] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.29, 0.288, 0.533, 0.575]: lie/sleep.", "From 1461.0s to 1467.0s, [0.262, 0.291, 0.523, 0.613]->[0.227, 0.301, 0.48, 0.61]->[0.231, 0.298, 0.479, 0.602]->[0.275, 0.287, 0.51, 0.581]->[0.311, 0.284, 0.528, 0.57]->[0.31, 0.284, 0.544, 0.572]->[0.304, 0.291, 0.534, 0.57]: lie/sleep."]}]}, {"video": "aMYcLyh9OhU.mkv", "clip": [1472, 1522], "conversations": [{"from": "human", "timestamps": 1502.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.096, 0.176, 0.738, 0.871]: listen to (a person), sit."}, {"from": "human", "timestamps": 1522.0, "value": "For the person at bbox [0.362, 0.253, 0.521, 0.775], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.362, 0.253, 0.521, 0.775]: bend/bow (at the waist), grab (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1520.0s to 1521.0s, [0.326, 0.278, 0.507, 0.757]->[0.283, 0.262, 0.51, 0.765]: bend/bow (at the waist), grab (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1519.0s, [0.317, 0.269, 0.528, 0.756]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person).", "From 1516.0s to 1518.0s, [0.342, 0.263, 0.519, 0.763]->[0.324, 0.256, 0.511, 0.777]->[0.332, 0.295, 0.51, 0.766]: bend/bow (at the waist), grab (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1514.0s to 1515.0s, [0.152, 0.191, 0.753, 0.891]->[0.368, 0.187, 0.581, 0.743]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "aMYcLyh9OhU.mkv", "clip": [1502, 1559], "conversations": [{"from": "human", "timestamps": 1532.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.188, 0.151, 0.799, 0.875]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1559.0, "value": "For the person at bbox [0.611, 0.289, 0.803, 0.657], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.611, 0.289, 0.803, 0.657]: bend/bow (at the waist), touch (an object).", "At 1558.0s, [0.562, 0.348, 0.807, 0.719]: bend/bow (at the waist), touch (an object).", "At 1557.0s, [0.574, 0.363, 0.731, 0.544]: bend/bow (at the waist), listen to (a person)."]}]}, {"video": "aMYcLyh9OhU.mkv", "clip": [1539, 1579], "conversations": [{"from": "human", "timestamps": 1569.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.204, 0.366, 0.764]: sit, eat; [0.231, 0.411, 0.449, 0.659]: sit, eat; [0.389, 0.365, 0.502, 0.643]: walk; [0.547, 0.27, 0.981, 0.746]: watch (a person), sit, eat; [0.606, 0.433, 0.725, 0.643]: sit, eat, drink."}, {"from": "human", "timestamps": 1579.0, "value": "What actions is the person in bbox [0.468, 0.43, 0.545, 0.619] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.468, 0.43, 0.545, 0.619]: stand, listen to (a person), watch (a person).", "At 1578.0s, [0.502, 0.406, 0.607, 0.642]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1577.0s, [0.523, 0.427, 0.624, 0.667]: walk, listen to (a person), watch (a person)."]}]}, 
{"video": "aMYcLyh9OhU.mkv", "clip": [1643, 1676], "conversations": [{"from": "human", "timestamps": 1673.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.588, 0.282, 0.945, 0.806]: carry/hold (an object), stand, cook, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1676.0, "value": "Describe the actions of the person at bbox [0.535, 0.259, 0.806, 0.631] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.535, 0.259, 0.806, 0.631]: stand, carry/hold (an object).", "At 1675.0s, [0.626, 0.295, 0.808, 0.666]: stand, cook, talk to (e.g., self, a person, a group).", "At 1674.0s, [0.606, 0.29, 0.844, 0.73]: stand, carry/hold (an object), cook, listen to (a person), talk to (e.g., self, a person, a group).", "From 1672.0s to 1673.0s, [0.628, 0.294, 0.875, 0.8]->[0.588, 0.282, 0.945, 0.806]: stand, carry/hold (an object), cook, talk to (e.g., self, a person, a group).", "At 1671.0s, [0.607, 0.285, 0.938, 0.701]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1670.0s, [0.525, 0.24, 0.694, 0.86]: stand, carry/hold (an object).", "At 1669.0s, [0.414, 0.283, 0.648, 0.792]: stand, carry/hold (an object), cook, listen to (a person).", "From 1667.0s to 1668.0s, [0.326, 0.217, 0.617, 0.621]->[0.362, 0.23, 0.609, 0.631]: stand, carry/hold (an object).", "From 1665.0s to 1666.0s, [0.319, 0.188, 0.676, 0.666]->[0.324, 0.205, 0.647, 0.666]: stand, cook, talk to (e.g., self, a person, a group)."]}]}, {"video": "aMYcLyh9OhU.mkv", "clip": [1614, 1703], "conversations": [{"from": "human", "timestamps": 1644.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.421, 0.241, 0.604, 0.881]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.593, 0.231, 0.814, 0.886]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1703.0, "value": "Describe the actions of the person at bbox [0.495, 0.261, 0.728, 0.644] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.495, 0.261, 0.728, 0.644]: stand, carry/hold (an object), watch (a person).", "At 1702.0s, [0.58, 0.234, 0.75, 0.66]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1701.0s, [0.321, 0.181, 0.933, 0.881]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "aMYcLyh9OhU.mkv", "clip": [1655, 1688], "conversations": [{"from": "human", "timestamps": 1685.0, "value": "For the person at bbox [0.45, 0.197, 0.964, 0.897], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.45, 0.197, 0.964, 0.897]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1681.0s to 1684.0s, [0.548, 0.27, 0.829, 0.873]->[0.395, 0.204, 0.899, 0.901]->[0.307, 0.284, 0.891, 0.899]->[0.418, 0.183, 0.777, 0.87]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1688.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.153, 0.206, 0.644, 0.873]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "aRbLw-dU2XY.mp4", "clip": [887, 928], "conversations": [{"from": "human", 
"timestamps": 917.0, "value": "For the person at bbox [0.0, 0.015, 0.73, 0.948], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.015, 0.73, 0.948]: stand.", "At 916.0s, [0.0, 0.067, 0.748, 0.978]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 915.0s, [0.0, 0.08, 0.735, 0.98]: stand, talk to (e.g., self, a person, a group).", "At 914.0s, [0.0, 0.069, 0.739, 0.978]: sit, stand, talk to (e.g., self, a person, a group).", "From 912.0s to 913.0s, [0.019, 0.085, 0.752, 0.967]->[0.0, 0.071, 0.74, 0.977]: stand, talk to (e.g., self, a person, a group).", "From 910.0s to 911.0s, [0.0, 0.066, 0.773, 0.964]->[0.001, 0.103, 0.767, 0.957]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 909.0s, [0.0, 0.019, 0.832, 0.945]: stand, listen to (a person), watch (a person).", "At 908.0s, [0.001, 0.03, 0.943, 0.97]: sit, stand, watch (a person)."]}, {"from": "human", "timestamps": 928.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.0, 0.828, 0.977]: stand."}]}, {"video": "aRbLw-dU2XY.mp4", "clip": [883, 1018], "conversations": [{"from": "human", "timestamps": 913.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.071, 0.74, 0.977]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1018.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.001, 0.031, 0.674, 0.967]?"}, {"from": "gpt", "value": ["At Now, [0.001, 0.031, 0.674, 0.967]: stand, carry/hold (an object).", "At 1017.0s, [0.0, 0.045, 0.685, 0.962]: stand, carry/hold (an object)."]}]}, {"video": "aRbLw-dU2XY.mp4", "clip": [1018, 1050], "conversations": [{"from": "human", "timestamps": 1048.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.36, 0.157, 0.866, 0.957]: carry/hold (an object), crouch/kneel."}, {"from": "human", "timestamps": 1050.0, "value": "What actions is the person in bbox [0.339, 0.139, 0.859, 0.971] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.339, 0.139, 0.859, 0.971]: carry/hold (an object), crouch/kneel.", "At 1049.0s, [0.348, 0.162, 0.867, 0.966]: sit, carry/hold (an object), crouch/kneel.", "At 1048.0s, [0.36, 0.157, 0.866, 0.957]: carry/hold (an object), crouch/kneel."]}]}, {"video": "aRbLw-dU2XY.mp4", "clip": [1015, 1138], "conversations": [{"from": "human", "timestamps": 1045.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.024, 0.578, 0.976]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1138.0, "value": "What actions is the person in bbox [0.116, 0.057, 0.945, 0.953] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.116, 0.057, 0.945, 0.953]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1126.0s to 1137.0s, [0.787, 0.267, 1.0, 0.952]->[0.452, 0.192, 0.82, 0.927]->[0.421, 0.152, 0.739, 0.888]->[0.41, 0.157, 0.733, 0.892]->[0.371, 0.136, 0.729, 0.87]->[0.442, 0.156, 0.728, 0.874]->[0.382, 0.151, 0.684, 0.915]->[0.437, 0.118, 0.702, 0.955]->[0.441, 0.03, 0.776, 
0.96]->[0.367, 0.008, 0.84, 0.945]->[0.232, 0.038, 0.874, 0.948]->[0.028, 0.063, 0.92, 0.987]: stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "aRbLw-dU2XY.mp4", "clip": [1061, 1122], "conversations": [{"from": "human", "timestamps": 1091.0, "value": "For the person at bbox [0.073, 0.02, 0.979, 0.973], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.073, 0.02, 0.979, 0.973]: stand.", "At 1090.0s, [0.025, 0.031, 0.971, 0.963]: stand, listen to (a person).", "At 1089.0s, [0.314, 0.054, 0.986, 0.98]: stand.", "At 1088.0s, [0.229, 0.039, 0.99, 0.975]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1122.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.067, 0.211, 0.12, 0.372]: carry/hold (an object), walk."}]}, {"video": "aRbLw-dU2XY.mp4", "clip": [1146, 1184], "conversations": [{"from": "human", "timestamps": 1176.0, "value": "Describe the actions of the person at bbox [0.0, 0.094, 0.568, 0.986] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.0, 0.094, 0.568, 0.986]: stand, talk to (e.g., self, a person, a group).", "At 1175.0s, [0.343, 0.042, 1.0, 0.97]: stand.", "At 1174.0s, [0.002, 0.075, 0.517, 0.968]: stand, carry/hold (an object), watch (a person).", "From 1166.0s to 1173.0s, [0.371, 0.293, 0.466, 0.683]->[0.442, 0.289, 0.536, 0.683]->[0.481, 0.286, 0.591, 0.702]->[0.474, 0.252, 0.642, 0.708]->[0.455, 0.245, 0.611, 0.744]->[0.43, 0.253, 0.599, 0.776]->[0.436, 0.238, 0.622, 0.791]->[0.447, 0.233, 0.635, 0.831]: walk, carry/hold (an object).", "At 1165.0s, [0.086, 0.186, 0.712, 0.977]: walk, talk to (e.g., self, a person, a group).", "At 1164.0s, [0.0, 0.074, 0.534, 0.987]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 1160.0s to 1163.0s, [0.009, 0.173, 0.585, 0.971]->[0.043, 0.075, 0.639, 0.989]->[0.0, 0.074, 0.533, 0.971]->[0.0, 0.051, 0.61, 0.985]: walk, talk to (e.g., self, a person, a group).", "From 1156.0s to 1159.0s, [0.013, 0.173, 0.666, 0.962]->[0.038, 0.205, 0.684, 0.962]->[0.012, 0.15, 0.671, 0.965]->[0.032, 0.078, 0.613, 0.971]: walk, carry/hold (an object).", "At 1155.0s, [0.675, 0.485, 0.964, 0.954]: walk, listen to (a person)."]}, {"from": "human", "timestamps": 1184.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.215, 0.255, 0.408, 0.857]: carry/hold (an object), stand; [0.33, 0.248, 0.474, 0.795]: stand; [0.64, 0.275, 0.758, 0.8]: carry/hold (an object), stand, watch (a person), listen to (a person)."}]}, {"video": "aRbLw-dU2XY.mp4", "clip": [1116, 1224], "conversations": [{"from": "human", "timestamps": 1146.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.737, 0.269, 1.0, 0.968]: carry/hold (an object), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1224.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.424, 0.027, 1.0, 0.983]?"}, {"from": "gpt", "value": ["In the current frame, [0.424, 0.027, 1.0, 0.983]: stand, listen to (a person).", "At 1223.0s, [0.411, 0.053, 1.0, 0.989]: stand, talk to (e.g., self, a person, a group).", "At 1222.0s, [0.438, 0.025, 1.0, 0.965]: stand, talk to (e.g., self, a person, a group), watch (a 
person).", "At 1221.0s, [0.0, 0.042, 0.98, 0.975]: stand, watch (a person).", "At 1220.0s, [0.004, 0.063, 0.969, 0.971]: stand, listen to (a person), watch (a person).", "From 1218.0s to 1219.0s, [0.39, 0.025, 1.0, 0.961]->[0.393, 0.011, 1.0, 0.985]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1217.0s, [0.391, 0.04, 1.0, 0.988]: stand, talk to (e.g., self, a person, a group).", "From 1215.0s to 1216.0s, [0.374, 0.045, 1.0, 0.954]->[0.399, 0.032, 1.0, 0.961]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1214.0s, [0.0, 0.08, 0.903, 0.959]: stand, listen to (a person), watch (a person).", "From 1212.0s to 1213.0s, [0.378, 0.023, 1.0, 0.99]->[0.352, 0.037, 1.0, 0.98]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1210.0s to 1211.0s, [0.033, 0.068, 0.95, 0.968]->[0.0, 0.081, 0.905, 0.965]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "aRbLw-dU2XY.mp4", "clip": [1171, 1243], "conversations": [{"from": "human", "timestamps": 1201.0, "value": "What actions is the person in bbox [0.474, 0.058, 1.0, 0.95] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.474, 0.058, 1.0, 0.95]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1200.0s, [0.731, 0.157, 0.907, 0.922]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1199.0s, [0.735, 0.149, 0.892, 0.911]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1243.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.129, 0.811, 0.969]: stand; [0.733, 0.033, 0.998, 0.902]: listen to (a person), stand."}]}, {"video": "aRbLw-dU2XY.mp4", "clip": [1244, 1315], "conversations": [{"from": "human", "timestamps": 1274.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.008, 0.905, 0.972]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1315.0, "value": "Describe the actions of the person at bbox [0.0, 0.017, 0.936, 0.987] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.0, 0.017, 0.936, 0.987]: stand, talk to (e.g., self, a person, a group).", "From 1313.0s to 1314.0s, [0.067, 0.033, 0.978, 0.971]->[0.0, 0.031, 0.943, 0.972]: stand, talk to (e.g., self, a person, a group).", "At 1312.0s, [0.408, 0.038, 1.0, 0.945]: stand.", "At 1311.0s, [0.411, 0.04, 1.0, 0.961]: stand, listen to (a person)."]}]}, {"video": "aRbLw-dU2XY.mp4", "clip": [1305, 1374], "conversations": [{"from": "human", "timestamps": 1335.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.201, 0.315, 0.317, 0.903]: carry/hold (an object), stand, listen to (a person); [0.495, 0.331, 0.632, 0.846]: listen to (a person), stand, watch (a person); [0.632, 0.179, 0.804, 0.928]: carry/hold (an object), stand, listen to (a person)."}, {"from": "human", "timestamps": 1374.0, "value": "For the person at bbox [0.561, 0.294, 0.732, 0.801], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.561, 0.294, 0.732, 0.801]: stand, talk to (e.g., self, a person, a 
group), watch (a person).", "From 1372.0s to 1373.0s, [0.522, 0.283, 0.675, 0.808]->[0.532, 0.284, 0.733, 0.822]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1371.0s, [0.525, 0.285, 0.665, 0.808]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "aRbLw-dU2XY.mp4", "clip": [1382, 1418], "conversations": [{"from": "human", "timestamps": 1412.0, "value": "For the person at bbox [0.206, 0.15, 0.94, 0.983], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.206, 0.15, 0.94, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1411.0s, [0.063, 0.129, 0.632, 0.96]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1418.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.187, 0.09, 0.593, 0.96]: carry/hold (an object), stand; [0.578, 0.293, 0.906, 0.95]: watch (a person), stand."}]}, {"video": "aRbLw-dU2XY.mp4", "clip": [1366, 1479], "conversations": [{"from": "human", "timestamps": 1396.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.017, 0.455, 0.98]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1479.0, "value": "What actions is the person in bbox [0.402, 0.04, 1.0, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.402, 0.04, 1.0, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1478.0s, [0.318, 0.045, 1.0, 0.97]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "aRbLw-dU2XY.mp4", "clip": [1411, 1495], "conversations": [{"from": "human", "timestamps": 1441.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.063, 0.559, 0.966]?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.063, 0.559, 0.966]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1439.0s to 1440.0s, [0.0, 0.1, 0.541, 0.991]->[0.02, 0.057, 0.594, 0.968]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1495.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.251, 0.396, 0.497, 0.954]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.582, 0.393, 0.847, 0.965]: listen to (a person), stand, watch (a person)."}]}, {"video": "aRbLw-dU2XY.mp4", "clip": [1511, 1543], "conversations": [{"from": "human", "timestamps": 1541.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.03, 0.241, 0.725, 0.975]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.618, 0.076, 0.991, 0.835]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1543.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.713, 0.071, 1.0, 0.753]?"}, {"from": "gpt", "value": ["In the current frame, [0.713, 0.071, 1.0, 0.753]: stand, listen to (a person), watch (a person).", "At 1542.0s, [0.631, 0.072, 1.0, 0.767]: stand, listen to (a person).", "From 1540.0s to 1541.0s, [0.618, 0.04, 0.97, 0.946]->[0.618, 0.076, 0.991, 
0.835]: stand, listen to (a person), watch (a person).", "At 1539.0s, [0.659, 0.068, 0.997, 0.754]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1538.0s, [0.617, 0.036, 0.992, 0.818]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "aRbLw-dU2XY.mp4", "clip": [1557, 1614], "conversations": [{"from": "human", "timestamps": 1587.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.082, 0.557, 1.0]?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.082, 0.557, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1586.0s, [0.135, 0.437, 0.23, 0.752]: stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1614.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.517, 0.095, 1.0, 0.985]: carry/hold (an object), stand."}]}, {"video": "aRbLw-dU2XY.mp4", "clip": [1711, 1783], "conversations": [{"from": "human", "timestamps": 1741.0, "value": "Describe the actions of the person at bbox [0.202, 0.236, 0.4, 0.685] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.202, 0.236, 0.4, 0.685]: bend/bow (at the waist), carry/hold (an object), listen to (a person).", "At 1740.0s, [0.214, 0.313, 0.379, 0.708]: bend/bow (at the waist), carry/hold (an object)."]}, {"from": "human", "timestamps": 1783.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.267, 0.245, 0.446, 0.714]: carry/hold (an object), bend/bow (at the waist)."}]}, {"video": "ax3q-RkVIt4.mp4", "clip": [920, 987], "conversations": [{"from": "human", "timestamps": 950.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.567, 0.666, 0.747, 0.974]: watch (e.g., TV), sit."}, {"from": "human", "timestamps": 987.0, "value": "Describe the actions of the person at bbox [0.566, 0.669, 0.749, 0.988] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.566, 0.669, 0.749, 0.988]: sit, watch (e.g., TV).", "At 986.0s, [0.568, 0.669, 0.746, 0.991]: sit, watch (e.g., TV)."]}]}, {"video": "ax3q-RkVIt4.mp4", "clip": [1061, 1099], "conversations": [{"from": "human", "timestamps": 1091.0, "value": "For the person at bbox [0.558, 0.665, 0.749, 0.993], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.558, 0.665, 0.749, 0.993]: sit, watch (e.g., TV).", "At 1089.0s, [0.559, 0.66, 0.751, 0.993]: sit."]}, {"from": "human", "timestamps": 1099.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.565, 0.673, 0.751, 0.995]: watch (e.g., TV), sit."}]}, {"video": "ax3q-RkVIt4.mp4", "clip": [1133, 1169], "conversations": [{"from": "human", "timestamps": 1163.0, "value": "What actions is the person in bbox [0.552, 0.663, 0.75, 0.993] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.552, 0.663, 0.75, 0.993]: sit, watch (e.g., TV).", "From 1161.0s to 1162.0s, [0.565, 0.663, 0.749, 0.995]->[0.563, 0.664, 0.748, 0.993]: sit.", "From 1157.0s to 1159.0s, [0.567, 0.665, 0.748, 0.996]->[0.56, 0.665, 0.746, 0.995]->[0.562, 0.666, 0.747, 0.994]: sit, watch (e.g., TV)."]}, {"from": "human", "timestamps": 1169.0, "value": "Where is each 
person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.56, 0.664, 0.748, 0.996]: watch (e.g., TV), sit."}]}, {"video": "ax3q-RkVIt4.mp4", "clip": [1122, 1214], "conversations": [{"from": "human", "timestamps": 1152.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.56, 0.671, 0.748, 0.996]: sit."}, {"from": "human", "timestamps": 1214.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.563, 0.667, 0.753, 0.992]?"}, {"from": "gpt", "value": ["At Now, [0.563, 0.667, 0.753, 0.992]: sit, watch (e.g., TV).", "At 1213.0s, [0.556, 0.662, 0.749, 1.0]: sit, watch (e.g., TV).", "At 1212.0s, [0.562, 0.661, 0.75, 0.995]: sit."]}]}, {"video": "ax3q-RkVIt4.mp4", "clip": [1443, 1481], "conversations": [{"from": "human", "timestamps": 1473.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.309, 0.092, 0.876, 0.934]: listen to (a person), stand."}, {"from": "human", "timestamps": 1481.0, "value": "What actions did the person at bbox [0.319, 0.106, 0.9, 0.931] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.319, 0.106, 0.9, 0.931]: stand.", "From 1479.0s to 1480.0s, [0.275, 0.108, 0.789, 0.947]->[0.318, 0.102, 0.868, 0.936]: stand.", "From 1474.0s to 1478.0s, [0.322, 0.121, 0.89, 0.945]->[0.296, 0.098, 0.81, 0.942]->[0.331, 0.098, 0.799, 0.931]->[0.313, 0.096, 0.803, 0.931]->[0.358, 0.092, 0.898, 0.937]: stand, talk to (e.g., self, a person, a group).", "At 1473.0s, [0.309, 0.092, 0.876, 0.934]: stand, listen to (a person).", "From 1471.0s to 1472.0s, [0.349, 0.109, 0.894, 0.942]->[0.312, 0.11, 0.809, 0.934]: stand.", "From 1459.0s to 1470.0s, [0.322, 0.097, 0.852, 0.944]->[0.31, 0.12, 0.866, 0.942]->[0.318, 0.111, 0.841, 0.919]->[0.294, 0.077, 0.929, 0.937]->[0.315, 0.118, 0.855, 0.943]->[0.315, 0.118, 0.9, 0.947]->[0.317, 0.1, 0.876, 0.935]->[0.314, 0.108, 0.894, 0.939]->[0.32, 0.119, 0.82, 0.934]->[0.324, 0.112, 0.861, 0.943]->[0.317, 0.121, 0.848, 0.952]->[0.317, 0.113, 0.823, 0.938]: stand, talk to (e.g., self, a person, a group).", "From 1456.0s to 1458.0s, [0.326, 0.102, 0.867, 0.945]->[0.395, 0.122, 0.866, 0.946]->[0.304, 0.121, 0.908, 0.938]: stand.", "From 1450.0s to 1455.0s, [0.347, 0.167, 0.735, 0.951]->[0.38, 0.162, 0.733, 0.934]->[0.373, 0.131, 0.729, 0.938]->[0.364, 0.128, 0.726, 0.94]->[0.335, 0.12, 0.792, 0.94]->[0.302, 0.083, 0.882, 0.919]: stand, talk to (e.g., self, a person, a group).", "At 1449.0s, [0.39, 0.168, 0.734, 0.92]: stand.", "At 1448.0s, [0.317, 0.168, 0.711, 0.94]: stand, watch (a person).", "From 1442.0s to 1447.0s, [0.366, 0.138, 0.722, 0.957]->[0.358, 0.133, 0.72, 0.951]->[0.37, 0.143, 0.731, 0.929]->[0.382, 0.132, 0.719, 0.936]->[0.371, 0.136, 0.715, 0.935]->[0.373, 0.144, 0.724, 0.937]: stand.", "From 1439.0s to 1440.0s, [0.358, 0.133, 0.729, 0.939]->[0.36, 0.13, 0.719, 0.935]: stand, talk to (e.g., self, a person, a group).", "From 1434.0s to 1437.0s, [0.369, 0.059, 0.811, 0.93]->[0.364, 0.118, 0.745, 0.927]->[0.374, 0.139, 0.733, 0.938]->[0.369, 0.159, 0.761, 0.935]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "ax3q-RkVIt4.mp4", "clip": [1608, 1647], "conversations": [{"from": "human", "timestamps": 1638.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current 
frame, [0.561, 0.668, 0.745, 0.995]: sit."}, {"from": "human", "timestamps": 1647.0, "value": "What actions did the person at bbox [0.552, 0.67, 0.744, 0.998] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.552, 0.67, 0.744, 0.998]: sit.", "At 1646.0s, [0.556, 0.675, 0.744, 0.996]: sit, watch (e.g., TV)."]}]}, {"video": "b50s4AlOOKY.mkv", "clip": [898, 989], "conversations": [{"from": "human", "timestamps": 928.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.197, 0.386, 0.348, 0.975]: listen to (a person), stand, watch (a person); [0.287, 0.326, 0.683, 0.976]: carry/hold (an object), watch (a person), talk to (e.g., self, a person, a group), listen to (a person); [0.57, 0.322, 0.731, 0.962]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.681, 0.409, 0.83, 0.995]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.82, 0.289, 0.995, 0.978]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 989.0, "value": "For the person at bbox [0.425, 0.326, 0.794, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.425, 0.326, 0.794, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 988.0s, [0.431, 0.333, 0.869, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "b50s4AlOOKY.mkv", "clip": [998, 1035], "conversations": [{"from": "human", "timestamps": 1028.0, "value": "Describe the actions of the person at bbox [0.673, 0.154, 1.0, 0.959] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.673, 0.154, 1.0, 0.959]: stand, listen to (a person), watch (a person).", "From 1026.0s to 1027.0s, [0.69, 0.17, 0.992, 0.961]->[0.686, 0.175, 0.995, 0.986]: stand, listen to (a person), watch (a person).", "At 1025.0s, [0.665, 0.181, 0.996, 0.972]: stand, touch (an object), listen to (a person), watch (a person).", "At 1024.0s, [0.715, 0.092, 1.0, 0.986]: stand, listen to (a person), watch (a person).", "At 1023.0s, [0.692, 0.2, 0.968, 0.845]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1021.0s to 1022.0s, [0.478, 0.153, 0.95, 0.835]->[0.483, 0.193, 0.989, 0.798]: stand, hand shake, talk to (e.g., self, a person, a group), watch (a person).", "From 1019.0s to 1020.0s, [0.676, 0.183, 0.962, 0.938]->[0.686, 0.152, 0.992, 0.987]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1018.0s, [0.668, 0.15, 0.967, 0.991]: stand, listen to (a person), watch (a person).", "At 1017.0s, [0.685, 0.101, 0.975, 0.966]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1015.0s to 1016.0s, [0.679, 0.181, 1.0, 0.979]->[0.639, 0.167, 0.946, 0.978]: stand, listen to (a person), watch (a person).", "At 1014.0s, [0.61, 0.202, 0.95, 0.985]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1035.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.164, 0.263, 0.443, 0.82]: carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.394, 0.391, 0.635, 0.962]: listen to (a person), stand; [0.683, 0.201, 0.998, 0.988]: carry/hold (an 
object), stand, listen to (a person)."}]}, {"video": "b50s4AlOOKY.mkv", "clip": [1014, 1101], "conversations": [{"from": "human", "timestamps": 1044.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.127, 0.254, 0.386, 0.701]: listen to (a person), stand, watch (a person); [0.44, 0.387, 0.669, 0.911]: stand, talk to (e.g., self, a person, a group); [0.677, 0.182, 0.979, 0.93]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1101.0, "value": "Describe the actions of the person at bbox [0.065, 0.378, 0.25, 0.954] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.065, 0.378, 0.25, 0.954]: run/jog.", "At 1100.0s, [0.24, 0.294, 0.442, 0.755]: walk, watch (a person)."]}]}, {"video": "b50s4AlOOKY.mkv", "clip": [1131, 1201], "conversations": [{"from": "human", "timestamps": 1161.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.48, 0.209, 0.676, 0.609]: walk."}, {"from": "human", "timestamps": 1201.0, "value": "What actions is the person in bbox [0.316, 0.118, 0.917, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.316, 0.118, 0.917, 1.0]: sit, listen to (a person), watch (a person).", "At 1200.0s, [0.311, 0.111, 0.924, 0.985]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "b50s4AlOOKY.mkv", "clip": [1244, 1289], "conversations": [{"from": "human", "timestamps": 1274.0, "value": "What actions is the person in bbox [0.54, 0.455, 0.77, 0.994] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.54, 0.455, 0.77, 0.994]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1272.0s to 1273.0s, [0.597, 0.457, 0.769, 0.966]->[0.585, 0.477, 0.814, 0.986]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1270.0s to 1271.0s, [0.556, 0.346, 0.738, 0.994]->[0.553, 0.418, 0.789, 0.993]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1268.0s to 1269.0s, [0.473, 0.363, 0.627, 0.984]->[0.516, 0.361, 0.689, 0.992]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1266.0s to 1267.0s, [0.454, 0.404, 0.561, 0.815]->[0.466, 0.39, 0.593, 0.891]: walk, carry/hold (an object), listen to (a person).", "From 1264.0s to 1265.0s, [0.444, 0.409, 0.525, 0.742]->[0.437, 0.406, 0.534, 0.771]: walk, carry/hold (an object).", "At 1263.0s, [0.471, 0.419, 0.552, 0.715]: walk, carry/hold (an object), listen to (a person)."]}, {"from": "human", "timestamps": 1289.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.502, 0.431, 0.733, 0.995]: carry/hold (an object), watch (a person), talk to (e.g., self, a person, a group), walk."}]}, {"video": "b50s4AlOOKY.mkv", "clip": [1234, 1362], "conversations": [{"from": "human", "timestamps": 1264.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.358, 0.369, 0.468, 0.741]: carry/hold (an object), talk to (e.g., self, a person, a group), walk; [0.444, 0.409, 0.525, 0.742]: carry/hold (an object), walk; [0.565, 0.4, 0.646, 0.638]: walk; [0.681, 0.412, 
0.735, 0.608]: carry/hold (an object), listen to (a person), watch (a person), walk; [0.804, 0.387, 0.912, 0.713]: carry/hold (an object), watch (a person), walk."}, {"from": "human", "timestamps": 1362.0, "value": "What actions did the person at bbox [0.207, 0.017, 1.0, 0.969] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.207, 0.017, 1.0, 0.969]: hit (an object), martial art.", "At 1361.0s, [0.056, 0.106, 0.919, 0.979]: hit (an object), martial art."]}]}, {"video": "b50s4AlOOKY.mkv", "clip": [1307, 1372], "conversations": [{"from": "human", "timestamps": 1337.0, "value": "For the person at bbox [0.31, 0.484, 0.401, 0.834], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.31, 0.484, 0.401, 0.834]: walk, carry/hold (an object).", "From 1335.0s to 1336.0s, [0.324, 0.477, 0.441, 0.896]->[0.326, 0.472, 0.415, 0.859]: walk, carry/hold (an object)."]}, {"from": "human", "timestamps": 1372.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.349, 0.292, 0.467, 0.64]: listen to (a person), stand, touch (an object); [0.487, 0.235, 0.63, 0.554]: listen to (a person), stand, watch (a person)."}]}, {"video": "b50s4AlOOKY.mkv", "clip": [1488, 1607], "conversations": [{"from": "human", "timestamps": 1518.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.496, 0.149, 0.658, 0.691]: watch (a person), push (another person), walk."}, {"from": "human", "timestamps": 1607.0, "value": "What actions is the person in bbox [0.274, 0.167, 0.642, 0.861] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.274, 0.167, 0.642, 0.861]: sit.", "At 1605.0s, [0.278, 0.191, 0.665, 0.834]: sit."]}]}, {"video": "b50s4AlOOKY.mkv", "clip": [1701, 1736], "conversations": [{"from": "human", "timestamps": 1731.0, "value": "What actions is the person in bbox [0.633, 0.205, 0.795, 0.792] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.633, 0.205, 0.795, 0.792]: stand, carry/hold (an object), listen to (a person).", "From 1728.0s to 1730.0s, [0.645, 0.188, 0.819, 0.783]->[0.647, 0.183, 0.833, 0.787]->[0.645, 0.231, 0.822, 0.782]: stand, play musical instrument.", "At 1727.0s, [0.657, 0.181, 0.811, 0.772]: stand, play musical instrument, listen to (a person), watch (a person).", "At 1726.0s, [0.661, 0.19, 0.82, 0.783]: stand, play musical instrument.", "At 1725.0s, [0.689, 0.162, 0.835, 0.791]: stand, play musical instrument, watch (a person).", "At 1724.0s, [0.618, 0.181, 0.813, 0.788]: dance, play musical instrument, listen to (a person), watch (a person).", "At 1723.0s, [0.552, 0.201, 0.751, 0.799]: dance, play musical instrument, watch (a person).", "From 1721.0s to 1722.0s, [0.662, 0.177, 0.807, 0.805]->[0.61, 0.188, 0.797, 0.787]: stand, play musical instrument."]}, {"from": "human", "timestamps": 1736.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.143, 0.209, 0.379, 1.0]: touch (an object), watch (a person), bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group); [0.321, 0.142, 0.518, 0.558]: listen to (a person), stand, touch (an object), watch (a person); [0.751, 0.153, 0.956, 0.975]: listen (e.g., to music), watch (a 
person), listen to (a person), carry/hold (an object), stand."}]}, {"video": "b50s4AlOOKY.mkv", "clip": [1717, 1773], "conversations": [{"from": "human", "timestamps": 1747.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.202, 0.115, 0.384, 0.479]?"}, {"from": "gpt", "value": ["In the current frame, [0.202, 0.115, 0.384, 0.479]: stand, smoke, listen to (a person), watch (a person).", "At 1746.0s, [0.243, 0.127, 0.359, 0.401]: walk, smoke, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1773.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.038, 0.063, 0.597, 0.947]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.472, 0.198, 0.65, 0.853]: carry/hold (an object), stand, smoke, watch (a person); [0.709, 0.184, 0.998, 0.795]: carry/hold (an object), stand, watch (a person), listen to (a person)."}]}, {"video": "b5pRYl_djbs.mp4", "clip": [899, 949], "conversations": [{"from": "human", "timestamps": 929.0, "value": "What actions is the person in bbox [0.374, 0.03, 0.838, 0.986] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.374, 0.03, 0.838, 0.986]: walk.", "At 928.0s, [0.364, 0.052, 0.793, 0.99]: walk, carry/hold (an object)."]}, {"from": "human", "timestamps": 949.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.408, 0.487, 0.432, 0.595]: carry/hold (an object), walk; [0.535, 0.715, 0.574, 0.806]: walk; [0.916, 0.739, 0.949, 0.871]: carry/hold (an object), walk."}]}, {"video": "b5pRYl_djbs.mp4", "clip": [936, 1000], "conversations": [{"from": "human", "timestamps": 966.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.105, 0.203, 0.336, 0.93]?"}, {"from": "gpt", "value": ["At Now, [0.105, 0.203, 0.336, 0.93]: stand, listen to (a person), watch (a person).", "At 965.0s, [0.335, 0.404, 0.621, 0.908]: stand."]}, {"from": "human", "timestamps": 1000.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.708, 0.099, 0.994, 0.977]: stand."}]}, {"video": "b5pRYl_djbs.mp4", "clip": [994, 1029], "conversations": [{"from": "human", "timestamps": 1024.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.045, 0.609, 0.162, 0.879]: carry/hold (an object), stand; [0.201, 0.211, 0.551, 1.0]: carry/hold (an object), stand, talk to (e.g., self, a person, a group), listen to (a person)."}, {"from": "human", "timestamps": 1029.0, "value": "For the person at bbox [0.0, 0.0, 0.419, 0.998], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.0, 0.419, 0.998]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1028.0s, [0.0, 0.002, 0.414, 0.993]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "b5pRYl_djbs.mp4", "clip": [1014, 1112], "conversations": [{"from": "human", "timestamps": 1044.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.527, 0.047, 0.933, 0.859]: stand."}, {"from": "human", "timestamps": 1112.0, "value": "Describe the actions of the person at bbox [0.116, 0.172, 0.534, 0.988] 
in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.116, 0.172, 0.534, 0.988]: stand, answer phone.", "From 1105.0s to 1111.0s, [0.041, 0.103, 0.572, 1.0]->[0.103, 0.192, 0.549, 1.0]->[0.08, 0.188, 0.584, 0.984]->[0.111, 0.111, 0.567, 0.984]->[0.095, 0.147, 0.565, 1.0]->[0.133, 0.191, 0.565, 0.987]->[0.124, 0.171, 0.524, 0.992]: stand, answer phone.", "At 1104.0s, [0.05, 0.121, 0.601, 0.997]: stand, answer phone, listen to (a person).", "At 1103.0s, [0.007, 0.176, 0.539, 0.995]: bend/bow (at the waist), answer phone."]}]}, {"video": "b5pRYl_djbs.mp4", "clip": [1119, 1164], "conversations": [{"from": "human", "timestamps": 1149.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.292, 0.033, 0.757, 0.968]: stand, smoke."}, {"from": "human", "timestamps": 1164.0, "value": "What actions is the person in bbox [0.267, 0.101, 0.504, 0.902] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.267, 0.101, 0.504, 0.902]: stand, listen to (a person), watch (a person).", "At 1163.0s, [0.292, 0.097, 0.53, 0.918]: stand, listen to (a person), watch (a person)."]}]}, {"video": "b5pRYl_djbs.mp4", "clip": [1123, 1241], "conversations": [{"from": "human", "timestamps": 1153.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.496, 0.228, 0.686, 0.998]: watch (a person), stand."}, {"from": "human", "timestamps": 1241.0, "value": "What actions is the person in bbox [0.454, 0.123, 0.797, 0.991] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.454, 0.123, 0.797, 0.991]: stand, carry/hold (an object).", "From 1238.0s to 1240.0s, [0.339, 0.058, 0.682, 0.948]->[0.337, 0.064, 0.686, 0.99]->[0.401, 0.023, 0.738, 0.981]: stand, carry/hold (an object)."]}]}, {"video": "b5pRYl_djbs.mp4", "clip": [1268, 1306], "conversations": [{"from": "human", "timestamps": 1298.0, "value": "What actions is the person in bbox [0.532, 0.018, 0.963, 0.979] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.532, 0.018, 0.963, 0.979]: sit, carry/hold (an object).", "At 1297.0s, [0.479, 0.018, 1.0, 0.981]: sit, carry/hold (an object).", "At 1296.0s, [0.512, 0.0, 0.98, 0.999]: sit, carry/hold (an object), write, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1306.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.217, 0.003, 1.0, 0.991]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "b5pRYl_djbs.mp4", "clip": [1258, 1366], "conversations": [{"from": "human", "timestamps": 1288.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.28, 0.0, 0.994, 0.986]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1366.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.209, 0.0, 0.594, 0.939]?"}, {"from": "gpt", "value": ["At Now, [0.209, 0.0, 0.594, 0.939]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1365.0s, [0.003, 0.037, 0.474, 0.968]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1363.0s to 1364.0s, [0.213, 
0.153, 0.42, 0.948]->[0.16, 0.116, 0.458, 0.94]: walk, carry/hold (an object)."]}]}, {"video": "b5pRYl_djbs.mp4", "clip": [1369, 1480], "conversations": [{"from": "human", "timestamps": 1399.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.025, 0.605, 0.933]: listen to (a person), sit; [0.362, 0.129, 0.855, 0.968]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1480.0, "value": "What actions did the person at bbox [0.12, 0.0, 0.913, 0.997] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.12, 0.0, 0.913, 0.997]: sit.", "From 1475.0s to 1479.0s, [0.406, 0.085, 0.868, 0.992]->[0.423, 0.103, 0.834, 0.99]->[0.414, 0.091, 0.827, 0.987]->[0.405, 0.097, 0.821, 0.986]->[0.409, 0.101, 0.846, 0.989]: stand.", "From 1472.0s to 1474.0s, [0.315, 0.027, 0.778, 0.975]->[0.411, 0.054, 0.848, 0.971]->[0.395, 0.066, 0.839, 0.987]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "b5pRYl_djbs.mp4", "clip": [1457, 1489], "conversations": [{"from": "human", "timestamps": 1487.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.308, 0.0, 0.958, 1.0]?"}, {"from": "gpt", "value": ["In the current frame, [0.308, 0.0, 0.958, 1.0]: stand.", "At 1486.0s, [0.292, 0.0, 0.964, 1.0]: sit, stand.", "From 1484.0s to 1485.0s, [0.251, 0.0, 0.924, 0.993]->[0.273, 0.0, 0.918, 0.997]: stand.", "At 1483.0s, [0.284, 0.0, 0.908, 1.0]: sit, stand.", "From 1481.0s to 1482.0s, [0.252, 0.0, 0.919, 0.999]->[0.298, 0.0, 0.917, 1.0]: stand.", "At 1480.0s, [0.12, 0.0, 0.913, 0.997]: sit.", "From 1475.0s to 1479.0s, [0.406, 0.085, 0.868, 0.992]->[0.423, 0.103, 0.834, 0.99]->[0.414, 0.091, 0.827, 0.987]->[0.405, 0.097, 0.821, 0.986]->[0.409, 0.101, 0.846, 0.989]: stand.", "From 1472.0s to 1474.0s, [0.315, 0.027, 0.778, 0.975]->[0.411, 0.054, 0.848, 0.971]->[0.395, 0.066, 0.839, 0.987]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1489.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.354, 0.0, 0.979, 1.0]: sit, stand."}]}, {"video": "b5pRYl_djbs.mp4", "clip": [1518, 1584], "conversations": [{"from": "human", "timestamps": 1548.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.066, 0.103, 0.63, 0.984]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1584.0, "value": "For the person at bbox [0.15, 0.094, 0.496, 0.996], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.15, 0.094, 0.496, 0.996]: stand, smoke, talk to (e.g., self, a person, a group).", "From 1582.0s to 1583.0s, [0.061, 0.139, 0.425, 1.0]->[0.081, 0.14, 0.438, 0.992]: stand, carry/hold (an object), smoke.", "At 1581.0s, [0.053, 0.131, 0.42, 0.99]: stand, smoke.", "At 1580.0s, [0.088, 0.148, 0.46, 0.965]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1577.0s to 1579.0s, [0.175, 0.187, 0.395, 0.948]->[0.263, 0.155, 0.535, 0.933]->[0.086, 0.104, 0.536, 0.978]: walk, talk to (e.g., self, a person, a group)."]}]}, {"video": "b5pRYl_djbs.mp4", "clip": [1693, 1731], "conversations": [{"from": "human", "timestamps": 1723.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.165, 0.113, 0.429, 0.974]?"}, {"from": "gpt", 
"value": ["In the current frame, [0.165, 0.113, 0.429, 0.974]: stand, write, talk to (e.g., self, a person, a group).", "At 1722.0s, [0.173, 0.09, 0.409, 0.965]: stand, write, listen to (a person), watch (a person).", "At 1721.0s, [0.203, 0.087, 0.429, 0.892]: stand, write, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1719.0s, [0.0, 0.0, 0.72, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1717.0s to 1718.0s, [0.029, 0.002, 0.739, 0.959]->[0.109, 0.004, 0.78, 0.964]: stand, talk to (e.g., self, a person, a group).", "At 1716.0s, [0.013, 0.0, 0.674, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1715.0s, [0.015, 0.006, 0.695, 0.97]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1731.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.123, 0.0, 0.356, 0.945]: carry/hold (an object), walk; [0.68, 0.405, 0.999, 1.0]: read, sit."}]}, {"video": "b5pRYl_djbs.mp4", "clip": [1722, 1758], "conversations": [{"from": "human", "timestamps": 1752.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.2, 0.018, 0.961, 0.998]?"}, {"from": "gpt", "value": ["At Now, [0.2, 0.018, 0.961, 0.998]: stand, drink.", "At 1751.0s, [0.249, 0.003, 0.917, 0.992]: stand.", "At 1750.0s, [0.304, 0.002, 0.969, 1.0]: sit, stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1758.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.001, 0.417, 0.173, 0.935]: sit; [0.033, 0.021, 0.675, 1.0]: listen to (a person), stand, watch (a person)."}]}, {"video": "bAVXp1oGjHA.mkv", "clip": [887, 940], "conversations": [{"from": "human", "timestamps": 917.0, "value": "For the person at bbox [0.002, 0.002, 0.785, 0.994], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.002, 0.002, 0.785, 0.994]: sit, answer phone.", "At 916.0s, [0.038, 0.003, 0.84, 0.987]: stand, answer phone."]}, {"from": "human", "timestamps": 940.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.042, 0.102, 0.961, 0.996]: sit, answer phone."}]}, {"video": "bAVXp1oGjHA.mkv", "clip": [891, 979], "conversations": [{"from": "human", "timestamps": 921.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.005, 0.013, 0.739, 0.989]: stand, answer phone."}, {"from": "human", "timestamps": 979.0, "value": "For the person at bbox [0.252, 0.16, 0.746, 0.689], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.252, 0.16, 0.746, 0.689]: sit, answer phone, touch (an object).", "At 978.0s, [0.225, 0.175, 0.721, 0.678]: sit, answer phone.", "At 977.0s, [0.239, 0.183, 0.691, 0.695]: sit, answer phone, touch (an object).", "From 970.0s to 976.0s, [0.241, 0.146, 0.725, 0.681]->[0.226, 0.158, 0.727, 0.665]->[0.231, 0.15, 0.7, 0.684]->[0.221, 0.163, 0.701, 0.68]->[0.249, 0.157, 0.739, 0.675]->[0.238, 0.166, 0.669, 0.701]->[0.245, 0.166, 0.728, 0.705]: sit, answer phone.", "At 969.0s, [0.266, 0.156, 0.663, 0.673]: sit, answer phone, touch (an object)."]}]}, {"video": "bAVXp1oGjHA.mkv", "clip": 
[959, 997], "conversations": [{"from": "human", "timestamps": 989.0, "value": "For the person at bbox [0.037, 0.031, 0.534, 0.987], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.037, 0.031, 0.534, 0.987]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 988.0s, [0.076, 0.033, 0.489, 0.998]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 986.0s to 987.0s, [0.136, 0.161, 0.415, 0.993]->[0.049, 0.064, 0.414, 1.0]: walk, talk to (e.g., self, a person, a group).", "At 985.0s, [0.201, 0.241, 0.386, 0.981]: walk, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 997.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.014, 0.192, 0.257, 0.992]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.35, 0.013, 0.694, 1.0]: listen to (a person), stand, watch (a person); [0.681, 0.043, 0.989, 1.0]: listen to (a person), stand, watch (a person)."}]}, {"video": "bAVXp1oGjHA.mkv", "clip": [1003, 1061], "conversations": [{"from": "human", "timestamps": 1033.0, "value": "What actions did the person at bbox [0.389, 0.009, 0.758, 0.996] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.389, 0.009, 0.758, 0.996]: stand, watch (a person).", "At 1032.0s, [0.371, 0.011, 0.762, 0.996]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1061.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.009, 0.699, 0.985]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "bAVXp1oGjHA.mkv", "clip": [1008, 1136], "conversations": [{"from": "human", "timestamps": 1038.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.014, 0.435, 0.122, 0.642]: listen to (a person), sit, touch (an object), watch (a person); [0.705, 0.422, 0.787, 0.541]: watch (a person), sit; [0.78, 0.42, 0.847, 0.573]: listen to (a person), sit, watch (a person); [0.853, 0.426, 0.956, 0.613]: listen to (a person), sit, touch (an object), watch (a person); [0.937, 0.448, 1.0, 0.633]: listen to (a person), sit, touch (an object), watch (a person)."}, {"from": "human", "timestamps": 1136.0, "value": "What actions is the person in bbox [0.087, 0.011, 0.874, 0.976] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.087, 0.011, 0.874, 0.976]: sit, answer phone, carry/hold (an object).", "From 1134.0s to 1135.0s, [0.088, 0.0, 0.893, 0.959]->[0.048, 0.002, 0.872, 0.964]: sit, answer phone, carry/hold (an object).", "At 1133.0s, [0.106, 0.022, 0.937, 0.933]: sit, answer phone, press, touch (an object).", "At 1132.0s, [0.062, 0.009, 0.85, 0.956]: sit, answer phone, carry/hold (an object), touch (an object).", "From 1128.0s to 1131.0s, [0.086, 0.026, 0.789, 0.98]->[0.05, 0.031, 0.839, 0.957]->[0.046, 0.02, 0.874, 0.951]->[0.056, 0.017, 0.884, 0.939]: sit, answer phone, carry/hold (an object)."]}]}, {"video": "bAVXp1oGjHA.mkv", "clip": [1115, 1163], "conversations": [{"from": "human", "timestamps": 1145.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.074, 0.036, 0.867, 
0.971]: sit, touch (an object), answer phone."}, {"from": "human", "timestamps": 1163.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.167, 0.019, 0.441, 0.71]?"}, {"from": "gpt", "value": ["At Now, [0.167, 0.019, 0.441, 0.71]: stand, answer phone, watch (a person).", "At 1162.0s, [0.155, 0.008, 0.428, 0.709]: stand, answer phone, watch (a person)."]}]}, {"video": "bAVXp1oGjHA.mkv", "clip": [1135, 1224], "conversations": [{"from": "human", "timestamps": 1165.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.158, 0.018, 0.412, 0.766]: watch (a person), stand, answer phone; [0.353, 0.011, 0.891, 0.989]: listen to (a person), crouch/kneel, watch (a person)."}, {"from": "human", "timestamps": 1224.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.018, 0.0, 0.518, 0.978]?"}, {"from": "gpt", "value": ["In the current frame, [0.018, 0.0, 0.518, 0.978]: stand, carry/hold (an object), listen to (a person).", "At 1223.0s, [0.021, 0.0, 0.612, 1.0]: stand, carry/hold (an object), listen to (a person).", "From 1220.0s to 1222.0s, [0.0, 0.0, 0.456, 0.976]->[0.003, 0.0, 0.5, 0.969]->[0.025, 0.0, 0.599, 0.995]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1219.0s, [0.026, 0.0, 0.79, 0.973]: stand, talk to (e.g., self, a person, a group).", "At 1217.0s, [0.036, 0.05, 0.499, 0.974]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "bAVXp1oGjHA.mkv", "clip": [1180, 1230], "conversations": [{"from": "human", "timestamps": 1210.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.419, 0.02, 0.974, 0.987]: watch (a person), talk to (e.g., self, a person, a group), bend/bow (at the waist)."}, {"from": "human", "timestamps": 1230.0, "value": "What actions is the person in bbox [0.525, 0.089, 0.983, 0.977] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.525, 0.089, 0.983, 0.977]: stand, listen to (a person).", "From 1227.0s to 1229.0s, [0.42, 0.067, 0.984, 0.991]->[0.427, 0.073, 0.984, 0.984]->[0.443, 0.072, 0.993, 0.984]: stand, talk to (e.g., self, a person, a group).", "At 1226.0s, [0.415, 0.051, 0.995, 0.981]: stand, listen to (a person).", "At 1225.0s, [0.379, 0.023, 0.996, 0.979]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1223.0s to 1224.0s, [0.352, 0.006, 0.978, 0.976]->[0.361, 0.005, 0.99, 0.98]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1220.0s to 1222.0s, [0.326, 0.046, 0.843, 0.998]->[0.294, 0.019, 0.948, 0.989]->[0.335, 0.03, 0.956, 0.976]: stand, listen to (a person)."]}]}, {"video": "bAVXp1oGjHA.mkv", "clip": [1257, 1288], "conversations": [{"from": "human", "timestamps": 1287.0, "value": "Describe the actions of the person at bbox [0.567, 0.073, 1.0, 0.991] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.567, 0.073, 1.0, 0.991]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1286.0s, [0.465, 0.054, 0.965, 0.994]: stand, listen to (a person), watch (a person).", "At 1285.0s, [0.401, 0.025, 0.907, 0.985]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1284.0s, [0.392, 0.004, 0.989, 0.979]: stand, listen to (a 
person), watch (a person).", "From 1281.0s to 1282.0s, [0.33, 0.006, 0.975, 0.981]->[0.117, 0.037, 0.89, 0.983]: walk, carry/hold (an object), listen to (a person).", "At 1280.0s, [0.402, 0.053, 0.986, 0.976]: walk, listen to (a person).", "At 1279.0s, [0.562, 0.0, 1.0, 0.986]: walk, listen to (a person), watch (a person).", "At 1278.0s, [0.619, 0.056, 1.0, 0.993]: stand, listen to (a person), watch (a person).", "From 1275.0s to 1277.0s, [0.433, 0.0, 0.99, 1.0]->[0.451, 0.005, 0.996, 0.968]->[0.496, 0.008, 1.0, 0.969]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1274.0s, [0.434, 0.065, 1.0, 0.984]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1273.0s, [0.29, 0.159, 0.628, 0.996]: walk, listen to (a person), watch (a person).", "At 1272.0s, [0.05, 0.261, 0.329, 0.992]: walk, watch (a person).", "From 1270.0s to 1271.0s, [0.273, 0.009, 0.989, 1.0]->[0.196, 0.296, 0.762, 1.0]: bend/bow (at the waist), talk to (e.g., self, a person, a group).", "At 1269.0s, [0.089, 0.109, 1.0, 0.988]: stand, talk to (e.g., self, a person, a group).", "At 1268.0s, [0.146, 0.0, 1.0, 0.96]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1263.0s to 1267.0s, [0.026, 0.033, 0.985, 0.993]->[0.057, 0.017, 1.0, 0.984]->[0.045, 0.0, 1.0, 0.982]->[0.075, 0.0, 1.0, 0.976]->[0.039, 0.08, 0.987, 0.999]: stand, talk to (e.g., self, a person, a group).", "At 1262.0s, [0.066, 0.0, 0.995, 0.986]: sit, stand, talk to (e.g., self, a person, a group).", "From 1260.0s to 1261.0s, [0.225, 0.011, 1.0, 0.969]->[0.327, 0.006, 1.0, 0.98]: stand, talk to (e.g., self, a person, a group).", "At 1259.0s, [0.101, 0.0, 0.981, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1288.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.016, 0.109, 0.563, 0.987]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.597, 0.235, 0.989, 0.976]: listen to (a person), watch (a person), walk."}]}, {"video": "bAVXp1oGjHA.mkv", "clip": [1262, 1359], "conversations": [{"from": "human", "timestamps": 1292.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.167, 0.115, 0.879, 0.982]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1359.0, "value": "For the person at bbox [0.442, 0.127, 0.971, 0.971], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.442, 0.127, 0.971, 0.971]: sit, carry/hold (an object), read.", "At 1358.0s, [0.453, 0.129, 0.956, 0.982]: sit, carry/hold (an object).", "At 1357.0s, [0.438, 0.013, 0.894, 0.979]: bend/bow (at the waist), carry/hold (an object).", "At 1356.0s, [0.327, 0.061, 0.673, 0.931]: bend/bow (at the waist), lift/pick up.", "From 1352.0s to 1355.0s, [0.383, 0.19, 0.59, 0.779]->[0.35, 0.171, 0.576, 0.793]->[0.413, 0.154, 0.648, 0.812]->[0.393, 0.092, 0.666, 0.838]: walk.", "At 1351.0s, [0.361, 0.204, 0.525, 0.775]: stand, walk.", "From 1349.0s to 1350.0s, [0.292, 0.263, 0.527, 0.682]->[0.353, 0.258, 0.5, 0.753]: walk, talk to (e.g., self, a person, a group).", "At 1348.0s, [0.266, 0.283, 0.472, 0.655]: stand, throw, talk to (e.g., self, a person, a group).", "At 1347.0s, [0.327, 0.291, 0.542, 0.676]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1346.0s, 
[0.354, 0.323, 0.482, 0.712]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1343.0s to 1345.0s, [0.392, 0.372, 0.52, 0.685]->[0.392, 0.354, 0.5, 0.695]->[0.362, 0.345, 0.473, 0.686]: walk, carry/hold (an object).", "At 1342.0s, [0.384, 0.372, 0.484, 0.679]: stand, carry/hold (an object).", "At 1341.0s, [0.413, 0.371, 0.544, 0.685]: stand, carry/hold (an object), touch (an object)."]}]}, {"video": "bAVXp1oGjHA.mkv", "clip": [1317, 1367], "conversations": [{"from": "human", "timestamps": 1347.0, "value": "For the person at bbox [0.327, 0.291, 0.542, 0.676], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.327, 0.291, 0.542, 0.676]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1346.0s, [0.354, 0.323, 0.482, 0.712]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1343.0s to 1345.0s, [0.392, 0.372, 0.52, 0.685]->[0.392, 0.354, 0.5, 0.695]->[0.362, 0.345, 0.473, 0.686]: walk, carry/hold (an object).", "At 1342.0s, [0.384, 0.372, 0.484, 0.679]: stand, carry/hold (an object).", "At 1341.0s, [0.413, 0.371, 0.544, 0.685]: stand, carry/hold (an object), touch (an object)."]}, {"from": "human", "timestamps": 1367.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.431, 0.075, 0.929, 0.967]: carry/hold (an object), sit, read."}]}, {"video": "bAVXp1oGjHA.mkv", "clip": [1358, 1427], "conversations": [{"from": "human", "timestamps": 1388.0, "value": "For the person at bbox [0.434, 0.015, 1.0, 0.988], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.434, 0.015, 1.0, 0.988]: stand, talk to (e.g., self, a person, a group).", "At 1387.0s, [0.419, 0.014, 1.0, 0.987]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1427.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.395, 0.019, 0.964, 0.973]: carry/hold (an object), stand."}]}, {"video": "bAVXp1oGjHA.mkv", "clip": [1401, 1489], "conversations": [{"from": "human", "timestamps": 1431.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.412, 0.057, 0.994, 0.999]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1489.0, "value": "Describe the actions of the person at bbox [0.564, 0.113, 1.0, 0.986] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.564, 0.113, 1.0, 0.986]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1488.0s, [0.577, 0.135, 1.0, 0.989]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1485.0s to 1487.0s, [0.569, 0.155, 1.0, 0.985]->[0.579, 0.142, 1.0, 1.0]->[0.584, 0.148, 1.0, 0.994]: stand, listen to (a person).", "From 1483.0s to 1484.0s, [0.579, 0.167, 1.0, 0.984]->[0.575, 0.157, 1.0, 0.987]: stand, listen to (a person), watch (a person)."]}]}, {"video": "bAVXp1oGjHA.mkv", "clip": [1410, 1469], "conversations": [{"from": "human", "timestamps": 1440.0, "value": "What actions is the person in bbox [0.181, 0.035, 0.651, 0.982] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.181, 0.035, 0.651, 
0.982]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1437.0s to 1439.0s, [0.249, 0.077, 0.685, 0.993]->[0.188, 0.065, 0.608, 0.981]->[0.086, 0.048, 0.576, 0.988]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1436.0s, [0.182, 0.076, 0.586, 0.985]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 1432.0s to 1435.0s, [0.086, 0.022, 0.85, 0.975]->[0.168, 0.024, 0.808, 0.994]->[0.142, 0.019, 0.898, 0.98]->[0.171, 0.021, 0.787, 0.998]: stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1469.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.012, 0.254, 0.269, 0.976]: listen to (a person), stand; [0.051, 0.114, 0.76, 0.995]: stand, talk to (e.g., self, a person, a group); [0.488, 0.0, 0.965, 0.996]: listen to (a person), stand."}]}, {"video": "bAVXp1oGjHA.mkv", "clip": [1493, 1547], "conversations": [{"from": "human", "timestamps": 1523.0, "value": "What actions is the person in bbox [0.624, 0.044, 0.845, 0.563] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.624, 0.044, 0.845, 0.563]: stand, hand shake, talk to (e.g., self, a person, a group), watch (a person).", "At 1522.0s, [0.629, 0.071, 0.851, 0.858]: stand, hand shake, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1547.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.048, 0.022, 0.679, 1.0]: walk."}]}, {"video": "bAVXp1oGjHA.mkv", "clip": [1496, 1613], "conversations": [{"from": "human", "timestamps": 1526.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.025, 0.285, 0.243, 0.747]: listen to (a person), stand, watch (a person); [0.414, 0.072, 0.688, 0.524]: listen to (a person), stand; [0.633, 0.093, 0.851, 0.637]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1613.0, "value": "For the person at bbox [0.014, 0.0, 0.988, 0.979], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.014, 0.0, 0.988, 0.979]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1612.0s, [0.013, 0.021, 0.973, 0.969]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "bAVXp1oGjHA.mkv", "clip": [1534, 1607], "conversations": [{"from": "human", "timestamps": 1564.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.2, 0.008, 1.0, 0.987]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1607.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.039, 0.99, 0.978]?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.039, 0.99, 0.978]: stand, talk to (e.g., self, a person, a group).", "At 1606.0s, [0.0, 0.012, 1.0, 0.984]: stand, talk to (e.g., self, a person, a group).", "At 1605.0s, [0.011, 0.059, 0.985, 0.982]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "bAVXp1oGjHA.mkv", "clip": [1607, 1676], "conversations": [{"from": "human", 
"timestamps": 1637.0, "value": "For the person at bbox [0.134, 0.014, 0.997, 0.996], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.134, 0.014, 0.997, 0.996]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1635.0s to 1636.0s, [0.049, 0.0, 0.938, 0.992]->[0.067, 0.0, 0.947, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1676.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.049, 0.003, 0.802, 0.978]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "bSZiZ4rOC7c.mkv", "clip": [904, 947], "conversations": [{"from": "human", "timestamps": 934.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.279, 0.226, 0.431, 0.868]: stand, talk to (e.g., self, a person, a group); [0.401, 0.19, 0.521, 0.817]: listen to (a person), stand, watch (a person); [0.517, 0.182, 0.666, 0.833]: listen to (a person), stand."}, {"from": "human", "timestamps": 947.0, "value": "For the person at bbox [0.214, 0.227, 0.559, 0.871], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.214, 0.227, 0.559, 0.871]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 945.0s to 946.0s, [0.369, 0.189, 0.494, 0.802]->[0.367, 0.192, 0.492, 0.795]: stand, listen to (a person), watch (a person).", "At 944.0s, [0.371, 0.186, 0.492, 0.798]: stand, listen to (a person).", "At 943.0s, [0.376, 0.19, 0.498, 0.806]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 942.0s, [0.379, 0.195, 0.499, 0.808]: stand, listen to (a person), watch (a person).", "At 941.0s, [0.387, 0.18, 0.505, 0.8]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 940.0s, [0.382, 0.187, 0.513, 0.808]: stand, listen to (a person), watch (a person).", "From 938.0s to 939.0s, [0.379, 0.175, 0.51, 0.802]->[0.386, 0.175, 0.513, 0.805]: stand, listen to (a person).", "At 937.0s, [0.369, 0.182, 0.498, 0.813]: stand.", "At 936.0s, [0.383, 0.18, 0.51, 0.822]: stand, listen to (a person).", "From 934.0s to 935.0s, [0.401, 0.19, 0.521, 0.817]->[0.393, 0.188, 0.522, 0.808]: stand, listen to (a person), watch (a person).", "At 933.0s, [0.404, 0.196, 0.528, 0.82]: stand, listen to (a person)."]}]}, {"video": "bSZiZ4rOC7c.mkv", "clip": [903, 1001], "conversations": [{"from": "human", "timestamps": 933.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.274, 0.233, 0.423, 0.883]: hand wave, stand; [0.404, 0.196, 0.528, 0.82]: listen to (a person), stand; [0.531, 0.198, 0.672, 0.847]: listen to (a person), stand."}, {"from": "human", "timestamps": 1001.0, "value": "What actions is the person in bbox [0.241, 0.193, 0.484, 0.788] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.241, 0.193, 0.484, 0.788]: stand, touch (an object), listen to (a person), watch (a person).", "From 998.0s to 1000.0s, [0.264, 0.202, 0.534, 0.787]->[0.286, 0.199, 0.513, 0.783]->[0.292, 0.212, 0.489, 0.771]: stand, touch (an object), listen to (a person), watch (a person).", "From 994.0s to 997.0s, [0.416, 0.26, 0.515, 
0.567]->[0.279, 0.208, 0.521, 0.789]->[0.279, 0.201, 0.52, 0.793]->[0.305, 0.197, 0.512, 0.766]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 993.0s, [0.437, 0.268, 0.524, 0.565]: stand, touch (an object), listen to (a person)."]}]}, {"video": "bSZiZ4rOC7c.mkv", "clip": [954, 1005], "conversations": [{"from": "human", "timestamps": 984.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.03, 0.359, 0.37, 0.901]?"}, {"from": "gpt", "value": ["In the current frame, [0.03, 0.359, 0.37, 0.901]: stand, listen to (a person), watch (a person).", "From 982.0s to 983.0s, [0.12, 0.169, 0.606, 0.892]->[0.133, 0.196, 0.606, 0.883]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1005.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.3, 0.181, 0.913, 0.878]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "bSZiZ4rOC7c.mkv", "clip": [994, 1065], "conversations": [{"from": "human", "timestamps": 1024.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.612, 0.202, 0.887, 0.774]?"}, {"from": "gpt", "value": ["At Now, [0.612, 0.202, 0.887, 0.774]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1023.0s, [0.614, 0.205, 0.878, 0.768]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1022.0s, [0.646, 0.189, 0.875, 0.756]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1020.0s to 1021.0s, [0.633, 0.189, 0.893, 0.758]->[0.646, 0.182, 0.886, 0.77]: stand, touch (an object), watch (a person)."]}, {"from": "human", "timestamps": 1065.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.419, 0.132, 0.921, 0.89]: sit."}]}, {"video": "bSZiZ4rOC7c.mkv", "clip": [1032, 1105], "conversations": [{"from": "human", "timestamps": 1062.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.419, 0.173, 0.936, 0.884]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1105.0, "value": "Describe the actions of the person at bbox [0.324, 0.326, 0.565, 0.792] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.324, 0.326, 0.565, 0.792]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1104.0s, [0.289, 0.305, 0.514, 0.776]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1102.0s to 1103.0s, [0.21, 0.199, 0.971, 0.901]->[0.241, 0.323, 0.482, 0.763]: sit, talk to (e.g., self, a person, a group).", "From 1100.0s to 1101.0s, [0.227, 0.205, 0.954, 0.899]->[0.376, 0.161, 0.973, 0.89]: stand, talk to (e.g., self, a person, a group).", "From 1098.0s to 1099.0s, [0.264, 0.216, 0.979, 0.873]->[0.232, 0.25, 0.968, 0.87]: sit, talk to (e.g., self, a person, a group).", "At 1097.0s, [0.324, 0.198, 0.531, 0.548]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1091.0s to 1096.0s, [0.388, 0.254, 0.606, 0.714]->[0.398, 0.264, 0.622, 0.702]->[0.382, 0.244, 0.598, 0.677]->[0.377, 0.231, 0.559, 0.503]->[0.365, 0.231, 0.539, 0.507]->[0.299, 0.246, 0.526, 0.573]: sit.", "At 1090.0s, [0.294, 0.286, 0.627, 0.782]: sit, touch (an object).", "At 1089.0s, 
[0.269, 0.272, 0.613, 0.877]: bend/bow (at the waist), touch (an object).", "From 1087.0s to 1088.0s, [0.439, 0.148, 0.959, 0.885]->[0.152, 0.113, 0.493, 0.884]: walk.", "At 1086.0s, [0.327, 0.201, 0.884, 0.879]: stand."]}]}, {"video": "bSZiZ4rOC7c.mkv", "clip": [1112, 1258], "conversations": [{"from": "human", "timestamps": 1142.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.47, 0.155, 0.972, 0.912]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1258.0, "value": "What actions did the person at bbox [0.498, 0.172, 0.84, 0.883] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.498, 0.172, 0.84, 0.883]: stand.", "At 1257.0s, [0.516, 0.113, 0.828, 0.896]: stand.", "At 1256.0s, [0.407, 0.179, 0.707, 0.884]: stand, walk, talk to (e.g., self, a person, a group).", "From 1254.0s to 1255.0s, [0.404, 0.194, 0.741, 0.889]->[0.467, 0.2, 0.786, 0.886]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "bSZiZ4rOC7c.mkv", "clip": [1273, 1321], "conversations": [{"from": "human", "timestamps": 1303.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.866, 0.465, 0.936, 0.716]: walk."}, {"from": "human", "timestamps": 1321.0, "value": "What actions is the person in bbox [0.437, 0.336, 0.518, 0.685] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.437, 0.336, 0.518, 0.685]: walk, talk to (e.g., self, a person, a group).", "At 1320.0s, [0.455, 0.331, 0.544, 0.688]: stand, listen to (a person).", "At 1319.0s, [0.454, 0.325, 0.535, 0.686]: stand, talk to (e.g., self, a person, a group).", "At 1318.0s, [0.495, 0.33, 0.588, 0.689]: walk, talk to (e.g., self, a person, a group)."]}]}, {"video": "bSZiZ4rOC7c.mkv", "clip": [1332, 1364], "conversations": [{"from": "human", "timestamps": 1362.0, "value": "What actions did the person at bbox [0.447, 0.21, 0.802, 0.894] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.447, 0.21, 0.802, 0.894]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1357.0s to 1361.0s, [0.395, 0.202, 0.763, 0.891]->[0.432, 0.214, 0.801, 0.895]->[0.458, 0.236, 0.752, 0.894]->[0.46, 0.228, 0.782, 0.895]->[0.464, 0.223, 0.792, 0.891]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1356.0s, [0.392, 0.187, 0.775, 0.891]: sit, touch (an object).", "From 1354.0s to 1355.0s, [0.4, 0.238, 0.745, 0.88]->[0.424, 0.184, 0.755, 0.876]: sit, carry/hold (an object).", "From 1351.0s to 1353.0s, [0.369, 0.22, 0.739, 0.903]->[0.359, 0.233, 0.751, 0.879]->[0.398, 0.191, 0.74, 0.871]: sit, touch (an object)."]}, {"from": "human", "timestamps": 1364.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.351, 0.216, 0.799, 0.884]: carry/hold (an object), sit."}]}, {"video": "bSZiZ4rOC7c.mkv", "clip": [1358, 1420], "conversations": [{"from": "human", "timestamps": 1388.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.011, 0.204, 0.364, 0.878]?"}, {"from": "gpt", "value": ["At Now, [0.011, 0.204, 0.364, 0.878]: stand, listen to (a person).", "At 1387.0s, [0.01, 0.239, 0.31, 0.883]: stand, listen to (a person).", "At 1386.0s, [0.078, 0.135, 0.634, 0.884]: 
stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1383.0s to 1385.0s, [0.004, 0.175, 0.212, 0.861]->[0.006, 0.192, 0.226, 0.878]->[0.012, 0.182, 0.229, 0.872]: stand, listen to (a person), watch (a person).", "At 1382.0s, [0.15, 0.145, 0.729, 0.884]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1380.0s to 1381.0s, [0.093, 0.133, 0.784, 0.917]->[0.166, 0.171, 0.711, 0.888]: stand, talk to (e.g., self, a person, a group).", "At 1379.0s, [0.36, 0.257, 0.472, 0.685]: stand, listen to (a person), watch (a person).", "At 1378.0s, [0.363, 0.283, 0.476, 0.682]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1377.0s, [0.353, 0.281, 0.472, 0.681]: stand, talk to (e.g., self, a person, a group).", "At 1376.0s, [0.355, 0.254, 0.471, 0.687]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1375.0s, [0.364, 0.286, 0.518, 0.684]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1374.0s, [0.35, 0.288, 0.518, 0.688]: stand, point to (an object), touch (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1373.0s, [0.337, 0.286, 0.515, 0.685]: stand, touch (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1372.0s, [0.341, 0.279, 0.514, 0.687]: stand, touch (an object), talk to (e.g., self, a person, a group).", "From 1370.0s to 1371.0s, [0.307, 0.288, 0.432, 0.681]->[0.325, 0.286, 0.446, 0.687]: stand, talk to (e.g., self, a person, a group).", "At 1369.0s, [0.253, 0.298, 0.374, 0.731]: walk, talk to (e.g., self, a person, a group).", "At 1368.0s, [0.069, 0.298, 0.257, 0.647]: walk, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1420.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.023, 0.148, 0.385, 0.869]: listen to (a person), stand, watch (a person); [0.438, 0.161, 0.919, 0.866]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "bSZiZ4rOC7c.mkv", "clip": [1438, 1482], "conversations": [{"from": "human", "timestamps": 1468.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.728, 0.455, 0.813, 0.865]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1482.0, "value": "For the person at bbox [0.697, 0.431, 0.825, 0.742], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.697, 0.431, 0.825, 0.742]: stand, carry/hold (an object), watch (a person).", "From 1480.0s to 1481.0s, [0.699, 0.499, 0.816, 0.725]->[0.677, 0.472, 0.809, 0.825]: stand, carry/hold (an object), watch (a person).", "At 1479.0s, [0.709, 0.514, 0.81, 0.859]: stand, walk, carry/hold (an object), watch (a person).", "At 1478.0s, [0.932, 0.611, 0.986, 0.74]: walk, carry/hold (an object)."]}]}, {"video": "bSZiZ4rOC7c.mkv", "clip": [1478, 1515], "conversations": [{"from": "human", "timestamps": 1508.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.335, 0.223, 0.444, 0.755]: listen to (a person), stand, watch (a person); [0.418, 0.197, 0.569, 0.842]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.668, 0.363, 0.792, 0.881]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1515.0, 
"value": "What actions is the person in bbox [0.003, 0.093, 0.222, 0.872] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.003, 0.093, 0.222, 0.872]: stand, listen to (a person), watch (a person).", "At 1514.0s, [0.009, 0.106, 0.294, 0.867]: stand.", "From 1511.0s to 1513.0s, [0.009, 0.135, 0.31, 0.895]->[0.013, 0.119, 0.32, 0.873]->[0.003, 0.14, 0.275, 0.887]: stand, listen to (a person), watch (a person).", "From 1507.0s to 1510.0s, [0.414, 0.205, 0.578, 0.842]->[0.418, 0.197, 0.569, 0.842]->[0.423, 0.174, 0.568, 0.853]->[0.43, 0.211, 0.57, 0.838]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "bSZiZ4rOC7c.mkv", "clip": [1588, 1685], "conversations": [{"from": "human", "timestamps": 1618.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.005, 0.472, 0.282, 0.919]: listen to (a person), sit; [0.472, 0.184, 0.975, 0.885]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1685.0, "value": "What actions is the person in bbox [0.628, 0.52, 0.769, 0.767] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.628, 0.52, 0.769, 0.767]: sit.", "At 1684.0s, [0.617, 0.528, 0.778, 0.77]: sit."]}]}, {"video": "bSZiZ4rOC7c.mkv", "clip": [1749, 1783], "conversations": [{"from": "human", "timestamps": 1779.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.351, 0.413, 0.478, 0.87]: watch (a person), walk; [0.457, 0.433, 0.504, 0.555]: watch (a person), walk; [0.502, 0.394, 0.604, 0.838]: carry/hold (an object), listen to (a person), watch (a person), walk; [0.636, 0.463, 0.803, 0.878]: watch (a person), walk."}, {"from": "human", "timestamps": 1783.0, "value": "What actions did the person at bbox [0.378, 0.431, 0.496, 0.865] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.378, 0.431, 0.496, 0.865]: stand, listen to (a person), watch (a person).", "From 1780.0s to 1782.0s, [0.381, 0.413, 0.494, 0.862]->[0.385, 0.417, 0.498, 0.856]->[0.385, 0.435, 0.5, 0.867]: stand, listen to (a person), watch (a person).", "From 1777.0s to 1779.0s, [0.318, 0.421, 0.469, 0.869]->[0.349, 0.442, 0.481, 0.869]->[0.351, 0.413, 0.478, 0.87]: walk, watch (a person).", "At 1776.0s, [0.218, 0.443, 0.413, 0.867]: walk."]}]}, {"video": "bhlFavrh7WU.mkv", "clip": [898, 943], "conversations": [{"from": "human", "timestamps": 928.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.109, 0.0, 0.782, 0.978]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 943.0, "value": "What actions is the person in bbox [0.379, 0.0, 1.0, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.379, 0.0, 1.0, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 942.0s, [0.286, 0.016, 0.983, 0.995]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 941.0s, [0.334, 0.013, 0.998, 0.997]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "bhlFavrh7WU.mkv", "clip": [890, 1007], "conversations": [{"from": "human", "timestamps": 920.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", 
"value": "In the current frame, [0.074, 0.005, 0.731, 0.985]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1007.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.251, 0.01, 0.958, 0.987]?"}, {"from": "gpt", "value": ["At Now, [0.251, 0.01, 0.958, 0.987]: sit, listen to (a person), watch (a person).", "At 1006.0s, [0.241, 0.006, 0.948, 0.991]: sit, stand, watch (a person)."]}]}, {"video": "bhlFavrh7WU.mkv", "clip": [1001, 1043], "conversations": [{"from": "human", "timestamps": 1031.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.104, 0.006, 0.829, 0.99]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1043.0, "value": "What actions is the person in bbox [0.113, 0.012, 0.852, 0.993] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.113, 0.012, 0.852, 0.993]: sit, talk to (e.g., self, a person, a group).", "At 1042.0s, [0.127, 0.0, 0.855, 1.0]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "bhlFavrh7WU.mkv", "clip": [1035, 1088], "conversations": [{"from": "human", "timestamps": 1065.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.167, 0.0, 0.82, 1.0]: listen to (a person), stand."}, {"from": "human", "timestamps": 1088.0, "value": "Describe the actions of the person at bbox [0.095, 0.022, 0.8, 0.991] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.095, 0.022, 0.8, 0.991]: sit, talk to (e.g., self, a person, a group).", "From 1086.0s to 1087.0s, [0.034, 0.032, 0.807, 0.987]->[0.088, 0.014, 0.811, 0.994]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "bhlFavrh7WU.mkv", "clip": [1062, 1111], "conversations": [{"from": "human", "timestamps": 1092.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.077, 0.009, 0.748, 1.0]: watch (a person), stand."}, {"from": "human", "timestamps": 1111.0, "value": "What actions did the person at bbox [0.077, 0.0, 0.792, 0.984] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.077, 0.0, 0.792, 0.984]: stand, talk to (e.g., self, a person, a group).", "From 1109.0s to 1110.0s, [0.009, 0.0, 0.627, 0.966]->[0.032, 0.002, 0.795, 1.0]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "bhlFavrh7WU.mkv", "clip": [1120, 1171], "conversations": [{"from": "human", "timestamps": 1150.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.48, 0.0, 0.997, 0.993]: stand."}, {"from": "human", "timestamps": 1171.0, "value": "What actions is the person in bbox [0.312, 0.236, 0.438, 0.62] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.312, 0.236, 0.438, 0.62]: walk, carry/hold (an object).", "From 1168.0s to 1170.0s, [0.305, 0.24, 0.404, 0.559]->[0.306, 0.239, 0.409, 0.587]->[0.326, 0.203, 0.433, 0.571]: walk, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "bhlFavrh7WU.mkv", "clip": [1116, 1227], "conversations": [{"from": "human", "timestamps": 1146.0, "value": "In the 
current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.496, 0.038, 0.749, 0.93]: stand."}, {"from": "human", "timestamps": 1227.0, "value": "What actions did the person at bbox [0.425, 0.0, 0.98, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.425, 0.0, 0.98, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1225.0s to 1226.0s, [0.363, 0.026, 1.0, 0.989]->[0.331, 0.003, 1.0, 0.994]: stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "bhlFavrh7WU.mkv", "clip": [1205, 1251], "conversations": [{"from": "human", "timestamps": 1235.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.366, 0.004, 0.96, 0.998]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1251.0, "value": "For the person at bbox [0.005, 0.014, 0.361, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.005, 0.014, 0.361, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1245.0s to 1250.0s, [0.024, 0.0, 0.372, 0.985]->[0.0, 0.014, 0.343, 0.99]->[0.004, 0.0, 0.354, 0.991]->[0.023, 0.011, 0.355, 1.0]->[0.012, 0.005, 0.357, 0.994]->[0.023, 0.013, 0.36, 0.998]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1243.0s to 1244.0s, [0.078, 0.021, 0.343, 0.948]->[0.074, 0.012, 0.403, 0.986]: stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "bhlFavrh7WU.mkv", "clip": [1461, 1493], "conversations": [{"from": "human", "timestamps": 1491.0, "value": "Describe the actions of the person at bbox [0.099, 0.0, 0.826, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.099, 0.0, 0.826, 1.0]: sit.", "At 1490.0s, [0.113, 0.015, 0.849, 0.992]: sit, watch (a person)."]}, {"from": "human", "timestamps": 1493.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.161, 0.017, 0.942, 0.987]: sit, talk to (e.g., self, a person, a group), stand."}]}, {"video": "bhlFavrh7WU.mkv", "clip": [1492, 1524], "conversations": [{"from": "human", "timestamps": 1522.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.031, 0.0, 0.893, 0.955]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1524.0, "value": "For the person at bbox [0.078, 0.022, 0.905, 0.991], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.078, 0.022, 0.905, 0.991]: sit, talk to (e.g., self, a person, a group).", "From 1522.0s to 1523.0s, [0.031, 0.0, 0.893, 0.955]->[0.108, 0.005, 0.856, 0.987]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "bhlFavrh7WU.mkv", "clip": [1506, 1613], "conversations": [{"from": "human", "timestamps": 1536.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.245, 0.028, 0.936, 0.988]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1613.0, "value": "Describe the actions of the person at bbox [0.045, 0.043, 0.529, 0.981] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current 
frame, [0.045, 0.043, 0.529, 0.981]: stand, watch (a person).", "At 1612.0s, [0.049, 0.068, 0.512, 0.976]: stand, listen to (a person), watch (a person)."]}]}, {"video": "bhlFavrh7WU.mkv", "clip": [1624, 1669], "conversations": [{"from": "human", "timestamps": 1654.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.304, 0.011, 0.973, 0.974]?"}, {"from": "gpt", "value": ["At Now, [0.304, 0.011, 0.973, 0.974]: stand, listen to (a person), watch (a person).", "At 1653.0s, [0.304, 0.006, 0.961, 0.972]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1669.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.019, 0.061, 0.548, 0.993]: listen to (a person), stand, watch (a person); [0.462, 0.0, 0.982, 1.0]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "bhlFavrh7WU.mkv", "clip": [1594, 1679], "conversations": [{"from": "human", "timestamps": 1624.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.0, 0.805, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1679.0, "value": "For the person at bbox [0.015, 0.013, 0.879, 0.993], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.015, 0.013, 0.879, 0.993]: stand, talk to (e.g., self, a person, a group).", "At 1678.0s, [0.017, 0.0, 0.879, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1677.0s, [0.0, 0.011, 0.914, 0.995]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "bhlFavrh7WU.mkv", "clip": [1662, 1698], "conversations": [{"from": "human", "timestamps": 1692.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.011, 0.114, 0.51, 0.983]: stand, talk to (e.g., self, a person, a group); [0.455, 0.01, 1.0, 1.0]: listen to (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1698.0, "value": "What actions did the person at bbox [0.172, 0.014, 0.882, 0.981] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.172, 0.014, 0.882, 0.981]: stand, talk to (e.g., self, a person, a group).", "At 1697.0s, [0.114, 0.017, 0.93, 0.987]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "bhlFavrh7WU.mkv", "clip": [1710, 1750], "conversations": [{"from": "human", "timestamps": 1740.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.077, 0.005, 0.947, 0.993]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1750.0, "value": "Describe the actions of the person at bbox [0.202, 0.0, 0.703, 0.992] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.202, 0.0, 0.703, 0.992]: stand, hug (a person), listen to (a person), watch (a person).", "At 1749.0s, [0.173, 0.0, 0.625, 0.973]: stand, grab (a person), listen to (a person), watch (a person)."]}]}, {"video": "c9pEMjPT16M.webm", "clip": [893, 957], "conversations": [{"from": "human", "timestamps": 923.0, "value": "For the person at bbox [0.213, 0.091, 0.896, 0.976], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.213, 0.091, 0.896, 
0.976]: sit, talk to (e.g., self, a person, a group).", "At 922.0s, [0.251, 0.095, 0.883, 0.951]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 957.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.602, 0.407, 0.785, 0.839]: sit."}]}, {"video": "c9pEMjPT16M.webm", "clip": [1016, 1127], "conversations": [{"from": "human", "timestamps": 1046.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.08, 0.408, 0.528, 0.871]: carry/hold (an object), sit, listen to (a person); [0.615, 0.121, 0.986, 0.956]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1127.0, "value": "For the person at bbox [0.533, 0.235, 0.849, 0.814], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.533, 0.235, 0.849, 0.814]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1126.0s, [0.532, 0.242, 0.863, 0.84]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "c9pEMjPT16M.webm", "clip": [1118, 1199], "conversations": [{"from": "human", "timestamps": 1148.0, "value": "For the person at bbox [0.014, 0.105, 0.68, 0.965], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.014, 0.105, 0.68, 0.965]: stand, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "At 1147.0s, [0.012, 0.101, 0.698, 0.948]: stand, carry/hold (an object), listen to (a person).", "At 1146.0s, [0.023, 0.108, 0.727, 0.955]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1145.0s, [0.03, 0.105, 0.738, 0.943]: stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1199.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.115, 0.62, 0.952]: listen to (a person), sit; [0.557, 0.271, 0.989, 0.951]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "c9pEMjPT16M.webm", "clip": [1111, 1238], "conversations": [{"from": "human", "timestamps": 1141.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.01, 0.071, 0.675, 0.946]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.424, 0.443, 0.911, 0.963]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1238.0, "value": "What actions is the person in bbox [0.117, 0.066, 0.982, 0.948] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.117, 0.066, 0.982, 0.948]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1234.0s to 1237.0s, [0.004, 0.04, 0.933, 0.961]->[0.005, 0.078, 0.903, 0.949]->[0.079, 0.097, 0.971, 0.948]->[0.088, 0.063, 0.987, 0.954]: stand, talk to (e.g., self, a person, a group).", "At 1233.0s, [0.011, 0.079, 0.954, 0.959]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1232.0s, [0.009, 0.068, 0.971, 0.944]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "c9pEMjPT16M.webm", "clip": [1247, 1293], 
"conversations": [{"from": "human", "timestamps": 1277.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.157, 0.121, 0.955, 0.935]?"}, {"from": "gpt", "value": ["At Now, [0.157, 0.121, 0.955, 0.935]: stand, talk to (e.g., self, a person, a group).", "At 1276.0s, [0.143, 0.089, 0.992, 0.964]: stand, talk to (e.g., self, a person, a group).", "At 1275.0s, [0.1, 0.104, 1.0, 0.964]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1293.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.042, 0.063, 0.932, 0.971]: stand, bend/bow (at the waist)."}]}, {"video": "c9pEMjPT16M.webm", "clip": [1274, 1368], "conversations": [{"from": "human", "timestamps": 1304.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.016, 0.027, 0.921, 0.978]: listen to (a person), sit."}, {"from": "human", "timestamps": 1368.0, "value": "What actions did the person at bbox [0.059, 0.097, 0.374, 0.874] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.059, 0.097, 0.374, 0.874]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1367.0s, [0.09, 0.16, 0.438, 0.912]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1366.0s, [0.065, 0.102, 0.365, 0.872]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1365.0s, [0.131, 0.148, 0.411, 0.904]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "c9pEMjPT16M.webm", "clip": [1388, 1482], "conversations": [{"from": "human", "timestamps": 1418.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.151, 0.098, 0.449, 0.804]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1482.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.468, 0.105, 0.802, 0.626]?"}, {"from": "gpt", "value": ["In the current frame, [0.468, 0.105, 0.802, 0.626]: sit, carry/hold (an object).", "At 1481.0s, [0.481, 0.152, 0.822, 0.576]: sit, carry/hold (an object), read.", "At 1480.0s, [0.557, 0.139, 0.831, 0.543]: sit, read.", "From 1478.0s to 1479.0s, [0.075, 0.046, 0.894, 0.974]->[0.025, 0.129, 0.919, 0.975]: sit, carry/hold (an object).", "From 1476.0s to 1477.0s, [0.084, 0.066, 0.951, 0.981]->[0.082, 0.068, 0.935, 0.951]: sit, read.", "At 1475.0s, [0.072, 0.052, 0.939, 0.95]: sit, carry/hold (an object).", "From 1473.0s to 1474.0s, [0.065, 0.085, 0.95, 0.971]->[0.05, 0.056, 0.94, 0.972]: sit.", "From 1471.0s to 1472.0s, [0.105, 0.072, 0.948, 0.958]->[0.121, 0.056, 0.926, 0.963]: sit, carry/hold (an object).", "From 1469.0s to 1470.0s, [0.139, 0.014, 0.985, 0.971]->[0.187, 0.056, 0.947, 0.946]: sit, carry/hold (an object), read.", "From 1464.0s to 1468.0s, [0.112, 0.032, 0.983, 0.95]->[0.164, 0.045, 0.983, 0.961]->[0.21, 0.029, 0.988, 0.956]->[0.147, 0.027, 0.992, 0.97]->[0.13, 0.042, 0.984, 0.964]: sit, read.", "At 1463.0s, [0.065, 0.08, 0.899, 0.956]: sit.", "From 1460.0s to 1462.0s, [0.045, 0.047, 0.876, 0.961]->[0.063, 0.056, 0.868, 0.949]->[0.071, 0.052, 0.87, 0.953]: sit, carry/hold (an object).", "At 1459.0s, [0.542, 0.151, 0.836, 0.536]: sit, read.", "At 1458.0s, [0.556, 0.152, 0.825, 0.538]: sit, carry/hold (an 
object), read.", "At 1457.0s, [0.575, 0.163, 0.829, 0.546]: sit, read.", "From 1453.0s to 1456.0s, [0.432, 0.177, 0.801, 0.647]->[0.429, 0.178, 0.797, 0.649]->[0.56, 0.152, 0.809, 0.56]->[0.573, 0.153, 0.814, 0.537]: sit, carry/hold (an object), read.", "From 1451.0s to 1452.0s, [0.451, 0.163, 0.789, 0.695]->[0.454, 0.164, 0.802, 0.641]: sit, carry/hold (an object).", "From 1445.0s to 1450.0s, [0.409, 0.223, 0.785, 0.693]->[0.363, 0.197, 0.79, 0.724]->[0.388, 0.201, 0.788, 0.722]->[0.365, 0.186, 0.791, 0.677]->[0.389, 0.189, 0.799, 0.671]->[0.393, 0.194, 0.798, 0.678]: sit, carry/hold (an object), read.", "At 1444.0s, [0.39, 0.215, 0.773, 0.687]: sit, read.", "At 1443.0s, [0.403, 0.217, 0.77, 0.679]: sit, carry/hold (an object), read.", "At 1442.0s, [0.011, 0.05, 0.777, 0.991]: sit, read.", "From 1439.0s to 1441.0s, [0.0, 0.043, 0.94, 0.955]->[0.0, 0.043, 0.913, 0.995]->[0.044, 0.043, 0.698, 0.988]: sit.", "At 1438.0s, [0.05, 0.036, 0.89, 0.956]: sit, read."]}]}, {"video": "c9pEMjPT16M.webm", "clip": [1451, 1494], "conversations": [{"from": "human", "timestamps": 1481.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.481, 0.152, 0.822, 0.576]: carry/hold (an object), sit, read."}, {"from": "human", "timestamps": 1494.0, "value": "What actions is the person in bbox [0.353, 0.175, 0.729, 0.741] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.353, 0.175, 0.729, 0.741]: carry/hold (an object), crouch/kneel.", "At 1493.0s, [0.28, 0.189, 0.723, 0.714]: sit, carry/hold (an object).", "At 1492.0s, [0.334, 0.22, 0.714, 0.725]: sit, carry/hold (an object), open (e.g., a window, a car door).", "At 1491.0s, [0.295, 0.2, 0.696, 0.703]: sit, open (e.g., a window, a car door).", "From 1489.0s to 1490.0s, [0.317, 0.134, 0.723, 0.71]->[0.269, 0.184, 0.672, 0.689]: sit, carry/hold (an object).", "At 1488.0s, [0.63, 0.145, 0.81, 0.558]: sit.", "At 1487.0s, [0.347, 0.12, 0.787, 0.604]: sit, open (e.g., a window, a car door).", "At 1486.0s, [0.355, 0.135, 0.793, 0.705]: sit, catch (an object), open (e.g., a window, a car door).", "From 1482.0s to 1485.0s, [0.468, 0.105, 0.802, 0.626]->[0.477, 0.147, 0.835, 0.617]->[0.548, 0.117, 0.796, 0.556]->[0.501, 0.137, 0.803, 0.554]: sit, carry/hold (an object).", "At 1481.0s, [0.481, 0.152, 0.822, 0.576]: sit, carry/hold (an object), read.", "At 1480.0s, [0.557, 0.139, 0.831, 0.543]: sit, read.", "From 1478.0s to 1479.0s, [0.075, 0.046, 0.894, 0.974]->[0.025, 0.129, 0.919, 0.975]: sit, carry/hold (an object).", "From 1476.0s to 1477.0s, [0.084, 0.066, 0.951, 0.981]->[0.082, 0.068, 0.935, 0.951]: sit, read.", "At 1475.0s, [0.072, 0.052, 0.939, 0.95]: sit, carry/hold (an object).", "From 1473.0s to 1474.0s, [0.065, 0.085, 0.95, 0.971]->[0.05, 0.056, 0.94, 0.972]: sit.", "From 1471.0s to 1472.0s, [0.105, 0.072, 0.948, 0.958]->[0.121, 0.056, 0.926, 0.963]: sit, carry/hold (an object).", "From 1469.0s to 1470.0s, [0.139, 0.014, 0.985, 0.971]->[0.187, 0.056, 0.947, 0.946]: sit, carry/hold (an object), read.", "From 1464.0s to 1468.0s, [0.112, 0.032, 0.983, 0.95]->[0.164, 0.045, 0.983, 0.961]->[0.21, 0.029, 0.988, 0.956]->[0.147, 0.027, 0.992, 0.97]->[0.13, 0.042, 0.984, 0.964]: sit, read.", "At 1463.0s, [0.065, 0.08, 0.899, 0.956]: sit.", "From 1460.0s to 1462.0s, [0.045, 0.047, 0.876, 0.961]->[0.063, 0.056, 0.868, 0.949]->[0.071, 0.052, 0.87, 0.953]: sit, carry/hold (an object).", "At 1459.0s, [0.542, 
0.151, 0.836, 0.536]: sit, read.", "At 1458.0s, [0.556, 0.152, 0.825, 0.538]: sit, carry/hold (an object), read.", "At 1457.0s, [0.575, 0.163, 0.829, 0.546]: sit, read.", "From 1453.0s to 1456.0s, [0.432, 0.177, 0.801, 0.647]->[0.429, 0.178, 0.797, 0.649]->[0.56, 0.152, 0.809, 0.56]->[0.573, 0.153, 0.814, 0.537]: sit, carry/hold (an object), read.", "From 1451.0s to 1452.0s, [0.451, 0.163, 0.789, 0.695]->[0.454, 0.164, 0.802, 0.641]: sit, carry/hold (an object).", "From 1445.0s to 1450.0s, [0.409, 0.223, 0.785, 0.693]->[0.363, 0.197, 0.79, 0.724]->[0.388, 0.201, 0.788, 0.722]->[0.365, 0.186, 0.791, 0.677]->[0.389, 0.189, 0.799, 0.671]->[0.393, 0.194, 0.798, 0.678]: sit, carry/hold (an object), read.", "At 1444.0s, [0.39, 0.215, 0.773, 0.687]: sit, read.", "At 1443.0s, [0.403, 0.217, 0.77, 0.679]: sit, carry/hold (an object), read.", "At 1442.0s, [0.011, 0.05, 0.777, 0.991]: sit, read.", "From 1439.0s to 1441.0s, [0.0, 0.043, 0.94, 0.955]->[0.0, 0.043, 0.913, 0.995]->[0.044, 0.043, 0.698, 0.988]: sit.", "At 1438.0s, [0.05, 0.036, 0.89, 0.956]: sit, read."]}]}, {"video": "c9pEMjPT16M.webm", "clip": [1499, 1531], "conversations": [{"from": "human", "timestamps": 1529.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.067, 0.025, 0.54, 0.849]: listen to (a person), sit, watch (a person); [0.328, 0.054, 0.934, 0.919]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1531.0, "value": "What actions did the person at bbox [0.207, 0.111, 0.559, 0.683] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.207, 0.111, 0.559, 0.683]: sit, listen to (a person), watch (a person).", "From 1529.0s to 1530.0s, [0.067, 0.025, 0.54, 0.849]->[0.147, 0.083, 0.541, 0.79]: sit, listen to (a person), watch (a person).", "At 1528.0s, [0.005, 0.028, 0.383, 0.507]: sit."]}]}, {"video": "c9pEMjPT16M.webm", "clip": [1529, 1571], "conversations": [{"from": "human", "timestamps": 1559.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.024, 0.092, 0.987, 0.968]?"}, {"from": "gpt", "value": ["In the current frame, [0.024, 0.092, 0.987, 0.968]: sit, talk to (e.g., self, a person, a group).", "From 1557.0s to 1558.0s, [0.006, 0.103, 0.968, 0.97]->[0.021, 0.117, 0.989, 0.99]: sit, talk to (e.g., self, a person, a group).", "At 1556.0s, [0.006, 0.071, 0.979, 0.946]: sit."]}, {"from": "human", "timestamps": 1571.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.008, 0.022, 0.886, 0.944]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "c9pEMjPT16M.webm", "clip": [1671, 1718], "conversations": [{"from": "human", "timestamps": 1701.0, "value": "What actions is the person in bbox [0.169, 0.242, 0.616, 0.818] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.169, 0.242, 0.616, 0.818]: sit.", "From 1688.0s to 1700.0s, [0.006, 0.053, 0.93, 0.997]->[0.0, 0.042, 0.973, 0.983]->[0.031, 0.073, 0.882, 0.963]->[0.048, 0.077, 0.971, 0.962]->[0.105, 0.051, 0.98, 0.978]->[0.119, 0.026, 0.972, 0.979]->[0.054, 0.122, 0.997, 0.983]->[0.115, 0.158, 0.965, 0.964]->[0.169, 0.174, 0.926, 0.98]->[0.21, 0.177, 0.925, 0.936]->[0.179, 0.19, 0.797, 0.844]->[0.154, 0.207, 0.649, 0.858]->[0.178, 0.238, 0.615, 0.727]: sit."]}, {"from": "human", "timestamps": 1718.0, "value": "What actions is each 
person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.025, 0.01, 0.843, 0.982]: get up."}]}, {"video": "cKA-qeZuH_w.mkv", "clip": [906, 944], "conversations": [{"from": "human", "timestamps": 936.0, "value": "What actions is the person in bbox [0.508, 0.181, 0.837, 0.81] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.508, 0.181, 0.837, 0.81]: sit, listen to (a person).", "From 934.0s to 935.0s, [0.002, 0.356, 0.331, 0.976]->[0.251, 0.254, 0.601, 0.907]: sit."]}, {"from": "human", "timestamps": 944.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.14, 0.045, 0.713, 0.978]: stand, smoke."}]}, {"video": "cKA-qeZuH_w.mkv", "clip": [887, 976], "conversations": [{"from": "human", "timestamps": 917.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.087, 0.166, 0.982]: listen to (a person), stand, watch (a person); [0.108, 0.162, 0.228, 0.907]: listen to (a person), stand, watch (a person); [0.166, 0.175, 0.329, 0.895]: listen to (a person), stand, watch (a person); [0.272, 0.167, 0.463, 0.908]: listen to (a person), stand, touch (an object), watch (a person); [0.424, 0.126, 0.502, 0.511]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.462, 0.191, 0.609, 0.838]: listen to (a person), stand, watch (a person); [0.597, 0.159, 0.809, 0.761]: listen to (a person), stand, watch (a person); [0.761, 0.44, 0.993, 0.983]: listen to (a person), sit, watch (a person); [0.794, 0.166, 0.893, 0.463]: listen to (a person), stand; [0.889, 0.149, 0.995, 0.563]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 976.0, "value": "For the person at bbox [0.0, 0.083, 0.752, 0.96], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.083, 0.752, 0.96]: stand.", "At 975.0s, [0.01, 0.066, 0.835, 0.962]: stand.", "From 973.0s to 974.0s, [0.0, 0.097, 0.854, 0.974]->[0.0, 0.052, 0.795, 0.993]: bend/bow (at the waist)."]}]}, {"video": "cKA-qeZuH_w.mkv", "clip": [941, 974], "conversations": [{"from": "human", "timestamps": 971.0, "value": "What actions did the person at bbox [0.025, 0.034, 0.882, 0.988] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.025, 0.034, 0.882, 0.988]: sit.", "At 970.0s, [0.009, 0.047, 0.826, 0.992]: sit, stand, carry/hold (an object).", "At 969.0s, [0.018, 0.051, 0.734, 1.0]: sit, listen to (a person)."]}, {"from": "human", "timestamps": 974.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.052, 0.795, 0.993]: bend/bow (at the waist)."}]}, {"video": "cKA-qeZuH_w.mkv", "clip": [1033, 1069], "conversations": [{"from": "human", "timestamps": 1063.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.479, 0.145, 0.918, 0.973]?"}, {"from": "gpt", "value": ["In the current frame, [0.479, 0.145, 0.918, 0.973]: dance, grab (a person), listen to (a person), watch (a person).", "At 1062.0s, [0.481, 0.193, 0.901, 1.0]: dance.", "At 1061.0s, [0.42, 0.159, 0.832, 0.999]: dance, smoke, watch (a person).", "At 1060.0s, [0.398, 0.146, 0.883, 1.0]: dance, smoke.", "At 1059.0s, [0.555, 0.056, 0.959, 0.964]: dance, smoke, 
watch (a person)."]}, {"from": "human", "timestamps": 1069.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.231, 0.019, 0.735, 0.962]: watch (a person), hug (a person), dance; [0.624, 0.203, 0.787, 0.743]: watch (a person), grab (a person), dance."}]}, {"video": "cKA-qeZuH_w.mkv", "clip": [1035, 1086], "conversations": [{"from": "human", "timestamps": 1065.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.004, 0.173, 0.343, 0.975]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.145, 0.063, 0.614, 0.962]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.575, 0.171, 0.993, 0.988]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1086.0, "value": "Describe the actions of the person at bbox [0.077, 0.201, 0.21, 0.817] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.077, 0.201, 0.21, 0.817]: walk, carry/hold (an object), eat.", "At 1085.0s, [0.141, 0.153, 0.266, 0.683]: stand, carry/hold (an object), eat."]}]}, {"video": "cKA-qeZuH_w.mkv", "clip": [1057, 1108], "conversations": [{"from": "human", "timestamps": 1087.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.135, 0.069, 0.356, 0.972]: dance; [0.27, 0.125, 0.543, 0.966]: hug (a person), dance; [0.524, 0.619, 0.725, 0.961]: carry/hold (an object), sit, watch (a person); [0.701, 0.288, 0.97, 0.946]: stand, drink."}, {"from": "human", "timestamps": 1108.0, "value": "Describe the actions of the person at bbox [0.211, 0.229, 0.51, 0.979] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.211, 0.229, 0.51, 0.979]: carry/hold (an object), dance, watch (a person).", "At 1107.0s, [0.161, 0.2, 0.439, 0.978]: carry/hold (an object), dance, watch (a person)."]}]}, {"video": "cKA-qeZuH_w.mkv", "clip": [1111, 1185], "conversations": [{"from": "human", "timestamps": 1141.0, "value": "What actions is the person in bbox [0.086, 0.237, 0.165, 0.51] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.086, 0.237, 0.165, 0.51]: stand, watch (a person).", "At 1140.0s, [0.073, 0.224, 0.147, 0.471]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1185.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.802, 0.202, 0.857, 0.561]: carry/hold (an object), grab (a person), walk; [0.832, 0.183, 0.9, 0.588]: walk."}]}, {"video": "cKA-qeZuH_w.mkv", "clip": [1120, 1259], "conversations": [{"from": "human", "timestamps": 1150.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.805, 0.085, 0.99, 0.899]: listen (e.g., to music), dance, watch (a person)."}, {"from": "human", "timestamps": 1259.0, "value": "Describe the actions of the person at bbox [0.528, 0.183, 0.82, 0.975] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.528, 0.183, 0.82, 0.975]: stand, touch (an object).", "At 1258.0s, [0.422, 0.202, 0.828, 0.958]: stand.", "At 1257.0s, [0.385, 0.217, 0.822, 0.955]: stand, carry/hold (an object).", "At 
1256.0s, [0.389, 0.235, 0.805, 0.962]: stand, touch (an object).", "At 1255.0s, [0.386, 0.283, 0.799, 0.952]: stand, carry/hold (an object), touch (an object)."]}]}, {"video": "cKA-qeZuH_w.mkv", "clip": [1190, 1257], "conversations": [{"from": "human", "timestamps": 1220.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.833, 0.156, 0.918, 0.561]: run/jog; [0.906, 0.156, 0.955, 0.407]: carry/hold (an object), stand, watch (a person)."}, {"from": "human", "timestamps": 1257.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.385, 0.217, 0.822, 0.955]?"}, {"from": "gpt", "value": ["In the current frame, [0.385, 0.217, 0.822, 0.955]: stand, carry/hold (an object).", "At 1256.0s, [0.389, 0.235, 0.805, 0.962]: stand, touch (an object).", "At 1255.0s, [0.386, 0.283, 0.799, 0.952]: stand, carry/hold (an object), touch (an object)."]}]}, {"video": "cKA-qeZuH_w.mkv", "clip": [1254, 1309], "conversations": [{"from": "human", "timestamps": 1284.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.355, 0.065, 0.982, 0.977]: stand."}, {"from": "human", "timestamps": 1309.0, "value": "What actions did the person at bbox [0.117, 0.097, 0.504, 0.959] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.117, 0.097, 0.504, 0.959]: walk.", "At 1308.0s, [0.153, 0.084, 0.503, 0.973]: walk."]}]}, {"video": "cKA-qeZuH_w.mkv", "clip": [1247, 1334], "conversations": [{"from": "human", "timestamps": 1277.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.397, 0.031, 0.836, 1.0]: stand, touch (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1334.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.271, 0.047, 0.656, 0.923]?"}, {"from": "gpt", "value": ["At Now, [0.271, 0.047, 0.656, 0.923]: bend/bow (at the waist), carry/hold (an object).", "At 1333.0s, [0.353, 0.047, 0.7, 0.947]: stand, carry/hold (an object).", "At 1332.0s, [0.383, 0.07, 0.759, 0.933]: walk, carry/hold (an object), play with pets.", "At 1331.0s, [0.25, 0.103, 0.548, 0.897]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1330.0s, [0.344, 0.135, 0.635, 0.89]: bend/bow (at the waist), carry/hold (an object).", "From 1328.0s to 1329.0s, [0.401, 0.189, 0.599, 0.881]->[0.367, 0.082, 0.642, 0.842]: walk, carry/hold (an object)."]}]}, {"video": "cKA-qeZuH_w.mkv", "clip": [1632, 1668], "conversations": [{"from": "human", "timestamps": 1662.0, "value": "What actions did the person at bbox [0.057, 0.305, 0.316, 0.849] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.057, 0.305, 0.316, 0.849]: sit, eat, listen to (a person).", "At 1661.0s, [0.052, 0.307, 0.327, 0.869]: sit, carry/hold (an object), eat, listen to (a person).", "At 1660.0s, [0.043, 0.293, 0.401, 0.907]: sit, touch (an object).", "At 1659.0s, [0.058, 0.254, 0.481, 0.957]: sit, carry/hold (an object).", "At 1658.0s, [0.123, 0.216, 0.504, 0.956]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1652.0s to 1657.0s, [0.01, 0.022, 0.447, 0.995]->[0.003, 0.015, 0.403, 0.978]->[0.024, 0.092, 0.518, 0.976]->[0.006, 0.116, 0.411, 0.968]->[0.019, 0.155, 0.557, 0.967]->[0.018, 0.189, 0.541, 0.98]: 
sit, listen to (a person)."]}, {"from": "human", "timestamps": 1668.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.059, 0.313, 0.298, 0.845]: carry/hold (an object), sit; [0.282, 0.29, 0.559, 0.749]: sit, text on/look at a cellphone; [0.751, 0.032, 0.994, 0.65]: watch (a person), stand, touch (an object)."}]}, {"video": "cKA-qeZuH_w.mkv", "clip": [1744, 1787], "conversations": [{"from": "human", "timestamps": 1774.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.075, 0.106, 0.894, 0.96]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1787.0, "value": "For the person at bbox [0.464, 0.163, 0.911, 0.955], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.464, 0.163, 0.911, 0.955]: stand.", "At 1786.0s, [0.466, 0.202, 0.84, 0.961]: stand.", "At 1785.0s, [0.344, 0.255, 0.818, 0.975]: run/jog.", "At 1784.0s, [0.001, 0.329, 0.821, 0.973]: run/jog, throw.", "At 1783.0s, [0.153, 0.225, 0.795, 0.979]: run/jog.", "At 1782.0s, [0.085, 0.058, 0.759, 0.986]: run/jog, carry/hold (an object), throw."]}]}, {"video": "cLiJgvrDlWw.mp4", "clip": [892, 951], "conversations": [{"from": "human", "timestamps": 922.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.06, 0.037, 0.924, 0.965]: watch (a person), stand."}, {"from": "human", "timestamps": 951.0, "value": "What actions did the person at bbox [0.07, 0.071, 0.887, 0.973] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.07, 0.071, 0.887, 0.973]: sit, carry/hold (an object), watch (a person).", "At 950.0s, [0.647, 0.005, 0.997, 0.988]: sit, carry/hold (an object).", "From 948.0s to 949.0s, [0.621, 0.002, 0.994, 0.874]->[0.64, 0.0, 0.999, 0.856]: sit, listen to (a person), watch (a person).", "At 947.0s, [0.619, 0.0, 0.997, 0.871]: sit, listen to (a person).", "At 946.0s, [0.633, 0.002, 0.997, 0.851]: sit, carry/hold (an object), listen to (a person).", "At 945.0s, [0.138, 0.077, 0.924, 0.984]: sit, carry/hold (an object).", "From 942.0s to 944.0s, [0.656, 0.121, 1.0, 0.956]->[0.629, 0.118, 0.994, 0.965]->[0.081, 0.083, 0.925, 0.97]: sit, carry/hold (an object), touch (an object), listen to (a person).", "From 940.0s to 941.0s, [0.045, 0.008, 0.977, 0.993]->[0.614, 0.125, 0.998, 0.971]: sit, carry/hold (an object), touch (an object).", "From 937.0s to 939.0s, [0.061, 0.0, 0.953, 1.0]->[0.086, 0.0, 0.945, 0.983]->[0.024, 0.0, 0.968, 0.984]: sit.", "At 936.0s, [0.112, 0.002, 0.963, 1.0]: sit, stand.", "At 935.0s, [0.068, 0.026, 0.973, 0.943]: sit, text on/look at a cellphone."]}]}, {"video": "cLiJgvrDlWw.mp4", "clip": [895, 1012], "conversations": [{"from": "human", "timestamps": 925.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.018, 0.11, 0.73, 0.967]: watch (a person), stand."}, {"from": "human", "timestamps": 1012.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.046, 0.0, 0.826, 0.973]?"}, {"from": "gpt", "value": ["In the current frame, [0.046, 0.0, 0.826, 0.973]: stand.", "At 1011.0s, [0.049, 0.0, 0.721, 0.97]: stand.", "At 1010.0s, [0.032, 0.0, 0.864, 0.974]: stand, watch (a person)."]}]}, {"video": "cLiJgvrDlWw.mp4", "clip": [974, 1020], 
"conversations": [{"from": "human", "timestamps": 1004.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.027, 0.022, 0.818, 0.995]: watch (a person), stand."}, {"from": "human", "timestamps": 1020.0, "value": "Describe the actions of the person at bbox [0.144, 0.0, 0.869, 0.993] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.144, 0.0, 0.869, 0.993]: stand, watch (a person).", "From 1017.0s to 1019.0s, [0.256, 0.016, 0.969, 0.964]->[0.215, 0.0, 0.831, 0.944]->[0.134, 0.0, 0.895, 0.98]: stand, watch (a person)."]}]}, {"video": "cLiJgvrDlWw.mp4", "clip": [1116, 1154], "conversations": [{"from": "human", "timestamps": 1146.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.286, 0.0, 1.0, 0.981]?"}, {"from": "gpt", "value": ["In the current frame, [0.286, 0.0, 1.0, 0.981]: sit.", "From 1144.0s to 1145.0s, [0.035, 0.007, 0.997, 0.961]->[0.159, 0.0, 0.975, 0.997]: sit."]}, {"from": "human", "timestamps": 1154.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.025, 0.094, 0.764, 0.986]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "cLiJgvrDlWw.mp4", "clip": [1135, 1200], "conversations": [{"from": "human", "timestamps": 1165.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.106, 0.0, 0.964, 0.984]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1200.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.014, 0.057, 0.877, 0.976]?"}, {"from": "gpt", "value": ["At Now, [0.014, 0.057, 0.877, 0.976]: sit, talk to (e.g., self, a person, a group).", "At 1199.0s, [0.089, 0.052, 1.0, 0.95]: sit, talk to (e.g., self, a person, a group).", "At 1198.0s, [0.063, 0.033, 1.0, 0.957]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "cLiJgvrDlWw.mp4", "clip": [1239, 1372], "conversations": [{"from": "human", "timestamps": 1269.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.095, 0.076, 1.0, 0.966]: carry/hold (an object), sit, ride (e.g., a bike, a car, a horse)."}, {"from": "human", "timestamps": 1372.0, "value": "For the person at bbox [0.005, 0.06, 0.767, 0.941], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.005, 0.06, 0.767, 0.941]: sit, drive (e.g., a car, a truck).", "At 1371.0s, [0.0, 0.025, 0.736, 0.995]: sit, ride (e.g., a bike, a car, a horse)."]}]}, {"video": "cLiJgvrDlWw.mp4", "clip": [1392, 1468], "conversations": [{"from": "human", "timestamps": 1422.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.068, 0.054, 0.157, 0.537]: stand, shoot; [0.487, 0.073, 0.8, 0.992]: watch (a person), stand, shoot."}, {"from": "human", "timestamps": 1468.0, "value": "What actions did the person at bbox [0.356, 0.045, 0.831, 0.977] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.356, 0.045, 0.831, 0.977]: stand, watch (a person).", "At 1467.0s, [0.224, 0.0, 0.78, 0.993]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1466.0s, [0.303, 0.023, 
0.975, 0.982]: stand, get up, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "cLiJgvrDlWw.mp4", "clip": [1418, 1477], "conversations": [{"from": "human", "timestamps": 1448.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.101, 0.009, 0.833, 0.979]: watch (a person), stand, shoot."}, {"from": "human", "timestamps": 1477.0, "value": "Describe the actions of the person at bbox [0.022, 0.036, 0.741, 0.95] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.022, 0.036, 0.741, 0.95]: walk, shoot.", "At 1476.0s, [0.106, 0.102, 0.93, 0.973]: walk, shoot, watch (a person)."]}]}, {"video": "cLiJgvrDlWw.mp4", "clip": [1516, 1601], "conversations": [{"from": "human", "timestamps": 1546.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.062, 0.029, 0.827, 0.999]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1601.0, "value": "For the person at bbox [0.29, 0.076, 0.65, 0.998], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.29, 0.076, 0.65, 0.998]: walk, carry/hold (an object).", "At 1600.0s, [0.093, 0.149, 0.476, 0.991]: stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "cLiJgvrDlWw.mp4", "clip": [1580, 1613], "conversations": [{"from": "human", "timestamps": 1610.0, "value": "What actions is the person in bbox [0.307, 0.284, 0.69, 0.994] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.307, 0.284, 0.69, 0.994]: run/jog.", "At 1609.0s, [0.213, 0.107, 0.819, 0.977]: run/jog.", "At 1608.0s, [0.429, 0.182, 0.874, 0.969]: run/jog, watch (a person).", "From 1606.0s to 1607.0s, [0.506, 0.191, 0.975, 0.995]->[0.362, 0.166, 0.79, 0.919]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1613.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.062, 0.162, 0.46, 0.999]: carry/hold (an object)."}]}, {"video": "cLiJgvrDlWw.mp4", "clip": [1632, 1670], "conversations": [{"from": "human", "timestamps": 1662.0, "value": "Describe the actions of the person at bbox [0.128, 0.018, 0.9, 0.976] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.128, 0.018, 0.9, 0.976]: stand, watch (a person).", "At 1661.0s, [0.008, 0.019, 0.587, 1.0]: stand, watch (a person).", "At 1660.0s, [0.42, 0.022, 0.948, 0.967]: stand."]}, {"from": "human", "timestamps": 1670.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.107, 0.049, 0.731, 1.0]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "cLiJgvrDlWw.mp4", "clip": [1667, 1698], "conversations": [{"from": "human", "timestamps": 1697.0, "value": "For the person at bbox [0.043, 0.052, 0.818, 0.98], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.043, 0.052, 0.818, 0.98]: stand.", "From 1693.0s to 1696.0s, [0.017, 0.053, 0.759, 0.982]->[0.05, 0.061, 0.812, 0.991]->[0.029, 0.056, 0.798, 0.993]->[0.003, 0.048, 0.783, 0.996]: stand.", "At 1692.0s, [0.008, 0.088, 0.665, 0.984]: stand, talk to (e.g., 
self, a person, a group).", "At 1691.0s, [0.002, 0.084, 0.71, 0.992]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1698.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.164, 0.017, 0.937, 0.987]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "cWYJHb25EVs.mp4", "clip": [892, 1009], "conversations": [{"from": "human", "timestamps": 922.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.018, 0.084, 0.616, 0.964]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group); [0.464, 0.036, 0.98, 0.986]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1009.0, "value": "For the person at bbox [0.373, 0.072, 0.958, 0.978], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.373, 0.072, 0.958, 0.978]: stand, talk to (e.g., self, a person, a group).", "At 1008.0s, [0.374, 0.066, 0.938, 0.991]: stand, talk to (e.g., self, a person, a group).", "At 1007.0s, [0.275, 0.066, 0.912, 0.966]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "cWYJHb25EVs.mp4", "clip": [968, 1011], "conversations": [{"from": "human", "timestamps": 998.0, "value": "What actions is the person in bbox [0.004, 0.109, 0.404, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.004, 0.109, 0.404, 1.0]: sit, watch (a person).", "From 996.0s to 997.0s, [0.016, 0.116, 0.42, 1.0]->[0.012, 0.12, 0.413, 1.0]: sit.", "From 994.0s to 995.0s, [0.0, 0.105, 0.41, 1.0]->[0.005, 0.079, 0.415, 0.996]: sit, watch (a person).", "From 988.0s to 993.0s, [0.023, 0.132, 0.43, 1.0]->[0.026, 0.133, 0.412, 1.0]->[0.014, 0.125, 0.423, 1.0]->[0.013, 0.119, 0.427, 1.0]->[0.006, 0.102, 0.43, 1.0]->[0.005, 0.107, 0.431, 0.995]: sit."]}, {"from": "human", "timestamps": 1011.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.018, 0.024, 0.521, 0.999]: carry/hold (an object), stand, watch (a person); [0.575, 0.352, 0.781, 0.995]: watch (a person), stand, close (e.g., a door, a box)."}]}, {"video": "cWYJHb25EVs.mp4", "clip": [994, 1135], "conversations": [{"from": "human", "timestamps": 1024.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.552, 0.052, 0.801, 1.0]: walk."}, {"from": "human", "timestamps": 1135.0, "value": "What actions did the person at bbox [0.49, 0.104, 0.974, 0.971] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.49, 0.104, 0.974, 0.971]: stand, listen to (a person), watch (a person).", "From 1133.0s to 1134.0s, [0.488, 0.113, 0.967, 0.986]->[0.556, 0.131, 0.978, 0.982]: stand, listen to (a person), watch (a person).", "At 1132.0s, [0.537, 0.114, 0.946, 0.994]: stand."]}]}, {"video": "cWYJHb25EVs.mp4", "clip": [1070, 1126], "conversations": [{"from": "human", "timestamps": 1100.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.0, 0.44, 0.904]?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.0, 0.44, 0.904]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1099.0s, [0.042, 0.0, 0.468, 0.99]: walk, watch (a person).", "At 1098.0s, [0.006, 0.019, 0.567, 0.993]: 
walk, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1126.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.555, 0.087, 0.989, 0.985]: stand, kiss (a person)."}]}, {"video": "cWYJHb25EVs.mp4", "clip": [1120, 1236], "conversations": [{"from": "human", "timestamps": 1150.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.339, 0.12, 0.908, 0.992]: stand, kiss (a person)."}, {"from": "human", "timestamps": 1236.0, "value": "What actions is the person in bbox [0.089, 0.172, 0.536, 0.968] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.089, 0.172, 0.536, 0.968]: stand, carry/hold (an object), take (an object) from (a person), watch (a person).", "At 1235.0s, [0.132, 0.172, 0.382, 0.961]: stand, carry/hold (an object), pull (an object), listen to (a person), watch (a person).", "At 1234.0s, [0.154, 0.159, 0.485, 0.998]: stand, carry/hold (an object), listen to (a person)."]}]}, {"video": "cWYJHb25EVs.mp4", "clip": [1178, 1228], "conversations": [{"from": "human", "timestamps": 1208.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.012, 0.007, 0.64, 0.982]: walk."}, {"from": "human", "timestamps": 1228.0, "value": "For the person at bbox [0.204, 0.084, 0.679, 0.983], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.204, 0.084, 0.679, 0.983]: walk.", "At 1227.0s, [0.453, 0.521, 0.874, 0.997]: sit, carry/hold (an object).", "At 1226.0s, [0.374, 0.458, 0.769, 0.985]: sit, carry/hold (an object), take (an object) from (a person).", "At 1225.0s, [0.322, 0.326, 0.822, 0.977]: sit, listen to (a person), watch (a person).", "At 1224.0s, [0.402, 0.3, 0.828, 0.988]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1217.0s to 1223.0s, [0.435, 0.338, 0.846, 0.988]->[0.415, 0.3, 0.845, 0.991]->[0.405, 0.31, 0.832, 0.992]->[0.399, 0.324, 0.841, 0.989]->[0.41, 0.334, 0.833, 0.993]->[0.372, 0.305, 0.824, 0.99]->[0.387, 0.314, 0.864, 0.992]: sit, listen to (a person), watch (a person).", "From 1214.0s to 1216.0s, [0.483, 0.376, 0.738, 0.974]->[0.478, 0.34, 0.77, 0.991]->[0.424, 0.334, 0.852, 0.991]: sit, watch (a person)."]}]}, {"video": "cWYJHb25EVs.mp4", "clip": [1260, 1304], "conversations": [{"from": "human", "timestamps": 1290.0, "value": "Describe the actions of the person at bbox [0.025, 0.0, 0.875, 0.981] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.025, 0.0, 0.875, 0.981]: sit, talk to (e.g., self, a person, a group).", "From 1282.0s to 1289.0s, [0.036, 0.0, 0.848, 0.971]->[0.028, 0.001, 0.877, 0.974]->[0.0, 0.023, 0.758, 1.0]->[0.031, 0.012, 0.868, 0.979]->[0.021, 0.0, 0.839, 0.978]->[0.027, 0.004, 0.844, 0.98]->[0.028, 0.0, 0.861, 0.961]->[0.028, 0.001, 0.842, 0.976]: stand, talk to (e.g., self, a person, a group).", "At 1281.0s, [0.02, 0.028, 0.808, 0.98]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1280.0s, [0.0, 0.019, 0.771, 0.985]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1304.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": 
"gpt", "value": "In the current frame, [0.091, 0.045, 0.679, 0.99]: talk to (e.g., self, a person, a group), walk."}]}, {"video": "cWYJHb25EVs.mp4", "clip": [1248, 1377], "conversations": [{"from": "human", "timestamps": 1278.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.326, 0.021, 0.989, 0.979]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1377.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.659, 0.469, 0.755, 0.893]?"}, {"from": "gpt", "value": ["At Now, [0.659, 0.469, 0.755, 0.893]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1376.0s, [0.739, 0.5, 0.821, 0.885]: walk, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "cWYJHb25EVs.mp4", "clip": [1299, 1333], "conversations": [{"from": "human", "timestamps": 1329.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.178, 0.074, 0.874, 0.977]: walk."}, {"from": "human", "timestamps": 1333.0, "value": "What actions is the person in bbox [0.1, 0.123, 0.624, 0.99] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.1, 0.123, 0.624, 0.99]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1332.0s, [0.149, 0.099, 0.638, 0.989]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "cWYJHb25EVs.mp4", "clip": [1362, 1430], "conversations": [{"from": "human", "timestamps": 1392.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.404, 0.313, 0.636, 0.96]: carry/hold (an object), sit, read; [0.627, 0.309, 0.849, 0.93]: carry/hold (an object), sit."}, {"from": "human", "timestamps": 1430.0, "value": "For the person at bbox [0.258, 0.069, 1.0, 0.976], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.258, 0.069, 1.0, 0.976]: lie/sleep, watch (a person).", "At 1429.0s, [0.275, 0.067, 0.995, 0.97]: lie/sleep, watch (a person)."]}]}, {"video": "cWYJHb25EVs.mp4", "clip": [1396, 1451], "conversations": [{"from": "human", "timestamps": 1426.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.025, 0.894, 0.844]: lie/sleep, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1451.0, "value": "For the person at bbox [0.007, 0.02, 0.582, 0.957], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.007, 0.02, 0.582, 0.957]: lie/sleep, watch (a person).", "At 1450.0s, [0.01, 0.026, 0.743, 0.928]: lie/sleep.", "At 1449.0s, [0.005, 0.008, 0.658, 0.927]: kiss (a person), lie/sleep.", "At 1448.0s, [0.001, 0.02, 0.657, 0.954]: lie/sleep.", "From 1445.0s to 1447.0s, [0.001, 0.047, 0.665, 0.987]->[0.009, 0.033, 0.673, 0.958]->[0.0, 0.018, 0.696, 0.916]: lie/sleep, watch (a person).", "From 1443.0s to 1444.0s, [0.009, 0.009, 0.621, 0.97]->[0.008, 0.079, 0.699, 0.972]: talk to (e.g., self, a person, a group), lie/sleep, watch (a person).", "From 1440.0s to 1442.0s, [0.0, 0.011, 0.669, 0.942]->[0.009, 0.013, 0.679, 0.972]->[0.0, 0.012, 0.714, 0.945]: listen to (a person), lie/sleep, 
watch (a person).", "At 1439.0s, [0.0, 0.016, 0.706, 0.913]: talk to (e.g., self, a person, a group), lie/sleep, watch (a person)."]}]}, {"video": "cWYJHb25EVs.mp4", "clip": [1515, 1613], "conversations": [{"from": "human", "timestamps": 1545.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.041, 0.125, 0.625, 0.974]: carry/hold (an object), sit, talk to (e.g., self, a person, a group); [0.596, 0.08, 1.0, 0.967]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1613.0, "value": "What actions is the person in bbox [0.132, 0.261, 0.375, 0.953] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.132, 0.261, 0.375, 0.953]: sit, listen to (a person), watch (a person).", "From 1609.0s to 1612.0s, [0.13, 0.237, 0.347, 0.965]->[0.142, 0.236, 0.393, 0.968]->[0.131, 0.273, 0.339, 0.936]->[0.133, 0.246, 0.413, 0.959]: sit, listen to (a person), watch (a person)."]}]}, {"video": "cWYJHb25EVs.mp4", "clip": [1535, 1595], "conversations": [{"from": "human", "timestamps": 1565.0, "value": "What actions is the person in bbox [0.431, 0.392, 0.95, 0.995] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.431, 0.392, 0.95, 0.995]: bend/bow (at the waist).", "From 1563.0s to 1564.0s, [0.408, 0.581, 0.967, 0.984]->[0.412, 0.512, 0.918, 0.979]: bend/bow (at the waist).", "At 1562.0s, [0.387, 0.595, 0.861, 0.981]: bend/bow (at the waist), listen to (a person)."]}, {"from": "human", "timestamps": 1595.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.457, 0.063, 0.839, 0.925]: carry/hold (an object), sit, smoke."}]}, {"video": "cWYJHb25EVs.mp4", "clip": [1631, 1668], "conversations": [{"from": "human", "timestamps": 1661.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.054, 0.477, 0.993]: stand, talk to (e.g., self, a person, a group); [0.697, 0.196, 1.0, 0.872]: sit, touch (an object), write."}, {"from": "human", "timestamps": 1668.0, "value": "What actions did the person at bbox [0.0, 0.0, 0.589, 0.99] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.0, 0.589, 0.99]: sit, talk to (e.g., self, a person, a group).", "From 1664.0s to 1667.0s, [0.013, 0.036, 0.529, 1.0]->[0.003, 0.057, 0.544, 0.995]->[0.008, 0.02, 0.564, 1.0]->[0.008, 0.0, 0.559, 0.993]: sit, talk to (e.g., self, a person, a group).", "From 1661.0s to 1663.0s, [0.0, 0.054, 0.477, 0.993]->[0.013, 0.037, 0.473, 0.996]->[0.007, 0.054, 0.485, 0.985]: stand, talk to (e.g., self, a person, a group).", "At 1660.0s, [0.014, 0.007, 0.531, 1.0]: sit, talk to (e.g., self, a person, a group).", "From 1658.0s to 1659.0s, [0.183, 0.257, 0.469, 0.969]->[0.201, 0.255, 0.493, 0.95]: sit.", "At 1657.0s, [0.177, 0.266, 0.451, 0.936]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "cWYJHb25EVs.mp4", "clip": [1642, 1687], "conversations": [{"from": "human", "timestamps": 1672.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.006, 0.006, 0.587, 0.992]: watch (a person), sit, talk to (e.g., self, a person, a group), stand."}, {"from": "human", "timestamps": 1687.0, "value": "What actions is the person in bbox 
[0.103, 0.09, 0.673, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.103, 0.09, 0.673, 1.0]: sit, stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1686.0s, [0.124, 0.046, 0.629, 0.994]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1683.0s to 1685.0s, [0.042, 0.142, 0.328, 0.961]->[0.027, 0.08, 0.372, 1.0]->[0.023, 0.16, 0.327, 0.964]: sit, listen to (a person), watch (a person).", "At 1682.0s, [0.028, 0.036, 0.361, 1.0]: stand, listen to (a person), watch (a person).", "At 1681.0s, [0.035, 0.045, 0.366, 0.987]: stand, listen to (a person).", "From 1679.0s to 1680.0s, [0.0, 0.0, 0.538, 0.988]->[0.0, 0.003, 0.54, 0.999]: sit, talk to (e.g., self, a person, a group).", "From 1677.0s to 1678.0s, [0.016, 0.0, 0.58, 0.988]->[0.0, 0.0, 0.554, 0.992]: sit, stand, talk to (e.g., self, a person, a group).", "At 1676.0s, [0.01, 0.014, 0.59, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1675.0s, [0.006, 0.025, 0.564, 1.0]: sit, talk to (e.g., self, a person, a group).", "At 1674.0s, [0.014, 0.015, 0.575, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1673.0s, [0.002, 0.032, 0.595, 1.0]: sit, talk to (e.g., self, a person, a group).", "At 1672.0s, [0.006, 0.006, 0.587, 0.992]: sit, stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "cWYJHb25EVs.mp4", "clip": [1751, 1791], "conversations": [{"from": "human", "timestamps": 1781.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.289, 0.01, 0.877, 0.989]: carry/hold (an object), sit, write."}, {"from": "human", "timestamps": 1791.0, "value": "What actions did the person at bbox [0.624, 0.241, 0.849, 0.655] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.624, 0.241, 0.849, 0.655]: sit, talk to (e.g., self, a person, a group).", "From 1789.0s to 1790.0s, [0.0, 0.058, 0.955, 0.985]->[0.057, 0.105, 0.957, 0.986]: sit, talk to (e.g., self, a person, a group).", "From 1785.0s to 1788.0s, [0.657, 0.189, 0.912, 0.725]->[0.647, 0.19, 0.884, 0.691]->[0.668, 0.194, 0.849, 0.653]->[0.286, 0.041, 0.952, 0.999]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1784.0s, [0.641, 0.185, 0.912, 0.713]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "cc4y-yYm5Ao.mkv", "clip": [948, 1022], "conversations": [{"from": "human", "timestamps": 978.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.192, 0.444, 0.357, 0.814]: carry/hold (an object), sit, watch (a person), listen to (a person); [0.531, 0.111, 0.889, 0.995]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1022.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.62, 0.251, 0.935, 0.855]?"}, {"from": "gpt", "value": ["In the current frame, [0.62, 0.251, 0.935, 0.855]: walk, touch (an object).", "At 1021.0s, [0.552, 0.167, 0.928, 0.944]: stand, touch (an object).", "At 1020.0s, [0.543, 0.131, 0.919, 0.963]: bend/bow (at the waist), touch (an object), watch (a person).", "At 1019.0s, [0.544, 0.146, 0.955, 0.924]: stand, touch (an object), watch (a person)."]}]}, {"video": "cc4y-yYm5Ao.mkv", "clip": [1155, 1216], "conversations": [{"from": "human", 
"timestamps": 1185.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.277, 0.164, 0.735, 0.837]: stand."}, {"from": "human", "timestamps": 1216.0, "value": "For the person at bbox [0.456, 0.137, 0.932, 0.972], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.456, 0.137, 0.932, 0.972]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1215.0s, [0.463, 0.223, 0.894, 0.995]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "cc4y-yYm5Ao.mkv", "clip": [1184, 1228], "conversations": [{"from": "human", "timestamps": 1214.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.038, 0.081, 0.419, 0.995]: listen to (a person), stand, touch (an object), watch (a person)."}, {"from": "human", "timestamps": 1228.0, "value": "What actions is the person in bbox [0.17, 0.235, 0.489, 0.856] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.17, 0.235, 0.489, 0.856]: stand, carry/hold (an object), close (e.g., a door, a box), touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1227.0s, [0.068, 0.207, 0.508, 0.98]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1226.0s, [0.062, 0.199, 0.507, 0.986]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1225.0s, [0.05, 0.2, 0.508, 0.975]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1224.0s, [0.056, 0.217, 0.51, 0.982]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1223.0s, [0.077, 0.205, 0.518, 0.977]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1221.0s to 1222.0s, [0.097, 0.19, 0.521, 0.976]->[0.099, 0.208, 0.515, 0.968]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "cc4y-yYm5Ao.mkv", "clip": [1241, 1312], "conversations": [{"from": "human", "timestamps": 1271.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.173, 0.243, 0.441, 0.927]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.495, 0.224, 0.692, 0.999]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1312.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.389, 0.152, 0.976, 0.973]?"}, {"from": "gpt", "value": ["In the current frame, [0.389, 0.152, 0.976, 0.973]: sit, touch (an object), listen to (a person), watch (a person).", "From 1310.0s to 1311.0s, [0.399, 0.15, 0.982, 0.978]->[0.423, 0.164, 0.978, 0.981]: sit, touch (an object), listen to (a person), watch (a person).", "From 1305.0s to 1309.0s, [0.485, 0.239, 0.885, 0.775]->[0.494, 0.246, 0.887, 0.76]->[0.503, 0.242, 0.895, 0.746]->[0.495, 0.246, 0.89, 0.774]->[0.483, 0.241, 0.884, 0.772]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1304.0s, [0.487, 0.242, 0.889, 0.757]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1302.0s to 1303.0s, [0.482, 0.245, 0.892, 0.76]->[0.48, 0.247, 0.899, 0.791]: sit, 
touch (an object), listen to (a person), watch (a person).", "At 1301.0s, [0.482, 0.244, 0.89, 0.767]: sit, listen to (a person), watch (a person).", "At 1300.0s, [0.49, 0.241, 0.888, 0.756]: sit, touch (an object), listen to (a person), watch (a person).", "At 1299.0s, [0.497, 0.247, 0.894, 0.758]: sit, listen to (a person), watch (a person).", "From 1297.0s to 1298.0s, [0.485, 0.242, 0.891, 0.766]->[0.498, 0.238, 0.892, 0.754]: sit, touch (an object), listen to (a person), watch (a person).", "At 1296.0s, [0.509, 0.251, 0.889, 0.753]: sit, touch (an object), watch (a person).", "From 1293.0s to 1295.0s, [0.503, 0.237, 0.886, 0.759]->[0.488, 0.243, 0.893, 0.77]->[0.489, 0.243, 0.894, 0.767]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1292.0s, [0.486, 0.25, 0.889, 0.768]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1291.0s, [0.505, 0.247, 0.896, 0.768]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1290.0s, [0.5, 0.253, 0.886, 0.754]: sit, touch (an object).", "At 1289.0s, [0.514, 0.25, 0.886, 0.774]: sit, touch (an object), listen to (a person), talk to (e.g., self, a person, a group)."]}]}, {"video": "cc4y-yYm5Ao.mkv", "clip": [1295, 1381], "conversations": [{"from": "human", "timestamps": 1325.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.394, 0.169, 0.566, 0.675]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.553, 0.289, 0.885, 0.784]: watch (a person), sit, touch (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1381.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.355, 0.175, 0.868, 0.979]?"}, {"from": "gpt", "value": ["In the current frame, [0.355, 0.175, 0.868, 0.979]: stand, talk to (e.g., self, a person, a group).", "At 1380.0s, [0.351, 0.196, 0.867, 0.987]: stand, talk to (e.g., self, a person, a group).", "At 1379.0s, [0.376, 0.19, 0.876, 0.988]: stand, listen to (a person), watch (a person)."]}]}, {"video": "cc4y-yYm5Ao.mkv", "clip": [1330, 1376], "conversations": [{"from": "human", "timestamps": 1360.0, "value": "What actions is the person in bbox [0.539, 0.281, 0.772, 0.797] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.539, 0.281, 0.772, 0.797]: stand, talk to (e.g., self, a person, a group).", "At 1358.0s, [0.511, 0.275, 0.772, 0.757]: stand, talk to (e.g., self, a person, a group).", "At 1357.0s, [0.571, 0.29, 0.785, 0.808]: walk."]}, {"from": "human", "timestamps": 1376.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.246, 0.24, 0.682, 0.957]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "cc4y-yYm5Ao.mkv", "clip": [1366, 1410], "conversations": [{"from": "human", "timestamps": 1396.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.326, 0.171, 0.852, 0.975]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1410.0, "value": "Describe the actions of the person at bbox [0.614, 0.496, 0.933, 0.983] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.614, 0.496, 0.933, 0.983]: stand.", "At 1409.0s, [0.181, 0.203, 0.715, 0.911]: stand.", "From 1407.0s to 
1408.0s, [0.254, 0.248, 0.715, 1.0]->[0.221, 0.312, 0.627, 0.986]: walk.", "From 1405.0s to 1406.0s, [0.329, 0.183, 0.857, 0.978]->[0.326, 0.165, 0.85, 0.976]: stand, talk to (e.g., self, a person, a group).", "At 1404.0s, [0.331, 0.184, 0.857, 0.982]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "cc4y-yYm5Ao.mkv", "clip": [1565, 1613], "conversations": [{"from": "human", "timestamps": 1595.0, "value": "What actions did the person at bbox [0.739, 0.156, 0.815, 0.304] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.739, 0.156, 0.815, 0.304]: walk, drink.", "At 1594.0s, [0.769, 0.128, 0.84, 0.279]: walk, drink."]}, {"from": "human", "timestamps": 1613.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.377, 0.289, 0.495, 0.629]: walk."}]}, {"video": "dMH8L7mqCNI.mkv", "clip": [882, 988], "conversations": [{"from": "human", "timestamps": 912.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.006, 0.011, 0.574, 0.99]: sit, touch (an object), answer phone."}, {"from": "human", "timestamps": 988.0, "value": "Describe the actions of the person at bbox [0.335, 0.229, 0.62, 0.691] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.335, 0.229, 0.62, 0.691]: sit.", "At 987.0s, [0.331, 0.246, 0.613, 0.877]: sit, carry/hold (an object).", "At 986.0s, [0.326, 0.251, 0.631, 0.946]: sit."]}]}, {"video": "dMH8L7mqCNI.mkv", "clip": [1005, 1039], "conversations": [{"from": "human", "timestamps": 1035.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.026, 0.567, 0.986]: listen to (a person), stand; [0.319, 0.0, 0.972, 0.943]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1039.0, "value": "What actions is the person in bbox [0.018, 0.001, 0.489, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.018, 0.001, 0.489, 1.0]: stand, listen to (a person).", "From 1033.0s to 1038.0s, [0.014, 0.0, 0.473, 0.995]->[0.015, 0.021, 0.544, 0.993]->[0.0, 0.026, 0.567, 0.986]->[0.0, 0.018, 0.495, 1.0]->[0.016, 0.029, 0.565, 1.0]->[0.02, 0.027, 0.518, 1.0]: stand, listen to (a person)."]}]}, {"video": "dMH8L7mqCNI.mkv", "clip": [992, 1112], "conversations": [{"from": "human", "timestamps": 1022.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.209, 0.007, 0.698, 1.0]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1112.0, "value": "Describe the actions of the person at bbox [0.377, 0.027, 0.784, 0.992] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.377, 0.027, 0.784, 0.992]: stand, touch (an object), talk to (e.g., self, a person, a group).", "From 1110.0s to 1111.0s, [0.505, 0.284, 0.634, 0.983]->[0.361, 0.021, 0.758, 0.988]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1109.0s, [0.509, 0.28, 0.632, 0.984]: stand, touch (an object).", "From 1104.0s to 1108.0s, [0.711, 0.165, 0.841, 0.558]->[0.583, 0.223, 0.698, 0.533]->[0.567, 0.322, 0.675, 0.537]->[0.625, 0.339, 0.74, 0.54]->[0.684, 0.337, 0.765, 0.541]: walk.", "From 1102.0s to 
1103.0s, [0.839, 0.139, 1.0, 0.974]->[0.763, 0.116, 0.961, 0.993]: stand.", "At 1101.0s, [0.841, 0.13, 1.0, 0.966]: stand, talk to (e.g., self, a person, a group).", "At 1100.0s, [0.838, 0.094, 1.0, 0.962]: stand, close (e.g., a door, a box), talk to (e.g., self, a person, a group).", "At 1099.0s, [0.835, 0.098, 0.985, 0.978]: stand, close (e.g., a door, a box).", "At 1098.0s, [0.881, 0.068, 0.996, 0.97]: stand, close (e.g., a door, a box), enter, touch (an object)."]}]}, {"video": "dMH8L7mqCNI.mkv", "clip": [1099, 1136], "conversations": [{"from": "human", "timestamps": 1129.0, "value": "For the person at bbox [0.235, 0.024, 0.61, 0.982], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.235, 0.024, 0.61, 0.982]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1124.0s to 1128.0s, [0.285, 0.149, 0.455, 0.933]->[0.261, 0.146, 0.461, 0.924]->[0.18, 0.043, 0.582, 0.986]->[0.219, 0.058, 0.607, 1.0]->[0.172, 0.015, 0.631, 0.993]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1118.0s to 1123.0s, [0.279, 0.248, 0.439, 0.987]->[0.236, 0.242, 0.412, 0.993]->[0.16, 0.03, 0.512, 0.962]->[0.167, 0.029, 0.463, 0.979]->[0.182, 0.035, 0.476, 0.978]->[0.299, 0.137, 0.44, 0.94]: stand, listen to (a person), watch (a person).", "At 1117.0s, [0.331, 0.247, 0.536, 0.99]: stand, watch (a person).", "At 1116.0s, [0.41, 0.254, 0.619, 0.984]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1110.0s to 1115.0s, [0.505, 0.284, 0.634, 0.983]->[0.361, 0.021, 0.758, 0.988]->[0.377, 0.027, 0.784, 0.992]->[0.362, 0.028, 0.789, 0.994]->[0.363, 0.006, 0.804, 0.989]->[0.44, 0.248, 0.63, 0.992]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1109.0s, [0.509, 0.28, 0.632, 0.984]: stand, touch (an object).", "From 1104.0s to 1108.0s, [0.711, 0.165, 0.841, 0.558]->[0.583, 0.223, 0.698, 0.533]->[0.567, 0.322, 0.675, 0.537]->[0.625, 0.339, 0.74, 0.54]->[0.684, 0.337, 0.765, 0.541]: walk.", "From 1102.0s to 1103.0s, [0.839, 0.139, 1.0, 0.974]->[0.763, 0.116, 0.961, 0.993]: stand.", "At 1101.0s, [0.841, 0.13, 1.0, 0.966]: stand, talk to (e.g., self, a person, a group).", "At 1100.0s, [0.838, 0.094, 1.0, 0.962]: stand, close (e.g., a door, a box), talk to (e.g., self, a person, a group).", "At 1099.0s, [0.835, 0.098, 0.985, 0.978]: stand, close (e.g., a door, a box).", "At 1098.0s, [0.881, 0.068, 0.996, 0.97]: stand, close (e.g., a door, a box), enter, touch (an object)."]}, {"from": "human", "timestamps": 1136.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.297, 0.108, 0.789, 0.996]: stand, kiss (a person); [0.489, 0.193, 0.886, 0.998]: stand, kiss (a person)."}]}, {"video": "dMH8L7mqCNI.mkv", "clip": [1127, 1181], "conversations": [{"from": "human", "timestamps": 1157.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.27, 0.131, 0.55, 0.621]: listen to (a person), sit, watch (a person); [0.52, 0.165, 0.901, 0.953]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1181.0, "value": "For the person at bbox [0.577, 0.131, 0.802, 0.981], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.577, 0.131, 0.802, 0.981]: sit, listen to (a person), watch (a person).", "At 1180.0s, 
[0.519, 0.345, 0.572, 0.57]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1178.0s, [0.518, 0.351, 0.572, 0.57]: sit, talk to (e.g., self, a person, a group).", "From 1176.0s to 1177.0s, [0.577, 0.148, 0.802, 0.978]->[0.581, 0.144, 0.804, 0.969]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1175.0s, [0.58, 0.12, 0.852, 0.981]: sit, talk to (e.g., self, a person, a group).", "From 1172.0s to 1174.0s, [0.587, 0.12, 0.823, 0.97]->[0.543, 0.132, 0.809, 0.974]->[0.535, 0.133, 0.807, 0.968]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1170.0s to 1171.0s, [0.543, 0.161, 0.882, 0.979]->[0.597, 0.117, 0.852, 0.969]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1168.0s to 1169.0s, [0.545, 0.175, 0.903, 0.93]->[0.525, 0.169, 0.889, 0.91]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "dMH8L7mqCNI.mkv", "clip": [1143, 1226], "conversations": [{"from": "human", "timestamps": 1173.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.267, 0.129, 0.505, 0.626]: listen to (a person), sit; [0.543, 0.132, 0.809, 0.974]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1226.0, "value": "What actions is the person in bbox [0.429, 0.351, 0.482, 0.471] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.429, 0.351, 0.482, 0.471]: sit, listen to (a person), watch (a person).", "At 1225.0s, [0.427, 0.351, 0.487, 0.468]: sit, watch (a person).", "At 1224.0s, [0.429, 0.351, 0.489, 0.476]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1223.0s, [0.43, 0.35, 0.484, 0.472]: sit.", "From 1221.0s to 1222.0s, [0.248, 0.119, 0.573, 0.621]->[0.249, 0.121, 0.571, 0.621]: sit, listen to (a person).", "At 1220.0s, [0.244, 0.118, 0.559, 0.626]: sit, watch (a person).", "From 1213.0s to 1219.0s, [0.436, 0.351, 0.501, 0.469]->[0.431, 0.348, 0.487, 0.471]->[0.43, 0.349, 0.487, 0.474]->[0.429, 0.351, 0.485, 0.469]->[0.428, 0.35, 0.482, 0.471]->[0.428, 0.352, 0.481, 0.469]->[0.243, 0.12, 0.558, 0.62]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "dMH8L7mqCNI.mkv", "clip": [1247, 1349], "conversations": [{"from": "human", "timestamps": 1277.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.063, 0.174, 0.402, 0.987]: listen to (a person), sit, text on/look at a cellphone; [0.267, 0.322, 0.851, 0.673]: lie/sleep, text on/look at a cellphone, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1349.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.291, 0.441, 0.438, 0.733]?"}, {"from": "gpt", "value": ["At Now, [0.291, 0.441, 0.438, 0.733]: sit, listen to (a person), watch (a person).", "At 1348.0s, [0.293, 0.442, 0.438, 0.736]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1345.0s to 1347.0s, [0.303, 0.418, 0.442, 0.741]->[0.3, 0.433, 0.432, 0.74]->[0.291, 0.44, 0.436, 0.737]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1342.0s to 1344.0s, [0.327, 0.462, 0.423, 0.733]->[0.311, 0.451, 0.438, 0.734]->[0.31, 0.415, 0.442, 0.737]: sit, talk to (e.g., self, a person, a 
group)."]}]}, {"video": "dMH8L7mqCNI.mkv", "clip": [1411, 1479], "conversations": [{"from": "human", "timestamps": 1441.0, "value": "What actions did the person at bbox [0.616, 0.309, 0.646, 0.466] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.616, 0.309, 0.646, 0.466]: stand.", "From 1436.0s to 1437.0s, [0.619, 0.309, 0.647, 0.468]->[0.62, 0.306, 0.649, 0.465]: stand."]}, {"from": "human", "timestamps": 1479.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.197, 0.285, 0.474, 0.977]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group); [0.489, 0.3, 0.785, 0.993]: listen to (a person), sit, watch (a person)."}]}, {"video": "dMH8L7mqCNI.mkv", "clip": [1506, 1546], "conversations": [{"from": "human", "timestamps": 1536.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.047, 0.03, 0.529, 0.987]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1546.0, "value": "For the person at bbox [0.124, 0.247, 0.453, 0.968], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.124, 0.247, 0.453, 0.968]: sit, listen to (a person), watch (a person).", "At 1545.0s, [0.128, 0.246, 0.42, 0.977]: sit, listen to (a person)."]}]}, {"video": "dMH8L7mqCNI.mkv", "clip": [1592, 1632], "conversations": [{"from": "human", "timestamps": 1622.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.446, 0.433, 0.525, 0.8]: watch (a person), walk; [0.502, 0.452, 0.562, 0.779]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1632.0, "value": "For the person at bbox [0.299, 0.39, 0.336, 0.661], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.299, 0.39, 0.336, 0.661]: stand.", "At 1630.0s, [0.299, 0.393, 0.327, 0.718]: stand."]}]}, {"video": "dMH8L7mqCNI.mkv", "clip": [1650, 1685], "conversations": [{"from": "human", "timestamps": 1680.0, "value": "What actions did the person at bbox [0.328, 0.009, 0.833, 0.968] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.328, 0.009, 0.833, 0.968]: stand, talk to (e.g., self, a person, a group).", "At 1679.0s, [0.274, 0.015, 0.787, 0.97]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1678.0s, [0.549, 0.484, 0.65, 0.823]: stand, carry/hold (an object), watch (a person).", "At 1677.0s, [0.553, 0.49, 0.647, 0.83]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1675.0s to 1676.0s, [0.542, 0.473, 0.632, 0.802]->[0.553, 0.486, 0.65, 0.819]: stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1685.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.002, 0.0, 0.585, 0.998]: listen to (a person), stand, watch (a person); [0.339, 0.019, 0.91, 0.966]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "dMH8L7mqCNI.mkv", "clip": [1736, 1769], "conversations": [{"from": "human", "timestamps": 1766.0, "value": "What actions is the person in bbox [0.435, 0.197, 0.904, 0.883] doing in the current frame, and what were the person previous actions?"}, {"from": 
"gpt", "value": ["At Now, [0.435, 0.197, 0.904, 0.883]: sit, listen to (a person).", "From 1762.0s to 1765.0s, [0.438, 0.185, 0.945, 0.864]->[0.458, 0.201, 0.873, 0.875]->[0.45, 0.207, 0.86, 0.887]->[0.444, 0.2, 0.874, 0.871]: sit, listen to (a person), watch (a person).", "At 1761.0s, [0.463, 0.168, 0.905, 0.891]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1760.0s, [0.459, 0.184, 0.907, 0.865]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1757.0s to 1759.0s, [0.11, 0.051, 1.0, 1.0]->[0.15, 0.056, 1.0, 0.988]->[0.13, 0.051, 1.0, 0.993]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1769.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.31, 0.334, 0.569, 0.934]: sit, eat; [0.515, 0.23, 0.91, 0.886]: watch (a person), sit."}]}, {"video": "dgLApPvmfBE.mkv", "clip": [879, 919], "conversations": [{"from": "human", "timestamps": 909.0, "value": "What actions did the person at bbox [0.411, 0.17, 0.984, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.411, 0.17, 0.984, 1.0]: stand, listen to (a person), watch (a person).", "At 908.0s, [0.658, 0.297, 1.0, 0.999]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "From 906.0s to 907.0s, [0.689, 0.211, 1.0, 0.998]->[0.394, 0.148, 0.981, 0.983]: stand, talk to (e.g., self, a person, a group).", "At 905.0s, [0.675, 0.204, 1.0, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 919.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.016, 0.047, 0.826, 1.0]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "dgLApPvmfBE.mkv", "clip": [906, 968], "conversations": [{"from": "human", "timestamps": 936.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.039, 0.696, 1.0]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 968.0, "value": "What actions did the person at bbox [0.0, 0.043, 0.3, 0.996] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.043, 0.3, 0.996]: carry/hold (an object), fight/hit (a person), watch (a person), martial art.", "At 967.0s, [0.017, 0.591, 0.85, 0.998]: carry/hold (an object), fight/hit (a person), martial art."]}]}, {"video": "dgLApPvmfBE.mkv", "clip": [966, 1004], "conversations": [{"from": "human", "timestamps": 996.0, "value": "What actions is the person in bbox [0.019, 0.036, 0.921, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.019, 0.036, 0.921, 1.0]: sit, talk to (e.g., self, a person, a group).", "From 994.0s to 995.0s, [0.018, 0.052, 0.898, 0.98]->[0.002, 0.1, 0.969, 0.984]: sit, talk to (e.g., self, a person, a group).", "From 992.0s to 993.0s, [0.011, 0.124, 0.926, 0.993]->[0.0, 0.09, 0.949, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1004.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.052, 0.067, 0.715, 0.982]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a 
group)."}]}, {"video": "dgLApPvmfBE.mkv", "clip": [995, 1047], "conversations": [{"from": "human", "timestamps": 1025.0, "value": "Describe the actions of the person at bbox [0.0, 0.008, 0.74, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.0, 0.008, 0.74, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1024.0s, [0.0, 0.021, 0.544, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1022.0s to 1023.0s, [0.081, 0.137, 0.667, 0.973]->[0.069, 0.148, 0.752, 0.987]: stand, watch (a person).", "At 1021.0s, [0.067, 0.121, 0.705, 0.993]: stand, carry/hold (an object), watch (a person).", "From 1019.0s to 1020.0s, [0.089, 0.115, 0.688, 0.982]->[0.09, 0.132, 0.738, 0.976]: stand.", "At 1018.0s, [0.065, 0.127, 0.659, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1016.0s to 1017.0s, [0.086, 0.112, 0.728, 0.981]->[0.065, 0.128, 0.758, 0.991]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1047.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.011, 0.169, 0.806, 1.0]: listen to (a person), stand, watch (a person)."}]}, {"video": "dgLApPvmfBE.mkv", "clip": [1247, 1320], "conversations": [{"from": "human", "timestamps": 1277.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.012, 0.467, 0.469, 0.98]: listen to (a person), stand, watch (a person); [0.064, 0.015, 1.0, 0.984]: hand wave, stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1320.0, "value": "What actions did the person at bbox [0.694, 0.019, 0.985, 0.913] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.694, 0.019, 0.985, 0.913]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1319.0s, [0.395, 0.0, 0.953, 0.957]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "dgLApPvmfBE.mkv", "clip": [1296, 1344], "conversations": [{"from": "human", "timestamps": 1326.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.203, 0.221, 0.986]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.689, 0.011, 0.984, 0.972]: stand, pull (an object)."}, {"from": "human", "timestamps": 1344.0, "value": "For the person at bbox [0.011, 0.076, 0.228, 0.981], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.011, 0.076, 0.228, 0.981]: stand, listen to (a person).", "From 1342.0s to 1343.0s, [0.008, 0.07, 0.23, 0.972]->[0.008, 0.076, 0.226, 0.984]: stand, listen to (a person).", "At 1341.0s, [0.012, 0.07, 0.233, 0.973]: stand.", "At 1340.0s, [0.014, 0.071, 0.227, 0.979]: stand, listen to (a person)."]}]}, {"video": "dgLApPvmfBE.mkv", "clip": [1355, 1489], "conversations": [{"from": "human", "timestamps": 1385.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.029, 0.199, 0.991]: listen to (a person), stand, watch (a person); [0.385, 0.245, 0.566, 0.974]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1489.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.087, 0.0, 0.978, 
0.978]?"}, {"from": "gpt", "value": ["In the current frame, [0.087, 0.0, 0.978, 0.978]: stand, watch (a person).", "From 1487.0s to 1488.0s, [0.035, 0.0, 0.978, 0.983]->[0.131, 0.004, 0.977, 0.993]: stand, watch (a person).", "From 1484.0s to 1486.0s, [0.074, 0.01, 1.0, 0.981]->[0.08, 0.0, 0.959, 0.986]->[0.099, 0.0, 0.974, 0.99]: stand, listen to (a person), watch (a person).", "At 1483.0s, [0.06, 0.0, 1.0, 0.966]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "dgLApPvmfBE.mkv", "clip": [1416, 1498], "conversations": [{"from": "human", "timestamps": 1446.0, "value": "What actions is the person in bbox [0.019, 0.009, 0.553, 0.552] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.019, 0.009, 0.553, 0.552]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1445.0s, [0.031, 0.002, 0.56, 0.572]: stand."]}, {"from": "human", "timestamps": 1498.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.0, 0.749, 0.979]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "dgLApPvmfBE.mkv", "clip": [1480, 1532], "conversations": [{"from": "human", "timestamps": 1510.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.392, 0.162, 0.606, 0.489]: listen to (a person), stand, touch (an object), watch (a person); [0.826, 0.115, 0.999, 0.953]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1532.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.242, 0.537, 0.533, 0.996]?"}, {"from": "gpt", "value": ["In the current frame, [0.242, 0.537, 0.533, 0.996]: crouch/kneel, talk to (e.g., self, a person, a group), watch (a person).", "At 1531.0s, [0.044, 0.491, 0.541, 0.993]: crouch/kneel, talk to (e.g., self, a person, a group).", "At 1530.0s, [0.177, 0.6, 0.516, 0.985]: fall down."]}]}, {"video": "dgLApPvmfBE.mkv", "clip": [1515, 1615], "conversations": [{"from": "human", "timestamps": 1545.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.764, 0.011, 1.0, 0.995]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1615.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.314, 0.006, 1.0, 0.982]?"}, {"from": "gpt", "value": ["In the current frame, [0.314, 0.006, 1.0, 0.982]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1612.0s to 1614.0s, [0.53, 0.161, 0.831, 0.901]->[0.24, 0.0, 0.999, 0.988]->[0.215, 0.001, 0.993, 0.976]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1610.0s, [0.542, 0.0, 0.918, 0.978]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1609.0s, [0.73, 0.0, 0.994, 0.961]: walk, listen to (a person), watch (a person).", "From 1607.0s to 1608.0s, [0.762, 0.0, 0.997, 0.996]->[0.812, 0.028, 0.996, 0.731]: stand, listen to (a person), watch (a person).", "At 1606.0s, [0.658, 0.006, 1.0, 0.838]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "From 1604.0s to 1605.0s, [0.626, 0.001, 1.0, 0.836]->[0.653, 0.013, 1.0, 0.845]: stand, talk to (e.g., self, a person, a group).", "From 1602.0s to 1603.0s, [0.605, 0.0, 1.0, 0.841]->[0.602, 0.007, 
1.0, 0.843]: stand, listen to (a person).", "From 1600.0s to 1601.0s, [0.747, 0.004, 1.0, 0.794]->[0.599, 0.008, 1.0, 0.838]: stand.", "At 1599.0s, [0.715, 0.01, 0.996, 0.762]: stand, listen to (a person), watch (a person).", "From 1597.0s to 1598.0s, [0.6, 0.003, 1.0, 0.848]->[0.604, 0.012, 0.995, 0.825]: stand, talk to (e.g., self, a person, a group).", "At 1596.0s, [0.548, 0.006, 0.996, 0.912]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "From 1594.0s to 1595.0s, [0.644, 0.006, 0.974, 0.546]->[0.554, 0.017, 1.0, 0.863]: stand, listen to (a person), watch (a person)."]}]}, {"video": "dgLApPvmfBE.mkv", "clip": [1639, 1670], "conversations": [{"from": "human", "timestamps": 1669.0, "value": "What actions did the person at bbox [0.15, 0.38, 0.84, 0.993] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.15, 0.38, 0.84, 0.993]: listen to (a person), lie/sleep.", "At 1668.0s, [0.149, 0.378, 0.679, 0.998]: listen to (a person), lie/sleep."]}, {"from": "human", "timestamps": 1670.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.149, 0.335, 0.819, 0.994]: listen to (a person), lie/sleep; [0.775, 0.118, 1.0, 0.858]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "eA55_shhKko.mkv", "clip": [898, 947], "conversations": [{"from": "human", "timestamps": 928.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.228, 0.25, 0.664, 0.863]: listen to (a person), stand."}, {"from": "human", "timestamps": 947.0, "value": "What actions did the person at bbox [0.551, 0.243, 0.713, 0.705] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.551, 0.243, 0.713, 0.705]: stand, touch (an object), listen to (a person), watch (a person).", "At 946.0s, [0.504, 0.233, 0.676, 0.765]: stand, touch (an object), listen to (a person), watch (a person).", "From 944.0s to 945.0s, [0.422, 0.242, 0.661, 0.838]->[0.444, 0.249, 0.669, 0.797]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 943.0s, [0.36, 0.161, 0.973, 0.86]: stand, touch (an object), listen to (a person), watch (a person)."]}]}, {"video": "eA55_shhKko.mkv", "clip": [901, 972], "conversations": [{"from": "human", "timestamps": 931.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.294, 0.259, 0.752, 0.862]: watch (a person), stand."}, {"from": "human", "timestamps": 972.0, "value": "For the person at bbox [0.392, 0.182, 0.649, 0.798], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.392, 0.182, 0.649, 0.798]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 971.0s, [0.397, 0.415, 0.714, 0.857]: bend/bow (at the waist), lift/pick up, touch (an object).", "At 970.0s, [0.4, 0.372, 0.696, 0.743]: bend/bow (at the waist), touch (an object).", "At 969.0s, [0.392, 0.265, 0.684, 0.784]: stand, touch (an object), talk to (e.g., self, a person, a group).", "From 966.0s to 968.0s, [0.452, 0.252, 0.725, 0.826]->[0.329, 0.38, 0.682, 0.816]->[0.362, 0.337, 0.686, 0.803]: stand, touch (an object).", "At 965.0s, [0.443, 0.248, 0.693, 0.828]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 964.0s, [0.172, 0.173, 0.858, 0.862]: stand, carry/hold 
(an object), talk to (e.g., self, a person, a group).", "From 962.0s to 963.0s, [0.15, 0.166, 0.809, 0.865]->[0.115, 0.178, 0.809, 0.854]: stand, talk to (e.g., self, a person, a group).", "At 961.0s, [0.266, 0.181, 0.956, 0.857]: stand.", "From 958.0s to 960.0s, [0.322, 0.203, 0.962, 0.859]->[0.319, 0.189, 0.965, 0.872]->[0.336, 0.199, 0.974, 0.872]: stand, talk to (e.g., self, a person, a group).", "At 957.0s, [0.34, 0.21, 0.976, 0.855]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 956.0s, [0.247, 0.161, 0.958, 0.856]: stand.", "At 955.0s, [0.486, 0.247, 0.688, 0.826]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 954.0s, [0.582, 0.15, 0.758, 0.672]: stand, touch (an object), watch (a person)."]}]}, {"video": "eA55_shhKko.mkv", "clip": [1068, 1108], "conversations": [{"from": "human", "timestamps": 1098.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.077, 0.177, 0.819, 0.87]: watch (a person), sit, touch (an object), talk to (e.g., self, a person, a group); [0.529, 0.269, 0.969, 0.881]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1108.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.016, 0.184, 0.437, 0.864]?"}, {"from": "gpt", "value": ["In the current frame, [0.016, 0.184, 0.437, 0.864]: sit, listen to (a person).", "At 1107.0s, [0.229, 0.224, 0.64, 0.855]: sit, listen to (a person).", "From 1101.0s to 1106.0s, [0.125, 0.204, 0.674, 0.86]->[0.148, 0.192, 0.646, 0.856]->[0.245, 0.202, 0.633, 0.852]->[0.272, 0.226, 0.634, 0.857]->[0.27, 0.242, 0.631, 0.85]->[0.268, 0.234, 0.634, 0.857]: sit, listen to (a person), watch (a person).", "At 1100.0s, [0.005, 0.189, 0.443, 0.864]: sit, touch (an object), listen to (a person), watch (a person).", "At 1099.0s, [0.004, 0.163, 0.466, 0.854]: sit, listen to (a person), watch (a person)."]}]}, {"video": "eA55_shhKko.mkv", "clip": [1166, 1259], "conversations": [{"from": "human", "timestamps": 1196.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.186, 0.553, 0.251, 0.809]: walk; [0.375, 0.563, 0.447, 0.71]: watch (a person), sit, eat; [0.44, 0.553, 0.509, 0.695]: listen to (a person), sit, watch (a person); [0.459, 0.426, 0.52, 0.556]: carry/hold (an object), sit; [0.509, 0.402, 0.618, 0.567]: watch (a person), sit; [0.601, 0.393, 0.795, 0.843]: sit; [0.658, 0.298, 0.775, 0.515]: watch (a person), sit."}, {"from": "human", "timestamps": 1259.0, "value": "For the person at bbox [0.079, 0.271, 0.387, 0.853], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.079, 0.271, 0.387, 0.853]: sit, carry/hold (an object), touch (an object), watch (a person).", "From 1257.0s to 1258.0s, [0.074, 0.27, 0.361, 0.865]->[0.074, 0.265, 0.354, 0.856]: sit, carry/hold (an object), watch (a person)."]}]}, {"video": "eA55_shhKko.mkv", "clip": [1284, 1325], "conversations": [{"from": "human", "timestamps": 1314.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.083, 0.117, 0.934, 0.855]: lie/sleep, touch (an object)."}, {"from": "human", "timestamps": 1325.0, "value": "What actions did the person at bbox [0.724, 0.354, 0.982, 0.868] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.724, 0.354, 
0.982, 0.868]: touch (an object), lie/sleep.", "At 1324.0s, [0.715, 0.444, 0.977, 0.843]: touch (an object), lie/sleep, watch (a person).", "At 1323.0s, [0.063, 0.185, 0.971, 0.86]: bend/bow (at the waist), watch (a person)."]}]}, {"video": "eA55_shhKko.mkv", "clip": [1309, 1346], "conversations": [{"from": "human", "timestamps": 1339.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.113, 0.274, 0.436, 0.869]: carry/hold (an object), sit, touch (an object), watch (a person); [0.495, 0.275, 0.973, 0.868]: sit, touch (an object)."}, {"from": "human", "timestamps": 1346.0, "value": "What actions did the person at bbox [0.177, 0.134, 0.902, 0.847] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.177, 0.134, 0.902, 0.847]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1345.0s, [0.173, 0.148, 0.878, 0.847]: sit, touch (an object), listen to (a person)."]}]}, {"video": "eA55_shhKko.mkv", "clip": [1369, 1428], "conversations": [{"from": "human", "timestamps": 1399.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.059, 0.22, 0.709, 0.857]?"}, {"from": "gpt", "value": ["In the current frame, [0.059, 0.22, 0.709, 0.857]: sit, carry/hold (an object), write.", "At 1398.0s, [0.044, 0.222, 0.732, 0.863]: sit, write, talk to (e.g., self, a person, a group).", "From 1395.0s to 1397.0s, [0.077, 0.182, 0.725, 0.861]->[0.083, 0.17, 0.749, 0.863]->[0.075, 0.202, 0.739, 0.861]: sit, carry/hold (an object)."]}, {"from": "human", "timestamps": 1428.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.114, 0.21, 0.763, 0.863]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "eA55_shhKko.mkv", "clip": [1403, 1444], "conversations": [{"from": "human", "timestamps": 1433.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.084, 0.226, 0.739, 0.872]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1444.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.248, 0.172, 0.95, 0.858]?"}, {"from": "gpt", "value": ["In the current frame, [0.248, 0.172, 0.95, 0.858]: sit, stand, talk to (e.g., self, a person, a group).", "From 1439.0s to 1443.0s, [0.226, 0.164, 0.949, 0.864]->[0.243, 0.173, 0.925, 0.845]->[0.265, 0.161, 0.949, 0.849]->[0.263, 0.171, 0.941, 0.851]->[0.236, 0.179, 0.936, 0.864]: sit, talk to (e.g., self, a person, a group).", "At 1438.0s, [0.232, 0.156, 0.962, 0.854]: sit, listen to (a person), watch (a person)."]}]}, {"video": "eA55_shhKko.mkv", "clip": [1413, 1479], "conversations": [{"from": "human", "timestamps": 1443.0, "value": "What actions is the person in bbox [0.236, 0.179, 0.936, 0.864] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.236, 0.179, 0.936, 0.864]: sit, talk to (e.g., self, a person, a group).", "From 1439.0s to 1442.0s, [0.226, 0.164, 0.949, 0.864]->[0.243, 0.173, 0.925, 0.845]->[0.265, 0.161, 0.949, 0.849]->[0.263, 0.171, 0.941, 0.851]: sit, talk to (e.g., self, a person, a group).", "At 1438.0s, [0.232, 0.156, 0.962, 0.854]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1479.0, "value": "In the current 
frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.284, 0.167, 0.962, 0.858]: watch (a person), sit, touch (an object), talk to (e.g., self, a person, a group)."}]}, {"video": "eA55_shhKko.mkv", "clip": [1484, 1545], "conversations": [{"from": "human", "timestamps": 1514.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.367, 0.308, 0.486, 0.857]?"}, {"from": "gpt", "value": ["In the current frame, [0.367, 0.308, 0.486, 0.857]: walk, listen to (a person), watch (a person).", "At 1513.0s, [0.554, 0.323, 0.711, 0.865]: walk, watch (a person)."]}, {"from": "human", "timestamps": 1545.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.03, 0.214, 0.568, 0.879]: sit, touch (an object), watch (a person), listen to (a person), carry/hold (an object); [0.27, 0.141, 0.999, 0.852]: listen to (a person), sit, touch (an object), watch (a person)."}]}, {"video": "eA55_shhKko.mkv", "clip": [1474, 1569], "conversations": [{"from": "human", "timestamps": 1504.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.743, 0.392, 0.81, 0.848]: listen (e.g., to music), listen to (a person), watch (a person), walk, carry/hold (an object); [0.763, 0.326, 0.907, 0.869]: watch (a person), stand, play musical instrument."}, {"from": "human", "timestamps": 1569.0, "value": "Describe the actions of the person at bbox [0.275, 0.205, 0.914, 0.844] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.275, 0.205, 0.914, 0.844]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1568.0s, [0.287, 0.187, 0.914, 0.856]: sit, carry/hold (an object)."]}]}, {"video": "eA55_shhKko.mkv", "clip": [1593, 1665], "conversations": [{"from": "human", "timestamps": 1623.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.247, 0.229, 0.608, 0.753]: watch (a person), sit."}, {"from": "human", "timestamps": 1665.0, "value": "For the person at bbox [0.354, 0.202, 0.73, 0.879], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.354, 0.202, 0.73, 0.879]: stand, carry/hold (an object).", "At 1664.0s, [0.347, 0.2, 0.714, 0.86]: stand, carry/hold (an object), watch (a person).", "At 1663.0s, [0.335, 0.189, 0.69, 0.87]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1660.0s to 1662.0s, [0.33, 0.2, 0.682, 0.867]->[0.328, 0.202, 0.683, 0.856]->[0.332, 0.197, 0.69, 0.866]: stand, carry/hold (an object), listen to (a person).", "At 1659.0s, [0.31, 0.198, 0.684, 0.868]: stand, carry/hold (an object)."]}]}, {"video": "eA55_shhKko.mkv", "clip": [1606, 1713], "conversations": [{"from": "human", "timestamps": 1636.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.368, 0.182, 0.896, 0.873]: carry/hold (an object), stand, watch (a person)."}, {"from": "human", "timestamps": 1713.0, "value": "Describe the actions of the person at bbox [0.149, 0.194, 0.704, 0.852] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.149, 0.194, 0.704, 0.852]: stand, carry/hold (an object), listen to (a person), watch (a 
person).", "From 1711.0s to 1712.0s, [0.116, 0.22, 0.383, 0.868]->[0.116, 0.207, 0.374, 0.868]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1710.0s, [0.284, 0.257, 0.626, 0.873]: stand, carry/hold (an object), watch (a person).", "At 1709.0s, [0.264, 0.266, 0.582, 0.854]: stand, listen to (a person), watch (a person).", "From 1707.0s to 1708.0s, [0.132, 0.228, 0.386, 0.868]->[0.126, 0.221, 0.387, 0.867]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1704.0s to 1706.0s, [0.152, 0.195, 0.735, 0.854]->[0.161, 0.211, 0.724, 0.856]->[0.156, 0.209, 0.704, 0.854]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "eA55_shhKko.mkv", "clip": [1679, 1737], "conversations": [{"from": "human", "timestamps": 1709.0, "value": "Describe the actions of the person at bbox [0.264, 0.266, 0.582, 0.854] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.264, 0.266, 0.582, 0.854]: stand, listen to (a person), watch (a person).", "From 1707.0s to 1708.0s, [0.132, 0.228, 0.386, 0.868]->[0.126, 0.221, 0.387, 0.867]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1704.0s to 1706.0s, [0.152, 0.195, 0.735, 0.854]->[0.161, 0.211, 0.724, 0.856]->[0.156, 0.209, 0.704, 0.854]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1737.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.427, 0.583, 0.489, 0.798]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "eA55_shhKko.mkv", "clip": [1759, 1793], "conversations": [{"from": "human", "timestamps": 1789.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.702, 0.253, 0.859, 0.792]: walk."}, {"from": "human", "timestamps": 1793.0, "value": "What actions did the person at bbox [0.001, 0.206, 0.51, 0.87] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.001, 0.206, 0.51, 0.87]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1792.0s, [0.003, 0.196, 0.489, 0.858]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "fD6VkIRlIRI.mkv", "clip": [921, 956], "conversations": [{"from": "human", "timestamps": 951.0, "value": "For the person at bbox [0.007, 0.201, 0.393, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.007, 0.201, 0.393, 1.0]: stand, listen to (a person), watch (a person).", "At 950.0s, [0.007, 0.257, 0.418, 0.991]: stand, touch (an object), listen to (a person), watch (a person).", "From 948.0s to 949.0s, [0.0, 0.238, 0.415, 0.998]->[0.001, 0.142, 0.421, 0.991]: sit, touch (an object), listen to (a person), watch (a person).", "At 947.0s, [0.004, 0.21, 0.367, 0.989]: sit, listen to (a person).", "At 946.0s, [0.012, 0.186, 0.36, 0.981]: sit, listen to (a person), watch (a person).", "From 944.0s to 945.0s, [0.0, 0.199, 0.327, 0.975]->[0.002, 0.177, 0.318, 0.98]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 956.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.187, 0.405, 0.989]: listen to (a person), 
watch (a person); [0.13, 0.139, 0.956, 0.974]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "fD6VkIRlIRI.mkv", "clip": [1007, 1039], "conversations": [{"from": "human", "timestamps": 1037.0, "value": "What actions did the person at bbox [0.812, 0.405, 0.951, 0.804] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.812, 0.405, 0.951, 0.804]: sit, listen to (a person), watch (a person).", "At 1036.0s, [0.797, 0.247, 0.949, 0.536]: sit, watch (a person)."]}, {"from": "human", "timestamps": 1039.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.233, 0.438, 0.37, 0.841]: watch (a person), sit; [0.445, 0.353, 0.748, 0.901]: sit."}]}, {"video": "fD6VkIRlIRI.mkv", "clip": [1135, 1176], "conversations": [{"from": "human", "timestamps": 1165.0, "value": "What actions is the person in bbox [0.231, 0.155, 0.556, 0.753] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.231, 0.155, 0.556, 0.753]: carry/hold (an object), crouch/kneel, open (e.g., a window, a car door).", "At 1164.0s, [0.229, 0.18, 0.519, 0.788]: crouch/kneel, open (e.g., a window, a car door).", "At 1163.0s, [0.229, 0.184, 0.502, 0.775]: carry/hold (an object), crouch/kneel, open (e.g., a window, a car door).", "At 1162.0s, [0.224, 0.179, 0.493, 0.758]: sit, crouch/kneel, open (e.g., a window, a car door).", "At 1161.0s, [0.229, 0.177, 0.518, 0.769]: carry/hold (an object), crouch/kneel.", "At 1160.0s, [0.23, 0.176, 0.569, 0.775]: carry/hold (an object), crouch/kneel, touch (an object), talk to (e.g., self, a person, a group).", "At 1159.0s, [0.226, 0.183, 0.566, 0.741]: carry/hold (an object), crouch/kneel, touch (an object).", "At 1158.0s, [0.219, 0.188, 0.557, 0.762]: sit, crouch/kneel, touch (an object).", "From 1156.0s to 1157.0s, [0.228, 0.178, 0.574, 0.763]->[0.234, 0.192, 0.558, 0.745]: carry/hold (an object), crouch/kneel, touch (an object).", "At 1155.0s, [0.227, 0.193, 0.562, 0.766]: crouch/kneel, touch (an object).", "At 1154.0s, [0.231, 0.189, 0.571, 0.755]: crouch/kneel, touch (an object), turn (e.g., a screwdriver).", "At 1153.0s, [0.223, 0.216, 0.501, 0.811]: sit, crouch/kneel, touch (an object).", "At 1152.0s, [0.229, 0.216, 0.532, 0.799]: crouch/kneel, put down, touch (an object).", "At 1151.0s, [0.233, 0.19, 0.52, 0.792]: carry/hold (an object), crouch/kneel, read.", "From 1149.0s to 1150.0s, [0.228, 0.196, 0.556, 0.781]->[0.226, 0.19, 0.543, 0.794]: crouch/kneel, read.", "At 1148.0s, [0.226, 0.198, 0.512, 0.797]: carry/hold (an object), crouch/kneel.", "At 1147.0s, [0.224, 0.196, 0.548, 0.788]: crouch/kneel, touch (an object).", "At 1146.0s, [0.224, 0.194, 0.508, 0.778]: crouch/kneel, read, touch (an object).", "At 1145.0s, [0.226, 0.189, 0.522, 0.797]: carry/hold (an object), crouch/kneel, read.", "At 1144.0s, [0.229, 0.197, 0.529, 0.778]: sit, carry/hold (an object), crouch/kneel, read.", "From 1142.0s to 1143.0s, [0.215, 0.191, 0.549, 0.799]->[0.22, 0.197, 0.52, 0.779]: carry/hold (an object), crouch/kneel, read.", "At 1141.0s, [0.226, 0.199, 0.542, 0.786]: catch (an object), crouch/kneel, read.", "At 1140.0s, [0.225, 0.196, 0.507, 0.787]: crouch/kneel, read.", "At 1139.0s, [0.23, 0.195, 0.554, 0.799]: carry/hold (an object), crouch/kneel, read.", "From 1137.0s to 1138.0s, [0.207, 0.207, 0.471, 0.805]->[0.217, 0.192, 0.483, 0.797]: carry/hold 
(an object), crouch/kneel.", "At 1136.0s, [0.221, 0.209, 0.54, 0.769]: carry/hold (an object), crouch/kneel, touch (an object).", "From 1133.0s to 1135.0s, [0.208, 0.178, 0.56, 0.776]->[0.223, 0.187, 0.584, 0.824]->[0.228, 0.199, 0.581, 0.804]: crouch/kneel, touch (an object)."]}, {"from": "human", "timestamps": 1176.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.126, 0.137, 0.959, 0.986]: stand."}]}, {"video": "fD6VkIRlIRI.mkv", "clip": [1123, 1259], "conversations": [{"from": "human", "timestamps": 1153.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.223, 0.216, 0.501, 0.811]: sit, touch (an object), crouch/kneel."}, {"from": "human", "timestamps": 1259.0, "value": "What actions is the person in bbox [0.312, 0.135, 0.71, 0.987] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.312, 0.135, 0.71, 0.987]: carry/hold (an object), jump/leap.", "At 1258.0s, [0.295, 0.143, 0.658, 0.947]: carry/hold (an object)."]}]}, {"video": "fD6VkIRlIRI.mkv", "clip": [1262, 1312], "conversations": [{"from": "human", "timestamps": 1292.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.03, 0.159, 0.937, 0.993]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1312.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.056, 0.224, 0.19, 0.66]?"}, {"from": "gpt", "value": ["At Now, [0.056, 0.224, 0.19, 0.66]: stand, carry/hold (an object), watch (a person).", "From 1310.0s to 1311.0s, [0.81, 0.266, 0.951, 0.705]->[0.555, 0.258, 0.668, 0.679]: stand, carry/hold (an object).", "At 1309.0s, [0.81, 0.268, 0.949, 0.708]: stand, carry/hold (an object), watch (a person)."]}]}, {"video": "fD6VkIRlIRI.mkv", "clip": [1319, 1377], "conversations": [{"from": "human", "timestamps": 1349.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.169, 0.121, 0.753, 0.977]: carry/hold (an object), sit."}, {"from": "human", "timestamps": 1377.0, "value": "Describe the actions of the person at bbox [0.184, 0.219, 0.703, 0.889] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.184, 0.219, 0.703, 0.889]: sit, carry/hold (an object).", "At 1376.0s, [0.222, 0.231, 0.689, 0.966]: sit, carry/hold (an object).", "At 1375.0s, [0.206, 0.188, 0.717, 0.908]: sit."]}]}, {"video": "fD6VkIRlIRI.mkv", "clip": [1409, 1461], "conversations": [{"from": "human", "timestamps": 1439.0, "value": "For the person at bbox [0.391, 0.456, 0.502, 0.811], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.391, 0.456, 0.502, 0.811]: run/jog, carry/hold (an object).", "At 1438.0s, [0.356, 0.441, 0.42, 0.668]: run/jog, carry/hold (an object)."]}, {"from": "human", "timestamps": 1461.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.238, 0.311, 0.559, 0.877]: carry/hold (an object), crouch/kneel; [0.251, 0.455, 0.367, 0.672]: carry/hold (an object), watch (a person), bend/bow (at the waist); [0.494, 0.262, 0.704, 0.653]: carry/hold (an object), crouch/kneel."}]}, {"video": "fD6VkIRlIRI.mkv", "clip": [1498, 
1538], "conversations": [{"from": "human", "timestamps": 1528.0, "value": "What actions did the person at bbox [0.23, 0.265, 0.466, 0.709] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.23, 0.265, 0.466, 0.709]: bend/bow (at the waist), walk, carry/hold (an object), listen to (a person), watch (a person).", "From 1526.0s to 1527.0s, [0.147, 0.501, 0.289, 0.742]->[0.158, 0.404, 0.352, 0.734]: walk, carry/hold (an object).", "At 1525.0s, [0.175, 0.565, 0.269, 0.705]: run/jog, carry/hold (an object)."]}, {"from": "human", "timestamps": 1538.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.04, 0.183, 0.553, 0.962]: carry/hold (an object), crouch/kneel; [0.423, 0.126, 0.648, 0.55]: bend/bow (at the waist)."}]}, {"video": "fD6VkIRlIRI.mkv", "clip": [1588, 1640], "conversations": [{"from": "human", "timestamps": 1618.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.069, 0.164, 0.339, 0.987]: stand, talk to (e.g., self, a person, a group); [0.528, 0.141, 0.917, 0.998]: carry/hold (an object), stand, listen to (a person)."}, {"from": "human", "timestamps": 1640.0, "value": "For the person at bbox [0.001, 0.129, 0.744, 0.982], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.001, 0.129, 0.744, 0.982]: stand, talk to (e.g., self, a person, a group).", "At 1639.0s, [0.0, 0.12, 0.748, 0.984]: stand, talk to (e.g., self, a person, a group).", "At 1638.0s, [0.053, 0.126, 0.829, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1637.0s, [0.002, 0.126, 0.57, 0.963]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "fD6VkIRlIRI.mkv", "clip": [1644, 1724], "conversations": [{"from": "human", "timestamps": 1674.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.158, 0.79, 1.0]: stand; [0.571, 0.192, 0.853, 0.944]: watch (a person), stand, touch (an object)."}, {"from": "human", "timestamps": 1724.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.002, 0.335, 0.247, 0.735]?"}, {"from": "gpt", "value": ["In the current frame, [0.002, 0.335, 0.247, 0.735]: carry/hold (an object), talk to (e.g., self, a person, a group), lie/sleep, watch (a person).", "At 1723.0s, [0.006, 0.317, 0.448, 0.76]: talk to (e.g., self, a person, a group), lie/sleep."]}]}, {"video": "fGgnNCbXZ20.mp4", "clip": [899, 949], "conversations": [{"from": "human", "timestamps": 929.0, "value": "What actions is the person in bbox [0.323, 0.041, 0.969, 0.934] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.323, 0.041, 0.969, 0.934]: sit.", "From 926.0s to 928.0s, [0.298, 0.112, 0.945, 0.947]->[0.263, 0.084, 0.941, 0.903]->[0.36, 0.052, 0.944, 0.897]: sit.", "At 925.0s, [0.304, 0.138, 0.919, 0.955]: sit, touch (an object).", "From 921.0s to 924.0s, [0.451, 0.19, 0.908, 0.927]->[0.435, 0.17, 0.91, 0.954]->[0.405, 0.169, 0.92, 0.947]->[0.387, 0.151, 0.932, 0.964]: sit.", "At 920.0s, [0.453, 0.21, 0.897, 0.888]: sit, touch (an object), watch (a person).", "From 916.0s to 919.0s, [0.462, 0.251, 0.849, 0.836]->[0.466, 0.242, 0.855, 0.834]->[0.463, 0.233, 0.845, 0.851]->[0.455, 0.234, 0.869, 0.86]: sit, watch (a person).", "At 915.0s, [0.487, 0.239, 0.855, 0.831]: 
sit.", "At 914.0s, [0.502, 0.257, 0.87, 0.797]: sit, watch (a person).", "At 913.0s, [0.504, 0.258, 0.866, 0.825]: sit.", "At 912.0s, [0.508, 0.264, 0.858, 0.811]: sit, watch (a person).", "At 911.0s, [0.495, 0.266, 0.861, 0.809]: sit.", "At 910.0s, [0.52, 0.263, 0.857, 0.798]: sit, listen to (a person), watch (a person).", "At 909.0s, [0.51, 0.266, 0.856, 0.801]: sit."]}, {"from": "human", "timestamps": 949.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.427, 0.317, 0.568, 0.567]: carry/hold (an object), sit, talk to (e.g., self, a person, a group); [0.523, 0.221, 0.673, 0.671]: carry/hold (an object), sit."}]}, {"video": "fGgnNCbXZ20.mp4", "clip": [872, 983], "conversations": [{"from": "human", "timestamps": 902.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.39, 0.329, 0.776, 0.645]: touch (an object), get up."}, {"from": "human", "timestamps": 983.0, "value": "What actions is the person in bbox [0.035, 0.002, 0.925, 0.967] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.035, 0.002, 0.925, 0.967]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 982.0s, [0.039, 0.03, 0.931, 1.0]: sit, listen to (a person)."]}]}, {"video": "fGgnNCbXZ20.mp4", "clip": [986, 1017], "conversations": [{"from": "human", "timestamps": 1016.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.04, 0.087, 0.757, 0.947]?"}, {"from": "gpt", "value": ["At Now, [0.04, 0.087, 0.757, 0.947]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1014.0s to 1015.0s, [0.06, 0.047, 0.728, 0.973]->[0.047, 0.075, 0.737, 0.991]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1017.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.06, 0.078, 0.761, 0.987]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "fGgnNCbXZ20.mp4", "clip": [998, 1093], "conversations": [{"from": "human", "timestamps": 1028.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.4, 0.309, 0.549, 0.611]: stand, touch (an object)."}, {"from": "human", "timestamps": 1093.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.008, 0.089, 0.387, 0.866]?"}, {"from": "gpt", "value": ["At Now, [0.008, 0.089, 0.387, 0.866]: sit, eat, listen to (a person).", "At 1092.0s, [0.204, 0.327, 0.382, 0.693]: sit, touch (an object), listen to (a person), watch (a person).", "At 1091.0s, [0.202, 0.327, 0.383, 0.705]: sit, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1089.0s to 1090.0s, [0.212, 0.33, 0.37, 0.636]->[0.209, 0.316, 0.369, 0.64]: sit, touch (an object), listen to (a person), watch (a person).", "At 1088.0s, [0.216, 0.334, 0.37, 0.631]: sit, touch (an object), watch (a person).", "At 1087.0s, [0.21, 0.329, 0.373, 0.651]: sit, eat, touch (an object), listen to (a person), watch (a person).", "From 1085.0s to 1086.0s, [0.206, 0.338, 0.386, 0.734]->[0.206, 0.336, 0.404, 0.774]: sit, eat, touch (an object), watch (a person).", "At 1084.0s, [0.228, 0.359, 0.394, 0.723]: sit, eat, touch (an object).", "At 
1083.0s, [0.266, 0.33, 0.416, 0.662]: sit, eat."]}]}, {"video": "fGgnNCbXZ20.mp4", "clip": [1118, 1165], "conversations": [{"from": "human", "timestamps": 1148.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.119, 0.17, 0.71, 0.95]: carry/hold (an object), stand, touch (an object)."}, {"from": "human", "timestamps": 1165.0, "value": "What actions did the person at bbox [0.661, 0.349, 0.732, 0.508] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.661, 0.349, 0.732, 0.508]: stand, open (e.g., a window, a car door).", "From 1162.0s to 1164.0s, [0.075, 0.056, 0.771, 0.968]->[0.008, 0.12, 0.639, 0.988]->[0.137, 0.13, 0.933, 0.963]: stand, open (e.g., a window, a car door).", "From 1159.0s to 1161.0s, [0.227, 0.096, 0.992, 0.989]->[0.278, 0.126, 0.976, 0.982]->[0.132, 0.207, 0.804, 0.98]: stand, talk to (e.g., self, a person, a group).", "At 1158.0s, [0.076, 0.043, 0.893, 0.955]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "fGgnNCbXZ20.mp4", "clip": [1136, 1247], "conversations": [{"from": "human", "timestamps": 1166.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.666, 0.357, 0.739, 0.508]: watch (a person), stand."}, {"from": "human", "timestamps": 1247.0, "value": "For the person at bbox [0.299, 0.358, 0.447, 0.551], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.299, 0.358, 0.447, 0.551]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1245.0s to 1246.0s, [0.331, 0.28, 0.49, 0.584]->[0.274, 0.338, 0.483, 0.574]: bend/bow (at the waist), carry/hold (an object).", "At 1244.0s, [0.329, 0.251, 0.528, 0.575]: bend/bow (at the waist), lift/pick up.", "From 1241.0s to 1243.0s, [0.602, 0.184, 0.733, 0.672]->[0.465, 0.208, 0.584, 0.595]->[0.397, 0.21, 0.498, 0.604]: walk, watch (a person)."]}]}, {"video": "fGgnNCbXZ20.mp4", "clip": [1232, 1270], "conversations": [{"from": "human", "timestamps": 1262.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.104, 0.392, 0.312, 0.569]: watch (a person), sit, touch (an object); [0.295, 0.374, 0.409, 0.502]: listen to (a person), sit, watch (a person); [0.556, 0.21, 0.67, 0.638]: carry/hold (an object), watch (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1270.0, "value": "Describe the actions of the person at bbox [0.157, 0.097, 0.957, 0.932] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.157, 0.097, 0.957, 0.932]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1269.0s, [0.084, 0.104, 0.935, 0.937]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "fGgnNCbXZ20.mp4", "clip": [1272, 1332], "conversations": [{"from": "human", "timestamps": 1302.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.089, 0.42, 0.469, 0.685]: listen to (a person), sit, watch (a person); [0.364, 0.397, 0.698, 0.734]: listen to (a person), sit, watch (a person); [0.715, 0.082, 0.923, 0.795]: read, watch (a person), listen to (a person), carry/hold (an object), stand."}, {"from": "human", 
"timestamps": 1332.0, "value": "What actions is the person in bbox [0.111, 0.304, 0.522, 0.62] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.111, 0.304, 0.522, 0.62]: sit, carry/hold (an object), listen to (a person).", "At 1331.0s, [0.112, 0.319, 0.51, 0.609]: sit, listen to (a person), talk to (e.g., self, a person, a group)."]}]}, {"video": "fGgnNCbXZ20.mp4", "clip": [1356, 1396], "conversations": [{"from": "human", "timestamps": 1386.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.557, 0.293, 0.72, 0.628]?"}, {"from": "gpt", "value": ["At Now, [0.557, 0.293, 0.72, 0.628]: stand, carry/hold (an object), give/serve (an object) to (a person), listen to (a person), watch (a person).", "At 1385.0s, [0.541, 0.301, 0.717, 0.615]: stand, touch (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1396.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.111, 0.134, 0.438, 0.708]: watch (a person), stand, touch (an object), talk to (e.g., self, a person, a group); [0.572, 0.165, 0.848, 0.749]: listen to (a person), stand, watch (a person), drink."}]}, {"video": "fGgnNCbXZ20.mp4", "clip": [1375, 1470], "conversations": [{"from": "human", "timestamps": 1405.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.029, 0.123, 0.959, 0.942]: watch (a person), stand."}, {"from": "human", "timestamps": 1470.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.297, 0.177, 0.723, 0.616]?"}, {"from": "gpt", "value": ["In the current frame, [0.297, 0.177, 0.723, 0.616]: swim.", "At 1469.0s, [0.295, 0.316, 0.67, 0.635]: swim.", "From 1457.0s to 1468.0s, [0.111, 0.609, 0.375, 0.832]->[0.111, 0.578, 0.412, 0.849]->[0.105, 0.565, 0.384, 0.784]->[0.134, 0.556, 0.386, 0.78]->[0.096, 0.533, 0.354, 0.771]->[0.168, 0.571, 0.38, 0.771]->[0.131, 0.536, 0.38, 0.751]->[0.181, 0.523, 0.381, 0.725]->[0.229, 0.543, 0.446, 0.734]->[0.223, 0.522, 0.465, 0.747]->[0.194, 0.438, 0.493, 0.707]->[0.305, 0.38, 0.565, 0.66]: swim, watch (a person)."]}]}, {"video": "fGgnNCbXZ20.mp4", "clip": [1512, 1544], "conversations": [{"from": "human", "timestamps": 1542.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.077, 0.967, 0.976]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1544.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.078, 0.984, 0.97]?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.078, 0.984, 0.97]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1543.0s, [0.015, 0.084, 0.99, 0.96]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1542.0s, [0.0, 0.077, 0.967, 0.976]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "fGgnNCbXZ20.mp4", "clip": [1577, 1608], "conversations": [{"from": "human", "timestamps": 1607.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.383, 0.12, 0.593, 0.684]: walk."}, {"from": "human", "timestamps": 1608.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.482, 0.105, 0.679, 0.712]?"}, {"from": "gpt", "value": ["In the current frame, 
[0.482, 0.105, 0.679, 0.712]: walk.", "From 1604.0s to 1607.0s, [0.186, 0.112, 0.377, 0.762]->[0.238, 0.123, 0.42, 0.755]->[0.303, 0.126, 0.496, 0.728]->[0.383, 0.12, 0.593, 0.684]: walk.", "From 1602.0s to 1603.0s, [0.13, 0.123, 0.304, 0.767]->[0.133, 0.128, 0.301, 0.763]: stand.", "At 1601.0s, [0.101, 0.125, 0.323, 0.757]: stand, carry/hold (an object).", "At 1600.0s, [0.119, 0.127, 0.326, 0.757]: stand, carry/hold (an object), put down.", "At 1599.0s, [0.151, 0.116, 0.321, 0.752]: stand, carry/hold (an object)."]}]}, {"video": "fGgnNCbXZ20.mp4", "clip": [1620, 1724], "conversations": [{"from": "human", "timestamps": 1650.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.074, 0.043, 0.985, 0.984]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1724.0, "value": "What actions is the person in bbox [0.144, 0.032, 1.0, 0.98] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.144, 0.032, 1.0, 0.98]: sit, talk to (e.g., self, a person, a group).", "From 1719.0s to 1723.0s, [0.184, 0.099, 1.0, 0.974]->[0.162, 0.055, 0.977, 0.977]->[0.112, 0.03, 0.999, 0.976]->[0.144, 0.021, 1.0, 0.985]->[0.137, 0.017, 1.0, 0.986]: stand, talk to (e.g., self, a person, a group).", "At 1718.0s, [0.21, 0.047, 1.0, 0.986]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "fGgnNCbXZ20.mp4", "clip": [1695, 1737], "conversations": [{"from": "human", "timestamps": 1725.0, "value": "What actions did the person at bbox [0.159, 0.052, 1.0, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.159, 0.052, 1.0, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1724.0s, [0.144, 0.032, 1.0, 0.98]: sit, talk to (e.g., self, a person, a group).", "From 1719.0s to 1723.0s, [0.184, 0.099, 1.0, 0.974]->[0.162, 0.055, 0.977, 0.977]->[0.112, 0.03, 0.999, 0.976]->[0.144, 0.021, 1.0, 0.985]->[0.137, 0.017, 1.0, 0.986]: stand, talk to (e.g., self, a person, a group).", "At 1718.0s, [0.21, 0.047, 1.0, 0.986]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1737.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.26, 0.031, 1.0, 0.98]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "fGgnNCbXZ20.mp4", "clip": [1760, 1791], "conversations": [{"from": "human", "timestamps": 1790.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.055, 0.085, 0.311, 0.666]?"}, {"from": "gpt", "value": ["At Now, [0.055, 0.085, 0.311, 0.666]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1786.0s to 1789.0s, [0.264, 0.16, 0.473, 0.707]->[0.305, 0.137, 0.472, 0.705]->[0.329, 0.145, 0.487, 0.698]->[0.32, 0.147, 0.47, 0.699]: stand, answer phone.", "At 1785.0s, [0.677, 0.24, 0.77, 0.612]: walk."]}, {"from": "human", "timestamps": 1791.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.01, 0.1, 0.31, 0.701]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.605, 0.129, 0.936, 0.711]: listen to (a person), sit, watch (a person)."}]}, {"video": "fNcxxBjEOgw.mkv", "clip": [934, 981], "conversations": [{"from": "human", "timestamps": 964.0, "value": "What actions is the person in bbox [0.259, 0.0, 
0.814, 0.976] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.259, 0.0, 0.814, 0.976]: stand, listen to (a person).", "At 963.0s, [0.253, 0.019, 0.783, 0.965]: stand, listen to (a person), watch (a person).", "At 962.0s, [0.215, 0.018, 0.728, 0.971]: stand, watch (a person)."]}, {"from": "human", "timestamps": 981.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.017, 0.029, 0.596, 0.966]: watch (a person), stand; [0.786, 0.365, 0.88, 0.826]: carry/hold (an object), stand."}]}, {"video": "fNcxxBjEOgw.mkv", "clip": [964, 999], "conversations": [{"from": "human", "timestamps": 994.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.06, 0.225, 0.261, 0.58]: watch (a person), walk; [0.149, 0.102, 0.423, 0.569]: watch (a person), stand; [0.46, 0.187, 0.759, 0.604]: stand, smoke; [0.716, 0.242, 0.949, 0.59]: listen to (a person), stand."}, {"from": "human", "timestamps": 999.0, "value": "What actions is the person in bbox [0.591, 0.18, 0.968, 0.629] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.591, 0.18, 0.968, 0.629]: stand, talk to (e.g., self, a person, a group).", "At 998.0s, [0.653, 0.167, 1.0, 0.634]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 997.0s, [0.553, 0.19, 0.867, 0.632]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 996.0s, [0.464, 0.319, 0.661, 0.608]: listen to (a person)."]}]}, {"video": "fNcxxBjEOgw.mkv", "clip": [1020, 1051], "conversations": [{"from": "human", "timestamps": 1050.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.254, 0.401, 0.331, 0.579]: bend/bow (at the waist); [0.432, 0.148, 0.986, 0.962]: walk."}, {"from": "human", "timestamps": 1051.0, "value": "Describe the actions of the person at bbox [0.376, 0.066, 0.947, 0.968] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.376, 0.066, 0.947, 0.968]: walk.", "From 1047.0s to 1050.0s, [0.151, 0.048, 0.943, 0.954]->[0.407, 0.027, 0.983, 0.989]->[0.218, 0.121, 1.0, 0.961]->[0.432, 0.148, 0.986, 0.962]: walk.", "At 1046.0s, [0.089, 0.059, 0.724, 1.0]: stand, watch (a person).", "From 1042.0s to 1045.0s, [0.578, 0.031, 1.0, 0.995]->[0.539, 0.03, 1.0, 0.976]->[0.508, 0.019, 0.998, 0.962]->[0.385, 0.07, 0.992, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1037.0s to 1041.0s, [0.534, 0.02, 1.0, 1.0]->[0.561, 0.015, 1.0, 0.998]->[0.542, 0.014, 1.0, 1.0]->[0.555, 0.024, 1.0, 1.0]->[0.554, 0.037, 1.0, 0.999]: stand, listen to (a person), watch (a person).", "From 1035.0s to 1036.0s, [0.584, 0.034, 1.0, 0.971]->[0.511, 0.029, 1.0, 0.947]: stand, listen to (a person).", "At 1034.0s, [0.613, 0.047, 1.0, 1.0]: stand.", "At 1033.0s, [0.41, 0.133, 0.975, 0.991]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1032.0s, [0.487, 0.072, 0.99, 1.0]: bend/bow (at the waist), carry/hold (an object)."]}]}, {"video": "fNcxxBjEOgw.mkv", "clip": [1130, 1256], "conversations": [{"from": "human", "timestamps": 1160.0, "value": "Where is each person positioned, and what actions are they performing in 
the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.239, 0.486, 0.378, 0.98]: carry/hold (an object), hit (an object), jump/leap."}, {"from": "human", "timestamps": 1256.0, "value": "For the person at bbox [0.109, 0.018, 0.98, 0.95], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.109, 0.018, 0.98, 0.95]: sit, talk to (e.g., self, a person, a group).", "At 1254.0s, [0.081, 0.017, 0.965, 0.978]: sit, eat.", "From 1251.0s to 1253.0s, [0.089, 0.006, 0.983, 0.957]->[0.072, 0.007, 0.986, 0.966]->[0.087, 0.015, 0.97, 0.991]: sit, talk to (e.g., self, a person, a group).", "At 1250.0s, [0.055, 0.0, 0.974, 0.958]: sit.", "At 1249.0s, [0.06, 0.017, 0.966, 0.945]: stand."]}]}, {"video": "fNcxxBjEOgw.mkv", "clip": [1192, 1229], "conversations": [{"from": "human", "timestamps": 1222.0, "value": "Describe the actions of the person at bbox [0.134, 0.033, 0.907, 0.996] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.134, 0.033, 0.907, 0.996]: sit, talk to (e.g., self, a person, a group).", "At 1221.0s, [0.15, 0.034, 0.956, 0.962]: sit, talk to (e.g., self, a person, a group).", "At 1220.0s, [0.121, 0.027, 0.886, 0.978]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1229.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.166, 0.003, 0.744, 0.898]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "fNcxxBjEOgw.mkv", "clip": [1322, 1366], "conversations": [{"from": "human", "timestamps": 1352.0, "value": "Describe the actions of the person at bbox [0.571, 0.109, 0.999, 0.983] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.571, 0.109, 0.999, 0.983]: listen to (a person), lie/sleep.", "From 1349.0s to 1351.0s, [0.746, 0.218, 1.0, 0.959]->[0.643, 0.002, 0.994, 0.983]->[0.587, 0.017, 0.997, 0.97]: listen to (a person), lie/sleep."]}, {"from": "human", "timestamps": 1366.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.14, 0.02, 0.832, 0.974]: lie/sleep, talk to (e.g., self, a person, a group); [0.544, 0.045, 0.996, 0.975]: listen to (a person), lie/sleep."}]}, {"video": "fNcxxBjEOgw.mkv", "clip": [1509, 1592], "conversations": [{"from": "human", "timestamps": 1539.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.009, 0.201, 0.203, 0.955]: listen to (a person), watch (a person), walk; [0.113, 0.285, 0.483, 0.968]: listen to (a person), walk; [0.156, 0.266, 0.282, 0.583]: walk; [0.407, 0.103, 0.683, 0.971]: listen to (a person), watch (a person), walk; [0.764, 0.21, 0.989, 0.971]: carry/hold (an object), read, walk."}, {"from": "human", "timestamps": 1592.0, "value": "What actions did the person at bbox [0.38, 0.12, 0.658, 0.928] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.38, 0.12, 0.658, 0.928]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1591.0s, [0.331, 0.228, 0.673, 0.984]: stand, lift/pick up, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "fNcxxBjEOgw.mkv", "clip": [1538, 1606], "conversations": [{"from": "human", "timestamps": 1568.0, "value": "Describe the actions of the person at bbox [0.0, 0.065, 0.459, 0.981] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current 
frame, [0.0, 0.065, 0.459, 0.981]: stand, watch (a person).", "At 1567.0s, [0.0, 0.075, 0.406, 0.981]: stand, listen to (a person), watch (a person).", "At 1566.0s, [0.0, 0.01, 0.445, 0.979]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1606.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.131, 0.0, 0.977, 0.95]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "fNcxxBjEOgw.mkv", "clip": [1610, 1704], "conversations": [{"from": "human", "timestamps": 1640.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.0, 0.535, 0.979]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.565, 0.025, 0.997, 0.983]: listen to (a person), stand."}, {"from": "human", "timestamps": 1704.0, "value": "What actions did the person at bbox [0.635, 0.166, 1.0, 0.965] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.635, 0.166, 1.0, 0.965]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1703.0s, [0.614, 0.044, 1.0, 0.981]: stand, carry/hold (an object), eat, talk to (e.g., self, a person, a group), watch (a person).", "From 1700.0s to 1702.0s, [0.607, 0.031, 0.998, 0.961]->[0.608, 0.056, 1.0, 0.978]->[0.631, 0.029, 1.0, 0.954]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1699.0s, [0.566, 0.155, 0.923, 0.966]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1698.0s, [0.671, 0.191, 0.996, 0.967]: walk, carry/hold (an object), eat, push (another person), watch (a person).", "At 1697.0s, [0.784, 0.212, 0.93, 0.956]: walk, carry/hold (an object).", "At 1696.0s, [0.77, 0.18, 0.877, 0.771]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1694.0s to 1695.0s, [0.786, 0.103, 0.869, 0.449]->[0.795, 0.144, 0.877, 0.603]: walk, carry/hold (an object)."]}]}, {"video": "fNcxxBjEOgw.mkv", "clip": [1668, 1703], "conversations": [{"from": "human", "timestamps": 1698.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.385, 0.317, 0.43, 0.482]: stand; [0.572, 0.464, 0.71, 0.941]: carry/hold (an object), stand; [0.671, 0.191, 0.996, 0.967]: watch (a person), walk, carry/hold (an object), eat, push (another person)."}, {"from": "human", "timestamps": 1703.0, "value": "What actions is the person in bbox [0.614, 0.044, 1.0, 0.981] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.614, 0.044, 1.0, 0.981]: stand, carry/hold (an object), eat, talk to (e.g., self, a person, a group), watch (a person).", "From 1700.0s to 1702.0s, [0.607, 0.031, 0.998, 0.961]->[0.608, 0.056, 1.0, 0.978]->[0.631, 0.029, 1.0, 0.954]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1699.0s, [0.566, 0.155, 0.923, 0.966]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1698.0s, [0.671, 0.191, 0.996, 0.967]: walk, carry/hold (an object), eat, push (another person), watch (a person).", "At 1697.0s, [0.784, 0.212, 0.93, 0.956]: walk, carry/hold (an object).", "At 1696.0s, [0.77, 0.18, 0.877, 0.771]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", 
"From 1694.0s to 1695.0s, [0.786, 0.103, 0.869, 0.449]->[0.795, 0.144, 0.877, 0.603]: walk, carry/hold (an object)."]}]}, {"video": "fNcxxBjEOgw.mkv", "clip": [1735, 1784], "conversations": [{"from": "human", "timestamps": 1765.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.666, 0.26, 0.749, 0.424]?"}, {"from": "gpt", "value": ["At Now, [0.666, 0.26, 0.749, 0.424]: stand.", "At 1764.0s, [0.673, 0.262, 0.745, 0.43]: stand."]}, {"from": "human", "timestamps": 1784.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.117, 0.582, 0.461, 0.983]: run/jog, talk to (e.g., self, a person, a group); [0.753, 0.235, 0.916, 0.841]: watch (a person), run/jog."}]}, {"video": "fZs-yXm-uUs.mp4", "clip": [912, 949], "conversations": [{"from": "human", "timestamps": 942.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.06, 0.239, 0.469, 0.985]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.431, 0.283, 0.896, 0.992]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 949.0, "value": "For the person at bbox [0.729, 0.287, 0.915, 0.79], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.729, 0.287, 0.915, 0.79]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 948.0s, [0.722, 0.295, 0.913, 0.789]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 946.0s to 947.0s, [0.729, 0.285, 0.913, 0.791]->[0.718, 0.296, 0.909, 0.788]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 945.0s, [0.732, 0.296, 0.913, 0.786]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "fZs-yXm-uUs.mp4", "clip": [910, 968], "conversations": [{"from": "human", "timestamps": 940.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.077, 0.247, 0.465, 0.99]: sit, talk to (e.g., self, a person, a group); [0.426, 0.274, 0.899, 0.98]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 968.0, "value": "For the person at bbox [0.037, 0.089, 0.75, 0.982], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.037, 0.089, 0.75, 0.982]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 965.0s to 967.0s, [0.129, 0.301, 0.53, 0.997]->[0.18, 0.27, 0.502, 0.984]->[0.0, 0.154, 0.561, 0.998]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 964.0s, [0.214, 0.082, 0.525, 0.974]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 963.0s, [0.097, 0.013, 0.827, 0.993]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 961.0s, [0.727, 0.232, 0.86, 0.627]: carry/hold (an object), get up, talk to (e.g., self, a person, a group), watch (a person).", "At 960.0s, [0.76, 0.339, 0.899, 0.619]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 959.0s, [0.772, 0.339, 0.889, 0.621]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 958.0s, [0.773, 0.338, 0.872, 0.617]: sit, talk to (e.g., self, a person, a group), 
watch (a person).", "At 957.0s, [0.771, 0.343, 0.871, 0.618]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 954.0s to 956.0s, [0.748, 0.336, 0.843, 0.611]->[0.746, 0.335, 0.86, 0.614]->[0.766, 0.344, 0.868, 0.615]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "fZs-yXm-uUs.mp4", "clip": [933, 998], "conversations": [{"from": "human", "timestamps": 963.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.097, 0.013, 0.827, 0.993]: carry/hold (an object), talk to (e.g., self, a person, a group), walk; [0.632, 0.469, 0.956, 0.987]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 998.0, "value": "What actions did the person at bbox [0.057, 0.093, 0.681, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.057, 0.093, 0.681, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 997.0s, [0.049, 0.108, 0.66, 0.981]: stand, listen to (a person), watch (a person)."]}]}, {"video": "fZs-yXm-uUs.mp4", "clip": [1047, 1079], "conversations": [{"from": "human", "timestamps": 1077.0, "value": "What actions is the person in bbox [0.131, 0.084, 0.848, 0.988] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.131, 0.084, 0.848, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1076.0s, [0.188, 0.114, 0.862, 0.979]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1079.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.071, 0.987, 0.967]: listen to (a person), sit, watch (a person)."}]}, {"video": "fZs-yXm-uUs.mp4", "clip": [1204, 1239], "conversations": [{"from": "human", "timestamps": 1234.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.301, 0.342, 0.433, 0.504]: carry/hold (an object), sit, read, touch (an object)."}, {"from": "human", "timestamps": 1239.0, "value": "What actions did the person at bbox [0.196, 0.177, 0.527, 0.914] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.196, 0.177, 0.527, 0.914]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1238.0s, [0.237, 0.159, 0.478, 0.761]: stand, carry/hold (an object).", "At 1237.0s, [0.308, 0.144, 0.497, 0.65]: stand, carry/hold (an object), close (e.g., a door, a box).", "At 1236.0s, [0.326, 0.153, 0.438, 0.549]: walk, open (e.g., a window, a car door), watch (a person)."]}]}, {"video": "fZs-yXm-uUs.mp4", "clip": [1276, 1350], "conversations": [{"from": "human", "timestamps": 1306.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.388, 0.086, 0.993, 0.994]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1350.0, "value": "For the person at bbox [0.097, 0.087, 0.892, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.097, 0.087, 0.892, 1.0]: listen to (a person), talk to (e.g., self, a person, a group), lie/sleep.", "At 1349.0s, [0.103, 0.1, 0.876, 0.984]: listen to (a person), talk to (e.g., self, a person, a 
group), lie/sleep.", "From 1347.0s to 1348.0s, [0.014, 0.398, 0.931, 0.997]->[0.09, 0.325, 0.761, 0.996]: talk to (e.g., self, a person, a group), lie/sleep."]}]}, {"video": "fZs-yXm-uUs.mp4", "clip": [1302, 1368], "conversations": [{"from": "human", "timestamps": 1332.0, "value": "What actions did the person at bbox [0.016, 0.186, 0.932, 0.993] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.016, 0.186, 0.932, 0.993]: lie/sleep.", "At 1331.0s, [0.011, 0.0, 0.852, 0.829]: lie/sleep.", "At 1330.0s, [0.159, 0.073, 0.933, 0.949]: listen to (a person), lie/sleep."]}, {"from": "human", "timestamps": 1368.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.055, 0.068, 0.983, 0.991]: listen to (a person), lie/sleep, talk to (e.g., self, a person, a group)."}]}, {"video": "fZs-yXm-uUs.mp4", "clip": [1383, 1425], "conversations": [{"from": "human", "timestamps": 1413.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.045, 0.067, 0.917, 0.999]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1425.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.001, 0.16, 0.959, 1.0]?"}, {"from": "gpt", "value": ["In the current frame, [0.001, 0.16, 0.959, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1423.0s to 1424.0s, [0.042, 0.129, 0.955, 1.0]->[0.129, 0.127, 0.952, 0.989]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1422.0s, [0.107, 0.132, 0.957, 0.991]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "fZs-yXm-uUs.mp4", "clip": [1358, 1441], "conversations": [{"from": "human", "timestamps": 1388.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.073, 0.118, 0.902, 0.995]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1441.0, "value": "What actions did the person at bbox [0.013, 0.057, 0.77, 0.976] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.013, 0.057, 0.77, 0.976]: sit, talk to (e.g., self, a person, a group).", "At 1440.0s, [0.225, 0.114, 0.955, 0.999]: sit, talk to (e.g., self, a person, a group).", "At 1439.0s, [0.053, 0.017, 0.866, 0.993]: sit, crouch/kneel, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "fZs-yXm-uUs.mp4", "clip": [1495, 1534], "conversations": [{"from": "human", "timestamps": 1525.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.088, 0.891, 0.99]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1534.0, "value": "For the person at bbox [0.318, 0.169, 0.816, 0.981], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.318, 0.169, 0.816, 0.981]: sit, touch (an object), listen to (a person), watch (a person).", "At 1533.0s, [0.344, 0.168, 0.896, 0.997]: sit, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1532.0s, [0.344, 0.19, 0.901, 1.0]: sit, touch (an object), listen to (a person), watch (a person)."]}]}, {"video": 
"fZs-yXm-uUs.mp4", "clip": [1514, 1575], "conversations": [{"from": "human", "timestamps": 1544.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.186, 0.267, 0.443, 0.926]: sit, touch (an object), listen to (a person), watch (a person), eat; [0.265, 0.212, 0.87, 0.978]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.415, 0.279, 0.639, 0.573]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1575.0, "value": "Describe the actions of the person at bbox [0.472, 0.28, 0.831, 0.995] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.472, 0.28, 0.831, 0.995]: sit, read.", "At 1574.0s, [0.474, 0.299, 0.776, 0.974]: sit, read, touch (an object)."]}]}, {"video": "fZs-yXm-uUs.mp4", "clip": [1646, 1713], "conversations": [{"from": "human", "timestamps": 1676.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.446, 0.009, 1.0, 0.989]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1713.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.041, 0.01, 0.917, 1.0]?"}, {"from": "gpt", "value": ["At Now, [0.041, 0.01, 0.917, 1.0]: sit.", "At 1712.0s, [0.033, 0.004, 0.973, 1.0]: sit, touch (an object).", "From 1710.0s to 1711.0s, [0.148, 0.381, 0.346, 0.723]->[0.215, 0.354, 0.414, 0.698]: sit, touch (an object), watch (a person).", "From 1708.0s to 1709.0s, [0.067, 0.38, 0.282, 0.73]->[0.081, 0.382, 0.287, 0.72]: sit, touch (an object), listen to (a person), watch (a person).", "From 1706.0s to 1707.0s, [0.0, 0.02, 0.849, 1.0]->[0.069, 0.383, 0.28, 0.732]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "fZs-yXm-uUs.mp4", "clip": [1670, 1735], "conversations": [{"from": "human", "timestamps": 1700.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.029, 0.012, 0.9, 0.997]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1735.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.269, 0.377, 0.426, 0.606]?"}, {"from": "gpt", "value": ["At Now, [0.269, 0.377, 0.426, 0.606]: sit, touch (an object), work on a computer.", "At 1734.0s, [0.267, 0.376, 0.418, 0.613]: sit, touch (an object), work on a computer.", "From 1731.0s to 1733.0s, [0.117, 0.324, 0.659, 0.98]->[0.133, 0.293, 0.71, 0.957]->[0.125, 0.302, 0.696, 0.957]: sit, touch (an object).", "From 1724.0s to 1730.0s, [0.114, 0.323, 0.695, 0.977]->[0.137, 0.316, 0.66, 0.981]->[0.124, 0.322, 0.685, 0.954]->[0.151, 0.323, 0.66, 0.975]->[0.145, 0.323, 0.655, 0.992]->[0.142, 0.331, 0.639, 0.948]->[0.133, 0.331, 0.642, 0.952]: sit, answer phone, touch (an object).", "At 1723.0s, [0.111, 0.27, 0.712, 0.958]: sit, carry/hold (an object), touch (an object)."]}]}, {"video": "fZs-yXm-uUs.mp4", "clip": [1731, 1766], "conversations": [{"from": "human", "timestamps": 1761.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.02, 0.1, 0.868, 0.993]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1766.0, "value": "What actions is the person in bbox [0.028, 0.108, 0.876, 0.993] doing 
in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.028, 0.108, 0.876, 0.993]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1765.0s, [0.0, 0.117, 0.836, 0.984]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "g1wyIcLPbq0.mp4", "clip": [922, 958], "conversations": [{"from": "human", "timestamps": 952.0, "value": "Describe the actions of the person at bbox [0.142, 0.372, 0.483, 0.885] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.142, 0.372, 0.483, 0.885]: lie/sleep.", "From 950.0s to 951.0s, [0.179, 0.379, 0.466, 0.881]->[0.136, 0.374, 0.483, 0.91]: lie/sleep, watch (a person).", "From 948.0s to 949.0s, [0.187, 0.411, 0.469, 0.926]->[0.189, 0.418, 0.487, 0.905]: listen to (a person), lie/sleep.", "At 947.0s, [0.165, 0.429, 0.647, 0.914]: talk to (e.g., self, a person, a group), lie/sleep, watch (a person).", "From 945.0s to 946.0s, [0.118, 0.422, 0.575, 0.927]->[0.163, 0.351, 0.468, 0.894]: listen to (a person), lie/sleep."]}, {"from": "human", "timestamps": 958.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.234, 0.122, 0.758, 0.947]: crouch/kneel, talk to (e.g., self, a person, a group)."}]}, {"video": "g1wyIcLPbq0.mp4", "clip": [1024, 1067], "conversations": [{"from": "human", "timestamps": 1054.0, "value": "For the person at bbox [0.164, 0.272, 0.375, 0.873], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.164, 0.272, 0.375, 0.873]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1053.0s, [0.166, 0.27, 0.331, 0.802]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1052.0s, [0.177, 0.262, 0.338, 0.784]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1050.0s to 1051.0s, [0.149, 0.259, 0.309, 0.774]->[0.149, 0.263, 0.312, 0.776]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1067.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.007, 0.329, 0.765]: watch (a person), stand; [0.221, 0.085, 1.0, 0.98]: carry/hold (an object), stand, watch (a person)."}]}, {"video": "g1wyIcLPbq0.mp4", "clip": [1007, 1099], "conversations": [{"from": "human", "timestamps": 1037.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.288, 0.164, 0.931, 1.0]: stand."}, {"from": "human", "timestamps": 1099.0, "value": "Describe the actions of the person at bbox [0.118, 0.277, 0.626, 0.987] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.118, 0.277, 0.626, 0.987]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1098.0s, [0.105, 0.223, 0.679, 0.999]: stand, carry/hold (an object), watch (a person).", "At 1097.0s, [0.024, 0.161, 0.653, 0.991]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1096.0s, [0.043, 0.084, 0.697, 1.0]: stand, carry/hold (an object), listen to (a person).", "From 1094.0s to 1095.0s, [0.065, 0.06, 0.697, 1.0]->[0.06, 0.071, 0.694, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 
1093.0s, [0.037, 0.067, 0.724, 0.999]: stand, carry/hold (an object), listen to (a person).", "At 1092.0s, [0.052, 0.063, 0.702, 0.998]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1090.0s to 1091.0s, [0.023, 0.051, 0.847, 0.987]->[0.001, 0.004, 0.651, 0.998]: stand, carry/hold (an object), watch (a person).", "At 1089.0s, [0.018, 0.072, 0.738, 0.973]: stand, listen to (a person), watch (a person)."]}]}, {"video": "g1wyIcLPbq0.mp4", "clip": [1064, 1134], "conversations": [{"from": "human", "timestamps": 1094.0, "value": "What actions is the person in bbox [0.003, 0.251, 0.161, 0.914] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.003, 0.251, 0.161, 0.914]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1093.0s, [0.001, 0.25, 0.114, 0.833]: stand, carry/hold (an object), watch (a person)."]}, {"from": "human", "timestamps": 1134.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.082, 0.0, 0.976, 0.986]: stand."}]}, {"video": "g1wyIcLPbq0.mp4", "clip": [1127, 1234], "conversations": [{"from": "human", "timestamps": 1157.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.079, 0.239, 0.289, 0.854]: ride (e.g., a bike, a car, a horse), sit; [0.535, 0.212, 0.675, 0.677]: ride (e.g., a bike, a car, a horse), sit."}, {"from": "human", "timestamps": 1234.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.193, 0.067, 0.972, 0.971]?"}, {"from": "gpt", "value": ["In the current frame, [0.193, 0.067, 0.972, 0.971]: stand, talk to (e.g., self, a person, a group).", "From 1232.0s to 1233.0s, [0.038, 0.102, 0.687, 0.954]->[0.051, 0.065, 0.829, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1231.0s, [0.046, 0.02, 0.92, 0.976]: stand.", "From 1229.0s to 1230.0s, [0.325, 0.049, 0.941, 1.0]->[0.065, 0.117, 0.975, 0.982]: walk.", "At 1228.0s, [0.369, 0.055, 0.948, 0.987]: walk, talk to (e.g., self, a person, a group)."]}]}, {"video": "g1wyIcLPbq0.mp4", "clip": [1231, 1273], "conversations": [{"from": "human", "timestamps": 1261.0, "value": "What actions is the person in bbox [0.482, 0.235, 0.731, 0.961] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.482, 0.235, 0.731, 0.961]: stand, carry/hold (an object), throw.", "At 1260.0s, [0.453, 0.196, 0.789, 0.991]: stand, carry/hold (an object)."]}, {"from": "human", "timestamps": 1273.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.234, 0.125, 0.995, 0.984]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "g1wyIcLPbq0.mp4", "clip": [1263, 1333], "conversations": [{"from": "human", "timestamps": 1293.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.009, 0.2, 0.597, 0.941]: watch (a person), stand; [0.249, 0.003, 0.967, 0.966]: watch (a person), stand."}, {"from": "human", "timestamps": 1333.0, "value": "What actions did the person at bbox [0.438, 0.265, 0.985, 0.988] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.438, 0.265, 0.985, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1331.0s to 
1332.0s, [0.432, 0.284, 0.966, 0.981]->[0.443, 0.252, 0.976, 0.986]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "g1wyIcLPbq0.mp4", "clip": [1290, 1336], "conversations": [{"from": "human", "timestamps": 1320.0, "value": "For the person at bbox [0.0, 0.004, 0.332, 0.976], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.004, 0.332, 0.976]: stand, listen to (a person), watch (a person).", "At 1319.0s, [0.0, 0.009, 0.335, 0.842]: stand, listen to (a person), watch (a person).", "At 1318.0s, [0.001, 0.0, 0.418, 0.995]: stand, listen to (a person).", "At 1317.0s, [0.0, 0.009, 0.344, 0.846]: stand, listen to (a person), watch (a person).", "At 1316.0s, [0.0, 0.0, 0.514, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1315.0s, [0.0, 0.05, 0.726, 0.986]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1336.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.045, 0.441, 0.989]: listen to (a person), stand, watch (a person); [0.449, 0.307, 0.988, 0.988]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "g1wyIcLPbq0.mp4", "clip": [1357, 1418], "conversations": [{"from": "human", "timestamps": 1387.0, "value": "What actions did the person at bbox [0.811, 0.388, 0.859, 0.593] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.811, 0.388, 0.859, 0.593]: stand, listen to (a person), watch (a person).", "At 1386.0s, [0.81, 0.382, 0.86, 0.588]: stand, listen to (a person), watch (a person).", "At 1385.0s, [0.816, 0.382, 0.867, 0.591]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1418.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.132, 0.235, 0.577, 0.993]: carry/hold (an object), sit, ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group)."}]}, {"video": "g1wyIcLPbq0.mp4", "clip": [1415, 1496], "conversations": [{"from": "human", "timestamps": 1445.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.017, 0.077, 0.618, 1.0]?"}, {"from": "gpt", "value": ["At Now, [0.017, 0.077, 0.618, 1.0]: stand, watch (a person).", "At 1444.0s, [0.006, 0.05, 0.653, 1.0]: stand."]}, {"from": "human", "timestamps": 1496.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.0, 0.202, 0.425, 0.997]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.568, 0.142, 0.999, 0.978]: listen to (a person), stand."}]}, {"video": "g1wyIcLPbq0.mp4", "clip": [1599, 1668], "conversations": [{"from": "human", "timestamps": 1629.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.587, 0.5, 0.943, 0.993]?"}, {"from": "gpt", "value": ["In the current frame, [0.587, 0.5, 0.943, 0.993]: carry/hold (an object), crouch/kneel, talk to (e.g., self, a person, a group), watch (a person).", "At 1628.0s, [0.594, 0.501, 0.927, 0.997]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1627.0s, [0.602, 0.5, 0.971, 1.0]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1626.0s, [0.665, 0.492, 0.983, 0.966]: sit, touch (an object), talk to (e.g., 
self, a person, a group), watch (a person).", "At 1625.0s, [0.788, 0.558, 0.995, 0.999]: sit, watch (a person)."]}, {"from": "human", "timestamps": 1668.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.281, 0.347, 0.363, 0.609]: carry/hold (an object), stand; [0.653, 0.31, 0.738, 0.668]: watch (a person), stand; [0.725, 0.274, 0.955, 0.989]: watch (a person), stand."}]}, {"video": "g1wyIcLPbq0.mp4", "clip": [1620, 1682], "conversations": [{"from": "human", "timestamps": 1650.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.002, 0.3, 0.4, 0.983]: listen to (a person), crouch/kneel, watch (a person); [0.331, 0.37, 0.433, 0.691]: listen to (a person), stand, watch (a person); [0.399, 0.35, 0.771, 0.996]: listen to (a person), hand shake, watch (a person); [0.898, 0.424, 0.968, 0.575]: walk."}, {"from": "human", "timestamps": 1682.0, "value": "For the person at bbox [0.203, 0.149, 0.864, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.203, 0.149, 0.864, 1.0]: stand.", "From 1679.0s to 1681.0s, [0.2, 0.156, 0.884, 0.997]->[0.225, 0.17, 0.885, 0.996]->[0.193, 0.144, 0.876, 1.0]: stand.", "At 1678.0s, [0.356, 0.489, 0.472, 0.875]: stand, watch (a person)."]}]}, {"video": "gjasEUDkbuc.mkv", "clip": [886, 948], "conversations": [{"from": "human", "timestamps": 916.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.22, 0.0, 0.787, 1.0]: sit, text on/look at a cellphone."}, {"from": "human", "timestamps": 948.0, "value": "What actions did the person at bbox [0.316, 0.259, 0.559, 0.898] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.316, 0.259, 0.559, 0.898]: sit, answer phone.", "From 945.0s to 947.0s, [0.311, 0.275, 0.56, 0.96]->[0.307, 0.265, 0.561, 0.936]->[0.316, 0.26, 0.562, 0.913]: sit, answer phone.", "From 942.0s to 944.0s, [0.298, 0.267, 0.558, 0.976]->[0.295, 0.268, 0.574, 0.969]->[0.303, 0.268, 0.565, 0.958]: sit.", "From 939.0s to 941.0s, [0.278, 0.273, 0.557, 0.985]->[0.29, 0.27, 0.555, 0.98]->[0.289, 0.269, 0.56, 0.975]: sit, answer phone.", "At 938.0s, [0.274, 0.279, 0.559, 0.995]: sit, answer phone, talk to (e.g., self, a person, a group).", "From 927.0s to 937.0s, [0.224, 0.174, 0.632, 0.986]->[0.233, 0.2, 0.632, 0.985]->[0.245, 0.209, 0.622, 0.986]->[0.255, 0.186, 0.613, 0.982]->[0.248, 0.201, 0.608, 0.99]->[0.214, 0.239, 0.562, 0.985]->[0.243, 0.268, 0.568, 0.992]->[0.271, 0.32, 0.6, 0.997]->[0.262, 0.281, 0.587, 0.997]->[0.262, 0.273, 0.563, 0.993]->[0.271, 0.276, 0.561, 0.995]: sit, answer phone.", "From 925.0s to 926.0s, [0.174, 0.09, 0.601, 0.982]->[0.191, 0.135, 0.617, 0.986]: sit.", "From 920.0s to 924.0s, [0.162, 0.0, 0.62, 0.986]->[0.204, 0.047, 0.707, 0.979]->[0.227, 0.108, 0.798, 0.969]->[0.174, 0.061, 0.692, 0.981]->[0.145, 0.048, 0.577, 0.975]: sit, answer phone.", "From 909.0s to 919.0s, [0.483, 0.181, 0.971, 0.992]->[0.257, 0.028, 0.855, 0.956]->[0.256, 0.0, 0.793, 0.995]->[0.28, 0.0, 0.796, 0.986]->[0.246, 0.003, 0.762, 0.985]->[0.238, 0.013, 0.785, 0.987]->[0.226, 0.017, 0.762, 0.998]->[0.22, 0.0, 0.787, 1.0]->[0.219, 0.001, 0.791, 0.992]->[0.218, 0.0, 0.815, 1.0]->[0.212, 0.016, 0.772, 0.966]: sit, text on/look at a cellphone.", "From 907.0s to 908.0s, [0.409, 0.045, 0.856, 1.0]->[0.459, 0.0, 0.913, 1.0]: stand, text 
on/look at a cellphone.", "At 906.0s, [0.307, 0.023, 0.821, 0.992]: stand, carry/hold (an object).", "From 902.0s to 905.0s, [0.475, 0.005, 0.777, 1.0]->[0.435, 0.038, 0.791, 1.0]->[0.33, 0.095, 0.738, 1.0]->[0.306, 0.035, 0.768, 0.994]: stand."]}]}, {"video": "gjasEUDkbuc.mkv", "clip": [899, 982], "conversations": [{"from": "human", "timestamps": 929.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.245, 0.209, 0.622, 0.986]: sit, answer phone."}, {"from": "human", "timestamps": 982.0, "value": "For the person at bbox [0.189, 0.05, 0.473, 0.985], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.189, 0.05, 0.473, 0.985]: stand, carry/hold (an object).", "From 980.0s to 981.0s, [0.19, 0.051, 0.525, 0.985]->[0.19, 0.043, 0.493, 1.0]: stand, carry/hold (an object), read.", "From 974.0s to 979.0s, [0.259, 0.05, 0.558, 0.996]->[0.198, 0.052, 0.536, 1.0]->[0.181, 0.074, 0.543, 0.989]->[0.169, 0.032, 0.481, 0.996]->[0.247, 0.034, 0.586, 0.988]->[0.22, 0.05, 0.598, 0.997]: stand, carry/hold (an object).", "At 973.0s, [0.293, 0.068, 0.562, 0.989]: stand, carry/hold (an object), touch (an object).", "At 972.0s, [0.219, 0.081, 0.543, 0.995]: stand, carry/hold (an object).", "From 969.0s to 971.0s, [0.2, 0.076, 0.638, 0.98]->[0.174, 0.077, 0.578, 0.986]->[0.142, 0.063, 0.486, 0.997]: walk, carry/hold (an object)."]}]}, {"video": "gjasEUDkbuc.mkv", "clip": [1018, 1086], "conversations": [{"from": "human", "timestamps": 1048.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.032, 0.019, 0.541, 0.992]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1086.0, "value": "What actions is the person in bbox [0.727, 0.491, 0.987, 0.98] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.727, 0.491, 0.987, 0.98]: sit, watch (e.g., TV).", "At 1085.0s, [0.721, 0.517, 0.938, 0.991]: sit, watch (e.g., TV), listen to (a person).", "At 1084.0s, [0.653, 0.537, 0.883, 0.99]: sit, watch (e.g., TV).", "At 1083.0s, [0.661, 0.475, 0.942, 0.999]: stand, watch (e.g., TV).", "At 1082.0s, [0.671, 0.466, 0.917, 0.987]: sit, watch (e.g., TV), watch (a person)."]}]}, {"video": "gjasEUDkbuc.mkv", "clip": [1074, 1127], "conversations": [{"from": "human", "timestamps": 1104.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.228, 0.038, 0.78, 0.979]: stand."}, {"from": "human", "timestamps": 1127.0, "value": "What actions is the person in bbox [0.306, 0.113, 0.739, 0.994] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.306, 0.113, 0.739, 0.994]: sit, talk to (e.g., self, a person, a group).", "From 1125.0s to 1126.0s, [0.31, 0.115, 0.744, 0.992]->[0.307, 0.116, 0.743, 0.993]: sit, talk to (e.g., self, a person, a group).", "At 1124.0s, [0.309, 0.136, 0.744, 0.991]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "gjasEUDkbuc.mkv", "clip": [1113, 1187], "conversations": [{"from": "human", "timestamps": 1143.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.187, 0.064, 0.845, 0.975]?"}, {"from": "gpt", "value": ["At Now, 
[0.187, 0.064, 0.845, 0.975]: sit, talk to (e.g., self, a person, a group).", "From 1141.0s to 1142.0s, [0.173, 0.06, 0.846, 0.996]->[0.191, 0.069, 0.847, 0.977]: stand, talk to (e.g., self, a person, a group).", "From 1139.0s to 1140.0s, [0.177, 0.064, 0.856, 0.978]->[0.147, 0.08, 0.837, 0.982]: sit, talk to (e.g., self, a person, a group).", "From 1137.0s to 1138.0s, [0.215, 0.052, 0.851, 1.0]->[0.219, 0.062, 0.86, 0.982]: stand, talk to (e.g., self, a person, a group).", "At 1136.0s, [0.217, 0.056, 0.874, 0.991]: sit, talk to (e.g., self, a person, a group).", "At 1135.0s, [0.192, 0.031, 0.879, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1133.0s to 1134.0s, [0.217, 0.064, 0.859, 0.994]->[0.217, 0.038, 0.831, 0.988]: sit, talk to (e.g., self, a person, a group).", "From 1131.0s to 1132.0s, [0.217, 0.044, 0.856, 0.984]->[0.206, 0.047, 0.85, 1.0]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1187.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.337, 0.073, 0.775, 1.0]: stand, hand clap; [0.727, 0.256, 0.928, 0.958]: carry/hold (an object), stand, hand clap, watch (a person)."}]}, {"video": "gjasEUDkbuc.mkv", "clip": [1114, 1226], "conversations": [{"from": "human", "timestamps": 1144.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.004, 0.407, 0.333, 0.974]: sit; [0.169, 0.061, 0.851, 0.992]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1226.0, "value": "Describe the actions of the person at bbox [0.763, 0.26, 0.882, 0.469] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.763, 0.26, 0.882, 0.469]: stand, carry/hold (an object), touch (an object), watch (a person).", "At 1225.0s, [0.709, 0.484, 0.801, 0.681]: stand, touch (an object)."]}]}, {"video": "gjasEUDkbuc.mkv", "clip": [1229, 1289], "conversations": [{"from": "human", "timestamps": 1259.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.367, 0.038, 0.956, 0.978]: carry/hold (an object), sit, touch (an object)."}, {"from": "human", "timestamps": 1289.0, "value": "What actions did the person at bbox [0.842, 0.251, 0.919, 0.79] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.842, 0.251, 0.919, 0.79]: stand, carry/hold (an object), watch (a person).", "From 1287.0s to 1288.0s, [0.793, 0.418, 0.875, 0.966]->[0.814, 0.341, 0.892, 0.88]: stand, watch (a person).", "At 1286.0s, [0.793, 0.474, 0.883, 0.972]: walk."]}]}, {"video": "gjasEUDkbuc.mkv", "clip": [1344, 1419], "conversations": [{"from": "human", "timestamps": 1374.0, "value": "What actions is the person in bbox [0.178, 0.036, 0.874, 0.972] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.178, 0.036, 0.874, 0.972]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1373.0s, [0.183, 0.034, 0.889, 0.973]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1372.0s, [0.15, 0.029, 0.871, 0.962]: sit, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "At 1371.0s, [0.165, 0.054, 0.869, 0.969]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1369.0s to 1370.0s, [0.16, 0.05, 0.861, 
0.972]->[0.156, 0.054, 0.866, 0.975]: sit, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "From 1360.0s to 1368.0s, [0.218, 0.052, 0.911, 0.982]->[0.202, 0.044, 0.91, 0.982]->[0.199, 0.043, 0.915, 0.978]->[0.18, 0.03, 0.904, 0.976]->[0.184, 0.029, 0.941, 0.99]->[0.202, 0.021, 0.919, 0.984]->[0.159, 0.055, 0.849, 0.982]->[0.173, 0.06, 0.847, 0.97]->[0.133, 0.052, 0.864, 0.974]: sit, carry/hold (an object), read."]}, {"from": "human", "timestamps": 1419.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.07, 0.43, 0.234, 0.822]: carry/hold (an object), sit, listen to (a person); [0.226, 0.468, 0.396, 0.834]: listen to (a person), sit, touch (an object), watch (a person); [0.365, 0.444, 0.444, 0.759]: watch (a person), sit; [0.383, 0.499, 0.537, 0.842]: listen to (a person), sit, watch (a person); [0.518, 0.522, 0.614, 0.845]: carry/hold (an object), sit, watch (a person), listen to (a person); [0.559, 0.477, 0.766, 0.949]: watch (a person), sit; [0.704, 0.489, 0.846, 0.95]: carry/hold (an object), sit, watch (a person), listen to (a person); [0.819, 0.436, 0.966, 0.923]: listen to (a person), sit, watch (a person)."}]}, {"video": "gjasEUDkbuc.mkv", "clip": [1385, 1446], "conversations": [{"from": "human", "timestamps": 1415.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.072, 0.434, 0.226, 0.803]: carry/hold (an object), sit, watch (a person), listen to (a person); [0.219, 0.462, 0.386, 0.832]: listen to (a person), sit, touch (an object), watch (a person); [0.38, 0.473, 0.542, 0.844]: carry/hold (an object), sit, watch (a person); [0.525, 0.519, 0.617, 0.835]: listen to (a person), sit, watch (a person); [0.553, 0.475, 0.763, 0.94]: carry/hold (an object), sit, watch (a person), listen to (a person); [0.6, 0.425, 0.679, 0.66]: watch (a person), sit; [0.711, 0.503, 0.834, 0.958]: carry/hold (an object), sit, watch (a person); [0.82, 0.415, 0.968, 0.937]: watch (a person), sit."}, {"from": "human", "timestamps": 1446.0, "value": "Describe the actions of the person at bbox [0.404, 0.652, 0.514, 0.927] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.404, 0.652, 0.514, 0.927]: sit, listen to (a person), watch (a person).", "At 1445.0s, [0.406, 0.65, 0.52, 0.925]: sit, listen to (a person), watch (a person)."]}]}, {"video": "gjasEUDkbuc.mkv", "clip": [1429, 1464], "conversations": [{"from": "human", "timestamps": 1459.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.189, 0.022, 0.746, 0.988]?"}, {"from": "gpt", "value": ["At Now, [0.189, 0.022, 0.746, 0.988]: sit, stand.", "At 1458.0s, [0.191, 0.026, 0.765, 0.983]: sit.", "At 1457.0s, [0.168, 0.027, 0.756, 0.976]: sit, stand.", "At 1456.0s, [0.178, 0.033, 0.751, 0.98]: sit.", "At 1455.0s, [0.161, 0.031, 0.749, 0.968]: sit, stand.", "At 1454.0s, [0.189, 0.026, 0.74, 0.962]: sit."]}, {"from": "human", "timestamps": 1464.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.235, 0.179, 0.511, 0.976]: watch (a person), sit, touch (an object); [0.449, 0.114, 0.766, 0.701]: carry/hold (an object), sit, read."}]}, {"video": "gjasEUDkbuc.mkv", "clip": [1452, 1490], "conversations": [{"from": "human", "timestamps": 1482.0, "value": "For the person at bbox [0.021, 
0.175, 0.802, 0.969], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.021, 0.175, 0.802, 0.969]: sit.", "At 1481.0s, [0.105, 0.098, 0.814, 0.977]: sit, stand.", "At 1480.0s, [0.124, 0.031, 0.841, 0.981]: sit, watch (a person).", "From 1478.0s to 1479.0s, [0.224, 0.175, 0.528, 0.974]->[0.146, 0.029, 0.83, 0.97]: sit, touch (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1490.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.233, 0.165, 0.486, 0.978]: listen to (a person), sit, hand shake, watch (a person); [0.364, 0.164, 0.733, 0.767]: watch (a person), sit, touch (an object)."}]}, {"video": "gjasEUDkbuc.mkv", "clip": [1457, 1540], "conversations": [{"from": "human", "timestamps": 1487.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.228, 0.186, 0.478, 0.995]: watch (a person), sit, touch (an object); [0.379, 0.143, 0.736, 0.756]: sit, touch (an object), watch (a person), carry/hold (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1540.0, "value": "For the person at bbox [0.464, 0.029, 0.913, 0.977], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.464, 0.029, 0.913, 0.977]: stand, answer phone, carry/hold (an object).", "At 1539.0s, [0.421, 0.004, 0.832, 0.99]: stand, answer phone, carry/hold (an object).", "From 1535.0s to 1538.0s, [0.237, 0.102, 0.604, 0.998]->[0.304, 0.156, 0.668, 0.998]->[0.339, 0.081, 0.719, 0.993]->[0.342, 0.0, 0.747, 0.975]: walk, answer phone, carry/hold (an object).", "From 1533.0s to 1534.0s, [0.33, 0.103, 0.675, 0.981]->[0.301, 0.14, 0.644, 1.0]: stand, answer phone, carry/hold (an object).", "From 1531.0s to 1532.0s, [0.3, 0.143, 0.63, 0.988]->[0.266, 0.148, 0.632, 0.973]: walk, answer phone, carry/hold (an object)."]}]}, {"video": "gjasEUDkbuc.mkv", "clip": [1521, 1563], "conversations": [{"from": "human", "timestamps": 1551.0, "value": "Describe the actions of the person at bbox [0.815, 0.751, 0.997, 0.994] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.815, 0.751, 0.997, 0.994]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person).", "At 1550.0s, [0.818, 0.779, 1.0, 0.994]: bend/bow (at the waist), watch (a person).", "At 1549.0s, [0.823, 0.774, 1.0, 0.998]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person).", "From 1547.0s to 1548.0s, [0.809, 0.759, 1.0, 0.993]->[0.816, 0.766, 1.0, 0.994]: bend/bow (at the waist)."]}, {"from": "human", "timestamps": 1563.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.335, 0.492, 0.357, 0.623]: stand; [0.538, 0.34, 0.626, 0.873]: talk to (e.g., self, a person, a group), walk; [0.627, 0.375, 0.717, 0.864]: carry/hold (an object), listen to (a person), watch (a person), walk."}]}, {"video": "gjasEUDkbuc.mkv", "clip": [1687, 1755], "conversations": [{"from": "human", "timestamps": 1717.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.214, 0.111, 0.711, 1.0]: listen to (a person), stand, watch (a person); [0.598, 0.039, 0.947, 0.981]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 
1755.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.001, 0.004, 0.446, 1.0]?"}, {"from": "gpt", "value": ["In the current frame, [0.001, 0.004, 0.446, 1.0]: stand, play with kids, watch (a person).", "At 1754.0s, [0.0, 0.0, 0.511, 1.0]: stand, watch (a person).", "From 1752.0s to 1753.0s, [0.0, 0.16, 0.321, 0.994]->[0.084, 0.143, 0.364, 0.968]: stand, kiss (a person), watch (a person).", "At 1751.0s, [0.004, 0.181, 0.271, 0.993]: stand, lift (a person), watch (a person).", "At 1750.0s, [0.0, 0.112, 0.269, 0.976]: stand, grab (a person), watch (a person).", "At 1749.0s, [0.0, 0.331, 0.31, 0.982]: stand, lift (a person).", "At 1748.0s, [0.0, 0.285, 0.244, 0.995]: stand.", "At 1747.0s, [0.0, 0.237, 0.309, 0.975]: stand, talk to (e.g., self, a person, a group).", "At 1746.0s, [0.0, 0.359, 0.226, 0.995]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1745.0s, [0.015, 0.351, 0.378, 0.996]: stand.", "At 1744.0s, [0.017, 0.418, 0.333, 0.993]: stand, lift (a person)."]}]}, {"video": "gjdgj04FzR0.mp4", "clip": [916, 978], "conversations": [{"from": "human", "timestamps": 946.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.51, 0.507, 0.592, 0.788]: carry/hold (an object), walk; [0.587, 0.481, 0.674, 0.78]: walk."}, {"from": "human", "timestamps": 978.0, "value": "What actions did the person at bbox [0.072, 0.179, 0.31, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.072, 0.179, 0.31, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 975.0s to 977.0s, [0.06, 0.223, 0.301, 0.978]->[0.07, 0.218, 0.293, 0.969]->[0.065, 0.22, 0.311, 0.983]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 973.0s to 974.0s, [0.022, 0.21, 0.354, 0.991]->[0.021, 0.219, 0.322, 0.972]: stand, carry/hold (an object), hand shake, talk to (e.g., self, a person, a group), watch (a person).", "At 972.0s, [0.005, 0.187, 0.272, 0.963]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "gjdgj04FzR0.mp4", "clip": [965, 1001], "conversations": [{"from": "human", "timestamps": 995.0, "value": "What actions is the person in bbox [0.106, 0.153, 0.403, 0.988] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.106, 0.153, 0.403, 0.988]: stand, touch (an object).", "At 994.0s, [0.003, 0.074, 0.33, 0.972]: stand, carry/hold (an object).", "From 990.0s to 993.0s, [0.328, 0.19, 0.523, 0.722]->[0.446, 0.095, 0.676, 1.0]->[0.33, 0.158, 0.549, 0.965]->[0.019, 0.125, 0.322, 0.956]: walk, carry/hold (an object)."]}, {"from": "human", "timestamps": 1001.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.129, 0.141, 0.465, 0.978]: carry/hold (an object), stand."}]}, {"video": "gjdgj04FzR0.mp4", "clip": [992, 1028], "conversations": [{"from": "human", "timestamps": 1022.0, "value": "What actions is the person in bbox [0.122, 0.139, 0.704, 0.985] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.122, 0.139, 0.704, 0.985]: stand.", "At 1021.0s, [0.096, 0.117, 0.693, 0.997]: stand, listen to (a person).", "From 1019.0s to 1020.0s, [0.714, 0.254, 0.913, 0.986]->[0.737, 0.253, 
0.937, 0.986]: stand, carry/hold (an object).", "At 1018.0s, [0.762, 0.246, 0.994, 0.972]: walk, carry/hold (an object), listen to (a person).", "At 1017.0s, [0.726, 0.224, 0.999, 0.985]: walk, carry/hold (an object), watch (e.g., TV).", "From 1015.0s to 1016.0s, [0.571, 0.215, 0.92, 0.999]->[0.669, 0.235, 0.981, 0.982]: walk, carry/hold (an object).", "At 1014.0s, [0.323, 0.24, 0.593, 0.976]: walk, watch (e.g., TV)."]}, {"from": "human", "timestamps": 1028.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.149, 0.156, 0.745, 0.989]: stand."}]}, {"video": "gjdgj04FzR0.mp4", "clip": [1043, 1083], "conversations": [{"from": "human", "timestamps": 1073.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.153, 0.11, 0.805, 0.982]: stand."}, {"from": "human", "timestamps": 1083.0, "value": "What actions did the person at bbox [0.503, 0.215, 0.571, 0.582] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.503, 0.215, 0.571, 0.582]: walk, carry/hold (an object), listen to (a person), watch (a person).", "From 1080.0s to 1082.0s, [0.509, 0.245, 0.71, 0.991]->[0.483, 0.249, 0.71, 0.992]->[0.514, 0.255, 0.71, 0.982]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "gjdgj04FzR0.mp4", "clip": [1116, 1158], "conversations": [{"from": "human", "timestamps": 1146.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.356, 0.135, 0.686, 1.0]?"}, {"from": "gpt", "value": ["At Now, [0.356, 0.135, 0.686, 1.0]: stand, carry/hold (an object), watch (e.g., TV), listen to (a person), watch (a person).", "At 1145.0s, [0.34, 0.141, 0.671, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1158.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.222, 0.196, 0.977]: walk; [0.159, 0.188, 0.447, 0.984]: watch (a person), walk; [0.298, 0.132, 0.718, 0.992]: stand."}]}, {"video": "gjdgj04FzR0.mp4", "clip": [1196, 1253], "conversations": [{"from": "human", "timestamps": 1226.0, "value": "What actions did the person at bbox [0.321, 0.219, 0.487, 0.894] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.321, 0.219, 0.487, 0.894]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1225.0s, [0.243, 0.239, 0.423, 0.89]: stand, listen to (a person), watch (a person).", "At 1224.0s, [0.156, 0.209, 0.335, 0.857]: stand, touch (an object), watch (a person).", "At 1223.0s, [0.073, 0.233, 0.257, 0.854]: stand."]}, {"from": "human", "timestamps": 1253.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.082, 0.207, 0.544, 0.984]: carry/hold (an object), stand; [0.51, 0.231, 1.0, 0.979]: carry/hold (an object), sit, watch (a person), listen to (a person)."}]}, {"video": "gjdgj04FzR0.mp4", "clip": [1288, 1334], "conversations": [{"from": "human", "timestamps": 1318.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.034, 0.123, 0.782, 0.988]: listen to (a person), stand, watch (a person); [0.491, 0.106, 0.993, 0.981]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1334.0, "value": "Describe the actions 
of the person at bbox [0.0, 0.131, 0.319, 0.996] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.131, 0.319, 0.996]: sit, stand, listen to (a person), watch (a person).", "At 1333.0s, [0.0, 0.16, 0.31, 0.971]: stand, listen to (a person), watch (a person).", "At 1332.0s, [0.0, 0.154, 0.334, 0.988]: sit, stand, listen to (a person), watch (a person).", "At 1331.0s, [0.0, 0.197, 0.355, 0.999]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1330.0s, [0.0, 0.172, 0.636, 0.98]: sit, stand, carry/hold (an object).", "At 1329.0s, [0.0, 0.16, 0.591, 0.971]: stand, carry/hold (an object), listen to (a person).", "From 1327.0s to 1328.0s, [0.03, 0.168, 0.633, 1.0]->[0.036, 0.167, 0.635, 0.994]: stand, carry/hold (an object), watch (a person).", "From 1324.0s to 1326.0s, [0.04, 0.18, 0.423, 0.966]->[0.04, 0.204, 0.45, 0.943]->[0.046, 0.196, 0.429, 0.975]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1323.0s, [0.047, 0.186, 0.437, 1.0]: stand, watch (a person).", "At 1322.0s, [0.054, 0.187, 0.424, 0.984]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1320.0s to 1321.0s, [0.037, 0.159, 0.682, 0.989]->[0.045, 0.169, 0.69, 0.986]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1319.0s, [0.024, 0.154, 0.685, 0.987]: stand, talk to (e.g., self, a person, a group).", "At 1318.0s, [0.034, 0.123, 0.782, 0.988]: stand, listen to (a person), watch (a person).", "At 1317.0s, [0.033, 0.16, 0.67, 0.983]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1316.0s, [0.02, 0.156, 0.667, 0.991]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1315.0s, [0.02, 0.143, 0.659, 0.991]: stand, talk to (e.g., self, a person, a group).", "From 1311.0s to 1314.0s, [0.0, 0.197, 0.386, 0.961]->[0.0, 0.208, 0.363, 0.963]->[0.0, 0.181, 0.366, 0.967]->[0.0, 0.2, 0.341, 0.992]: stand, listen to (a person).", "From 1309.0s to 1310.0s, [0.018, 0.162, 0.598, 0.98]->[0.013, 0.169, 0.604, 0.982]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1308.0s, [0.0, 0.176, 0.258, 0.97]: sit, listen to (a person), watch (a person).", "From 1306.0s to 1307.0s, [0.0, 0.164, 0.244, 0.973]->[0.0, 0.165, 0.239, 0.97]: stand, listen to (a person), watch (a person).", "At 1305.0s, [0.0, 0.162, 0.223, 0.961]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1303.0s to 1304.0s, [0.048, 0.159, 0.641, 0.977]->[0.043, 0.152, 0.649, 0.985]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1300.0s to 1302.0s, [0.083, 0.157, 0.425, 0.976]->[0.015, 0.133, 0.482, 0.822]->[0.0, 0.175, 0.362, 0.897]: stand, carry/hold (an object), listen to (a person).", "At 1299.0s, [0.082, 0.165, 0.39, 0.971]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1298.0s, [0.089, 0.144, 0.417, 0.971]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "gjdgj04FzR0.mp4", "clip": [1291, 1347], "conversations": [{"from": "human", "timestamps": 1321.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.045, 0.169, 0.69, 0.986]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.504, 0.084, 0.994, 0.997]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 
1347.0, "value": "Describe the actions of the person at bbox [0.301, 0.159, 0.961, 0.998] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.301, 0.159, 0.961, 0.998]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1345.0s to 1346.0s, [0.3, 0.168, 0.971, 1.0]->[0.307, 0.155, 0.949, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1344.0s, [0.301, 0.168, 0.966, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1343.0s, [0.293, 0.175, 0.983, 0.992]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1342.0s, [0.222, 0.151, 0.975, 0.988]: sit, drink, listen to (a person).", "At 1341.0s, [0.183, 0.155, 0.975, 0.989]: stand, drink, listen to (a person).", "At 1340.0s, [0.157, 0.146, 0.958, 1.0]: sit, drink.", "At 1339.0s, [0.222, 0.166, 0.907, 0.998]: stand, drink, talk to (e.g., self, a person, a group), watch (a person).", "At 1338.0s, [0.176, 0.148, 0.985, 0.984]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1337.0s, [0.248, 0.144, 0.895, 0.983]: sit, stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1336.0s, [0.228, 0.174, 0.942, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1334.0s to 1335.0s, [0.28, 0.178, 0.892, 0.998]->[0.279, 0.156, 0.868, 0.99]: sit, stand, watch (a person).", "At 1333.0s, [0.291, 0.172, 0.923, 0.994]: sit, stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1332.0s, [0.24, 0.175, 0.905, 0.991]: sit, stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1331.0s, [0.164, 0.16, 0.907, 0.989]: sit, listen to (a person), watch (a person)."]}]}, {"video": "gjdgj04FzR0.mp4", "clip": [1371, 1480], "conversations": [{"from": "human", "timestamps": 1401.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.334, 0.188, 0.521, 0.979]: stand, answer phone; [0.824, 0.25, 0.996, 0.992]: watch (a person), stand."}, {"from": "human", "timestamps": 1480.0, "value": "For the person at bbox [0.077, 0.137, 0.986, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.077, 0.137, 0.986, 1.0]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1479.0s, [0.086, 0.142, 1.0, 0.997]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1476.0s to 1478.0s, [0.69, 0.246, 1.0, 0.994]->[0.506, 0.177, 0.996, 0.971]->[0.052, 0.112, 0.979, 0.985]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1475.0s, [0.648, 0.211, 0.992, 0.994]: sit, carry/hold (an object)."]}]}, {"video": "gjdgj04FzR0.mp4", "clip": [1423, 1457], "conversations": [{"from": "human", "timestamps": 1453.0, "value": "Describe the actions of the person at bbox [0.056, 0.194, 0.624, 0.882] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.056, 0.194, 0.624, 0.882]: sit, listen to (a person), watch (a person).", "From 1449.0s to 1452.0s, [0.049, 0.195, 0.63, 0.89]->[0.056, 0.191, 0.62, 0.887]->[0.055, 0.193, 0.619, 0.886]->[0.049, 0.191, 0.622, 0.879]: sit, listen to (a person), watch (a person).", "At 1448.0s, [0.042, 0.203, 0.624, 0.887]: sit, listen to (a person), talk to (e.g., self, a 
person, a group), watch (a person).", "At 1447.0s, [0.032, 0.193, 0.623, 0.888]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1446.0s, [0.032, 0.207, 0.62, 0.899]: sit, touch (an object), listen to (a person), watch (a person).", "From 1444.0s to 1445.0s, [0.023, 0.21, 0.598, 0.9]->[0.02, 0.209, 0.609, 0.873]: sit, listen to (a person), watch (a person).", "From 1441.0s to 1443.0s, [0.025, 0.205, 0.595, 0.892]->[0.027, 0.211, 0.598, 0.896]->[0.022, 0.204, 0.594, 0.903]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1440.0s, [0.023, 0.192, 0.593, 0.879]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1434.0s to 1439.0s, [0.021, 0.203, 0.603, 0.893]->[0.032, 0.201, 0.592, 0.89]->[0.028, 0.205, 0.595, 0.911]->[0.024, 0.203, 0.589, 0.903]->[0.029, 0.197, 0.595, 0.907]->[0.025, 0.206, 0.589, 0.914]: sit, listen to (a person), watch (a person).", "At 1433.0s, [0.02, 0.207, 0.59, 0.914]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1432.0s, [0.029, 0.19, 0.587, 0.888]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1431.0s, [0.023, 0.192, 0.585, 0.886]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1426.0s to 1430.0s, [0.008, 0.216, 0.546, 0.916]->[0.013, 0.205, 0.564, 0.916]->[0.009, 0.211, 0.583, 0.918]->[0.018, 0.204, 0.585, 0.906]->[0.02, 0.197, 0.578, 0.894]: sit, listen to (a person), watch (a person).", "From 1424.0s to 1425.0s, [0.208, 0.514, 0.499, 0.914]->[0.023, 0.205, 0.586, 0.937]: sit, touch (an object), listen to (a person).", "At 1423.0s, [0.194, 0.563, 0.507, 0.963]: sit, listen to (a person).", "At 1422.0s, [0.196, 0.578, 0.509, 0.95]: sit.", "At 1421.0s, [0.315, 0.459, 0.523, 0.964]: bend/bow (at the waist), listen to (a person).", "At 1420.0s, [0.426, 0.253, 0.605, 0.975]: stand."]}, {"from": "human", "timestamps": 1457.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.052, 0.207, 0.63, 0.876]: sit, talk to (e.g., self, a person, a group); [0.623, 0.17, 1.0, 0.967]: listen to (a person), sit, watch (a person)."}]}, {"video": "gjdgj04FzR0.mp4", "clip": [1525, 1557], "conversations": [{"from": "human", "timestamps": 1555.0, "value": "What actions is the person in bbox [0.596, 0.513, 0.8, 0.978] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.596, 0.513, 0.8, 0.978]: sit, carry/hold (an object), listen to (a person).", "From 1553.0s to 1554.0s, [0.603, 0.52, 0.802, 0.967]->[0.605, 0.533, 0.802, 0.962]: sit, carry/hold (an object), listen to (a person).", "From 1551.0s to 1552.0s, [0.59, 0.494, 0.807, 0.961]->[0.596, 0.513, 0.8, 0.929]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1550.0s, [0.591, 0.512, 0.817, 0.959]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1546.0s to 1549.0s, [0.545, 0.53, 0.79, 0.983]->[0.553, 0.53, 0.793, 0.976]->[0.577, 0.519, 0.82, 0.954]->[0.595, 0.509, 0.828, 0.915]: sit, carry/hold (an object), listen to (a person).", "At 1545.0s, [0.583, 0.508, 0.801, 0.899]: sit, listen to (a person), watch (a person).", "From 1543.0s to 1544.0s, [0.521, 0.507, 0.787, 0.934]->[0.57, 0.504, 0.817, 0.934]: sit, listen to (a person).", "At 1542.0s, [0.652, 0.508, 0.781, 0.958]: sit, listen to (a person), talk to (e.g., self, a 
person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1557.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.343, 0.143, 0.542, 0.841]: listen to (a person), stand, watch (a person); [0.586, 0.507, 0.814, 0.9]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "gjdgj04FzR0.mp4", "clip": [1512, 1583], "conversations": [{"from": "human", "timestamps": 1542.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.344, 0.139, 0.543, 0.847]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.516, 0.516, 0.71, 0.836]: listen to (a person), sit, watch (a person); [0.652, 0.508, 0.781, 0.958]: listen to (a person), sit, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1583.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.437, 0.235, 0.906, 0.963]?"}, {"from": "gpt", "value": ["At Now, [0.437, 0.235, 0.906, 0.963]: sit, touch (an object), listen to (a person), watch (a person).", "From 1581.0s to 1582.0s, [0.432, 0.241, 0.904, 0.956]->[0.435, 0.24, 0.91, 0.956]: sit, talk to (e.g., self, a person, a group).", "From 1579.0s to 1580.0s, [0.432, 0.294, 0.894, 0.932]->[0.43, 0.29, 0.906, 0.942]: sit, listen to (a person).", "From 1577.0s to 1578.0s, [0.481, 0.436, 0.849, 0.949]->[0.449, 0.312, 0.864, 0.941]: sit, touch (an object), listen to (a person).", "From 1575.0s to 1576.0s, [0.53, 0.494, 0.809, 0.981]->[0.504, 0.499, 0.829, 0.953]: sit, listen to (a person).", "From 1573.0s to 1574.0s, [0.592, 0.519, 0.807, 0.973]->[0.595, 0.538, 0.814, 0.981]: sit, carry/hold (an object), listen to (a person).", "From 1571.0s to 1572.0s, [0.564, 0.519, 0.819, 0.955]->[0.587, 0.522, 0.812, 0.963]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1570.0s, [0.593, 0.52, 0.815, 0.924]: sit, carry/hold (an object), listen to (a person).", "At 1569.0s, [0.589, 0.524, 0.814, 0.945]: sit, carry/hold (an object), touch (an object), listen to (a person).", "From 1567.0s to 1568.0s, [0.576, 0.523, 0.808, 0.936]->[0.583, 0.526, 0.811, 0.955]: sit, carry/hold (an object), listen to (a person).", "From 1556.0s to 1566.0s, [0.579, 0.512, 0.811, 0.95]->[0.586, 0.507, 0.814, 0.9]->[0.586, 0.505, 0.803, 0.938]->[0.581, 0.505, 0.801, 0.943]->[0.584, 0.507, 0.802, 0.942]->[0.581, 0.509, 0.803, 0.948]->[0.581, 0.508, 0.801, 0.942]->[0.587, 0.507, 0.802, 0.95]->[0.588, 0.515, 0.803, 0.964]->[0.596, 0.515, 0.804, 0.96]->[0.591, 0.506, 0.8, 0.939]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1553.0s to 1555.0s, [0.603, 0.52, 0.802, 0.967]->[0.605, 0.533, 0.802, 0.962]->[0.596, 0.513, 0.8, 0.978]: sit, carry/hold (an object), listen to (a person).", "From 1551.0s to 1552.0s, [0.59, 0.494, 0.807, 0.961]->[0.596, 0.513, 0.8, 0.929]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1550.0s, [0.591, 0.512, 0.817, 0.959]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1546.0s to 1549.0s, [0.545, 0.53, 0.79, 0.983]->[0.553, 0.53, 0.793, 0.976]->[0.577, 0.519, 0.82, 0.954]->[0.595, 0.509, 0.828, 0.915]: sit, carry/hold (an object), listen to (a person).", "At 1545.0s, [0.583, 0.508, 0.801, 0.899]: sit, listen to (a person), watch (a person).", "From 1543.0s to 
1544.0s, [0.521, 0.507, 0.787, 0.934]->[0.57, 0.504, 0.817, 0.934]: sit, listen to (a person).", "At 1542.0s, [0.652, 0.508, 0.781, 0.958]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "gjdgj04FzR0.mp4", "clip": [1572, 1615], "conversations": [{"from": "human", "timestamps": 1602.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.501, 0.189, 0.873, 0.968]: carry/hold (an object), sit."}, {"from": "human", "timestamps": 1615.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.003, 0.104, 0.694, 0.989]?"}, {"from": "gpt", "value": ["In the current frame, [0.003, 0.104, 0.694, 0.989]: stand, answer phone.", "From 1609.0s to 1614.0s, [0.011, 0.074, 0.789, 0.996]->[0.02, 0.104, 0.767, 0.984]->[0.0, 0.108, 0.672, 0.983]->[0.004, 0.118, 0.698, 0.98]->[0.006, 0.12, 0.701, 0.993]->[0.001, 0.119, 0.69, 0.987]: stand, answer phone."]}]}, {"video": "gjdgj04FzR0.mp4", "clip": [1715, 1752], "conversations": [{"from": "human", "timestamps": 1745.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.048, 0.273, 0.541, 0.98]: stand, talk to (e.g., self, a person, a group); [0.514, 0.109, 1.0, 0.999]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1752.0, "value": "For the person at bbox [0.519, 0.109, 1.0, 0.996], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.519, 0.109, 1.0, 0.996]: stand, listen to (a person), watch (a person).", "From 1749.0s to 1751.0s, [0.513, 0.12, 1.0, 0.987]->[0.523, 0.114, 0.995, 0.993]->[0.527, 0.11, 1.0, 0.978]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1742.0s to 1748.0s, [0.51, 0.125, 0.995, 0.999]->[0.522, 0.122, 1.0, 1.0]->[0.516, 0.119, 0.999, 0.983]->[0.514, 0.109, 1.0, 0.999]->[0.517, 0.104, 0.997, 0.99]->[0.514, 0.109, 0.991, 0.988]->[0.516, 0.11, 0.992, 0.993]: stand, listen to (a person), watch (a person).", "From 1740.0s to 1741.0s, [0.518, 0.127, 0.995, 0.982]->[0.521, 0.132, 1.0, 0.98]: stand, watch (a person).", "From 1735.0s to 1739.0s, [0.456, 0.117, 0.938, 0.973]->[0.46, 0.148, 0.923, 0.979]->[0.502, 0.09, 1.0, 0.988]->[0.539, 0.116, 0.999, 0.986]->[0.528, 0.116, 0.992, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1734.0s, [0.458, 0.115, 0.926, 1.0]: stand, touch (an object), listen to (a person), watch (a person).", "From 1732.0s to 1733.0s, [0.447, 0.13, 0.917, 0.976]->[0.434, 0.122, 0.946, 0.989]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1731.0s, [0.422, 0.087, 0.908, 0.989]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1730.0s, [0.43, 0.12, 0.88, 0.971]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1729.0s, [0.434, 0.12, 0.834, 0.983]: walk, listen to (a person), watch (a person).", "At 1728.0s, [0.502, 0.175, 0.863, 0.988]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1727.0s, [0.574, 0.162, 0.824, 0.978]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1726.0s, [0.557, 0.157, 0.788, 0.976]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1725.0s, [0.547, 0.167, 0.79, 0.973]: stand, listen to (a 
person), watch (a person).", "From 1723.0s to 1724.0s, [0.52, 0.154, 0.748, 0.98]->[0.531, 0.165, 0.771, 0.971]: stand, touch (an object), listen to (a person), watch (a person).", "At 1722.0s, [0.52, 0.159, 0.753, 0.98]: stand, listen to (a person), watch (a person).", "At 1721.0s, [0.467, 0.162, 0.759, 0.972]: stand, touch (an object), listen to (a person), watch (a person).", "From 1719.0s to 1720.0s, [0.489, 0.188, 0.798, 0.974]->[0.465, 0.186, 0.789, 0.971]: stand, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1718.0s, [0.474, 0.192, 0.787, 0.973]: stand, touch (an object), listen to (a person), watch (a person).", "At 1717.0s, [0.474, 0.194, 0.783, 0.976]: stand, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "At 1716.0s, [0.468, 0.199, 0.779, 0.975]: stand, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1715.0s, [0.472, 0.2, 0.784, 0.969]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1714.0s, [0.471, 0.207, 0.792, 0.962]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person).", "From 1712.0s to 1713.0s, [0.518, 0.136, 0.784, 0.98]->[0.474, 0.185, 0.797, 0.946]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1711.0s, [0.503, 0.136, 0.801, 0.967]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1710.0s, [0.667, 0.348, 0.907, 0.926]: carry/hold (an object), get up, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "gqmmpoO1JrY.mkv", "clip": [1202, 1270], "conversations": [{"from": "human", "timestamps": 1232.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.123, 0.0, 0.653, 0.987]: carry/hold (an object), write, walk."}, {"from": "human", "timestamps": 1270.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.105, 0.018, 0.644, 0.98]?"}, {"from": "gpt", "value": ["At Now, [0.105, 0.018, 0.644, 0.98]: bend/bow (at the waist), carry/hold (an object), listen (e.g., to music).", "At 1269.0s, [0.209, 0.338, 0.424, 0.968]: bend/bow (at the waist), carry/hold (an object), listen (e.g., to music), touch (an object)."]}]}, {"video": "hbYvDvJrpNk.mp4", "clip": [894, 960], "conversations": [{"from": "human", "timestamps": 924.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.334, 0.071, 0.851, 0.968]?"}, {"from": "gpt", "value": ["In the current frame, [0.334, 0.071, 0.851, 0.968]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 923.0s, [0.598, 0.309, 0.757, 0.76]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 922.0s, [0.606, 0.273, 0.743, 0.725]: sit, carry/hold (an object), listen to (a person)."]}, {"from": "human", "timestamps": 960.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.298, 0.399, 0.58, 1.0]: listen to (a person), sit, watch (a person); [0.383, 0.061, 0.523, 0.628]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.568, 0.314, 0.656, 0.565]: carry/hold (an object), sit, watch (a person), listen to (a person); [0.575, 0.303, 0.736, 0.746]: carry/hold (an object), sit, watch (a person), listen to (a 
person); [0.639, 0.336, 0.878, 0.988]: listen to (a person), sit, touch (an object), watch (a person)."}]}, {"video": "hbYvDvJrpNk.mp4", "clip": [895, 1015], "conversations": [{"from": "human", "timestamps": 925.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.362, 0.069, 0.861, 0.988]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1015.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.505, 0.089, 0.878, 0.983]?"}, {"from": "gpt", "value": ["In the current frame, [0.505, 0.089, 0.878, 0.983]: sit, listen to (a person), watch (a person).", "At 1014.0s, [0.514, 0.084, 0.878, 0.986]: sit, listen to (a person), watch (a person)."]}]}, {"video": "hbYvDvJrpNk.mp4", "clip": [980, 1016], "conversations": [{"from": "human", "timestamps": 1010.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.5, 0.235, 0.718, 0.995]?"}, {"from": "gpt", "value": ["At Now, [0.5, 0.235, 0.718, 0.995]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1009.0s, [0.455, 0.421, 0.57, 0.916]: sit, talk to (e.g., self, a person, a group).", "At 1008.0s, [0.324, 0.203, 0.557, 0.99]: stand."]}, {"from": "human", "timestamps": 1016.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.239, 0.121, 0.648, 0.976]: sit, talk to (e.g., self, a person, a group); [0.54, 0.087, 0.878, 0.972]: listen to (a person), watch (a person)."}]}, {"video": "hbYvDvJrpNk.mp4", "clip": [1009, 1110], "conversations": [{"from": "human", "timestamps": 1039.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.217, 0.061, 0.616, 0.986]: sit."}, {"from": "human", "timestamps": 1110.0, "value": "For the person at bbox [0.353, 0.408, 0.648, 0.991], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.353, 0.408, 0.648, 0.991]: stand, listen to (a person), watch (a person).", "At 1109.0s, [0.276, 0.263, 0.483, 0.967]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1108.0s, [0.124, 0.079, 0.333, 0.997]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "hbYvDvJrpNk.mp4", "clip": [1060, 1134], "conversations": [{"from": "human", "timestamps": 1090.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.129, 0.157, 0.456, 0.944]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.757, 0.22, 0.876, 0.628]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1134.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.561, 0.33, 0.637, 0.807]?"}, {"from": "gpt", "value": ["In the current frame, [0.561, 0.33, 0.637, 0.807]: bend/bow (at the waist), take a photo, watch (a person).", "At 1133.0s, [0.565, 0.337, 0.643, 0.817]: bend/bow (at the waist), carry/hold (an object), take a photo.", "At 1132.0s, [0.563, 0.327, 0.643, 0.841]: bend/bow (at the waist), take a photo, watch (a person)."]}]}, {"video": "hbYvDvJrpNk.mp4", "clip": [1185, 1226], "conversations": [{"from": "human", "timestamps": 1215.0, "value": "Describe the actions of the person at bbox [0.591, 0.049, 0.874, 0.991] in the current frame and the previous 
frames."}, {"from": "gpt", "value": ["In the current frame, [0.591, 0.049, 0.874, 0.991]: stand, listen to (a person), watch (a person).", "At 1214.0s, [0.158, 0.051, 0.53, 0.991]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1226.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.127, 0.24, 0.291, 0.988]: take a photo, stand, listen to (a person); [0.233, 0.267, 0.367, 0.981]: carry/hold (an object), stand, hand wave; [0.362, 0.32, 0.458, 0.987]: stand; [0.417, 0.022, 0.59, 0.986]: carry/hold (an object), stand, hand wave, talk to (e.g., self, a person, a group); [0.549, 0.16, 0.77, 0.991]: carry/hold (an object), stand, listen to (a person), take a photo; [0.735, 0.175, 0.878, 0.977]: take a photo, stand, listen to (a person)."}]}, {"video": "hbYvDvJrpNk.mp4", "clip": [1243, 1312], "conversations": [{"from": "human", "timestamps": 1273.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.501, 0.319, 0.535, 0.515]: stand."}, {"from": "human", "timestamps": 1312.0, "value": "For the person at bbox [0.429, 0.076, 0.768, 0.986], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.429, 0.076, 0.768, 0.986]: stand, talk to (e.g., self, a person, a group).", "From 1310.0s to 1311.0s, [0.527, 0.031, 0.836, 0.974]->[0.445, 0.082, 0.765, 0.982]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "hbYvDvJrpNk.mp4", "clip": [1298, 1367], "conversations": [{"from": "human", "timestamps": 1328.0, "value": "Describe the actions of the person at bbox [0.244, 0.275, 0.4, 0.988] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.244, 0.275, 0.4, 0.988]: walk, listen to (a person), watch (a person).", "At 1327.0s, [0.458, 0.241, 0.546, 0.995]: walk."]}, {"from": "human", "timestamps": 1367.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.122, 0.131, 0.207, 0.932]: stand; [0.294, 0.181, 0.392, 0.6]: listen to (a person), stand, watch (a person); [0.341, 0.106, 0.57, 0.945]: listen to (a person), stand, watch (a person); [0.546, 0.43, 0.682, 0.982]: listen to (a person), stand, watch (a person); [0.633, 0.238, 0.821, 0.973]: listen to (a person), stand, watch (a person); [0.761, 0.249, 0.876, 0.908]: listen to (a person), stand, watch (a person)."}]}, {"video": "hbYvDvJrpNk.mp4", "clip": [1394, 1448], "conversations": [{"from": "human", "timestamps": 1424.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.177, 0.188, 0.452, 0.986]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1448.0, "value": "What actions is the person in bbox [0.529, 0.224, 0.684, 0.986] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.529, 0.224, 0.684, 0.986]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 1445.0s to 1447.0s, [0.511, 0.574, 0.685, 1.0]->[0.412, 0.259, 0.772, 0.999]->[0.476, 0.285, 0.854, 0.997]: walk, talk to (e.g., self, a person, a group)."]}]}, {"video": "hbYvDvJrpNk.mp4", "clip": [1470, 1543], "conversations": [{"from": "human", 
"timestamps": 1500.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.309, 0.099, 0.615, 0.998]: watch (a person), martial art; [0.506, 0.163, 0.731, 0.994]: martial art, fight/hit (a person)."}, {"from": "human", "timestamps": 1543.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.345, 0.211, 0.776, 0.979]?"}, {"from": "gpt", "value": ["At Now, [0.345, 0.211, 0.776, 0.979]: stand, watch (a person), martial art.", "At 1542.0s, [0.326, 0.082, 0.572, 0.99]: stand, watch (a person).", "At 1541.0s, [0.324, 0.107, 0.61, 0.993]: watch (a person), martial art.", "From 1538.0s to 1539.0s, [0.313, 0.055, 0.638, 0.976]->[0.316, 0.077, 0.576, 0.995]: fight/hit (a person), watch (a person), martial art.", "At 1537.0s, [0.246, 0.195, 0.575, 0.951]: stand, fight/hit (a person), watch (a person).", "From 1535.0s to 1536.0s, [0.14, 0.022, 0.559, 1.0]->[0.127, 0.0, 0.595, 0.99]: fight/hit (a person), watch (a person), martial art."]}]}, {"video": "hbYvDvJrpNk.mp4", "clip": [1532, 1603], "conversations": [{"from": "human", "timestamps": 1562.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.124, 0.227, 0.356, 0.986]: listen to (a person), stand; [0.228, 0.136, 0.646, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.644, 0.078, 0.874, 0.999]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1603.0, "value": "For the person at bbox [0.171, 0.126, 0.427, 0.976], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.171, 0.126, 0.427, 0.976]: stand, watch (a person).", "At 1602.0s, [0.254, 0.141, 0.491, 0.993]: stand, listen to (a person), watch (a person)."]}]}, {"video": "hbYvDvJrpNk.mp4", "clip": [1594, 1660], "conversations": [{"from": "human", "timestamps": 1624.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.206, 0.201, 0.641, 0.981]: martial art; [0.623, 0.09, 0.874, 0.998]: martial art."}, {"from": "human", "timestamps": 1660.0, "value": "Describe the actions of the person at bbox [0.275, 0.21, 0.635, 0.993] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.275, 0.21, 0.635, 0.993]: stand, fight/hit (a person), watch (a person).", "At 1658.0s, [0.419, 0.23, 0.611, 0.967]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person), martial art."]}]}, {"video": "hbYvDvJrpNk.mp4", "clip": [1746, 1793], "conversations": [{"from": "human", "timestamps": 1776.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.122, 0.009, 0.43, 0.987]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1793.0, "value": "For the person at bbox [0.429, 0.049, 0.878, 0.992], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.429, 0.049, 0.878, 0.992]: bend/bow (at the waist), listen to (a person).", "At 1792.0s, [0.128, 0.23, 0.534, 0.976]: bend/bow (at the waist)."]}]}, {"video": "iSlDMboCSao.mkv", "clip": [877, 994], "conversations": [{"from": "human", "timestamps": 907.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, 
[0.133, 0.346, 0.319, 0.972]: watch (a person), stand; [0.15, 0.228, 0.214, 0.434]: stand; [0.268, 0.232, 0.339, 0.568]: stand; [0.295, 0.399, 0.441, 0.976]: walk; [0.643, 0.358, 0.815, 0.987]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 994.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.176, 0.208, 0.417, 1.0]?"}, {"from": "gpt", "value": ["At Now, [0.176, 0.208, 0.417, 1.0]: stand, open (e.g., a window, a car door).", "At 993.0s, [0.184, 0.337, 0.407, 0.974]: stand, open (e.g., a window, a car door).", "At 992.0s, [0.614, 0.3, 0.807, 0.987]: walk."]}]}, {"video": "iSlDMboCSao.mkv", "clip": [1162, 1247], "conversations": [{"from": "human", "timestamps": 1192.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.119, 0.132, 0.496, 0.987]: watch (a person), stand; [0.348, 0.0, 0.86, 0.971]: listen to (a person), stand."}, {"from": "human", "timestamps": 1247.0, "value": "What actions did the person at bbox [0.218, 0.184, 0.522, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.218, 0.184, 0.522, 1.0]: stand, listen to (a person), watch (a person).", "At 1246.0s, [0.226, 0.189, 0.51, 0.979]: stand, listen to (a person), watch (a person)."]}]}, {"video": "iSlDMboCSao.mkv", "clip": [1205, 1238], "conversations": [{"from": "human", "timestamps": 1235.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.121, 0.13, 0.309, 0.985]: listen to (a person), stand, watch (a person); [0.29, 0.249, 0.4, 0.967]: watch (a person), stand; [0.326, 0.047, 0.595, 0.993]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.529, 0.023, 0.822, 0.988]: listen to (a person), stand; [0.537, 0.186, 0.641, 0.578]: listen to (a person), stand, watch (a person); [0.776, 0.262, 0.881, 0.992]: watch (a person), stand."}, {"from": "human", "timestamps": 1238.0, "value": "Describe the actions of the person at bbox [0.721, 0.246, 0.881, 0.92] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.721, 0.246, 0.881, 0.92]: stand, listen to (a person), watch (a person).", "At 1237.0s, [0.119, 0.211, 0.389, 0.994]: stand, listen to (a person)."]}]}, {"video": "iSlDMboCSao.mkv", "clip": [1234, 1313], "conversations": [{"from": "human", "timestamps": 1264.0, "value": "What actions did the person at bbox [0.326, 0.002, 0.846, 0.971] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.326, 0.002, 0.846, 0.971]: stand, watch (a person).", "From 1261.0s to 1263.0s, [0.364, 0.0, 0.839, 0.968]->[0.317, 0.0, 0.838, 0.971]->[0.304, 0.002, 0.842, 0.971]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1313.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.393, 0.186, 0.532, 0.886]: listen to (a person), walk; [0.511, 0.196, 0.67, 0.9]: watch (a person), talk to (e.g., self, a person, a group), walk."}]}, {"video": "iSlDMboCSao.mkv", "clip": [1282, 1364], "conversations": [{"from": "human", "timestamps": 1312.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.377, 0.214, 0.509, 0.905]: listen to (a person), walk; [0.499, 0.227, 0.658, 0.891]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1364.0, "value": "What actions is the person in bbox [0.632, 0.06, 
0.795, 0.937] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.632, 0.06, 0.795, 0.937]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1363.0s, [0.667, 0.047, 0.827, 0.892]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1362.0s, [0.683, 0.064, 0.865, 0.975]: stand, carry/hold (an object), listen to (a person).", "At 1361.0s, [0.526, 0.434, 0.863, 1.0]: bend/bow (at the waist), carry/hold (an object), listen to (a person).", "At 1360.0s, [0.63, 0.09, 0.866, 0.978]: bend/bow (at the waist), listen to (a person).", "From 1358.0s to 1359.0s, [0.526, 0.097, 0.814, 1.0]->[0.495, 0.09, 0.801, 0.984]: bend/bow (at the waist), kiss (a person).", "At 1357.0s, [0.46, 0.003, 0.801, 0.972]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1355.0s to 1356.0s, [0.686, 0.028, 0.869, 0.969]->[0.654, 0.0, 0.813, 0.976]: stand, listen to (a person), watch (a person).", "At 1354.0s, [0.563, 0.42, 0.879, 0.999]: bend/bow (at the waist), carry/hold (an object), put down, talk to (e.g., self, a person, a group).", "At 1353.0s, [0.639, 0.098, 0.865, 0.98]: bend/bow (at the waist), carry/hold (an object).", "At 1352.0s, [0.702, 0.02, 0.881, 0.969]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1351.0s, [0.793, 0.027, 0.881, 0.784]: walk, carry/hold (an object)."]}]}, {"video": "iSlDMboCSao.mkv", "clip": [1319, 1350], "conversations": [{"from": "human", "timestamps": 1349.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.24, 0.224, 0.607, 0.996]: talk to (e.g., self, a person, a group), touch (an object), bend/bow (at the waist)."}, {"from": "human", "timestamps": 1350.0, "value": "Describe the actions of the person at bbox [0.206, 0.119, 0.524, 0.994] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.206, 0.119, 0.524, 0.994]: bend/bow (at the waist), press, touch (an object), talk to (e.g., self, a person, a group).", "At 1349.0s, [0.24, 0.224, 0.607, 0.996]: bend/bow (at the waist), touch (an object), talk to (e.g., self, a person, a group).", "From 1347.0s to 1348.0s, [0.525, 0.061, 0.685, 1.0]->[0.306, 0.04, 0.618, 0.983]: walk.", "From 1344.0s to 1346.0s, [0.482, 0.078, 0.647, 0.985]->[0.556, 0.048, 0.715, 0.998]->[0.575, 0.064, 0.726, 0.968]: stand, close (e.g., a door, a box).", "At 1343.0s, [0.582, 0.106, 0.645, 0.965]: walk, open (e.g., a window, a car door).", "At 1342.0s, [0.428, 0.077, 0.806, 0.992]: stand, open (e.g., a window, a car door).", "At 1341.0s, [0.485, 0.029, 0.858, 0.991]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1339.0s to 1340.0s, [0.503, 0.001, 0.835, 0.997]->[0.508, 0.022, 0.881, 0.998]: stand, listen to (a person), watch (a person).", "At 1338.0s, [0.43, 0.0, 0.814, 1.0]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1333.0s to 1337.0s, [0.557, 0.0, 0.863, 0.99]->[0.562, 0.007, 0.862, 0.987]->[0.559, 0.0, 0.855, 0.983]->[0.544, 0.0, 0.855, 0.996]->[0.465, 0.017, 0.825, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1332.0s, [0.557, 0.0, 0.862, 0.99]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1329.0s to 1331.0s, [0.501, 0.012, 0.823, 0.985]->[0.481, 
0.035, 0.873, 0.979]->[0.527, 0.003, 0.881, 0.987]: walk, listen to (a person), watch (a person).", "At 1328.0s, [0.497, 0.04, 0.88, 0.983]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1327.0s, [0.48, 0.046, 0.818, 0.966]: walk, talk to (e.g., self, a person, a group).", "At 1326.0s, [0.484, 0.082, 0.782, 0.992]: walk.", "From 1323.0s to 1325.0s, [0.482, 0.144, 0.604, 0.978]->[0.483, 0.11, 0.644, 0.972]->[0.489, 0.103, 0.679, 0.972]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "iSlDMboCSao.mkv", "clip": [1369, 1420], "conversations": [{"from": "human", "timestamps": 1399.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.227, 0.042, 0.693, 0.992]: stand."}, {"from": "human", "timestamps": 1420.0, "value": "What actions did the person at bbox [0.278, 0.006, 0.646, 0.967] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.278, 0.006, 0.646, 0.967]: stand, dress/put on clothing.", "At 1419.0s, [0.259, 0.01, 0.709, 0.977]: stand, dress/put on clothing.", "At 1418.0s, [0.175, 0.027, 0.725, 0.978]: sit, stand, dress/put on clothing.", "At 1417.0s, [0.189, 0.011, 0.798, 0.968]: stand, dress/put on clothing.", "At 1416.0s, [0.198, 0.044, 0.713, 0.998]: stand, carry/hold (an object), dress/put on clothing."]}]}, {"video": "iSlDMboCSao.mkv", "clip": [1374, 1491], "conversations": [{"from": "human", "timestamps": 1404.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.266, 0.0, 0.678, 0.91]: stand."}, {"from": "human", "timestamps": 1491.0, "value": "What actions is the person in bbox [0.47, 0.0, 0.881, 0.993] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.47, 0.0, 0.881, 0.993]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1490.0s, [0.466, 0.0, 0.881, 0.995]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1488.0s to 1489.0s, [0.476, 0.0, 0.881, 0.982]->[0.467, 0.0, 0.881, 0.993]: stand, talk to (e.g., self, a person, a group).", "From 1485.0s to 1487.0s, [0.546, 0.027, 0.881, 0.981]->[0.493, 0.006, 0.881, 0.987]->[0.485, 0.0, 0.881, 0.982]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "iSlDMboCSao.mkv", "clip": [1419, 1463], "conversations": [{"from": "human", "timestamps": 1449.0, "value": "For the person at bbox [0.551, 0.183, 0.745, 0.937], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.551, 0.183, 0.745, 0.937]: bend/bow (at the waist), carry/hold (an object).", "At 1448.0s, [0.541, 0.31, 0.835, 0.998]: stand, lift/pick up.", "From 1445.0s to 1447.0s, [0.17, 0.045, 0.534, 0.99]->[0.169, 0.0, 0.492, 0.988]->[0.242, 0.046, 0.604, 0.972]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1463.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.575, 0.072, 0.775, 0.987]: run/jog, talk to (e.g., self, a person, a group)."}]}, {"video": "iSlDMboCSao.mkv", "clip": [1472, 1550], "conversations": [{"from": "human", "timestamps": 1502.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the 
current frame, [0.333, 0.233, 0.442, 0.993]: watch (a person), talk to (e.g., self, a person, a group), walk; [0.427, 0.225, 0.538, 0.994]: listen to (a person), watch (a person), walk; [0.637, 0.124, 0.764, 0.979]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.826, 0.238, 0.881, 0.969]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1550.0, "value": "What actions is the person in bbox [0.446, 0.0, 0.881, 0.992] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.446, 0.0, 0.881, 0.992]: stand, listen to (a person).", "From 1545.0s to 1549.0s, [0.521, 0.0, 0.881, 0.984]->[0.486, 0.0, 0.881, 0.991]->[0.476, 0.0, 0.876, 0.992]->[0.455, 0.0, 0.879, 0.998]->[0.47, 0.0, 0.878, 0.99]: stand, listen to (a person).", "At 1544.0s, [0.457, 0.0, 0.881, 0.987]: stand, talk to (e.g., self, a person, a group).", "At 1543.0s, [0.505, 0.0, 0.881, 0.995]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "iSlDMboCSao.mkv", "clip": [1592, 1701], "conversations": [{"from": "human", "timestamps": 1622.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.247, 0.072, 0.576, 0.991]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.652, 0.017, 0.881, 0.979]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1701.0, "value": "Describe the actions of the person at bbox [0.124, 0.147, 0.479, 0.978] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.124, 0.147, 0.479, 0.978]: stand, drink, listen to (a person).", "At 1700.0s, [0.124, 0.134, 0.497, 0.982]: stand, carry/hold (an object).", "At 1699.0s, [0.122, 0.117, 0.631, 0.97]: stand, carry/hold (an object), take (an object) from (a person).", "At 1698.0s, [0.138, 0.13, 0.609, 0.983]: stand, take (an object) from (a person), watch (a person).", "At 1697.0s, [0.135, 0.047, 0.427, 0.976]: walk, watch (a person).", "At 1696.0s, [0.154, 0.089, 0.429, 0.968]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1695.0s, [0.18, 0.089, 0.415, 0.98]: walk, hand wave, talk to (e.g., self, a person, a group), watch (a person).", "At 1694.0s, [0.167, 0.045, 0.387, 0.967]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1693.0s, [0.326, 0.04, 0.53, 0.985]: walk, talk to (e.g., self, a person, a group)."]}]}, {"video": "iSlDMboCSao.mkv", "clip": [1729, 1792], "conversations": [{"from": "human", "timestamps": 1759.0, "value": "What actions did the person at bbox [0.676, 0.202, 0.795, 0.483] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.676, 0.202, 0.795, 0.483]: walk, lift (a person).", "At 1758.0s, [0.59, 0.282, 0.731, 0.647]: walk, lift (a person).", "At 1757.0s, [0.53, 0.219, 0.659, 0.629]: walk, grab (a person), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1792.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.464, 0.45, 0.639, 0.998]: sit; [0.609, 0.266, 0.696, 0.9]: watch (a person), stand."}]}, {"video": "j35JnR0Q7Es.mp4", "clip": [920, 986], "conversations": [{"from": "human", "timestamps": 950.0, "value": "In the current frame, list each person's location and actions."}, {"from": 
"gpt", "value": "At now, [0.449, 0.425, 0.95, 0.935]: ride (e.g., a bike, a car, a horse), sit, watch (a person)."}, {"from": "human", "timestamps": 986.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.053, 0.177, 0.846, 1.0]?"}, {"from": "gpt", "value": ["At Now, [0.053, 0.177, 0.846, 1.0]: sit, ride (e.g., a bike, a car, a horse).", "From 980.0s to 985.0s, [0.0, 0.156, 0.903, 0.997]->[0.049, 0.138, 0.911, 0.957]->[0.043, 0.131, 0.885, 0.98]->[0.087, 0.087, 0.916, 0.951]->[0.032, 0.176, 0.698, 0.982]->[0.059, 0.22, 0.857, 0.98]: sit, ride (e.g., a bike, a car, a horse).", "At 979.0s, [0.017, 0.16, 1.0, 0.962]: sit, ride (e.g., a bike, a car, a horse), watch (a person)."]}]}, {"video": "j35JnR0Q7Es.mp4", "clip": [953, 1007], "conversations": [{"from": "human", "timestamps": 983.0, "value": "What actions did the person at bbox [0.087, 0.087, 0.916, 0.951] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.087, 0.087, 0.916, 0.951]: sit, ride (e.g., a bike, a car, a horse).", "From 980.0s to 982.0s, [0.0, 0.156, 0.903, 0.997]->[0.049, 0.138, 0.911, 0.957]->[0.043, 0.131, 0.885, 0.98]: sit, ride (e.g., a bike, a car, a horse).", "At 979.0s, [0.017, 0.16, 1.0, 0.962]: sit, ride (e.g., a bike, a car, a horse), watch (a person)."]}, {"from": "human", "timestamps": 1007.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.204, 0.153, 0.987, 0.961]: ride (e.g., a bike, a car, a horse), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "j35JnR0Q7Es.mp4", "clip": [999, 1065], "conversations": [{"from": "human", "timestamps": 1029.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.067, 0.226, 0.398, 0.791]: ride (e.g., a bike, a car, a horse), sit."}, {"from": "human", "timestamps": 1065.0, "value": "What actions did the person at bbox [0.463, 0.283, 0.71, 0.641] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.463, 0.283, 0.71, 0.641]: sit, ride (e.g., a bike, a car, a horse), listen to (a person), watch (a person).", "From 1063.0s to 1064.0s, [0.483, 0.299, 0.716, 0.642]->[0.427, 0.27, 0.757, 0.624]: sit, ride (e.g., a bike, a car, a horse), listen to (a person), watch (a person)."]}]}, {"video": "j35JnR0Q7Es.mp4", "clip": [1034, 1087], "conversations": [{"from": "human", "timestamps": 1064.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.208, 0.282, 0.443, 0.622]: ride (e.g., a bike, a car, a horse), sit, watch (a person), talk to (e.g., self, a person, a group); [0.427, 0.27, 0.757, 0.624]: ride (e.g., a bike, a car, a horse), sit, watch (a person), listen to (a person); [0.693, 0.283, 0.963, 0.66]: listen to (a person), sit, drive (e.g., a car, a truck)."}, {"from": "human", "timestamps": 1087.0, "value": "For the person at bbox [0.073, 0.183, 0.849, 0.915], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.073, 0.183, 0.849, 0.915]: sit, ride (e.g., a bike, a car, a horse), watch (a person).", "From 1085.0s to 1086.0s, [0.029, 0.21, 0.664, 0.943]->[0.062, 0.208, 0.596, 0.926]: sit, ride (e.g., a bike, a car, a horse), watch (a person).", "From 1082.0s to 1084.0s, [0.031, 0.176, 0.596, 0.926]->[0.048, 0.217, 0.616, 0.948]->[0.033, 0.167, 0.68, 0.933]: sit, ride (e.g., a bike, a car, a horse), talk to 
(e.g., self, a person, a group), watch (a person)."]}]}, {"video": "j35JnR0Q7Es.mp4", "clip": [1157, 1197], "conversations": [{"from": "human", "timestamps": 1187.0, "value": "Describe the actions of the person at bbox [0.318, 0.226, 0.696, 0.931] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.318, 0.226, 0.696, 0.931]: stand, talk to (e.g., self, a person, a group).", "From 1185.0s to 1186.0s, [0.284, 0.21, 0.707, 0.987]->[0.283, 0.181, 0.645, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1184.0s, [0.359, 0.195, 0.667, 0.898]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1183.0s, [0.372, 0.192, 0.631, 0.865]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1182.0s, [0.362, 0.229, 0.628, 0.882]: stand, carry/hold (an object), watch (a person)."]}, {"from": "human", "timestamps": 1197.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.327, 0.383, 0.538, 0.855]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "j35JnR0Q7Es.mp4", "clip": [1239, 1309], "conversations": [{"from": "human", "timestamps": 1269.0, "value": "What actions is the person in bbox [0.68, 0.195, 0.934, 0.862] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.68, 0.195, 0.934, 0.862]: sit, drive (e.g., a car, a truck).", "From 1267.0s to 1268.0s, [0.591, 0.204, 0.914, 0.895]->[0.596, 0.188, 0.922, 0.837]: sit, drive (e.g., a car, a truck).", "From 1265.0s to 1266.0s, [0.616, 0.194, 0.903, 0.784]->[0.604, 0.21, 0.915, 0.911]: sit, drive (e.g., a car, a truck), listen to (a person).", "At 1264.0s, [0.615, 0.188, 0.921, 0.817]: sit, drive (e.g., a car, a truck)."]}, {"from": "human", "timestamps": 1309.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.026, 0.206, 0.62, 0.924]: ride (e.g., a bike, a car, a horse), sit, listen to (a person); [0.416, 0.165, 0.962, 0.807]: ride (e.g., a bike, a car, a horse), sit, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "j35JnR0Q7Es.mp4", "clip": [1314, 1370], "conversations": [{"from": "human", "timestamps": 1344.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.068, 0.073, 0.849, 0.941]?"}, {"from": "gpt", "value": ["At Now, [0.068, 0.073, 0.849, 0.941]: sit, drive (e.g., a car, a truck).", "At 1343.0s, [0.038, 0.073, 0.842, 0.921]: sit, drive (e.g., a car, a truck), listen to (a person)."]}, {"from": "human", "timestamps": 1370.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.01, 0.254, 0.298, 0.814]: ride (e.g., a bike, a car, a horse), sit, watch (a person), listen to (a person), talk to (e.g., self, a person, a group); [0.262, 0.276, 0.689, 0.689]: ride (e.g., a bike, a car, a horse), sit, watch (a person), listen to (a person); [0.634, 0.266, 0.97, 0.864]: listen to (a person), sit, drive (e.g., a car, a truck)."}]}, {"video": "j35JnR0Q7Es.mp4", "clip": [1478, 1509], "conversations": [{"from": "human", "timestamps": 1508.0, "value": "For the person at bbox [0.068, 0.05, 0.872, 0.953], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.068, 0.05, 
0.872, 0.953]: sit, ride (e.g., a bike, a car, a horse), listen to (a person), watch (a person).", "At 1507.0s, [0.075, 0.062, 0.924, 1.0]: sit, ride (e.g., a bike, a car, a horse), listen to (a person), watch (a person).", "At 1506.0s, [0.035, 0.087, 0.892, 0.956]: sit, ride (e.g., a bike, a car, a horse), listen to (a person)."]}, {"from": "human", "timestamps": 1509.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.039, 0.149, 0.729, 0.9]: ride (e.g., a bike, a car, a horse), sit, watch (a person), talk to (e.g., self, a person, a group); [0.474, 0.22, 0.895, 0.924]: listen to (a person), sit, drive (e.g., a car, a truck), watch (a person)."}]}, {"video": "j35JnR0Q7Es.mp4", "clip": [1512, 1588], "conversations": [{"from": "human", "timestamps": 1542.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.289, 0.279, 0.614, 0.608]: ride (e.g., a bike, a car, a horse), sit, watch (a person), talk to (e.g., self, a person, a group); [0.515, 0.289, 0.782, 0.716]: listen to (a person), sit, drive (e.g., a car, a truck), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1588.0, "value": "What actions is the person in bbox [0.036, 0.097, 0.953, 0.939] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.036, 0.097, 0.953, 0.939]: sit, ride (e.g., a bike, a car, a horse).", "From 1579.0s to 1587.0s, [0.129, 0.09, 0.955, 0.955]->[0.189, 0.077, 0.965, 0.925]->[0.002, 0.122, 0.925, 0.984]->[0.017, 0.107, 0.966, 0.957]->[0.039, 0.098, 0.932, 0.959]->[0.0, 0.077, 0.999, 0.963]->[0.068, 0.065, 0.981, 0.949]->[0.015, 0.091, 0.975, 0.959]->[0.0, 0.118, 0.943, 0.955]: sit, ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group).", "At 1578.0s, [0.013, 0.061, 0.957, 0.965]: sit, carry/hold (an object), ride (e.g., a bike, a car, a horse), touch (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "j35JnR0Q7Es.mp4", "clip": [1751, 1789], "conversations": [{"from": "human", "timestamps": 1781.0, "value": "What actions is the person in bbox [0.065, 0.128, 0.928, 0.956] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.065, 0.128, 0.928, 0.956]: sit, ride (e.g., a bike, a car, a horse).", "At 1780.0s, [0.082, 0.098, 0.967, 0.934]: sit, ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group).", "At 1779.0s, [0.06, 0.11, 0.947, 0.942]: sit, ride (e.g., a bike, a car, a horse).", "At 1778.0s, [0.073, 0.092, 0.878, 0.945]: sit, ride (e.g., a bike, a car, a horse), listen to (a person)."]}, {"from": "human", "timestamps": 1789.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.088, 0.109, 0.904, 0.974]: ride (e.g., a bike, a car, a horse), sit."}]}, {"video": "jBs_XYHI7gM.mkv", "clip": [882, 1020], "conversations": [{"from": "human", "timestamps": 912.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.066, 0.049, 0.788, 1.0]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1020.0, "value": "What actions is the person in bbox [0.228, 0.117, 0.537, 0.977] doing in the current frame, and what were the person's previous 
actions?"}, {"from": "gpt", "value": ["At Now, [0.228, 0.117, 0.537, 0.977]: stand, listen to (a person), watch (a person).", "From 1017.0s to 1018.0s, [0.227, 0.12, 0.525, 0.974]->[0.23, 0.12, 0.558, 0.975]: stand, carry/hold (an object), listen to (a person).", "At 1016.0s, [0.226, 0.121, 0.526, 0.972]: stand, listen to (a person)."]}]}, {"video": "jBs_XYHI7gM.mkv", "clip": [941, 979], "conversations": [{"from": "human", "timestamps": 971.0, "value": "What actions did the person at bbox [0.0, 0.103, 0.875, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.103, 0.875, 1.0]: stand, listen to (a person), watch (a person).", "From 969.0s to 970.0s, [0.0, 0.071, 0.849, 1.0]->[0.145, 0.096, 0.998, 1.0]: stand, watch (a person)."]}, {"from": "human", "timestamps": 979.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.034, 0.083, 0.809, 0.991]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "jBs_XYHI7gM.mkv", "clip": [995, 1038], "conversations": [{"from": "human", "timestamps": 1025.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.148, 0.106, 0.87, 0.992]: stand."}, {"from": "human", "timestamps": 1038.0, "value": "Describe the actions of the person at bbox [0.461, 0.071, 0.784, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.461, 0.071, 0.784, 1.0]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1037.0s, [0.525, 0.088, 0.854, 0.991]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group)."]}]}, {"video": "jBs_XYHI7gM.mkv", "clip": [1027, 1104], "conversations": [{"from": "human", "timestamps": 1057.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.287, 0.137, 0.654, 0.998]: run/jog, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1104.0, "value": "What actions did the person at bbox [0.029, 0.106, 0.864, 0.993] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.029, 0.106, 0.864, 0.993]: stand, watch (a person).", "At 1103.0s, [0.02, 0.102, 0.783, 0.998]: sit, stand, watch (a person)."]}]}, {"video": "jBs_XYHI7gM.mkv", "clip": [1056, 1103], "conversations": [{"from": "human", "timestamps": 1086.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.775, 0.366, 0.87, 0.782]?"}, {"from": "gpt", "value": ["At Now, [0.775, 0.366, 0.87, 0.782]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1085.0s, [0.796, 0.392, 0.898, 0.832]: stand, watch (a person).", "From 1082.0s to 1084.0s, [0.551, 0.162, 0.831, 0.981]->[0.588, 0.159, 0.84, 1.0]->[0.6, 0.178, 0.815, 0.986]: walk.", "At 1081.0s, [0.478, 0.172, 0.753, 1.0]: walk, smoke, watch (a person).", "At 1080.0s, [0.533, 0.197, 0.806, 0.985]: stand, smoke, watch (a person)."]}, {"from": "human", "timestamps": 1103.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.02, 0.102, 0.783, 0.998]: watch (a person), sit, stand."}]}, {"video": "jBs_XYHI7gM.mkv", "clip": [1114, 1151], "conversations": [{"from": "human", "timestamps": 1144.0, "value": "Describe the locations and actions of 
each person in the current frame."}, {"from": "gpt", "value": "At now, [0.192, 0.278, 0.378, 0.744]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.55, 0.1, 0.729, 0.919]: listen to (a person), stand."}, {"from": "human", "timestamps": 1151.0, "value": "What actions is the person in bbox [0.189, 0.265, 0.378, 0.797] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.189, 0.265, 0.378, 0.797]: sit, listen to (a person), watch (a person).", "At 1150.0s, [0.191, 0.265, 0.371, 0.801]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1149.0s, [0.201, 0.284, 0.395, 0.796]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1146.0s to 1148.0s, [0.207, 0.267, 0.388, 0.787]->[0.212, 0.274, 0.396, 0.798]->[0.193, 0.27, 0.387, 0.711]: sit, talk to (e.g., self, a person, a group).", "At 1145.0s, [0.196, 0.271, 0.375, 0.748]: sit, watch (a person).", "From 1143.0s to 1144.0s, [0.197, 0.272, 0.377, 0.707]->[0.192, 0.278, 0.378, 0.744]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1142.0s, [0.196, 0.266, 0.382, 0.708]: sit.", "At 1141.0s, [0.202, 0.265, 0.385, 0.777]: sit, listen to (a person)."]}]}, {"video": "jBs_XYHI7gM.mkv", "clip": [1155, 1249], "conversations": [{"from": "human", "timestamps": 1185.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.179, 0.046, 0.942, 1.0]: stand."}, {"from": "human", "timestamps": 1249.0, "value": "What actions is the person in bbox [0.28, 0.206, 0.869, 0.991] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.28, 0.206, 0.869, 0.991]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1248.0s, [0.179, 0.129, 0.844, 0.996]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "jBs_XYHI7gM.mkv", "clip": [1317, 1360], "conversations": [{"from": "human", "timestamps": 1347.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.171, 0.169, 0.796, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1360.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.713, 0.232, 0.843, 0.786]?"}, {"from": "gpt", "value": ["In the current frame, [0.713, 0.232, 0.843, 0.786]: stand, listen to (a person), watch (a person).", "From 1357.0s to 1359.0s, [0.701, 0.232, 0.821, 0.792]->[0.706, 0.223, 0.822, 0.782]->[0.712, 0.226, 0.818, 0.782]: stand, listen to (a person), watch (a person).", "At 1356.0s, [0.673, 0.229, 0.796, 0.788]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1355.0s, [0.684, 0.235, 0.849, 0.793]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1354.0s, [0.644, 0.241, 0.828, 0.795]: stand, listen to (a person).", "From 1352.0s to 1353.0s, [0.698, 0.245, 0.893, 0.8]->[0.701, 0.243, 0.82, 0.801]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "jBs_XYHI7gM.mkv", "clip": [1357, 1438], "conversations": [{"from": "human", "timestamps": 1387.0, "value": "For the person at bbox [0.017, 0.063, 0.876, 0.962], what are their actions in the current frame and 
in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.017, 0.063, 0.876, 0.962]: stand, talk to (e.g., self, a person, a group).", "At 1386.0s, [0.019, 0.05, 0.739, 0.985]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1438.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.014, 0.246, 0.832, 0.998]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "jBs_XYHI7gM.mkv", "clip": [1356, 1475], "conversations": [{"from": "human", "timestamps": 1386.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.019, 0.05, 0.739, 0.985]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1475.0, "value": "Describe the actions of the person at bbox [0.205, 0.232, 0.507, 0.984] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.205, 0.232, 0.507, 0.984]: stand, carry/hold (an object).", "At 1474.0s, [0.221, 0.21, 0.57, 0.985]: stand, carry/hold (an object).", "From 1467.0s to 1473.0s, [0.758, 0.091, 1.0, 0.922]->[0.349, 0.213, 0.653, 1.0]->[0.428, 0.249, 0.624, 0.984]->[0.214, 0.227, 0.454, 0.981]->[0.211, 0.296, 0.423, 0.963]->[0.237, 0.288, 0.453, 0.993]->[0.144, 0.294, 0.511, 0.981]: walk, carry/hold (an object)."]}]}, {"video": "jBs_XYHI7gM.mkv", "clip": [1411, 1470], "conversations": [{"from": "human", "timestamps": 1441.0, "value": "For the person at bbox [0.02, 0.083, 0.8, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.02, 0.083, 0.8, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1439.0s to 1440.0s, [0.003, 0.146, 0.857, 1.0]->[0.006, 0.056, 0.732, 0.976]: stand, talk to (e.g., self, a person, a group).", "At 1438.0s, [0.014, 0.246, 0.832, 0.998]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1470.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.214, 0.227, 0.454, 0.981]: carry/hold (an object), walk."}]}, {"video": "jBs_XYHI7gM.mkv", "clip": [1491, 1552], "conversations": [{"from": "human", "timestamps": 1521.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.058, 0.064, 0.905, 0.993]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1552.0, "value": "For the person at bbox [0.079, 0.04, 0.91, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.079, 0.04, 0.91, 1.0]: stand, listen to (a person), watch (a person).", "At 1551.0s, [0.061, 0.009, 0.871, 0.999]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1549.0s to 1550.0s, [0.08, 0.028, 0.899, 0.989]->[0.157, 0.049, 0.912, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1548.0s, [0.165, 0.066, 0.839, 0.985]: stand."]}]}, {"video": "jBs_XYHI7gM.mkv", "clip": [1554, 1612], "conversations": [{"from": "human", "timestamps": 1584.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.289, 0.12, 0.8, 0.994]: 
watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1612.0, "value": "What actions is the person in bbox [0.03, 0.101, 0.928, 0.984] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.03, 0.101, 0.928, 0.984]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1609.0s to 1611.0s, [0.023, 0.104, 0.947, 0.977]->[0.185, 0.1, 0.977, 0.983]->[0.019, 0.089, 0.688, 0.994]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1605.0s to 1608.0s, [0.185, 0.461, 0.392, 0.966]->[0.186, 0.458, 0.384, 0.955]->[0.18, 0.432, 0.39, 0.956]->[0.188, 0.442, 0.391, 0.958]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1604.0s, [0.187, 0.459, 0.386, 0.957]: sit, talk to (e.g., self, a person, a group).", "From 1602.0s to 1603.0s, [0.184, 0.447, 0.392, 0.96]->[0.185, 0.456, 0.388, 0.96]: sit, eat, talk to (e.g., self, a person, a group).", "From 1599.0s to 1601.0s, [0.181, 0.456, 0.386, 0.958]->[0.182, 0.454, 0.385, 0.953]->[0.186, 0.455, 0.39, 0.967]: sit, talk to (e.g., self, a person, a group).", "At 1598.0s, [0.187, 0.453, 0.391, 0.963]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1597.0s, [0.18, 0.454, 0.391, 0.961]: sit, listen to (a person).", "From 1595.0s to 1596.0s, [0.182, 0.433, 0.391, 0.96]->[0.188, 0.456, 0.393, 0.963]: sit, smoke, talk to (e.g., self, a person, a group).", "At 1594.0s, [0.177, 0.462, 0.392, 0.956]: sit, talk to (e.g., self, a person, a group).", "At 1593.0s, [0.177, 0.459, 0.393, 0.961]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1590.0s to 1592.0s, [0.177, 0.464, 0.389, 0.958]->[0.174, 0.46, 0.388, 0.95]->[0.176, 0.461, 0.392, 0.959]: sit, listen to (a person).", "At 1589.0s, [0.177, 0.436, 0.383, 0.969]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 1587.0s to 1588.0s, [0.177, 0.429, 0.535, 0.962]->[0.184, 0.416, 0.421, 0.956]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1586.0s, [0.116, 0.08, 0.958, 0.946]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1585.0s, [0.103, 0.096, 1.0, 0.987]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "jBs_XYHI7gM.mkv", "clip": [1627, 1676], "conversations": [{"from": "human", "timestamps": 1657.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.082, 0.109, 0.9, 0.968]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1676.0, "value": "For the person at bbox [0.812, 0.286, 0.88, 0.564], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.812, 0.286, 0.88, 0.564]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1675.0s, [0.815, 0.282, 0.901, 0.552]: walk, carry/hold (an object), watch (a person)."]}]}, {"video": "jBs_XYHI7gM.mkv", "clip": [1670, 1705], "conversations": [{"from": "human", "timestamps": 1700.0, "value": "What actions is the person in bbox [0.083, 0.14, 0.313, 0.973] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.083, 0.14, 0.313, 0.973]: stand, carry/hold (an object), listen to (a
person), watch (a person).", "At 1699.0s, [0.076, 0.14, 0.319, 0.968]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1697.0s to 1698.0s, [0.078, 0.139, 0.343, 0.966]->[0.076, 0.141, 0.325, 0.964]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1692.0s to 1696.0s, [0.121, 0.119, 0.346, 0.972]->[0.08, 0.101, 0.297, 0.986]->[0.098, 0.168, 0.316, 0.964]->[0.085, 0.152, 0.301, 0.98]->[0.082, 0.149, 0.311, 0.976]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1705.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.174, 0.17, 0.894, 0.981]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "jBs_XYHI7gM.mkv", "clip": [1739, 1791], "conversations": [{"from": "human", "timestamps": 1769.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.025, 0.118, 0.883, 0.985]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1791.0, "value": "Describe the actions of the person at bbox [0.372, 0.195, 0.568, 0.905] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.372, 0.195, 0.568, 0.905]: walk.", "At 1790.0s, [0.41, 0.16, 0.563, 0.873]: walk.", "From 1788.0s to 1789.0s, [0.443, 0.186, 0.59, 0.818]->[0.376, 0.151, 0.558, 0.848]: walk, talk to (e.g., self, a person, a group).", "From 1784.0s to 1787.0s, [0.425, 0.143, 0.609, 0.934]->[0.409, 0.18, 0.58, 0.887]->[0.489, 0.218, 0.653, 0.827]->[0.472, 0.215, 0.628, 0.803]: walk.", "From 1781.0s to 1783.0s, [0.408, 0.125, 0.564, 0.945]->[0.36, 0.13, 0.577, 0.933]->[0.457, 0.109, 0.584, 0.959]: stand.", "From 1778.0s to 1780.0s, [0.729, 0.179, 0.888, 0.991]->[0.685, 0.169, 0.846, 0.992]->[0.533, 0.158, 0.697, 0.971]: stand, watch (a person).", "From 1775.0s to 1777.0s, [0.691, 0.091, 0.859, 0.951]->[0.707, 0.131, 0.877, 0.964]->[0.729, 0.172, 0.884, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1774.0s, [0.706, 0.086, 0.903, 0.942]: stand, watch (a person).", "From 1771.0s to 1773.0s, [0.691, 0.087, 0.902, 0.944]->[0.703, 0.097, 0.898, 0.93]->[0.709, 0.086, 0.903, 0.947]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "jI0HIlSsa3s.mkv", "clip": [885, 1005], "conversations": [{"from": "human", "timestamps": 915.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.065, 0.0, 0.957, 0.979]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1005.0, "value": "For the person at bbox [0.2, 0.0, 1.0, 0.985], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.2, 0.0, 1.0, 0.985]: stand, watch (a person).", "At 1004.0s, [0.282, 0.0, 1.0, 0.992]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1002.0s to 1003.0s, [0.243, 0.0, 1.0, 0.967]->[0.342, 0.0, 1.0, 0.992]: bend/bow (at the waist), listen to (a person).", "At 1001.0s, [0.243, 0.036, 0.997, 0.988]: stand, listen to (a person).", "At 1000.0s, [0.226, 0.013, 1.0, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 998.0s to 
999.0s, [0.253, 0.004, 0.997, 0.994]->[0.224, 0.018, 0.985, 0.994]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 997.0s, [0.261, 0.043, 0.994, 0.996]: bend/bow (at the waist), carry/hold (an object), give/serve (an object) to (a person), talk to (e.g., self, a person, a group).", "At 996.0s, [0.385, 0.017, 0.989, 0.999]: bend/bow (at the waist), carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 995.0s, [0.336, 0.053, 0.99, 0.996]: bend/bow (at the waist), carry/hold (an object).", "From 992.0s to 994.0s, [0.343, 0.035, 0.997, 0.996]->[0.392, 0.004, 0.987, 0.989]->[0.371, 0.019, 0.985, 0.979]: bend/bow (at the waist), carry/hold (an object), listen to (a person), watch (a person).", "At 991.0s, [0.282, 0.015, 1.0, 1.0]: bend/bow (at the waist), carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 990.0s, [0.231, 0.005, 0.999, 1.0]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group).", "At 989.0s, [0.224, 0.06, 0.933, 0.992]: carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "jI0HIlSsa3s.mkv", "clip": [961, 1008], "conversations": [{"from": "human", "timestamps": 991.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.028, 0.347, 0.953]?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.028, 0.347, 0.953]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 989.0s to 990.0s, [0.0, 0.014, 0.278, 0.986]->[0.0, 0.094, 0.281, 0.967]: sit, listen to (a person)."]}, {"from": "human", "timestamps": 1008.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.001, 0.002, 0.411, 0.996]: talk to (e.g., self, a person, a group), walk."}]}, {"video": "jI0HIlSsa3s.mkv", "clip": [998, 1038], "conversations": [{"from": "human", "timestamps": 1028.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.142, 0.18, 0.934, 0.894]: read, sit, listen to (a person); [0.159, 0.077, 0.437, 0.657]: touch (an object), watch (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1038.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.291, 0.06, 0.975, 0.858]?"}, {"from": "gpt", "value": ["At Now, [0.291, 0.06, 0.975, 0.858]: sit, carry/hold (an object), read, listen to (a person), watch (a person).", "From 1036.0s to 1037.0s, [0.263, 0.046, 0.989, 0.897]->[0.265, 0.057, 0.985, 0.855]: sit, read, listen to (a person).", "At 1035.0s, [0.298, 0.067, 0.979, 0.866]: sit, read, listen to (a person), talk to (e.g., self, a person, a group).", "At 1034.0s, [0.309, 0.073, 0.981, 0.853]: sit, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "From 1031.0s to 1033.0s, [0.069, 0.263, 0.366, 0.735]->[0.069, 0.265, 0.381, 0.741]->[0.074, 0.26, 0.377, 0.74]: sit, read, listen to (a person)."]}]}, {"video": "jI0HIlSsa3s.mkv", "clip": [1020, 1083], "conversations": [{"from": "human", "timestamps": 1050.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.04, 0.035, 0.486, 0.974]: carry/hold (an object), stand, watch (a person); [0.204, 0.03, 0.975, 0.966]: read, sit."}, 
{"from": "human", "timestamps": 1083.0, "value": "What actions is the person in bbox [0.015, 0.044, 0.46, 0.766] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.015, 0.044, 0.46, 0.766]: stand, touch (an object), watch (a person).", "At 1082.0s, [0.07, 0.062, 0.802, 0.988]: stand, watch (a person)."]}]}, {"video": "jI0HIlSsa3s.mkv", "clip": [1119, 1170], "conversations": [{"from": "human", "timestamps": 1149.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.01, 0.655, 0.986]?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.01, 0.655, 0.986]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1148.0s, [0.015, 0.032, 0.671, 0.987]: stand, carry/hold (an object), touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1139.0s to 1147.0s, [0.006, 0.045, 0.573, 0.977]->[0.0, 0.065, 0.574, 0.958]->[0.0, 0.066, 0.667, 0.969]->[0.0, 0.052, 0.552, 0.973]->[0.0, 0.058, 0.549, 0.965]->[0.0, 0.03, 0.561, 0.968]->[0.0, 0.033, 0.562, 0.971]->[0.0, 0.031, 0.521, 0.964]->[0.013, 0.037, 0.583, 0.965]: stand, listen to (a person), watch (a person).", "At 1138.0s, [0.0, 0.054, 0.611, 0.991]: stand, carry/hold (an object), listen to (a person).", "From 1136.0s to 1137.0s, [0.004, 0.006, 0.523, 0.88]->[0.023, 0.014, 0.595, 0.915]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1135.0s, [0.0, 0.005, 0.532, 0.965]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1134.0s, [0.021, 0.0, 0.585, 0.976]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1133.0s, [0.0, 0.0, 0.595, 0.95]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1132.0s, [0.055, 0.017, 0.585, 0.96]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1131.0s, [0.0, 0.009, 0.608, 0.967]: stand, carry/hold (an object).", "From 1129.0s to 1130.0s, [0.017, 0.025, 0.558, 0.966]->[0.037, 0.0, 0.508, 0.977]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1128.0s, [0.009, 0.063, 0.53, 0.958]: stand, carry/hold (an object).", "At 1127.0s, [0.008, 0.012, 0.532, 0.98]: stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1170.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.482, 0.123, 0.927, 0.598]: watch (a person), lie/sleep, talk to (e.g., self, a person, a group)."}]}, {"video": "jI0HIlSsa3s.mkv", "clip": [1232, 1298], "conversations": [{"from": "human", "timestamps": 1262.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.043, 0.228, 0.989, 0.994]: bend/bow (at the waist)."}, {"from": "human", "timestamps": 1298.0, "value": "For the person at bbox [0.011, 0.002, 0.628, 0.994], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.011, 0.002, 0.628, 0.994]: stand, listen to (a person), watch (a person).", "At 1297.0s, [0.0, 0.053, 0.867, 0.976]: stand, listen to (a person)."]}]}, {"video": "jI0HIlSsa3s.mkv", "clip": [1279, 1344], "conversations": [{"from": "human", "timestamps": 1309.0, "value": "Describe the locations 
and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.039, 0.031, 0.804, 0.991]: listen to (a person), stand, watch (a person); [0.6, 0.0, 0.997, 0.996]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1344.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.78, 0.078, 0.972, 0.965]?"}, {"from": "gpt", "value": ["In the current frame, [0.78, 0.078, 0.972, 0.965]: stand, carry/hold (an object), touch (an object), listen to (a person).", "At 1343.0s, [0.775, 0.096, 0.967, 0.967]: stand, carry/hold (an object), touch (an object).", "At 1342.0s, [0.774, 0.111, 0.96, 0.932]: stand, touch (an object).", "At 1341.0s, [0.776, 0.0, 0.981, 0.963]: stand."]}]}, {"video": "jI0HIlSsa3s.mkv", "clip": [1376, 1469], "conversations": [{"from": "human", "timestamps": 1406.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.006, 0.872, 0.996]: stand, hug (a person); [0.215, 0.083, 0.982, 0.98]: watch (a person), stand, hug (a person)."}, {"from": "human", "timestamps": 1469.0, "value": "For the person at bbox [0.03, 0.028, 0.727, 0.994], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.03, 0.028, 0.727, 0.994]: stand, close (e.g., a door, a box).", "At 1468.0s, [0.305, 0.09, 0.737, 0.975]: stand, close (e.g., a door, a box), watch (a person).", "From 1466.0s to 1467.0s, [0.435, 0.121, 0.809, 0.975]->[0.418, 0.119, 0.89, 0.986]: stand, open (e.g., a window, a car door), talk to (e.g., self, a person, a group), watch (a person).", "At 1465.0s, [0.429, 0.134, 0.927, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1464.0s, [0.389, 0.128, 0.997, 0.994]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "jI0HIlSsa3s.mkv", "clip": [1412, 1452], "conversations": [{"from": "human", "timestamps": 1442.0, "value": "What actions did the person at bbox [0.011, 0.0, 0.488, 0.968] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.011, 0.0, 0.488, 0.968]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1441.0s, [0.026, 0.013, 0.582, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1452.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.05, 0.009, 0.992, 0.996]: carry/hold (an object), push (another person), watch (a person), walk."}]}, {"video": "jI0HIlSsa3s.mkv", "clip": [1482, 1554], "conversations": [{"from": "human", "timestamps": 1512.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.09, 0.0, 0.98, 0.991]: sit."}, {"from": "human", "timestamps": 1554.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.0, 0.902, 0.988]?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.0, 0.902, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1553.0s, [0.0, 0.015, 0.819, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1551.0s to 1552.0s, [0.036, 0.003, 0.933, 0.979]->[0.033, 0.0, 1.0, 0.986]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": 
"jI0HIlSsa3s.mkv", "clip": [1470, 1607], "conversations": [{"from": "human", "timestamps": 1500.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.039, 0.968, 0.996]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1607.0, "value": "What actions did the person at bbox [0.495, 0.016, 0.899, 0.602] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.495, 0.016, 0.899, 0.602]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1606.0s, [0.534, 0.059, 0.896, 0.582]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1605.0s, [0.504, 0.094, 0.817, 0.504]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1604.0s, [0.594, 0.194, 0.816, 0.973]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "jI0HIlSsa3s.mkv", "clip": [1556, 1602], "conversations": [{"from": "human", "timestamps": 1586.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.594, 0.099, 0.833, 0.963]?"}, {"from": "gpt", "value": ["At Now, [0.594, 0.099, 0.833, 0.963]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1585.0s, [0.638, 0.08, 0.79, 0.649]: walk, carry/hold (an object), enter, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1602.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.364, 0.282, 0.504, 0.501]: watch (a person), sit; [0.487, 0.121, 0.723, 0.917]: carry/hold (an object), talk to (e.g., self, a person, a group), walk."}]}, {"video": "jI0HIlSsa3s.mkv", "clip": [1653, 1687], "conversations": [{"from": "human", "timestamps": 1683.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.026, 0.008, 0.995, 0.998]: sit, talk to (e.g., self, a person, a group), stand."}, {"from": "human", "timestamps": 1687.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.053, 0.0, 0.975, 0.99]?"}, {"from": "gpt", "value": ["At Now, [0.053, 0.0, 0.975, 0.99]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1684.0s to 1686.0s, [0.0, 0.011, 0.981, 0.995]->[0.0, 0.0, 0.965, 0.993]->[0.0, 0.015, 1.0, 1.0]: sit, talk to (e.g., self, a person, a group).", "At 1683.0s, [0.026, 0.008, 0.995, 0.998]: sit, stand, talk to (e.g., self, a person, a group).", "At 1682.0s, [0.0, 0.02, 0.946, 0.983]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "jI0HIlSsa3s.mkv", "clip": [1712, 1755], "conversations": [{"from": "human", "timestamps": 1742.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.224, 0.024, 1.0, 0.964]: watch (a person), stand."}, {"from": "human", "timestamps": 1755.0, "value": "Describe the actions of the person at bbox [0.305, 0.031, 1.0, 0.985] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.305, 0.031, 1.0, 0.985]: stand.", "At 1754.0s, [0.225, 0.041, 1.0, 0.978]: stand."]}]}, {"video": "jgAwJ0RqmYg.mp4", "clip": [879, 952], "conversations": [{"from": "human", 
"timestamps": 909.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.058, 0.045, 0.854, 1.0]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 952.0, "value": "For the person at bbox [0.546, 0.101, 0.898, 0.995], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.546, 0.101, 0.898, 0.995]: sit, talk to (e.g., self, a person, a group).", "From 947.0s to 951.0s, [0.517, 0.123, 0.879, 0.978]->[0.531, 0.129, 0.872, 0.976]->[0.53, 0.134, 0.86, 0.969]->[0.542, 0.133, 0.866, 0.97]->[0.536, 0.111, 0.87, 0.972]: sit, listen to (a person), watch (a person).", "At 946.0s, [0.499, 0.131, 0.864, 0.973]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 943.0s to 945.0s, [0.509, 0.169, 0.676, 0.888]->[0.517, 0.127, 0.847, 0.992]->[0.528, 0.127, 0.858, 0.976]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 942.0s, [0.545, 0.023, 0.734, 0.858]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 941.0s, [0.572, 0.058, 0.766, 0.913]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 940.0s, [0.645, 0.112, 0.82, 0.822]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 939.0s, [0.741, 0.192, 0.88, 0.795]: walk, listen to (a person), watch (a person).", "From 937.0s to 938.0s, [0.761, 0.21, 0.897, 0.772]->[0.758, 0.218, 0.895, 0.77]: stand, close (e.g., a door, a box), listen to (a person), watch (a person).", "At 936.0s, [0.778, 0.169, 0.905, 0.738]: walk, close (e.g., a door, a box), listen to (a person).", "At 935.0s, [0.833, 0.17, 0.897, 0.685]: walk, enter."]}]}, {"video": "jgAwJ0RqmYg.mp4", "clip": [915, 987], "conversations": [{"from": "human", "timestamps": 945.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.12, 0.056, 0.528, 1.0]: listen to (a person), sit; [0.528, 0.127, 0.858, 0.976]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 987.0, "value": "Describe the actions of the person at bbox [0.241, 0.064, 0.999, 0.982] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.241, 0.064, 0.999, 0.982]: sit, watch (a person).", "At 986.0s, [0.181, 0.071, 0.991, 0.993]: sit, listen to (a person), watch (a person)."]}]}, {"video": "jgAwJ0RqmYg.mp4", "clip": [947, 994], "conversations": [{"from": "human", "timestamps": 977.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.269, 0.088, 0.989, 0.987]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 994.0, "value": "What actions did the person at bbox [0.152, 0.034, 0.891, 0.976] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.152, 0.034, 0.891, 0.976]: sit, talk to (e.g., self, a person, a group).", "From 991.0s to 993.0s, [0.128, 0.286, 0.939, 0.97]->[0.134, 0.081, 0.943, 0.953]->[0.127, 0.06, 0.921, 0.955]: sit, talk to (e.g., self, a person, a group).", "At 990.0s, [0.095, 0.076, 0.921, 0.961]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "jgAwJ0RqmYg.mp4", "clip": [1256, 1364], "conversations": [{"from": "human", "timestamps": 1286.0, 
"value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.539, 0.188, 0.999, 0.849]: listen to (a person), sit."}, {"from": "human", "timestamps": 1364.0, "value": "What actions is the person in bbox [0.018, 0.023, 0.706, 0.976] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.018, 0.023, 0.706, 0.976]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1362.0s to 1363.0s, [0.029, 0.052, 0.766, 0.966]->[0.018, 0.035, 0.724, 0.969]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "jgAwJ0RqmYg.mp4", "clip": [1421, 1461], "conversations": [{"from": "human", "timestamps": 1451.0, "value": "What actions did the person at bbox [0.182, 0.066, 0.941, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.182, 0.066, 0.941, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1449.0s to 1450.0s, [0.159, 0.1, 0.924, 0.997]->[0.109, 0.041, 0.926, 0.977]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1461.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.067, 0.057, 0.823, 0.965]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "jgAwJ0RqmYg.mp4", "clip": [1505, 1542], "conversations": [{"from": "human", "timestamps": 1535.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.107, 0.109, 0.855, 0.987]?"}, {"from": "gpt", "value": ["At Now, [0.107, 0.109, 0.855, 0.987]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1532.0s to 1534.0s, [0.165, 0.106, 0.891, 0.977]->[0.095, 0.052, 0.87, 0.96]->[0.096, 0.102, 0.894, 1.0]: sit, carry/hold (an object).", "From 1530.0s to 1531.0s, [0.232, 0.199, 0.799, 0.994]->[0.195, 0.137, 0.852, 0.984]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1528.0s to 1529.0s, [0.275, 0.255, 0.648, 0.968]->[0.253, 0.233, 0.72, 0.981]: sit, carry/hold (an object).", "At 1527.0s, [0.301, 0.31, 0.593, 0.988]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1526.0s, [0.278, 0.345, 0.553, 0.973]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1525.0s, [0.288, 0.351, 0.529, 0.961]: sit, carry/hold (an object), hand wave, talk to (e.g., self, a person, a group), watch (a person).", "From 1522.0s to 1524.0s, [0.299, 0.354, 0.548, 0.971]->[0.301, 0.342, 0.545, 0.966]->[0.303, 0.346, 0.536, 0.975]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1521.0s, [0.302, 0.293, 0.544, 0.941]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1520.0s, [0.31, 0.242, 0.569, 0.866]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1519.0s, [0.297, 0.246, 0.57, 0.884]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1518.0s, [0.306, 0.257, 0.562, 0.855]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1517.0s, [0.304, 0.249, 0.55, 0.871]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 
1542.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.08, 0.115, 0.851, 0.97]: sit."}]}, {"video": "jgAwJ0RqmYg.mp4", "clip": [1469, 1582], "conversations": [{"from": "human", "timestamps": 1499.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.255, 0.274, 0.555, 0.846]: carry/hold (an object), sit, watch (a person), listen to (a person); [0.525, 0.244, 0.724, 0.819]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1582.0, "value": "For the person at bbox [0.087, 0.014, 0.704, 0.982], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.087, 0.014, 0.704, 0.982]: stand, talk to (e.g., self, a person, a group).", "From 1578.0s to 1581.0s, [0.003, 0.0, 0.64, 0.991]->[0.012, 0.0, 0.63, 0.975]->[0.041, 0.014, 0.648, 0.993]->[0.044, 0.0, 0.662, 0.985]: stand, talk to (e.g., self, a person, a group).", "At 1577.0s, [0.0, 0.0, 0.602, 0.96]: stand.", "From 1575.0s to 1576.0s, [0.23, 0.117, 0.31, 0.717]->[0.228, 0.115, 0.31, 0.719]: stand, listen to (a person), watch (a person).", "From 1573.0s to 1574.0s, [0.23, 0.121, 0.316, 0.719]->[0.231, 0.115, 0.313, 0.718]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "jgAwJ0RqmYg.mp4", "clip": [1530, 1611], "conversations": [{"from": "human", "timestamps": 1560.0, "value": "Describe the actions of the person at bbox [0.146, 0.141, 0.553, 0.953] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.146, 0.141, 0.553, 0.953]: stand, enter, open (e.g., a window, a car door).", "At 1559.0s, [0.147, 0.109, 0.42, 0.922]: walk, open (e.g., a window, a car door)."]}, {"from": "human", "timestamps": 1611.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.032, 0.054, 0.927, 0.963]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "jgAwJ0RqmYg.mp4", "clip": [1607, 1675], "conversations": [{"from": "human", "timestamps": 1637.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.099, 0.018, 0.808, 0.965]: sit."}, {"from": "human", "timestamps": 1675.0, "value": "Describe the actions of the person at bbox [0.201, 0.488, 0.448, 0.94] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.201, 0.488, 0.448, 0.94]: sit.", "At 1674.0s, [0.194, 0.489, 0.455, 0.963]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1673.0s, [0.191, 0.49, 0.454, 0.97]: sit, touch (an object), listen to (a person), watch (a person)."]}]}, {"video": "jgAwJ0RqmYg.mp4", "clip": [1620, 1730], "conversations": [{"from": "human", "timestamps": 1650.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.029, 0.032, 0.923, 0.997]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1730.0, "value": "What actions is the person in bbox [0.042, 0.013, 0.863, 0.995] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.042, 0.013, 0.863, 0.995]: sit, talk to (e.g., self, a person, a group).", "At 1729.0s, 
[0.02, 0.042, 0.842, 0.982]: sit, talk to (e.g., self, a person, a group).", "From 1723.0s to 1728.0s, [0.112, 0.0, 0.911, 1.0]->[0.084, 0.012, 0.93, 0.978]->[0.09, 0.029, 0.921, 0.979]->[0.023, 0.012, 0.852, 1.0]->[0.036, 0.053, 0.843, 0.984]->[0.045, 0.01, 0.843, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1722.0s, [0.109, 0.034, 0.874, 0.987]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1721.0s, [0.0, 0.096, 0.673, 0.969]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "jqZpiHlJUig.mkv", "clip": [878, 992], "conversations": [{"from": "human", "timestamps": 908.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.443, 0.0, 0.881, 0.952]: martial art, fight/hit (a person)."}, {"from": "human", "timestamps": 992.0, "value": "Describe the actions of the person at bbox [0.116, 0.089, 0.884, 0.981] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.116, 0.089, 0.884, 0.981]: stand, watch (a person), martial art.", "At 991.0s, [0.097, 0.129, 0.917, 0.976]: stand, watch (a person)."]}]}, {"video": "jqZpiHlJUig.mkv", "clip": [1142, 1177], "conversations": [{"from": "human", "timestamps": 1172.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.029, 0.212, 0.243, 0.805]: talk to (e.g., self, a person, a group), walk; [0.762, 0.14, 0.919, 0.974]: listen to (a person), walk."}, {"from": "human", "timestamps": 1177.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.175, 0.264, 0.276, 0.964]?"}, {"from": "gpt", "value": ["At Now, [0.175, 0.264, 0.276, 0.964]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1176.0s, [0.219, 0.275, 0.336, 0.958]: walk, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "jqZpiHlJUig.mkv", "clip": [1161, 1202], "conversations": [{"from": "human", "timestamps": 1191.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.001, 0.026, 0.474, 0.99]: listen to (a person), sit."}, {"from": "human", "timestamps": 1202.0, "value": "What actions did the person at bbox [0.553, 0.093, 0.661, 0.591] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.553, 0.093, 0.661, 0.591]: stand, listen to (a person), watch (a person).", "From 1199.0s to 1201.0s, [0.677, 0.03, 0.826, 0.748]->[0.619, 0.048, 0.741, 0.67]->[0.587, 0.084, 0.696, 0.627]: stand, listen to (a person), watch (a person)."]}]}, {"video": "jqZpiHlJUig.mkv", "clip": [1188, 1242], "conversations": [{"from": "human", "timestamps": 1218.0, "value": "Describe the actions of the person at bbox [0.356, 0.354, 0.528, 0.818] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.356, 0.354, 0.528, 0.818]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1216.0s to 1217.0s, [0.358, 0.36, 0.526, 0.814]->[0.357, 0.358, 0.527, 0.821]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1215.0s, [0.358, 0.357, 0.526, 0.817]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1210.0s to 1214.0s, [0.348, 0.372, 0.521, 0.83]->[0.351, 0.366, 0.517, 0.828]->[0.35, 0.363, 0.523, 0.822]->[0.359, 0.36, 0.524, 0.818]->[0.358, 0.361, 0.531, 0.826]: sit, talk to (e.g., self, a person, a 
group), watch (a person).", "At 1209.0s, [0.347, 0.378, 0.513, 0.842]: sit, watch (a person).", "From 1203.0s to 1208.0s, [0.343, 0.409, 0.514, 0.869]->[0.344, 0.403, 0.515, 0.876]->[0.342, 0.402, 0.516, 0.864]->[0.346, 0.395, 0.506, 0.852]->[0.34, 0.387, 0.511, 0.852]->[0.345, 0.383, 0.516, 0.841]: sit, listen to (a person), watch (a person).", "At 1202.0s, [0.346, 0.397, 0.508, 0.877]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1201.0s, [0.367, 0.397, 0.546, 0.888]: sit, talk to (e.g., self, a person, a group).", "From 1196.0s to 1200.0s, [0.076, 0.062, 0.896, 0.976]->[0.278, 0.227, 0.818, 0.984]->[0.38, 0.319, 0.689, 0.978]->[0.393, 0.383, 0.626, 0.982]->[0.377, 0.399, 0.569, 0.931]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1242.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.173, 0.191, 0.444, 0.986]: listen to (a person), stand, watch (a person); [0.403, 0.081, 0.707, 0.954]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "jqZpiHlJUig.mkv", "clip": [1256, 1314], "conversations": [{"from": "human", "timestamps": 1286.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.706, 0.454, 0.781, 0.975]?"}, {"from": "gpt", "value": ["In the current frame, [0.706, 0.454, 0.781, 0.975]: stand, talk to (e.g., self, a person, a group).", "At 1285.0s, [0.699, 0.448, 0.778, 0.977]: stand, carry/hold (an object), smoke.", "At 1284.0s, [0.713, 0.451, 0.801, 0.98]: walk, smoke."]}, {"from": "human", "timestamps": 1314.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.247, 0.13, 0.906]: stand; [0.045, 0.035, 0.579, 0.967]: stand, talk to (e.g., self, a person, a group); [0.606, 0.273, 0.814, 0.969]: listen to (a person), stand, watch (a person)."}]}, {"video": "jqZpiHlJUig.mkv", "clip": [1243, 1334], "conversations": [{"from": "human", "timestamps": 1273.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.599, 0.641, 0.643, 0.882]: walk."}, {"from": "human", "timestamps": 1334.0, "value": "For the person at bbox [0.41, 0.0, 0.969, 0.967], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.41, 0.0, 0.969, 0.967]: stand.", "At 1333.0s, [0.121, 0.166, 0.381, 0.959]: stand, watch (a person).", "From 1329.0s to 1332.0s, [0.119, 0.149, 0.37, 0.966]->[0.125, 0.151, 0.37, 0.961]->[0.119, 0.162, 0.364, 0.955]->[0.12, 0.161, 0.364, 0.953]: stand, listen to (a person), watch (a person)."]}]}, {"video": "jqZpiHlJUig.mkv", "clip": [1519, 1617], "conversations": [{"from": "human", "timestamps": 1549.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.245, 0.426, 0.514, 0.773]: carry/hold (an object), sit."}, {"from": "human", "timestamps": 1617.0, "value": "What actions did the person at bbox [0.662, 0.055, 1.0, 0.979] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.662, 0.055, 1.0, 0.979]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1616.0s, [0.534, 0.131, 0.985, 0.962]: walk, talk to (e.g., self, a person, a group).", "From 1614.0s to 1615.0s, [0.425, 0.057, 1.0, 1.0]->[0.376, 0.015, 0.998, 0.989]: 
stand, talk to (e.g., self, a person, a group).", "At 1613.0s, [0.408, 0.072, 0.978, 0.996]: stand, smoke, talk to (e.g., self, a person, a group).", "At 1612.0s, [0.304, 0.018, 1.0, 0.991]: stand.", "At 1611.0s, [0.183, 0.044, 0.788, 0.99]: walk.", "From 1609.0s to 1610.0s, [0.012, 0.015, 0.572, 0.993]->[0.032, 0.052, 0.706, 0.975]: walk, smoke.", "At 1608.0s, [0.149, 0.014, 0.963, 0.983]: stand.", "At 1607.0s, [0.219, 0.005, 0.999, 0.976]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1606.0s, [0.533, 0.111, 0.988, 0.974]: smoke, get up.", "At 1605.0s, [0.56, 0.052, 0.968, 0.941]: sit, carry/hold (an object), smoke, touch (an object), listen to (a person).", "At 1604.0s, [0.683, 0.073, 0.972, 0.909]: sit, carry/hold (an object), listen to (a person).", "At 1603.0s, [0.67, 0.045, 0.969, 0.661]: sit.", "At 1602.0s, [0.68, 0.066, 0.971, 0.822]: sit, carry/hold (an object), listen to (a person)."]}]}, {"video": "jqZpiHlJUig.mkv", "clip": [1555, 1588], "conversations": [{"from": "human", "timestamps": 1585.0, "value": "What actions is the person in bbox [0.475, 0.123, 0.986, 0.99] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.475, 0.123, 0.986, 0.99]: sit, talk to (e.g., self, a person, a group).", "At 1584.0s, [0.526, 0.079, 0.961, 0.984]: sit.", "From 1582.0s to 1583.0s, [0.569, 0.125, 0.961, 1.0]->[0.669, 0.085, 0.983, 0.956]: sit, listen to (a person).", "At 1581.0s, [0.473, 0.116, 0.853, 0.971]: bend/bow (at the waist), listen to (a person).", "At 1580.0s, [0.621, 0.037, 0.916, 0.985]: walk, listen to (a person), watch (a person).", "At 1579.0s, [0.752, 0.06, 1.0, 0.98]: walk, listen to (a person).", "From 1576.0s to 1578.0s, [0.409, 0.125, 0.965, 0.996]->[0.749, 0.181, 0.993, 0.969]->[0.706, 0.161, 1.0, 0.972]: walk, talk to (e.g., self, a person, a group).", "At 1575.0s, [0.498, 0.153, 0.908, 0.981]: walk."]}, {"from": "human", "timestamps": 1588.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.035, 0.104, 0.395, 0.968]: carry/hold (an object), sit, smoke, talk to (e.g., self, a person, a group); [0.413, 0.099, 0.986, 0.972]: sit, lift/pick up, listen to (a person), carry/hold (an object), talk to (e.g., self, a person, a group)."}]}, {"video": "jqZpiHlJUig.mkv", "clip": [1695, 1728], "conversations": [{"from": "human", "timestamps": 1725.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.447, 0.256, 0.785, 0.978]?"}, {"from": "gpt", "value": ["At Now, [0.447, 0.256, 0.785, 0.978]: walk, talk to (e.g., self, a person, a group).", "At 1724.0s, [0.449, 0.19, 0.739, 0.972]: walk, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1728.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.12, 0.289, 0.891]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.715, 0.25, 1.0, 0.969]: listen to (a person), sit, watch (a person)."}]}, {"video": "jqZpiHlJUig.mkv", "clip": [1708, 1763], "conversations": [{"from": "human", "timestamps": 1738.0, "value": "Describe the actions of the person at bbox [0.336, 0.017, 0.954, 0.984] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.336, 0.017, 0.954, 0.984]: sit, talk to (e.g., self, a person, a group).", "From 1736.0s to 1737.0s,
[0.731, 0.302, 0.993, 0.963]->[0.718, 0.283, 1.0, 0.978]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1763.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.011, 0.093, 0.484, 0.988]: listen to (a person), sit, watch (a person); [0.814, 0.139, 1.0, 0.908]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "kAsz-76DTDE.mkv", "clip": [885, 938], "conversations": [{"from": "human", "timestamps": 915.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.611, 0.176, 0.94, 0.871]?"}, {"from": "gpt", "value": ["In the current frame, [0.611, 0.176, 0.94, 0.871]: stand, talk to (e.g., self, a person, a group).", "At 914.0s, [0.614, 0.173, 0.944, 0.871]: stand, talk to (e.g., self, a person, a group).", "From 912.0s to 913.0s, [0.609, 0.167, 0.938, 0.874]->[0.605, 0.166, 0.941, 0.87]: stand, touch (an object), talk to (e.g., self, a person, a group).", "From 910.0s to 911.0s, [0.605, 0.176, 0.936, 0.866]->[0.605, 0.174, 0.939, 0.866]: stand, talk to (e.g., self, a person, a group).", "At 909.0s, [0.602, 0.175, 0.935, 0.866]: stand, touch (an object), talk to (e.g., self, a person, a group).", "From 906.0s to 908.0s, [0.603, 0.176, 0.938, 0.866]->[0.612, 0.171, 0.938, 0.873]->[0.606, 0.175, 0.936, 0.867]: stand, talk to (e.g., self, a person, a group).", "At 905.0s, [0.608, 0.17, 0.935, 0.87]: stand, touch (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 938.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.324, 0.232, 0.555, 0.59]: sit, touch (an object), talk to (e.g., self, a person, a group)."}]}, {"video": "kAsz-76DTDE.mkv", "clip": [1026, 1090], "conversations": [{"from": "human", "timestamps": 1056.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.362, 0.187, 0.705, 0.795]: read, sit, touch (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1090.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.374, 0.19, 0.702, 0.801]?"}, {"from": "gpt", "value": ["In the current frame, [0.374, 0.19, 0.702, 0.801]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1089.0s, [0.373, 0.202, 0.698, 0.792]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1088.0s, [0.363, 0.199, 0.701, 0.794]: sit, read, touch (an object), talk to (e.g., self, a person, a group).", "From 1086.0s to 1087.0s, [0.394, 0.202, 0.707, 0.78]->[0.355, 0.193, 0.708, 0.766]: sit, read, touch (an object).", "At 1085.0s, [0.394, 0.198, 0.705, 0.779]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1084.0s, [0.391, 0.202, 0.708, 0.781]: sit, read, touch (an object), talk to (e.g., self, a person, a group).", "At 1083.0s, [0.352, 0.198, 0.712, 0.763]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1081.0s to 1082.0s, [0.361, 0.193, 0.701, 0.755]->[0.37, 0.197, 0.696, 0.755]: sit, read, touch (an object), talk to (e.g., self, a person, a group).", "From 1079.0s to 1080.0s, [0.369, 0.189, 0.706, 0.798]->[0.367, 0.179, 0.702, 0.796]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1078.0s, [0.363, 0.196, 0.705, 0.779]: sit, read, touch (an object), talk to (e.g., 
self, a person, a group).", "At 1077.0s, [0.369, 0.198, 0.707, 0.739]: sit, read, touch (an object).", "From 1073.0s to 1076.0s, [0.379, 0.196, 0.71, 0.762]->[0.36, 0.194, 0.704, 0.755]->[0.373, 0.204, 0.703, 0.759]->[0.373, 0.19, 0.708, 0.761]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1072.0s, [0.391, 0.202, 0.713, 0.776]: sit, read, touch (an object), talk to (e.g., self, a person, a group).", "From 1070.0s to 1071.0s, [0.39, 0.198, 0.717, 0.789]->[0.385, 0.202, 0.715, 0.783]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1068.0s to 1069.0s, [0.37, 0.2, 0.703, 0.754]->[0.36, 0.201, 0.701, 0.754]: sit, read, touch (an object), talk to (e.g., self, a person, a group).", "From 1066.0s to 1067.0s, [0.37, 0.189, 0.703, 0.794]->[0.373, 0.183, 0.705, 0.78]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1064.0s to 1065.0s, [0.362, 0.198, 0.709, 0.787]->[0.372, 0.19, 0.703, 0.789]: sit, read, touch (an object), talk to (e.g., self, a person, a group).", "From 1062.0s to 1063.0s, [0.363, 0.191, 0.692, 0.754]->[0.362, 0.186, 0.704, 0.8]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1061.0s, [0.364, 0.202, 0.709, 0.758]: sit, touch (an object).", "At 1060.0s, [0.328, 0.203, 0.726, 0.776]: sit, carry/hold (an object), read, touch (an object).", "At 1059.0s, [0.375, 0.192, 0.704, 0.787]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1058.0s, [0.371, 0.197, 0.707, 0.79]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1056.0s to 1057.0s, [0.362, 0.187, 0.705, 0.795]->[0.365, 0.186, 0.712, 0.794]: sit, read, touch (an object), talk to (e.g., self, a person, a group).", "From 1054.0s to 1055.0s, [0.356, 0.191, 0.706, 0.769]->[0.366, 0.192, 0.704, 0.788]: sit, touch (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "kAsz-76DTDE.mkv", "clip": [1089, 1123], "conversations": [{"from": "human", "timestamps": 1119.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.348, 0.173, 0.707, 0.776]?"}, {"from": "gpt", "value": ["At Now, [0.348, 0.173, 0.707, 0.776]: sit, read, touch (an object), talk to (e.g., self, a person, a group).", "At 1118.0s, [0.33, 0.186, 0.712, 0.781]: sit, touch (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1123.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.353, 0.187, 0.695, 0.789]: read, sit, touch (an object), talk to (e.g., self, a person, a group)."}]}, {"video": "kAsz-76DTDE.mkv", "clip": [1166, 1250], "conversations": [{"from": "human", "timestamps": 1196.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.007, 0.153, 0.317, 0.867]: listen to (a person), stand, watch (a person); [0.244, 0.162, 0.851, 0.874]: watch (a person), listen to (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1250.0, "value": "Describe the actions of the person at bbox [0.42, 0.316, 0.601, 0.635] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.42, 0.316, 0.601, 0.635]: bend/bow (at the waist), take (an object) from (a person), watch (a person).", "At 1249.0s, [0.426, 0.317, 0.6, 0.633]: bend/bow (at the waist), stand, listen 
to (a person).", "At 1248.0s, [0.426, 0.318, 0.603, 0.635]: bend/bow (at the waist), carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1247.0s, [0.423, 0.315, 0.605, 0.64]: bend/bow (at the waist), stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1246.0s, [0.317, 0.259, 0.813, 0.845]: bend/bow (at the waist), carry/hold (an object), give/serve (an object) to (a person), talk to (e.g., self, a person, a group).", "At 1245.0s, [0.328, 0.262, 0.792, 0.876]: bend/bow (at the waist), carry/hold (an object).", "At 1244.0s, [0.33, 0.288, 0.788, 0.887]: bend/bow (at the waist), carry/hold (an object), listen to (a person).", "At 1243.0s, [0.331, 0.284, 0.809, 0.886]: bend/bow (at the waist)."]}]}, {"video": "kAsz-76DTDE.mkv", "clip": [1212, 1260], "conversations": [{"from": "human", "timestamps": 1242.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.217, 0.167, 0.763, 0.883]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.614, 0.267, 0.997, 0.87]: listen to (a person), stand."}, {"from": "human", "timestamps": 1260.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.141, 0.128, 0.723]?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.141, 0.128, 0.723]: stand, listen to (a person).", "At 1259.0s, [0.0, 0.141, 0.151, 0.721]: stand, listen to (a person), watch (a person)."]}]}, {"video": "kAsz-76DTDE.mkv", "clip": [1248, 1315], "conversations": [{"from": "human", "timestamps": 1278.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.219, 0.178, 0.877]: listen to (a person), stand, watch (a person); [0.005, 0.125, 0.324, 0.874]: listen to (a person), stand, watch (a person); [0.283, 0.363, 0.739, 0.876]: bend/bow (at the waist); [0.653, 0.264, 0.997, 0.87]: watch (a person), stand; [0.685, 0.151, 1.0, 0.846]: watch (a person), stand."}, {"from": "human", "timestamps": 1315.0, "value": "What actions did the person at bbox [0.158, 0.19, 0.756, 0.876] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.158, 0.19, 0.756, 0.876]: stand, talk to (e.g., self, a person, a group).", "At 1314.0s, [0.254, 0.179, 0.763, 0.863]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "kAsz-76DTDE.mkv", "clip": [1304, 1341], "conversations": [{"from": "human", "timestamps": 1334.0, "value": "What actions did the person at bbox [0.422, 0.336, 0.6, 0.632] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.422, 0.336, 0.6, 0.632]: bend/bow (at the waist), carry/hold (an object), cut, listen to (a person), talk to (e.g., self, a person, a group).", "From 1332.0s to 1333.0s, [0.423, 0.334, 0.605, 0.634]->[0.422, 0.333, 0.602, 0.635]: bend/bow (at the waist), carry/hold (an object), listen to (a person), watch (a person).", "At 1331.0s, [0.421, 0.332, 0.606, 0.635]: bend/bow (at the waist), carry/hold (an object), touch (an object), listen to (a person)."]}, {"from": "human", "timestamps": 1341.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.003, 0.227, 0.182, 0.88]: listen to (a person), stand, watch (a person); [0.004, 0.135, 0.353, 0.892]: listen to (a person), stand, watch (a person); [0.336, 0.315, 
0.788, 0.889]: carry/hold (an object), watch (a person), bend/bow (at the waist), listen to (a person); [0.796, 0.179, 0.997, 0.824]: watch (a person), stand."}]}, {"video": "kAsz-76DTDE.mkv", "clip": [1378, 1440], "conversations": [{"from": "human", "timestamps": 1408.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.065, 0.194, 0.309, 0.461]: sit, play musical instrument; [0.426, 0.37, 0.511, 0.476]: listen (e.g., to music), sit, play musical instrument; [0.675, 0.468, 0.994, 0.874]: sit, hit (an object), play musical instrument."}, {"from": "human", "timestamps": 1440.0, "value": "Describe the actions of the person at bbox [0.12, 0.257, 0.555, 0.699] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.12, 0.257, 0.555, 0.699]: sit, eat.", "From 1435.0s to 1439.0s, [0.112, 0.262, 0.561, 0.692]->[0.125, 0.253, 0.564, 0.695]->[0.127, 0.258, 0.551, 0.687]->[0.134, 0.265, 0.564, 0.696]->[0.113, 0.241, 0.561, 0.694]: sit, eat.", "At 1434.0s, [0.125, 0.264, 0.55, 0.703]: sit, carry/hold (an object)."]}]}, {"video": "kAsz-76DTDE.mkv", "clip": [1450, 1489], "conversations": [{"from": "human", "timestamps": 1480.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.098, 0.22, 0.584, 0.696]: carry/hold (an object), sit, answer phone."}, {"from": "human", "timestamps": 1489.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.097, 0.229, 0.538, 0.696]?"}, {"from": "gpt", "value": ["At Now, [0.097, 0.229, 0.538, 0.696]: sit, answer phone.", "At 1488.0s, [0.099, 0.214, 0.588, 0.697]: sit, answer phone.", "From 1485.0s to 1487.0s, [0.088, 0.222, 0.495, 0.688]->[0.082, 0.224, 0.523, 0.686]->[0.076, 0.213, 0.523, 0.695]: sit, answer phone, carry/hold (an object).", "At 1484.0s, [0.083, 0.215, 0.493, 0.697]: sit, answer phone.", "From 1480.0s to 1483.0s, [0.098, 0.22, 0.584, 0.696]->[0.089, 0.213, 0.611, 0.695]->[0.082, 0.21, 0.539, 0.689]->[0.089, 0.211, 0.519, 0.691]: sit, answer phone, carry/hold (an object).", "From 1478.0s to 1479.0s, [0.098, 0.217, 0.588, 0.697]->[0.093, 0.218, 0.634, 0.702]: sit, answer phone, carry/hold (an object), touch (an object).", "At 1477.0s, [0.099, 0.216, 0.59, 0.706]: sit, answer phone, carry/hold (an object).", "At 1476.0s, [0.093, 0.221, 0.578, 0.704]: sit, stand, answer phone, carry/hold (an object), eat.", "At 1475.0s, [0.095, 0.217, 0.552, 0.687]: sit, answer phone, carry/hold (an object).", "From 1470.0s to 1474.0s, [0.141, 0.228, 0.561, 0.689]->[0.076, 0.256, 0.619, 0.685]->[0.058, 0.228, 0.639, 0.687]->[0.065, 0.228, 0.528, 0.681]->[0.103, 0.217, 0.582, 0.701]: sit, carry/hold (an object).", "At 1469.0s, [0.172, 0.234, 0.563, 0.698]: stand, carry/hold (an object).", "At 1468.0s, [0.122, 0.192, 0.564, 0.722]: sit, stand, carry/hold (an object).", "From 1466.0s to 1467.0s, [0.193, 0.264, 0.624, 0.712]->[0.168, 0.211, 0.556, 0.721]: stand, carry/hold (an object).", "From 1464.0s to 1465.0s, [0.202, 0.244, 0.661, 0.706]->[0.187, 0.329, 0.653, 0.717]: stand.", "From 1462.0s to 1463.0s, [0.153, 0.229, 0.554, 0.717]->[0.228, 0.218, 0.703, 0.702]: sit.", "At 1461.0s, [0.188, 0.265, 0.6, 0.713]: sit, talk to (e.g., self, a person, a group).", "At 1460.0s, [0.152, 0.238, 0.538, 0.714]: sit.", "At 1459.0s, [0.084, 0.264, 0.495, 0.752]: sit, carry/hold (an object).", "At 1458.0s, [0.071, 0.276, 0.873, 0.72]: sit, stand, carry/hold (an object), 
lift/pick up.", "From 1455.0s to 1457.0s, [0.098, 0.187, 0.736, 0.758]->[0.082, 0.28, 0.524, 0.745]->[0.078, 0.299, 0.591, 0.731]: sit, carry/hold (an object).", "At 1454.0s, [0.075, 0.325, 0.588, 0.732]: sit, stand, carry/hold (an object).", "From 1452.0s to 1453.0s, [0.081, 0.344, 0.59, 0.732]->[0.086, 0.378, 0.654, 0.743]: sit, carry/hold (an object).", "From 1450.0s to 1451.0s, [0.116, 0.365, 0.672, 0.73]->[0.027, 0.235, 0.505, 0.705]: stand, carry/hold (an object).", "From 1448.0s to 1449.0s, [0.103, 0.274, 0.526, 0.701]->[0.13, 0.379, 0.682, 0.711]: sit, eat.", "At 1447.0s, [0.098, 0.242, 0.492, 0.699]: sit, carry/hold (an object), eat.", "At 1446.0s, [0.101, 0.255, 0.519, 0.699]: sit, eat.", "From 1444.0s to 1445.0s, [0.112, 0.318, 0.572, 0.7]->[0.093, 0.28, 0.559, 0.702]: sit, carry/hold (an object), eat.", "At 1443.0s, [0.126, 0.296, 0.566, 0.697]: sit, carry/hold (an object).", "At 1442.0s, [0.126, 0.266, 0.549, 0.698]: sit, carry/hold (an object), read.", "At 1441.0s, [0.115, 0.279, 0.545, 0.693]: sit, carry/hold (an object), eat.", "From 1435.0s to 1440.0s, [0.112, 0.262, 0.561, 0.692]->[0.125, 0.253, 0.564, 0.695]->[0.127, 0.258, 0.551, 0.687]->[0.134, 0.265, 0.564, 0.696]->[0.113, 0.241, 0.561, 0.694]->[0.12, 0.257, 0.555, 0.699]: sit, eat.", "At 1434.0s, [0.125, 0.264, 0.55, 0.703]: sit, carry/hold (an object)."]}]}, {"video": "kAsz-76DTDE.mkv", "clip": [1479, 1537], "conversations": [{"from": "human", "timestamps": 1509.0, "value": "What actions is the person in bbox [0.09, 0.248, 0.481, 0.684] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.09, 0.248, 0.481, 0.684]: sit, answer phone.", "At 1508.0s, [0.095, 0.239, 0.486, 0.684]: sit, answer phone, touch (an object).", "At 1507.0s, [0.098, 0.246, 0.489, 0.681]: sit, answer phone.", "At 1506.0s, [0.085, 0.235, 0.499, 0.701]: sit, answer phone, carry/hold (an object).", "From 1503.0s to 1505.0s, [0.097, 0.243, 0.497, 0.685]->[0.098, 0.24, 0.506, 0.69]->[0.088, 0.236, 0.513, 0.683]: sit, answer phone.", "From 1501.0s to 1502.0s, [0.098, 0.239, 0.493, 0.689]->[0.093, 0.245, 0.494, 0.695]: sit, answer phone, touch (an object).", "From 1493.0s to 1500.0s, [0.1, 0.241, 0.49, 0.691]->[0.086, 0.229, 0.49, 0.691]->[0.096, 0.23, 0.516, 0.681]->[0.087, 0.231, 0.498, 0.695]->[0.086, 0.23, 0.528, 0.693]->[0.081, 0.234, 0.587, 0.688]->[0.085, 0.231, 0.494, 0.693]->[0.095, 0.231, 0.562, 0.682]: sit, answer phone, carry/hold (an object).", "At 1492.0s, [0.091, 0.241, 0.516, 0.688]: answer phone.", "At 1491.0s, [0.098, 0.239, 0.554, 0.691]: stand, answer phone.", "From 1488.0s to 1490.0s, [0.099, 0.214, 0.588, 0.697]->[0.097, 0.229, 0.538, 0.696]->[0.097, 0.238, 0.547, 0.684]: sit, answer phone.", "From 1485.0s to 1487.0s, [0.088, 0.222, 0.495, 0.688]->[0.082, 0.224, 0.523, 0.686]->[0.076, 0.213, 0.523, 0.695]: sit, answer phone, carry/hold (an object).", "At 1484.0s, [0.083, 0.215, 0.493, 0.697]: sit, answer phone.", "From 1480.0s to 1483.0s, [0.098, 0.22, 0.584, 0.696]->[0.089, 0.213, 0.611, 0.695]->[0.082, 0.21, 0.539, 0.689]->[0.089, 0.211, 0.519, 0.691]: sit, answer phone, carry/hold (an object).", "From 1478.0s to 1479.0s, [0.098, 0.217, 0.588, 0.697]->[0.093, 0.218, 0.634, 0.702]: sit, answer phone, carry/hold (an object), touch (an object).", "At 1477.0s, [0.099, 0.216, 0.59, 0.706]: sit, answer phone, carry/hold (an object).", "At 1476.0s, [0.093, 0.221, 0.578, 0.704]: sit, stand, answer phone, carry/hold (an object), eat.", "At 1475.0s, [0.095, 0.217, 0.552, 0.687]: 
sit, answer phone, carry/hold (an object).", "From 1470.0s to 1474.0s, [0.141, 0.228, 0.561, 0.689]->[0.076, 0.256, 0.619, 0.685]->[0.058, 0.228, 0.639, 0.687]->[0.065, 0.228, 0.528, 0.681]->[0.103, 0.217, 0.582, 0.701]: sit, carry/hold (an object).", "At 1469.0s, [0.172, 0.234, 0.563, 0.698]: stand, carry/hold (an object).", "At 1468.0s, [0.122, 0.192, 0.564, 0.722]: sit, stand, carry/hold (an object).", "From 1466.0s to 1467.0s, [0.193, 0.264, 0.624, 0.712]->[0.168, 0.211, 0.556, 0.721]: stand, carry/hold (an object).", "From 1464.0s to 1465.0s, [0.202, 0.244, 0.661, 0.706]->[0.187, 0.329, 0.653, 0.717]: stand.", "From 1462.0s to 1463.0s, [0.153, 0.229, 0.554, 0.717]->[0.228, 0.218, 0.703, 0.702]: sit.", "At 1461.0s, [0.188, 0.265, 0.6, 0.713]: sit, talk to (e.g., self, a person, a group).", "At 1460.0s, [0.152, 0.238, 0.538, 0.714]: sit.", "At 1459.0s, [0.084, 0.264, 0.495, 0.752]: sit, carry/hold (an object).", "At 1458.0s, [0.071, 0.276, 0.873, 0.72]: sit, stand, carry/hold (an object), lift/pick up.", "From 1455.0s to 1457.0s, [0.098, 0.187, 0.736, 0.758]->[0.082, 0.28, 0.524, 0.745]->[0.078, 0.299, 0.591, 0.731]: sit, carry/hold (an object).", "At 1454.0s, [0.075, 0.325, 0.588, 0.732]: sit, stand, carry/hold (an object).", "From 1452.0s to 1453.0s, [0.081, 0.344, 0.59, 0.732]->[0.086, 0.378, 0.654, 0.743]: sit, carry/hold (an object).", "From 1450.0s to 1451.0s, [0.116, 0.365, 0.672, 0.73]->[0.027, 0.235, 0.505, 0.705]: stand, carry/hold (an object).", "From 1448.0s to 1449.0s, [0.103, 0.274, 0.526, 0.701]->[0.13, 0.379, 0.682, 0.711]: sit, eat.", "At 1447.0s, [0.098, 0.242, 0.492, 0.699]: sit, carry/hold (an object), eat.", "At 1446.0s, [0.101, 0.255, 0.519, 0.699]: sit, eat.", "From 1444.0s to 1445.0s, [0.112, 0.318, 0.572, 0.7]->[0.093, 0.28, 0.559, 0.702]: sit, carry/hold (an object), eat.", "At 1443.0s, [0.126, 0.296, 0.566, 0.697]: sit, carry/hold (an object).", "At 1442.0s, [0.126, 0.266, 0.549, 0.698]: sit, carry/hold (an object), read.", "At 1441.0s, [0.115, 0.279, 0.545, 0.693]: sit, carry/hold (an object), eat.", "From 1435.0s to 1440.0s, [0.112, 0.262, 0.561, 0.692]->[0.125, 0.253, 0.564, 0.695]->[0.127, 0.258, 0.551, 0.687]->[0.134, 0.265, 0.564, 0.696]->[0.113, 0.241, 0.561, 0.694]->[0.12, 0.257, 0.555, 0.699]: sit, eat.", "At 1434.0s, [0.125, 0.264, 0.55, 0.703]: sit, carry/hold (an object)."]}, {"from": "human", "timestamps": 1537.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.105, 0.217, 0.527, 0.694]: stand, answer phone."}]}, {"video": "kAsz-76DTDE.mkv", "clip": [1471, 1598], "conversations": [{"from": "human", "timestamps": 1501.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.098, 0.239, 0.493, 0.689]: sit, touch (an object), answer phone."}, {"from": "human", "timestamps": 1598.0, "value": "Describe the actions of the person at bbox [0.082, 0.237, 0.553, 0.69] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.082, 0.237, 0.553, 0.69]: sit, stand, answer phone.", "From 1595.0s to 1597.0s, [0.1, 0.212, 0.523, 0.696]->[0.114, 0.216, 0.509, 0.693]->[0.097, 0.232, 0.631, 0.707]: sit, answer phone.", "At 1594.0s, [0.083, 0.216, 0.506, 0.685]: sit, answer phone, carry/hold (an object).", "From 1589.0s to 1593.0s, [0.084, 0.224, 0.524, 0.69]->[0.089, 0.218, 0.585, 0.687]->[0.09, 0.221, 0.55, 0.69]->[0.09, 0.216, 
0.551, 0.685]->[0.102, 0.222, 0.507, 0.684]: sit, answer phone.", "At 1588.0s, [0.086, 0.225, 0.533, 0.687]: sit, stand, answer phone.", "From 1582.0s to 1587.0s, [0.092, 0.232, 0.556, 0.689]->[0.091, 0.227, 0.558, 0.69]->[0.091, 0.232, 0.557, 0.685]->[0.091, 0.231, 0.574, 0.687]->[0.088, 0.227, 0.555, 0.688]->[0.088, 0.225, 0.536, 0.686]: sit, answer phone.", "At 1581.0s, [0.09, 0.233, 0.55, 0.692]: sit, stand, answer phone.", "At 1580.0s, [0.088, 0.234, 0.551, 0.69]: sit, answer phone, talk to (e.g., self, a person, a group).", "From 1577.0s to 1579.0s, [0.085, 0.235, 0.499, 0.689]->[0.082, 0.238, 0.589, 0.691]->[0.087, 0.24, 0.538, 0.691]: sit, answer phone.", "From 1575.0s to 1576.0s, [0.095, 0.214, 0.614, 0.702]->[0.081, 0.223, 0.504, 0.691]: sit, answer phone, carry/hold (an object).", "At 1574.0s, [0.081, 0.206, 0.643, 0.698]: stand, answer phone.", "At 1573.0s, [0.123, 0.218, 0.597, 0.705]: stand, answer phone, touch (an object).", "At 1572.0s, [0.146, 0.216, 0.594, 0.684]: stand, answer phone.", "At 1571.0s, [0.128, 0.224, 0.596, 0.704]: sit, answer phone.", "At 1570.0s, [0.13, 0.234, 0.593, 0.702]: answer phone.", "At 1569.0s, [0.128, 0.237, 0.643, 0.703]: stand, answer phone, touch (an object).", "From 1567.0s to 1568.0s, [0.122, 0.218, 0.57, 0.707]->[0.13, 0.23, 0.639, 0.709]: sit, answer phone.", "At 1566.0s, [0.129, 0.219, 0.634, 0.707]: sit, answer phone, carry/hold (an object), touch (an object).", "At 1565.0s, [0.135, 0.224, 0.631, 0.699]: sit, answer phone, carry/hold (an object).", "From 1563.0s to 1564.0s, [0.131, 0.24, 0.657, 0.705]->[0.13, 0.234, 0.607, 0.698]: sit, stand, answer phone, carry/hold (an object).", "At 1562.0s, [0.132, 0.241, 0.57, 0.706]: sit, stand, answer phone, touch (an object).", "At 1561.0s, [0.136, 0.252, 0.579, 0.704]: sit, answer phone.", "At 1560.0s, [0.135, 0.248, 0.571, 0.707]: sit, stand, answer phone, touch (an object).", "From 1553.0s to 1559.0s, [0.085, 0.224, 0.625, 0.694]->[0.128, 0.236, 0.64, 0.715]->[0.133, 0.24, 0.639, 0.704]->[0.125, 0.232, 0.597, 0.699]->[0.152, 0.249, 0.597, 0.698]->[0.153, 0.257, 0.583, 0.699]->[0.128, 0.256, 0.627, 0.716]: sit, answer phone, touch (an object).", "From 1545.0s to 1552.0s, [0.084, 0.232, 0.507, 0.689]->[0.082, 0.228, 0.495, 0.689]->[0.078, 0.227, 0.521, 0.682]->[0.082, 0.229, 0.534, 0.687]->[0.082, 0.232, 0.518, 0.689]->[0.079, 0.228, 0.518, 0.689]->[0.078, 0.233, 0.515, 0.685]->[0.082, 0.227, 0.564, 0.696]: sit, answer phone.", "At 1544.0s, [0.077, 0.224, 0.503, 0.696]: sit, stand, answer phone, talk to (e.g., self, a person, a group).", "From 1541.0s to 1543.0s, [0.094, 0.245, 0.646, 0.687]->[0.074, 0.231, 0.514, 0.691]->[0.069, 0.255, 0.478, 0.692]: sit, answer phone.", "At 1540.0s, [0.115, 0.211, 0.528, 0.695]: sit, answer phone, touch (an object).", "At 1539.0s, [0.127, 0.219, 0.524, 0.696]: sit, answer phone, carry/hold (an object).", "At 1538.0s, [0.117, 0.213, 0.614, 0.706]: sit, answer phone.", "At 1537.0s, [0.105, 0.217, 0.527, 0.694]: stand, answer phone.", "From 1528.0s to 1536.0s, [0.099, 0.24, 0.594, 0.689]->[0.09, 0.242, 0.611, 0.703]->[0.104, 0.233, 0.65, 0.697]->[0.091, 0.218, 0.563, 0.7]->[0.096, 0.22, 0.473, 0.703]->[0.101, 0.227, 0.478, 0.695]->[0.1, 0.228, 0.487, 0.694]->[0.1, 0.234, 0.481, 0.697]->[0.086, 0.228, 0.489, 0.692]: sit, answer phone.", "At 1527.0s, [0.091, 0.243, 0.596, 0.713]: sit, stand, answer phone.", "From 1524.0s to 1526.0s, [0.098, 0.237, 0.61, 0.698]->[0.106, 0.239, 0.591, 0.7]->[0.097, 0.242, 0.583, 0.693]: sit, answer phone.", "At 1523.0s, [0.086, 
0.234, 0.55, 0.688]: sit, stand, answer phone.", "At 1522.0s, [0.078, 0.225, 0.509, 0.694]: sit, answer phone, carry/hold (an object).", "At 1521.0s, [0.08, 0.212, 0.588, 0.696]: sit, answer phone, touch (an object).", "At 1520.0s, [0.091, 0.209, 0.488, 0.698]: sit, stand, answer phone.", "At 1519.0s, [0.091, 0.212, 0.63, 0.698]: sit, answer phone.", "At 1518.0s, [0.089, 0.225, 0.622, 0.689]: sit, answer phone, touch (an object).", "At 1517.0s, [0.102, 0.252, 0.504, 0.688]: answer phone.", "From 1514.0s to 1516.0s, [0.105, 0.244, 0.506, 0.686]->[0.095, 0.244, 0.488, 0.696]->[0.093, 0.242, 0.495, 0.694]: sit, answer phone.", "At 1513.0s, [0.098, 0.247, 0.509, 0.693]: sit, answer phone, touch (an object).", "From 1509.0s to 1512.0s, [0.09, 0.248, 0.481, 0.684]->[0.1, 0.241, 0.488, 0.692]->[0.1, 0.252, 0.512, 0.689]->[0.107, 0.243, 0.513, 0.686]: sit, answer phone.", "At 1508.0s, [0.095, 0.239, 0.486, 0.684]: sit, answer phone, touch (an object).", "At 1507.0s, [0.098, 0.246, 0.489, 0.681]: sit, answer phone.", "At 1506.0s, [0.085, 0.235, 0.499, 0.701]: sit, answer phone, carry/hold (an object).", "From 1503.0s to 1505.0s, [0.097, 0.243, 0.497, 0.685]->[0.098, 0.24, 0.506, 0.69]->[0.088, 0.236, 0.513, 0.683]: sit, answer phone.", "From 1501.0s to 1502.0s, [0.098, 0.239, 0.493, 0.689]->[0.093, 0.245, 0.494, 0.695]: sit, answer phone, touch (an object).", "From 1493.0s to 1500.0s, [0.1, 0.241, 0.49, 0.691]->[0.086, 0.229, 0.49, 0.691]->[0.096, 0.23, 0.516, 0.681]->[0.087, 0.231, 0.498, 0.695]->[0.086, 0.23, 0.528, 0.693]->[0.081, 0.234, 0.587, 0.688]->[0.085, 0.231, 0.494, 0.693]->[0.095, 0.231, 0.562, 0.682]: sit, answer phone, carry/hold (an object).", "At 1492.0s, [0.091, 0.241, 0.516, 0.688]: answer phone.", "At 1491.0s, [0.098, 0.239, 0.554, 0.691]: stand, answer phone.", "From 1488.0s to 1490.0s, [0.099, 0.214, 0.588, 0.697]->[0.097, 0.229, 0.538, 0.696]->[0.097, 0.238, 0.547, 0.684]: sit, answer phone.", "From 1485.0s to 1487.0s, [0.088, 0.222, 0.495, 0.688]->[0.082, 0.224, 0.523, 0.686]->[0.076, 0.213, 0.523, 0.695]: sit, answer phone, carry/hold (an object).", "At 1484.0s, [0.083, 0.215, 0.493, 0.697]: sit, answer phone.", "From 1480.0s to 1483.0s, [0.098, 0.22, 0.584, 0.696]->[0.089, 0.213, 0.611, 0.695]->[0.082, 0.21, 0.539, 0.689]->[0.089, 0.211, 0.519, 0.691]: sit, answer phone, carry/hold (an object).", "From 1478.0s to 1479.0s, [0.098, 0.217, 0.588, 0.697]->[0.093, 0.218, 0.634, 0.702]: sit, answer phone, carry/hold (an object), touch (an object).", "At 1477.0s, [0.099, 0.216, 0.59, 0.706]: sit, answer phone, carry/hold (an object).", "At 1476.0s, [0.093, 0.221, 0.578, 0.704]: sit, stand, answer phone, carry/hold (an object), eat.", "At 1475.0s, [0.095, 0.217, 0.552, 0.687]: sit, answer phone, carry/hold (an object).", "From 1470.0s to 1474.0s, [0.141, 0.228, 0.561, 0.689]->[0.076, 0.256, 0.619, 0.685]->[0.058, 0.228, 0.639, 0.687]->[0.065, 0.228, 0.528, 0.681]->[0.103, 0.217, 0.582, 0.701]: sit, carry/hold (an object).", "At 1469.0s, [0.172, 0.234, 0.563, 0.698]: stand, carry/hold (an object).", "At 1468.0s, [0.122, 0.192, 0.564, 0.722]: sit, stand, carry/hold (an object).", "From 1466.0s to 1467.0s, [0.193, 0.264, 0.624, 0.712]->[0.168, 0.211, 0.556, 0.721]: stand, carry/hold (an object).", "From 1464.0s to 1465.0s, [0.202, 0.244, 0.661, 0.706]->[0.187, 0.329, 0.653, 0.717]: stand.", "From 1462.0s to 1463.0s, [0.153, 0.229, 0.554, 0.717]->[0.228, 0.218, 0.703, 0.702]: sit.", "At 1461.0s, [0.188, 0.265, 0.6, 0.713]: sit, talk to (e.g., self, a person, a group).", "At 1460.0s, 
[0.152, 0.238, 0.538, 0.714]: sit.", "At 1459.0s, [0.084, 0.264, 0.495, 0.752]: sit, carry/hold (an object).", "At 1458.0s, [0.071, 0.276, 0.873, 0.72]: sit, stand, carry/hold (an object), lift/pick up.", "From 1455.0s to 1457.0s, [0.098, 0.187, 0.736, 0.758]->[0.082, 0.28, 0.524, 0.745]->[0.078, 0.299, 0.591, 0.731]: sit, carry/hold (an object).", "At 1454.0s, [0.075, 0.325, 0.588, 0.732]: sit, stand, carry/hold (an object).", "From 1452.0s to 1453.0s, [0.081, 0.344, 0.59, 0.732]->[0.086, 0.378, 0.654, 0.743]: sit, carry/hold (an object).", "From 1450.0s to 1451.0s, [0.116, 0.365, 0.672, 0.73]->[0.027, 0.235, 0.505, 0.705]: stand, carry/hold (an object).", "From 1448.0s to 1449.0s, [0.103, 0.274, 0.526, 0.701]->[0.13, 0.379, 0.682, 0.711]: sit, eat.", "At 1447.0s, [0.098, 0.242, 0.492, 0.699]: sit, carry/hold (an object), eat.", "At 1446.0s, [0.101, 0.255, 0.519, 0.699]: sit, eat.", "From 1444.0s to 1445.0s, [0.112, 0.318, 0.572, 0.7]->[0.093, 0.28, 0.559, 0.702]: sit, carry/hold (an object), eat.", "At 1443.0s, [0.126, 0.296, 0.566, 0.697]: sit, carry/hold (an object).", "At 1442.0s, [0.126, 0.266, 0.549, 0.698]: sit, carry/hold (an object), read.", "At 1441.0s, [0.115, 0.279, 0.545, 0.693]: sit, carry/hold (an object), eat.", "From 1435.0s to 1440.0s, [0.112, 0.262, 0.561, 0.692]->[0.125, 0.253, 0.564, 0.695]->[0.127, 0.258, 0.551, 0.687]->[0.134, 0.265, 0.564, 0.696]->[0.113, 0.241, 0.561, 0.694]->[0.12, 0.257, 0.555, 0.699]: sit, eat.", "At 1434.0s, [0.125, 0.264, 0.55, 0.703]: sit, carry/hold (an object)."]}]}, {"video": "kAsz-76DTDE.mkv", "clip": [1563, 1604], "conversations": [{"from": "human", "timestamps": 1593.0, "value": "For the person at bbox [0.102, 0.222, 0.507, 0.684], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.102, 0.222, 0.507, 0.684]: sit, answer phone.", "From 1589.0s to 1592.0s, [0.084, 0.224, 0.524, 0.69]->[0.089, 0.218, 0.585, 0.687]->[0.09, 0.221, 0.55, 0.69]->[0.09, 0.216, 0.551, 0.685]: sit, answer phone.", "At 1588.0s, [0.086, 0.225, 0.533, 0.687]: sit, stand, answer phone.", "From 1582.0s to 1587.0s, [0.092, 0.232, 0.556, 0.689]->[0.091, 0.227, 0.558, 0.69]->[0.091, 0.232, 0.557, 0.685]->[0.091, 0.231, 0.574, 0.687]->[0.088, 0.227, 0.555, 0.688]->[0.088, 0.225, 0.536, 0.686]: sit, answer phone.", "At 1581.0s, [0.09, 0.233, 0.55, 0.692]: sit, stand, answer phone.", "At 1580.0s, [0.088, 0.234, 0.551, 0.69]: sit, answer phone, talk to (e.g., self, a person, a group).", "From 1577.0s to 1579.0s, [0.085, 0.235, 0.499, 0.689]->[0.082, 0.238, 0.589, 0.691]->[0.087, 0.24, 0.538, 0.691]: sit, answer phone.", "From 1575.0s to 1576.0s, [0.095, 0.214, 0.614, 0.702]->[0.081, 0.223, 0.504, 0.691]: sit, answer phone, carry/hold (an object).", "At 1574.0s, [0.081, 0.206, 0.643, 0.698]: stand, answer phone.", "At 1573.0s, [0.123, 0.218, 0.597, 0.705]: stand, answer phone, touch (an object).", "At 1572.0s, [0.146, 0.216, 0.594, 0.684]: stand, answer phone.", "At 1571.0s, [0.128, 0.224, 0.596, 0.704]: sit, answer phone.", "At 1570.0s, [0.13, 0.234, 0.593, 0.702]: answer phone.", "At 1569.0s, [0.128, 0.237, 0.643, 0.703]: stand, answer phone, touch (an object).", "From 1567.0s to 1568.0s, [0.122, 0.218, 0.57, 0.707]->[0.13, 0.23, 0.639, 0.709]: sit, answer phone.", "At 1566.0s, [0.129, 0.219, 0.634, 0.707]: sit, answer phone, carry/hold (an object), touch (an object).", "At 1565.0s, [0.135, 0.224, 0.631, 0.699]: sit, answer phone, carry/hold (an object).", "From 1563.0s to 1564.0s, [0.131, 0.24, 0.657, 0.705]->[0.13, 
0.234, 0.607, 0.698]: sit, stand, answer phone, carry/hold (an object).", "At 1562.0s, [0.132, 0.241, 0.57, 0.706]: sit, stand, answer phone, touch (an object).", "At 1561.0s, [0.136, 0.252, 0.579, 0.704]: sit, answer phone.", "At 1560.0s, [0.135, 0.248, 0.571, 0.707]: sit, stand, answer phone, touch (an object).", "From 1553.0s to 1559.0s, [0.085, 0.224, 0.625, 0.694]->[0.128, 0.236, 0.64, 0.715]->[0.133, 0.24, 0.639, 0.704]->[0.125, 0.232, 0.597, 0.699]->[0.152, 0.249, 0.597, 0.698]->[0.153, 0.257, 0.583, 0.699]->[0.128, 0.256, 0.627, 0.716]: sit, answer phone, touch (an object).", "From 1545.0s to 1552.0s, [0.084, 0.232, 0.507, 0.689]->[0.082, 0.228, 0.495, 0.689]->[0.078, 0.227, 0.521, 0.682]->[0.082, 0.229, 0.534, 0.687]->[0.082, 0.232, 0.518, 0.689]->[0.079, 0.228, 0.518, 0.689]->[0.078, 0.233, 0.515, 0.685]->[0.082, 0.227, 0.564, 0.696]: sit, answer phone.", "At 1544.0s, [0.077, 0.224, 0.503, 0.696]: sit, stand, answer phone, talk to (e.g., self, a person, a group).", "From 1541.0s to 1543.0s, [0.094, 0.245, 0.646, 0.687]->[0.074, 0.231, 0.514, 0.691]->[0.069, 0.255, 0.478, 0.692]: sit, answer phone.", "At 1540.0s, [0.115, 0.211, 0.528, 0.695]: sit, answer phone, touch (an object).", "At 1539.0s, [0.127, 0.219, 0.524, 0.696]: sit, answer phone, carry/hold (an object).", "At 1538.0s, [0.117, 0.213, 0.614, 0.706]: sit, answer phone.", "At 1537.0s, [0.105, 0.217, 0.527, 0.694]: stand, answer phone.", "From 1528.0s to 1536.0s, [0.099, 0.24, 0.594, 0.689]->[0.09, 0.242, 0.611, 0.703]->[0.104, 0.233, 0.65, 0.697]->[0.091, 0.218, 0.563, 0.7]->[0.096, 0.22, 0.473, 0.703]->[0.101, 0.227, 0.478, 0.695]->[0.1, 0.228, 0.487, 0.694]->[0.1, 0.234, 0.481, 0.697]->[0.086, 0.228, 0.489, 0.692]: sit, answer phone.", "At 1527.0s, [0.091, 0.243, 0.596, 0.713]: sit, stand, answer phone.", "From 1524.0s to 1526.0s, [0.098, 0.237, 0.61, 0.698]->[0.106, 0.239, 0.591, 0.7]->[0.097, 0.242, 0.583, 0.693]: sit, answer phone.", "At 1523.0s, [0.086, 0.234, 0.55, 0.688]: sit, stand, answer phone.", "At 1522.0s, [0.078, 0.225, 0.509, 0.694]: sit, answer phone, carry/hold (an object).", "At 1521.0s, [0.08, 0.212, 0.588, 0.696]: sit, answer phone, touch (an object).", "At 1520.0s, [0.091, 0.209, 0.488, 0.698]: sit, stand, answer phone.", "At 1519.0s, [0.091, 0.212, 0.63, 0.698]: sit, answer phone.", "At 1518.0s, [0.089, 0.225, 0.622, 0.689]: sit, answer phone, touch (an object).", "At 1517.0s, [0.102, 0.252, 0.504, 0.688]: answer phone.", "From 1514.0s to 1516.0s, [0.105, 0.244, 0.506, 0.686]->[0.095, 0.244, 0.488, 0.696]->[0.093, 0.242, 0.495, 0.694]: sit, answer phone.", "At 1513.0s, [0.098, 0.247, 0.509, 0.693]: sit, answer phone, touch (an object).", "From 1509.0s to 1512.0s, [0.09, 0.248, 0.481, 0.684]->[0.1, 0.241, 0.488, 0.692]->[0.1, 0.252, 0.512, 0.689]->[0.107, 0.243, 0.513, 0.686]: sit, answer phone.", "At 1508.0s, [0.095, 0.239, 0.486, 0.684]: sit, answer phone, touch (an object).", "At 1507.0s, [0.098, 0.246, 0.489, 0.681]: sit, answer phone.", "At 1506.0s, [0.085, 0.235, 0.499, 0.701]: sit, answer phone, carry/hold (an object).", "From 1503.0s to 1505.0s, [0.097, 0.243, 0.497, 0.685]->[0.098, 0.24, 0.506, 0.69]->[0.088, 0.236, 0.513, 0.683]: sit, answer phone.", "From 1501.0s to 1502.0s, [0.098, 0.239, 0.493, 0.689]->[0.093, 0.245, 0.494, 0.695]: sit, answer phone, touch (an object).", "From 1493.0s to 1500.0s, [0.1, 0.241, 0.49, 0.691]->[0.086, 0.229, 0.49, 0.691]->[0.096, 0.23, 0.516, 0.681]->[0.087, 0.231, 0.498, 0.695]->[0.086, 0.23, 0.528, 0.693]->[0.081, 0.234, 0.587, 0.688]->[0.085, 0.231, 
0.494, 0.693]->[0.095, 0.231, 0.562, 0.682]: sit, answer phone, carry/hold (an object).", "At 1492.0s, [0.091, 0.241, 0.516, 0.688]: answer phone.", "At 1491.0s, [0.098, 0.239, 0.554, 0.691]: stand, answer phone.", "From 1488.0s to 1490.0s, [0.099, 0.214, 0.588, 0.697]->[0.097, 0.229, 0.538, 0.696]->[0.097, 0.238, 0.547, 0.684]: sit, answer phone.", "From 1485.0s to 1487.0s, [0.088, 0.222, 0.495, 0.688]->[0.082, 0.224, 0.523, 0.686]->[0.076, 0.213, 0.523, 0.695]: sit, answer phone, carry/hold (an object).", "At 1484.0s, [0.083, 0.215, 0.493, 0.697]: sit, answer phone.", "From 1480.0s to 1483.0s, [0.098, 0.22, 0.584, 0.696]->[0.089, 0.213, 0.611, 0.695]->[0.082, 0.21, 0.539, 0.689]->[0.089, 0.211, 0.519, 0.691]: sit, answer phone, carry/hold (an object).", "From 1478.0s to 1479.0s, [0.098, 0.217, 0.588, 0.697]->[0.093, 0.218, 0.634, 0.702]: sit, answer phone, carry/hold (an object), touch (an object).", "At 1477.0s, [0.099, 0.216, 0.59, 0.706]: sit, answer phone, carry/hold (an object).", "At 1476.0s, [0.093, 0.221, 0.578, 0.704]: sit, stand, answer phone, carry/hold (an object), eat.", "At 1475.0s, [0.095, 0.217, 0.552, 0.687]: sit, answer phone, carry/hold (an object).", "From 1470.0s to 1474.0s, [0.141, 0.228, 0.561, 0.689]->[0.076, 0.256, 0.619, 0.685]->[0.058, 0.228, 0.639, 0.687]->[0.065, 0.228, 0.528, 0.681]->[0.103, 0.217, 0.582, 0.701]: sit, carry/hold (an object).", "At 1469.0s, [0.172, 0.234, 0.563, 0.698]: stand, carry/hold (an object).", "At 1468.0s, [0.122, 0.192, 0.564, 0.722]: sit, stand, carry/hold (an object).", "From 1466.0s to 1467.0s, [0.193, 0.264, 0.624, 0.712]->[0.168, 0.211, 0.556, 0.721]: stand, carry/hold (an object).", "From 1464.0s to 1465.0s, [0.202, 0.244, 0.661, 0.706]->[0.187, 0.329, 0.653, 0.717]: stand.", "From 1462.0s to 1463.0s, [0.153, 0.229, 0.554, 0.717]->[0.228, 0.218, 0.703, 0.702]: sit.", "At 1461.0s, [0.188, 0.265, 0.6, 0.713]: sit, talk to (e.g., self, a person, a group).", "At 1460.0s, [0.152, 0.238, 0.538, 0.714]: sit.", "At 1459.0s, [0.084, 0.264, 0.495, 0.752]: sit, carry/hold (an object).", "At 1458.0s, [0.071, 0.276, 0.873, 0.72]: sit, stand, carry/hold (an object), lift/pick up.", "From 1455.0s to 1457.0s, [0.098, 0.187, 0.736, 0.758]->[0.082, 0.28, 0.524, 0.745]->[0.078, 0.299, 0.591, 0.731]: sit, carry/hold (an object).", "At 1454.0s, [0.075, 0.325, 0.588, 0.732]: sit, stand, carry/hold (an object).", "From 1452.0s to 1453.0s, [0.081, 0.344, 0.59, 0.732]->[0.086, 0.378, 0.654, 0.743]: sit, carry/hold (an object).", "From 1450.0s to 1451.0s, [0.116, 0.365, 0.672, 0.73]->[0.027, 0.235, 0.505, 0.705]: stand, carry/hold (an object).", "From 1448.0s to 1449.0s, [0.103, 0.274, 0.526, 0.701]->[0.13, 0.379, 0.682, 0.711]: sit, eat.", "At 1447.0s, [0.098, 0.242, 0.492, 0.699]: sit, carry/hold (an object), eat.", "At 1446.0s, [0.101, 0.255, 0.519, 0.699]: sit, eat.", "From 1444.0s to 1445.0s, [0.112, 0.318, 0.572, 0.7]->[0.093, 0.28, 0.559, 0.702]: sit, carry/hold (an object), eat.", "At 1443.0s, [0.126, 0.296, 0.566, 0.697]: sit, carry/hold (an object).", "At 1442.0s, [0.126, 0.266, 0.549, 0.698]: sit, carry/hold (an object), read.", "At 1441.0s, [0.115, 0.279, 0.545, 0.693]: sit, carry/hold (an object), eat.", "From 1435.0s to 1440.0s, [0.112, 0.262, 0.561, 0.692]->[0.125, 0.253, 0.564, 0.695]->[0.127, 0.258, 0.551, 0.687]->[0.134, 0.265, 0.564, 0.696]->[0.113, 0.241, 0.561, 0.694]->[0.12, 0.257, 0.555, 0.699]: sit, eat.", "At 1434.0s, [0.125, 0.264, 0.55, 0.703]: sit, carry/hold (an object)."]}, {"from": "human", "timestamps": 1604.0, "value": 
"In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.091, 0.234, 0.57, 0.695]: sit, answer phone, stand."}]}, {"video": "kAsz-76DTDE.mkv", "clip": [1594, 1677], "conversations": [{"from": "human", "timestamps": 1624.0, "value": "For the person at bbox [0.121, 0.242, 0.564, 0.7], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.121, 0.242, 0.564, 0.7]: sit, answer phone, touch (an object).", "At 1623.0s, [0.123, 0.219, 0.631, 0.714]: sit, stand, answer phone, touch (an object).", "From 1621.0s to 1622.0s, [0.082, 0.221, 0.477, 0.696]->[0.078, 0.209, 0.624, 0.722]: sit, answer phone.", "At 1620.0s, [0.085, 0.232, 0.485, 0.694]: sit, stand, answer phone.", "From 1617.0s to 1619.0s, [0.083, 0.225, 0.478, 0.686]->[0.086, 0.229, 0.498, 0.688]->[0.08, 0.235, 0.522, 0.685]: sit, answer phone.", "At 1616.0s, [0.084, 0.213, 0.487, 0.691]: sit, answer phone, talk to (e.g., self, a person, a group).", "At 1615.0s, [0.072, 0.209, 0.474, 0.698]: stand, answer phone.", "From 1608.0s to 1614.0s, [0.09, 0.239, 0.512, 0.689]->[0.089, 0.237, 0.506, 0.69]->[0.086, 0.234, 0.522, 0.686]->[0.092, 0.228, 0.496, 0.687]->[0.091, 0.237, 0.511, 0.69]->[0.092, 0.238, 0.506, 0.687]->[0.088, 0.233, 0.491, 0.69]: sit, answer phone.", "From 1606.0s to 1607.0s, [0.091, 0.243, 0.605, 0.692]->[0.092, 0.239, 0.605, 0.686]: stand, answer phone.", "At 1605.0s, [0.093, 0.245, 0.504, 0.7]: sit, answer phone.", "At 1604.0s, [0.091, 0.234, 0.57, 0.695]: sit, stand, answer phone.", "From 1602.0s to 1603.0s, [0.091, 0.234, 0.506, 0.689]->[0.094, 0.232, 0.558, 0.7]: sit, answer phone.", "From 1599.0s to 1601.0s, [0.086, 0.227, 0.51, 0.686]->[0.092, 0.233, 0.53, 0.689]->[0.092, 0.227, 0.529, 0.684]: sit, answer phone, carry/hold (an object).", "At 1598.0s, [0.082, 0.237, 0.553, 0.69]: sit, stand, answer phone.", "From 1595.0s to 1597.0s, [0.1, 0.212, 0.523, 0.696]->[0.114, 0.216, 0.509, 0.693]->[0.097, 0.232, 0.631, 0.707]: sit, answer phone.", "At 1594.0s, [0.083, 0.216, 0.506, 0.685]: sit, answer phone, carry/hold (an object).", "From 1589.0s to 1593.0s, [0.084, 0.224, 0.524, 0.69]->[0.089, 0.218, 0.585, 0.687]->[0.09, 0.221, 0.55, 0.69]->[0.09, 0.216, 0.551, 0.685]->[0.102, 0.222, 0.507, 0.684]: sit, answer phone.", "At 1588.0s, [0.086, 0.225, 0.533, 0.687]: sit, stand, answer phone.", "From 1582.0s to 1587.0s, [0.092, 0.232, 0.556, 0.689]->[0.091, 0.227, 0.558, 0.69]->[0.091, 0.232, 0.557, 0.685]->[0.091, 0.231, 0.574, 0.687]->[0.088, 0.227, 0.555, 0.688]->[0.088, 0.225, 0.536, 0.686]: sit, answer phone.", "At 1581.0s, [0.09, 0.233, 0.55, 0.692]: sit, stand, answer phone.", "At 1580.0s, [0.088, 0.234, 0.551, 0.69]: sit, answer phone, talk to (e.g., self, a person, a group).", "From 1577.0s to 1579.0s, [0.085, 0.235, 0.499, 0.689]->[0.082, 0.238, 0.589, 0.691]->[0.087, 0.24, 0.538, 0.691]: sit, answer phone.", "From 1575.0s to 1576.0s, [0.095, 0.214, 0.614, 0.702]->[0.081, 0.223, 0.504, 0.691]: sit, answer phone, carry/hold (an object).", "At 1574.0s, [0.081, 0.206, 0.643, 0.698]: stand, answer phone.", "At 1573.0s, [0.123, 0.218, 0.597, 0.705]: stand, answer phone, touch (an object).", "At 1572.0s, [0.146, 0.216, 0.594, 0.684]: stand, answer phone.", "At 1571.0s, [0.128, 0.224, 0.596, 0.704]: sit, answer phone.", "At 1570.0s, [0.13, 0.234, 0.593, 0.702]: answer phone.", "At 1569.0s, [0.128, 0.237, 0.643, 0.703]: stand, answer phone, touch (an object).", "From 1567.0s to 1568.0s, [0.122, 0.218, 0.57, 
0.707]->[0.13, 0.23, 0.639, 0.709]: sit, answer phone.", "At 1566.0s, [0.129, 0.219, 0.634, 0.707]: sit, answer phone, carry/hold (an object), touch (an object).", "At 1565.0s, [0.135, 0.224, 0.631, 0.699]: sit, answer phone, carry/hold (an object).", "From 1563.0s to 1564.0s, [0.131, 0.24, 0.657, 0.705]->[0.13, 0.234, 0.607, 0.698]: sit, stand, answer phone, carry/hold (an object).", "At 1562.0s, [0.132, 0.241, 0.57, 0.706]: sit, stand, answer phone, touch (an object).", "At 1561.0s, [0.136, 0.252, 0.579, 0.704]: sit, answer phone.", "At 1560.0s, [0.135, 0.248, 0.571, 0.707]: sit, stand, answer phone, touch (an object).", "From 1553.0s to 1559.0s, [0.085, 0.224, 0.625, 0.694]->[0.128, 0.236, 0.64, 0.715]->[0.133, 0.24, 0.639, 0.704]->[0.125, 0.232, 0.597, 0.699]->[0.152, 0.249, 0.597, 0.698]->[0.153, 0.257, 0.583, 0.699]->[0.128, 0.256, 0.627, 0.716]: sit, answer phone, touch (an object).", "From 1545.0s to 1552.0s, [0.084, 0.232, 0.507, 0.689]->[0.082, 0.228, 0.495, 0.689]->[0.078, 0.227, 0.521, 0.682]->[0.082, 0.229, 0.534, 0.687]->[0.082, 0.232, 0.518, 0.689]->[0.079, 0.228, 0.518, 0.689]->[0.078, 0.233, 0.515, 0.685]->[0.082, 0.227, 0.564, 0.696]: sit, answer phone.", "At 1544.0s, [0.077, 0.224, 0.503, 0.696]: sit, stand, answer phone, talk to (e.g., self, a person, a group).", "From 1541.0s to 1543.0s, [0.094, 0.245, 0.646, 0.687]->[0.074, 0.231, 0.514, 0.691]->[0.069, 0.255, 0.478, 0.692]: sit, answer phone.", "At 1540.0s, [0.115, 0.211, 0.528, 0.695]: sit, answer phone, touch (an object).", "At 1539.0s, [0.127, 0.219, 0.524, 0.696]: sit, answer phone, carry/hold (an object).", "At 1538.0s, [0.117, 0.213, 0.614, 0.706]: sit, answer phone.", "At 1537.0s, [0.105, 0.217, 0.527, 0.694]: stand, answer phone.", "From 1528.0s to 1536.0s, [0.099, 0.24, 0.594, 0.689]->[0.09, 0.242, 0.611, 0.703]->[0.104, 0.233, 0.65, 0.697]->[0.091, 0.218, 0.563, 0.7]->[0.096, 0.22, 0.473, 0.703]->[0.101, 0.227, 0.478, 0.695]->[0.1, 0.228, 0.487, 0.694]->[0.1, 0.234, 0.481, 0.697]->[0.086, 0.228, 0.489, 0.692]: sit, answer phone.", "At 1527.0s, [0.091, 0.243, 0.596, 0.713]: sit, stand, answer phone.", "From 1524.0s to 1526.0s, [0.098, 0.237, 0.61, 0.698]->[0.106, 0.239, 0.591, 0.7]->[0.097, 0.242, 0.583, 0.693]: sit, answer phone.", "At 1523.0s, [0.086, 0.234, 0.55, 0.688]: sit, stand, answer phone.", "At 1522.0s, [0.078, 0.225, 0.509, 0.694]: sit, answer phone, carry/hold (an object).", "At 1521.0s, [0.08, 0.212, 0.588, 0.696]: sit, answer phone, touch (an object).", "At 1520.0s, [0.091, 0.209, 0.488, 0.698]: sit, stand, answer phone.", "At 1519.0s, [0.091, 0.212, 0.63, 0.698]: sit, answer phone.", "At 1518.0s, [0.089, 0.225, 0.622, 0.689]: sit, answer phone, touch (an object).", "At 1517.0s, [0.102, 0.252, 0.504, 0.688]: answer phone.", "From 1514.0s to 1516.0s, [0.105, 0.244, 0.506, 0.686]->[0.095, 0.244, 0.488, 0.696]->[0.093, 0.242, 0.495, 0.694]: sit, answer phone.", "At 1513.0s, [0.098, 0.247, 0.509, 0.693]: sit, answer phone, touch (an object).", "From 1509.0s to 1512.0s, [0.09, 0.248, 0.481, 0.684]->[0.1, 0.241, 0.488, 0.692]->[0.1, 0.252, 0.512, 0.689]->[0.107, 0.243, 0.513, 0.686]: sit, answer phone.", "At 1508.0s, [0.095, 0.239, 0.486, 0.684]: sit, answer phone, touch (an object).", "At 1507.0s, [0.098, 0.246, 0.489, 0.681]: sit, answer phone.", "At 1506.0s, [0.085, 0.235, 0.499, 0.701]: sit, answer phone, carry/hold (an object).", "From 1503.0s to 1505.0s, [0.097, 0.243, 0.497, 0.685]->[0.098, 0.24, 0.506, 0.69]->[0.088, 0.236, 0.513, 0.683]: sit, answer phone.", "From 1501.0s to 1502.0s, 
[0.098, 0.239, 0.493, 0.689]->[0.093, 0.245, 0.494, 0.695]: sit, answer phone, touch (an object).", "From 1493.0s to 1500.0s, [0.1, 0.241, 0.49, 0.691]->[0.086, 0.229, 0.49, 0.691]->[0.096, 0.23, 0.516, 0.681]->[0.087, 0.231, 0.498, 0.695]->[0.086, 0.23, 0.528, 0.693]->[0.081, 0.234, 0.587, 0.688]->[0.085, 0.231, 0.494, 0.693]->[0.095, 0.231, 0.562, 0.682]: sit, answer phone, carry/hold (an object).", "At 1492.0s, [0.091, 0.241, 0.516, 0.688]: answer phone.", "At 1491.0s, [0.098, 0.239, 0.554, 0.691]: stand, answer phone.", "From 1488.0s to 1490.0s, [0.099, 0.214, 0.588, 0.697]->[0.097, 0.229, 0.538, 0.696]->[0.097, 0.238, 0.547, 0.684]: sit, answer phone.", "From 1485.0s to 1487.0s, [0.088, 0.222, 0.495, 0.688]->[0.082, 0.224, 0.523, 0.686]->[0.076, 0.213, 0.523, 0.695]: sit, answer phone, carry/hold (an object).", "At 1484.0s, [0.083, 0.215, 0.493, 0.697]: sit, answer phone.", "From 1480.0s to 1483.0s, [0.098, 0.22, 0.584, 0.696]->[0.089, 0.213, 0.611, 0.695]->[0.082, 0.21, 0.539, 0.689]->[0.089, 0.211, 0.519, 0.691]: sit, answer phone, carry/hold (an object).", "From 1478.0s to 1479.0s, [0.098, 0.217, 0.588, 0.697]->[0.093, 0.218, 0.634, 0.702]: sit, answer phone, carry/hold (an object), touch (an object).", "At 1477.0s, [0.099, 0.216, 0.59, 0.706]: sit, answer phone, carry/hold (an object).", "At 1476.0s, [0.093, 0.221, 0.578, 0.704]: sit, stand, answer phone, carry/hold (an object), eat.", "At 1475.0s, [0.095, 0.217, 0.552, 0.687]: sit, answer phone, carry/hold (an object).", "From 1470.0s to 1474.0s, [0.141, 0.228, 0.561, 0.689]->[0.076, 0.256, 0.619, 0.685]->[0.058, 0.228, 0.639, 0.687]->[0.065, 0.228, 0.528, 0.681]->[0.103, 0.217, 0.582, 0.701]: sit, carry/hold (an object).", "At 1469.0s, [0.172, 0.234, 0.563, 0.698]: stand, carry/hold (an object).", "At 1468.0s, [0.122, 0.192, 0.564, 0.722]: sit, stand, carry/hold (an object).", "From 1466.0s to 1467.0s, [0.193, 0.264, 0.624, 0.712]->[0.168, 0.211, 0.556, 0.721]: stand, carry/hold (an object).", "From 1464.0s to 1465.0s, [0.202, 0.244, 0.661, 0.706]->[0.187, 0.329, 0.653, 0.717]: stand.", "From 1462.0s to 1463.0s, [0.153, 0.229, 0.554, 0.717]->[0.228, 0.218, 0.703, 0.702]: sit.", "At 1461.0s, [0.188, 0.265, 0.6, 0.713]: sit, talk to (e.g., self, a person, a group).", "At 1460.0s, [0.152, 0.238, 0.538, 0.714]: sit.", "At 1459.0s, [0.084, 0.264, 0.495, 0.752]: sit, carry/hold (an object).", "At 1458.0s, [0.071, 0.276, 0.873, 0.72]: sit, stand, carry/hold (an object), lift/pick up.", "From 1455.0s to 1457.0s, [0.098, 0.187, 0.736, 0.758]->[0.082, 0.28, 0.524, 0.745]->[0.078, 0.299, 0.591, 0.731]: sit, carry/hold (an object).", "At 1454.0s, [0.075, 0.325, 0.588, 0.732]: sit, stand, carry/hold (an object).", "From 1452.0s to 1453.0s, [0.081, 0.344, 0.59, 0.732]->[0.086, 0.378, 0.654, 0.743]: sit, carry/hold (an object).", "From 1450.0s to 1451.0s, [0.116, 0.365, 0.672, 0.73]->[0.027, 0.235, 0.505, 0.705]: stand, carry/hold (an object).", "From 1448.0s to 1449.0s, [0.103, 0.274, 0.526, 0.701]->[0.13, 0.379, 0.682, 0.711]: sit, eat.", "At 1447.0s, [0.098, 0.242, 0.492, 0.699]: sit, carry/hold (an object), eat.", "At 1446.0s, [0.101, 0.255, 0.519, 0.699]: sit, eat.", "From 1444.0s to 1445.0s, [0.112, 0.318, 0.572, 0.7]->[0.093, 0.28, 0.559, 0.702]: sit, carry/hold (an object), eat.", "At 1443.0s, [0.126, 0.296, 0.566, 0.697]: sit, carry/hold (an object).", "At 1442.0s, [0.126, 0.266, 0.549, 0.698]: sit, carry/hold (an object), read.", "At 1441.0s, [0.115, 0.279, 0.545, 0.693]: sit, carry/hold (an object), eat.", "From 1435.0s to 1440.0s, 
[0.112, 0.262, 0.561, 0.692]->[0.125, 0.253, 0.564, 0.695]->[0.127, 0.258, 0.551, 0.687]->[0.134, 0.265, 0.564, 0.696]->[0.113, 0.241, 0.561, 0.694]->[0.12, 0.257, 0.555, 0.699]: sit, eat.", "At 1434.0s, [0.125, 0.264, 0.55, 0.703]: sit, carry/hold (an object)."]}, {"from": "human", "timestamps": 1677.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.571, 0.184, 0.948, 0.88]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "kAsz-76DTDE.mkv", "clip": [1625, 1702], "conversations": [{"from": "human", "timestamps": 1655.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.083, 0.237, 0.508, 0.69]: sit, answer phone."}, {"from": "human", "timestamps": 1702.0, "value": "For the person at bbox [0.145, 0.277, 0.338, 0.789], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.145, 0.277, 0.338, 0.789]: sit, touch (an object), listen to (a person), watch (a person).", "From 1697.0s to 1701.0s, [0.191, 0.285, 0.368, 0.73]->[0.178, 0.285, 0.365, 0.743]->[0.174, 0.289, 0.36, 0.749]->[0.163, 0.281, 0.354, 0.762]->[0.157, 0.282, 0.349, 0.778]: sit, touch (an object), listen to (a person), watch (a person)."]}]}, {"video": "kAsz-76DTDE.mkv", "clip": [1649, 1725], "conversations": [{"from": "human", "timestamps": 1679.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.579, 0.197, 0.941, 0.879]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1725.0, "value": "For the person at bbox [0.086, 0.197, 0.565, 0.809], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.086, 0.197, 0.565, 0.809]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1719.0s to 1724.0s, [0.086, 0.193, 0.57, 0.824]->[0.078, 0.192, 0.568, 0.819]->[0.089, 0.195, 0.566, 0.821]->[0.078, 0.194, 0.565, 0.824]->[0.083, 0.196, 0.566, 0.814]->[0.084, 0.196, 0.566, 0.806]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1697.0s to 1718.0s, [0.191, 0.285, 0.368, 0.73]->[0.178, 0.285, 0.365, 0.743]->[0.174, 0.289, 0.36, 0.749]->[0.163, 0.281, 0.354, 0.762]->[0.157, 0.282, 0.349, 0.778]->[0.145, 0.277, 0.338, 0.789]->[0.137, 0.273, 0.33, 0.808]->[0.125, 0.28, 0.32, 0.821]->[0.112, 0.282, 0.312, 0.832]->[0.11, 0.274, 0.304, 0.846]->[0.099, 0.269, 0.303, 0.857]->[0.094, 0.267, 0.287, 0.855]->[0.083, 0.265, 0.277, 0.871]->[0.073, 0.27, 0.262, 0.852]->[0.069, 0.27, 0.256, 0.87]->[0.064, 0.267, 0.255, 0.86]->[0.052, 0.258, 0.25, 0.867]->[0.049, 0.257, 0.24, 0.87]->[0.033, 0.263, 0.235, 0.883]->[0.034, 0.258, 0.237, 0.873]->[0.031, 0.258, 0.238, 0.873]->[0.033, 0.255, 0.236, 0.874]: sit, touch (an object), listen to (a person), watch (a person)."]}]}, {"video": "kAsz-76DTDE.mkv", "clip": [1716, 1784], "conversations": [{"from": "human", "timestamps": 1746.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.089, 0.191, 0.557, 0.817]?"}, {"from": "gpt", "value": ["In the current frame, [0.089, 0.191, 0.557, 0.817]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1740.0s to 1745.0s, [0.094, 0.186, 0.555, 0.817]->[0.089, 0.187, 0.554, 
0.812]->[0.093, 0.186, 0.555, 0.815]->[0.093, 0.185, 0.557, 0.822]->[0.091, 0.189, 0.554, 0.809]->[0.091, 0.188, 0.556, 0.82]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1784.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.203, 0.165, 0.951, 0.883]: sit, touch (an object), talk to (e.g., self, a person, a group)."}]}, {"video": "kLDpP9QEVBs.mp4", "clip": [898, 945], "conversations": [{"from": "human", "timestamps": 928.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.283, 0.134, 0.97, 0.864]?"}, {"from": "gpt", "value": ["At Now, [0.283, 0.134, 0.97, 0.864]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 927.0s, [0.294, 0.12, 0.956, 0.848]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 924.0s to 926.0s, [0.275, 0.146, 0.929, 0.852]->[0.257, 0.142, 0.937, 0.852]->[0.259, 0.124, 0.941, 0.854]: sit, talk to (e.g., self, a person, a group).", "At 923.0s, [0.267, 0.133, 0.898, 0.853]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 945.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.047, 0.22, 0.977, 0.868]: listen to (a person), sit, watch (a person)."}]}, {"video": "kLDpP9QEVBs.mp4", "clip": [951, 1001], "conversations": [{"from": "human", "timestamps": 981.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.001, 0.049, 0.297, 0.871]: listen to (a person), stand, watch (a person); [0.273, 0.093, 0.96, 0.859]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1001.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.236, 0.159, 0.992, 0.861]?"}, {"from": "gpt", "value": ["In the current frame, [0.236, 0.159, 0.992, 0.861]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1000.0s, [0.234, 0.142, 0.989, 0.851]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 998.0s to 999.0s, [0.181, 0.125, 0.972, 0.858]->[0.195, 0.16, 0.977, 0.861]: stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "kLDpP9QEVBs.mp4", "clip": [1018, 1078], "conversations": [{"from": "human", "timestamps": 1048.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.351, 0.147, 0.932, 0.864]?"}, {"from": "gpt", "value": ["At Now, [0.351, 0.147, 0.932, 0.864]: sit, carry/hold (an object), listen to (a person).", "At 1047.0s, [0.35, 0.135, 0.936, 0.858]: sit, carry/hold (an object), listen to (a person).", "At 1046.0s, [0.463, 0.418, 0.575, 0.637]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1045.0s, [0.466, 0.426, 0.565, 0.626]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1042.0s to 1043.0s, [0.465, 0.459, 0.554, 0.637]->[0.465, 0.447, 0.56, 0.636]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1041.0s, [0.466, 0.477, 0.54, 0.631]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1040.0s, [0.464, 0.491, 0.541, 0.648]: sit, carry/hold (an object), watch (a person).", "From 
1034.0s to 1039.0s, [0.461, 0.519, 0.524, 0.66]->[0.447, 0.519, 0.525, 0.661]->[0.454, 0.517, 0.527, 0.662]->[0.463, 0.518, 0.527, 0.663]->[0.466, 0.515, 0.528, 0.663]->[0.463, 0.506, 0.534, 0.658]: sit, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1078.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.175, 0.429, 0.878]: listen to (a person), sit, watch (a person); [0.457, 0.197, 0.975, 0.859]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "kLDpP9QEVBs.mp4", "clip": [1041, 1090], "conversations": [{"from": "human", "timestamps": 1071.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.11, 0.658, 0.875]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1090.0, "value": "Describe the actions of the person at bbox [0.452, 0.487, 0.763, 0.866] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.452, 0.487, 0.763, 0.866]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1089.0s, [0.476, 0.301, 0.824, 0.87]: sit, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "kLDpP9QEVBs.mp4", "clip": [1061, 1099], "conversations": [{"from": "human", "timestamps": 1091.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.533, 0.585, 0.647, 0.762]: listen to (a person), sit, watch (a person); [0.648, 0.462, 0.788, 0.768]: walk."}, {"from": "human", "timestamps": 1099.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.321, 0.172, 0.78, 0.853]?"}, {"from": "gpt", "value": ["At Now, [0.321, 0.172, 0.78, 0.853]: sit, watch (a person).", "At 1098.0s, [0.432, 0.173, 0.781, 0.865]: sit, listen to (a person), watch (a person)."]}]}, {"video": "kLDpP9QEVBs.mp4", "clip": [1120, 1241], "conversations": [{"from": "human", "timestamps": 1150.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.048, 0.131, 0.381, 0.853]: listen to (a person), stand, watch (a person); [0.485, 0.52, 0.617, 0.825]: listen to (a person), sit; [0.666, 0.298, 0.981, 0.859]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1241.0, "value": "What actions did the person at bbox [0.161, 0.142, 0.747, 0.835] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.161, 0.142, 0.747, 0.835]: sit, answer phone.", "From 1239.0s to 1240.0s, [0.236, 0.14, 0.72, 0.839]->[0.202, 0.131, 0.783, 0.841]: sit, answer phone."]}]}, {"video": "kLDpP9QEVBs.mp4", "clip": [1266, 1312], "conversations": [{"from": "human", "timestamps": 1296.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.127, 0.209, 0.314, 0.675]: listen to (a person), stand; [0.24, 0.27, 0.507, 0.718]: carry/hold (an object), bend/bow (at the waist), listen to (a person); [0.369, 0.213, 0.466, 0.344]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.489, 0.272, 0.728, 0.696]: carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.732, 0.135, 
0.909, 0.607]: listen to (a person), watch (a person), walk."}, {"from": "human", "timestamps": 1312.0, "value": "For the person at bbox [0.135, 0.371, 0.363, 0.761], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.135, 0.371, 0.363, 0.761]: stand, listen to (a person), watch (a person).", "From 1310.0s to 1311.0s, [0.217, 0.355, 0.466, 0.87]->[0.226, 0.375, 0.435, 0.854]: stand, listen to (a person), watch (a person).", "From 1308.0s to 1309.0s, [0.164, 0.339, 0.379, 0.861]->[0.209, 0.335, 0.434, 0.865]: stand, watch (a person)."]}]}, {"video": "kLDpP9QEVBs.mp4", "clip": [1230, 1325], "conversations": [{"from": "human", "timestamps": 1260.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.001, 0.12, 0.366, 0.847]: watch (a person), listen to (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.254, 0.13, 0.915, 0.842]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1325.0, "value": "For the person at bbox [0.016, 0.115, 0.464, 0.811], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.016, 0.115, 0.464, 0.811]: stand, carry/hold (an object), listen to (a person).", "At 1324.0s, [0.026, 0.126, 0.463, 0.829]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1323.0s, [0.043, 0.118, 0.473, 0.846]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1322.0s, [0.032, 0.112, 0.49, 0.86]: stand, carry/hold (an object), watch (a person)."]}]}, {"video": "kLDpP9QEVBs.mp4", "clip": [1309, 1340], "conversations": [{"from": "human", "timestamps": 1339.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.113, 0.145, 0.473]: carry/hold (an object), listen to (a person), walk; [0.096, 0.128, 0.527, 0.855]: carry/hold (an object), stand, listen to (a person); [0.488, 0.284, 0.885, 0.849]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.678, 0.119, 0.877, 0.59]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1340.0, "value": "Describe the actions of the person at bbox [0.116, 0.141, 0.55, 0.856] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.116, 0.141, 0.55, 0.856]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1339.0s, [0.096, 0.128, 0.527, 0.855]: stand, carry/hold (an object), listen to (a person).", "At 1338.0s, [0.135, 0.116, 0.522, 0.848]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1337.0s, [0.132, 0.124, 0.582, 0.846]: stand, carry/hold (an object), listen to (a person).", "At 1336.0s, [0.125, 0.12, 0.569, 0.859]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "kLDpP9QEVBs.mp4", "clip": [1385, 1471], "conversations": [{"from": "human", "timestamps": 1415.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.034, 0.26, 0.444, 0.862]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.401, 0.221, 0.876, 0.873]: carry/hold (an object), sit, listen to (a person)."}, {"from": "human", "timestamps": 1471.0, "value": "Can you 
tell me the current and past actions of the person located in bbox [0.124, 0.27, 0.537, 0.879]?"}, {"from": "gpt", "value": ["In the current frame, [0.124, 0.27, 0.537, 0.879]: walk, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "From 1468.0s to 1470.0s, [0.079, 0.238, 0.483, 0.859]->[0.02, 0.246, 0.592, 0.877]->[0.003, 0.256, 0.559, 0.891]: walk, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1467.0s, [0.181, 0.444, 0.334, 0.685]: walk, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1466.0s, [0.136, 0.425, 0.302, 0.66]: walk, touch (an object), talk to (e.g., self, a person, a group).", "At 1465.0s, [0.226, 0.436, 0.317, 0.645]: walk, carry/hold (an object), climb (e.g., a mountain)."]}]}, {"video": "kLDpP9QEVBs.mp4", "clip": [1407, 1447], "conversations": [{"from": "human", "timestamps": 1437.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.012, 0.126, 0.482, 0.862]: sit, talk to (e.g., self, a person, a group); [0.488, 0.121, 0.972, 0.883]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1447.0, "value": "For the person at bbox [0.383, 0.329, 0.503, 0.754], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.383, 0.329, 0.503, 0.754]: walk.", "From 1445.0s to 1446.0s, [0.266, 0.255, 0.361, 0.663]->[0.446, 0.287, 0.574, 0.829]: walk."]}]}, {"video": "kLDpP9QEVBs.mp4", "clip": [1499, 1543], "conversations": [{"from": "human", "timestamps": 1529.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.026, 0.31, 0.531, 0.87]: carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), walk; [0.515, 0.116, 0.948, 0.874]: listen to (a person), walk."}, {"from": "human", "timestamps": 1543.0, "value": "What actions did the person at bbox [0.02, 0.268, 0.491, 0.879] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.02, 0.268, 0.491, 0.879]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1542.0s, [0.067, 0.28, 0.422, 0.87]: stand, touch (an object), listen to (a person)."]}]}, {"video": "kLDpP9QEVBs.mp4", "clip": [1482, 1563], "conversations": [{"from": "human", "timestamps": 1512.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.028, 0.242, 0.491, 0.861]: carry/hold (an object), watch (a person), talk to (e.g., self, a person, a group), walk; [0.528, 0.164, 0.987, 0.869]: listen to (a person), watch (a person), walk."}, {"from": "human", "timestamps": 1563.0, "value": "Describe the actions of the person at bbox [0.338, 0.214, 0.77, 0.737] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.338, 0.214, 0.77, 0.737]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1562.0s, [0.383, 0.232, 0.768, 0.716]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1561.0s, [0.403, 0.292, 0.765, 0.716]: sit, carry/hold (an object).", "At 1560.0s, [0.398, 0.284, 0.768, 0.703]: sit, carry/hold (an object), touch (an object)."]}]}, {"video": 
"kLDpP9QEVBs.mp4", "clip": [1538, 1594], "conversations": [{"from": "human", "timestamps": 1568.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.188, 0.159, 0.485, 0.871]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1594.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.012, 0.121, 0.897, 0.865]?"}, {"from": "gpt", "value": ["In the current frame, [0.012, 0.121, 0.897, 0.865]: sit, stand, talk to (e.g., self, a person, a group).", "From 1592.0s to 1593.0s, [0.055, 0.114, 0.944, 0.889]->[0.025, 0.104, 0.903, 0.897]: sit, read, talk to (e.g., self, a person, a group).", "From 1590.0s to 1591.0s, [0.232, 0.152, 0.983, 0.865]->[0.213, 0.127, 0.86, 0.855]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1588.0s to 1589.0s, [0.606, 0.287, 0.984, 0.722]->[0.625, 0.295, 0.981, 0.733]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1587.0s, [0.525, 0.275, 0.981, 0.718]: sit, carry/hold (an object), touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1586.0s, [0.535, 0.252, 0.981, 0.72]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1584.0s to 1585.0s, [0.638, 0.255, 0.996, 0.74]->[0.624, 0.236, 0.993, 0.726]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1583.0s, [0.588, 0.374, 0.947, 0.837]: sit, carry/hold (an object), read, touch (an object), listen to (a person).", "At 1582.0s, [0.715, 0.556, 0.988, 0.858]: sit, carry/hold (an object), listen to (a person)."]}]}, {"video": "kLDpP9QEVBs.mp4", "clip": [1586, 1649], "conversations": [{"from": "human", "timestamps": 1616.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.001, 0.134, 0.384, 0.881]: listen to (a person), sit, touch (an object), watch (a person); [0.365, 0.137, 0.94, 0.851]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1649.0, "value": "For the person at bbox [0.597, 0.213, 0.995, 0.711], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.597, 0.213, 0.995, 0.711]: sit, talk to (e.g., self, a person, a group).", "At 1648.0s, [0.658, 0.192, 0.987, 0.707]: sit, talk to (e.g., self, a person, a group).", "At 1647.0s, [0.627, 0.273, 0.986, 0.714]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1646.0s, [0.585, 0.251, 0.927, 0.694]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "From 1644.0s to 1645.0s, [0.803, 0.199, 0.994, 0.88]->[0.585, 0.201, 0.931, 0.713]: sit, touch (an object), listen to (a person), watch (a person).", "From 1642.0s to 1643.0s, [0.77, 0.177, 0.998, 0.885]->[0.768, 0.186, 0.997, 0.87]: sit, listen to (a person), watch (a person).", "From 1640.0s to 1641.0s, [0.087, 0.109, 0.97, 0.883]->[0.056, 0.106, 0.976, 0.9]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1638.0s to 1639.0s, [0.116, 0.115, 0.927, 0.893]->[0.071, 0.107, 0.868, 0.884]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "kLDpP9QEVBs.mp4", "clip": [1654, 1723], "conversations": 
[{"from": "human", "timestamps": 1684.0, "value": "What actions did the person at bbox [0.477, 0.353, 0.713, 0.685] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.477, 0.353, 0.713, 0.685]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1682.0s to 1683.0s, [0.513, 0.357, 0.724, 0.683]->[0.491, 0.352, 0.723, 0.68]: sit, touch (an object), listen to (a person).", "From 1680.0s to 1681.0s, [0.473, 0.347, 0.855, 0.859]->[0.484, 0.351, 0.726, 0.69]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1679.0s, [0.481, 0.356, 0.857, 0.855]: sit, eat, touch (an object), talk to (e.g., self, a person, a group).", "At 1678.0s, [0.529, 0.346, 0.971, 0.879]: sit, eat, touch (an object), listen to (a person).", "From 1676.0s to 1677.0s, [0.498, 0.349, 0.974, 0.869]->[0.531, 0.346, 0.982, 0.865]: sit, eat, touch (an object), listen to (a person), watch (a person).", "From 1671.0s to 1675.0s, [0.569, 0.347, 0.937, 0.864]->[0.595, 0.39, 0.966, 0.862]->[0.545, 0.38, 0.976, 0.873]->[0.506, 0.357, 0.955, 0.867]->[0.5, 0.343, 0.972, 0.87]: sit, eat, touch (an object), listen to (a person).", "From 1667.0s to 1670.0s, [0.167, 0.242, 0.562, 0.778]->[0.232, 0.33, 0.629, 0.857]->[0.376, 0.345, 0.753, 0.863]->[0.528, 0.362, 0.993, 0.871]: sit, touch (an object), listen to (a person)."]}, {"from": "human", "timestamps": 1723.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.008, 0.189, 0.422, 0.876]: carry/hold (an object), stand, listen to (a person); [0.38, 0.217, 0.765, 0.858]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "kMy-6RtoOVU.mkv", "clip": [945, 1001], "conversations": [{"from": "human", "timestamps": 975.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.474, 0.107, 0.973, 0.992]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1001.0, "value": "Describe the actions of the person at bbox [0.705, 0.268, 0.979, 0.984] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.705, 0.268, 0.979, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 996.0s to 1000.0s, [0.621, 0.248, 0.909, 0.988]->[0.722, 0.272, 0.984, 0.984]->[0.719, 0.272, 0.983, 0.982]->[0.713, 0.275, 0.976, 0.984]->[0.704, 0.274, 0.977, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 995.0s, [0.572, 0.266, 0.831, 0.986]: stand, watch (a person).", "At 994.0s, [0.475, 0.336, 0.778, 0.997]: run/jog, watch (a person)."]}]}, {"video": "kMy-6RtoOVU.mkv", "clip": [957, 1020], "conversations": [{"from": "human", "timestamps": 987.0, "value": "What actions is the person in bbox [0.537, 0.004, 0.99, 0.991] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.537, 0.004, 0.99, 0.991]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 986.0s, [0.46, 0.008, 1.0, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 982.0s to 985.0s, [0.486, 0.0, 0.994, 0.998]->[0.486, 0.0, 0.969, 0.991]->[0.513, 0.0, 0.991, 0.997]->[0.497, 0.005, 1.0, 0.994]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1020.0, "value": "Where is each person positioned, and what actions are 
they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.278, 0.084, 0.7, 0.994]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.641, 0.268, 0.935, 0.983]: listen to (a person), stand, watch (a person)."}]}, {"video": "kMy-6RtoOVU.mkv", "clip": [1033, 1077], "conversations": [{"from": "human", "timestamps": 1063.0, "value": "What actions is the person in bbox [0.114, 0.008, 0.643, 0.974] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.114, 0.008, 0.643, 0.974]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1061.0s to 1062.0s, [0.511, 0.086, 0.943, 0.989]->[0.514, 0.083, 0.914, 0.988]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1060.0s, [0.44, 0.0, 0.999, 0.995]: sit, carry/hold (an object).", "At 1059.0s, [0.515, 0.049, 0.995, 0.999]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1058.0s, [0.518, 0.057, 0.995, 0.982]: sit, carry/hold (an object).", "At 1057.0s, [0.459, 0.011, 0.918, 1.0]: sit, pull (an object).", "At 1056.0s, [0.505, 0.0, 0.996, 0.987]: sit, watch (a person)."]}, {"from": "human", "timestamps": 1077.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.603, 0.021, 0.999, 0.994]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "kMy-6RtoOVU.mkv", "clip": [1188, 1242], "conversations": [{"from": "human", "timestamps": 1218.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.029, 0.047, 0.475, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1242.0, "value": "What actions did the person at bbox [0.0, 0.334, 0.241, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.334, 0.241, 1.0]: stand, listen to (a person), watch (a person).", "At 1241.0s, [0.002, 0.36, 0.271, 0.996]: stand, listen to (a person), watch (a person)."]}]}, {"video": "kMy-6RtoOVU.mkv", "clip": [1245, 1363], "conversations": [{"from": "human", "timestamps": 1275.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.302, 0.087, 0.498, 0.956]: open (e.g., a window, a car door), grab (a person), walk; [0.428, 0.151, 0.559, 0.995]: carry/hold (an object), grab (a person), walk."}, {"from": "human", "timestamps": 1363.0, "value": "What actions did the person at bbox [0.005, 0.0, 0.664, 0.996] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.005, 0.0, 0.664, 0.996]: bend/bow (at the waist), grab (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1362.0s, [0.004, 0.0, 0.657, 0.963]: bend/bow (at the waist), fight/hit (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "kMy-6RtoOVU.mkv", "clip": [1459, 1504], "conversations": [{"from": "human", "timestamps": 1489.0, "value": "What actions did the person at bbox [0.517, 0.012, 0.941, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.517, 0.012, 0.941, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1487.0s to 1488.0s, [0.548, 0.001, 0.896, 0.996]->[0.531, 0.013, 0.889, 0.994]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1504.0, "value": "For the current frame, 
show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.241, 0.171, 0.649, 0.997]: carry/hold (an object), stand."}]}, {"video": "kMy-6RtoOVU.mkv", "clip": [1541, 1575], "conversations": [{"from": "human", "timestamps": 1571.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.051, 0.144, 1.0, 0.996]: fall down."}, {"from": "human", "timestamps": 1575.0, "value": "For the person at bbox [0.287, 0.0, 0.969, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.287, 0.0, 0.969, 1.0]: stand.", "At 1574.0s, [0.233, 0.0, 0.899, 0.965]: stand."]}]}, {"video": "kMy-6RtoOVU.mkv", "clip": [1713, 1754], "conversations": [{"from": "human", "timestamps": 1743.0, "value": "What actions is the person in bbox [0.058, 0.003, 0.828, 0.986] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.058, 0.003, 0.828, 0.986]: stand, talk to (e.g., self, a person, a group).", "From 1740.0s to 1742.0s, [0.125, 0.0, 0.879, 0.975]->[0.15, 0.003, 0.879, 0.975]->[0.086, 0.0, 0.839, 0.983]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1754.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.152, 0.388, 0.987]: listen to (a person), stand, watch (a person); [0.365, 0.0, 0.897, 1.0]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "kplbKz3_fZk.mkv", "clip": [879, 968], "conversations": [{"from": "human", "timestamps": 909.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.048, 0.521, 0.167, 0.814]: listen to (a person), sit, watch (a person); [0.243, 0.52, 0.331, 0.852]: listen to (a person), stand, watch (a person); [0.449, 0.671, 0.534, 0.886]: sit, watch (a person), listen to (a person), carry/hold (an object), talk to (e.g., self, a person, a group); [0.659, 0.565, 0.749, 0.899]: listen to (a person), stand, watch (a person); [0.724, 0.592, 0.907, 0.93]: watch (a person), sit."}, {"from": "human", "timestamps": 968.0, "value": "What actions is the person in bbox [0.015, 0.135, 0.343, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.015, 0.135, 0.343, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 966.0s to 967.0s, [0.069, 0.138, 0.349, 0.987]->[0.061, 0.116, 0.355, 0.996]: stand, listen to (a person), watch (a person)."]}]}, {"video": "kplbKz3_fZk.mkv", "clip": [941, 989], "conversations": [{"from": "human", "timestamps": 971.0, "value": "Describe the actions of the person at bbox [0.613, 0.042, 0.821, 0.991] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.613, 0.042, 0.821, 0.991]: stand, listen to (a person), watch (a person).", "From 962.0s to 970.0s, [0.741, 0.443, 0.839, 0.955]->[0.726, 0.44, 0.833, 0.946]->[0.726, 0.433, 0.826, 0.916]->[0.711, 0.426, 0.828, 0.957]->[0.634, 0.029, 0.817, 0.992]->[0.616, 0.033, 0.83, 0.985]->[0.615, 0.047, 0.829, 0.997]->[0.596, 0.033, 0.813, 0.992]->[0.626, 0.041, 0.821, 0.979]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 989.0, "value": "Describe 
the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.013, 0.005, 0.718, 1.0]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "kplbKz3_fZk.mkv", "clip": [1028, 1105], "conversations": [{"from": "human", "timestamps": 1058.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.015, 0.043, 0.717, 1.0]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1105.0, "value": "For the person at bbox [0.786, 0.168, 0.994, 0.953], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.786, 0.168, 0.994, 0.953]: walk, listen to (a person), watch (a person).", "At 1104.0s, [0.865, 0.166, 1.0, 0.963]: walk, listen to (a person)."]}]}, {"video": "kplbKz3_fZk.mkv", "clip": [1111, 1158], "conversations": [{"from": "human", "timestamps": 1141.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.6, 0.076, 0.986]: stand, touch (an object); [0.298, 0.599, 0.386, 0.96]: watch (a person), stand, touch (an object), talk to (e.g., self, a person, a group); [0.548, 0.623, 0.623, 0.9]: carry/hold (an object), walk; [0.713, 0.686, 0.758, 0.932]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.778, 0.7, 0.848, 0.885]: carry/hold (an object), crouch/kneel, watch (a person), listen to (a person); [0.879, 0.655, 0.925, 0.909]: watch (a person), walk; [0.926, 0.534, 1.0, 0.995]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1158.0, "value": "For the person at bbox [0.503, 0.294, 0.757, 0.999], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.503, 0.294, 0.757, 0.999]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1157.0s, [0.469, 0.319, 0.678, 0.981]: walk, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "kplbKz3_fZk.mkv", "clip": [1151, 1250], "conversations": [{"from": "human", "timestamps": 1181.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.247, 0.055, 0.927, 0.967]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1250.0, "value": "What actions is the person in bbox [0.129, 0.246, 0.357, 0.993] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.129, 0.246, 0.357, 0.993]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1249.0s, [0.501, 0.35, 0.614, 0.778]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "kplbKz3_fZk.mkv", "clip": [1230, 1318], "conversations": [{"from": "human", "timestamps": 1260.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.007, 0.198, 0.975]: listen to (a person), stand, watch (a person); [0.265, 0.053, 0.973, 0.962]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1318.0, "value": "What actions is the person in bbox [0.087, 0.075, 0.558, 0.954] doing 
in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.087, 0.075, 0.558, 0.954]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1314.0s to 1317.0s, [0.037, 0.082, 0.643, 0.976]->[0.05, 0.057, 0.617, 0.976]->[0.075, 0.066, 0.545, 0.965]->[0.047, 0.055, 0.611, 0.977]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "kplbKz3_fZk.mkv", "clip": [1259, 1338], "conversations": [{"from": "human", "timestamps": 1289.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.491, 0.625, 0.6, 0.834]: watch (a person), sit; [0.635, 0.304, 0.708, 0.661]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.694, 0.324, 0.759, 0.697]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1338.0, "value": "What actions is the person in bbox [0.667, 0.384, 0.844, 0.928] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.667, 0.384, 0.844, 0.928]: stand, drink, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1337.0s, [0.841, 0.351, 0.995, 0.988]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "kplbKz3_fZk.mkv", "clip": [1298, 1373], "conversations": [{"from": "human", "timestamps": 1328.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.486, 0.0, 1.0, 0.978]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1373.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.545, 0.655, 0.634, 1.0]?"}, {"from": "gpt", "value": ["At Now, [0.545, 0.655, 0.634, 1.0]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1372.0s, [0.529, 0.669, 0.626, 0.997]: walk, carry/hold (an object)."]}]}, {"video": "kplbKz3_fZk.mkv", "clip": [1351, 1404], "conversations": [{"from": "human", "timestamps": 1381.0, "value": "Describe the actions of the person at bbox [0.25, 0.366, 0.339, 0.593] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.25, 0.366, 0.339, 0.593]: stand, listen to (a person), watch (a person).", "From 1379.0s to 1380.0s, [0.252, 0.376, 0.341, 0.616]->[0.25, 0.371, 0.338, 0.593]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1404.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.456, 0.747, 0.528, 0.995]: listen to (a person), stand, watch (a person); [0.522, 0.723, 0.597, 0.996]: watch (a person), stand; [0.581, 0.724, 0.659, 0.99]: watch (a person), stand; [0.773, 0.645, 0.899, 0.928]: listen to (a person), stand, touch (an object), watch (a person); [0.904, 0.6, 0.999, 0.843]: listen to (a person), stand, watch (a person), drink."}]}, {"video": "kplbKz3_fZk.mkv", "clip": [1406, 1473], "conversations": [{"from": "human", "timestamps": 1436.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.03, 0.523, 0.261, 1.0]: watch (a person), stand; [0.235, 0.329, 0.316, 0.606]: walk; [0.279, 0.39, 0.416, 0.994]: watch (a person), walk; [0.361, 0.358, 
0.455, 0.98]: listen to (a person), stand, watch (a person); [0.459, 0.331, 0.522, 0.79]: watch (a person), walk; [0.476, 0.393, 0.62, 0.987]: watch (a person), drink, walk; [0.61, 0.394, 0.736, 0.938]: watch (a person), stand; [0.67, 0.543, 0.917, 0.969]: walk; [0.826, 0.371, 0.959, 0.892]: walk."}, {"from": "human", "timestamps": 1473.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.098, 0.396, 0.244, 0.938]?"}, {"from": "gpt", "value": ["At Now, [0.098, 0.396, 0.244, 0.938]: stand, watch (a person).", "At 1472.0s, [0.085, 0.4, 0.227, 0.931]: stand, watch (a person).", "At 1471.0s, [0.092, 0.413, 0.225, 0.995]: stand, listen to (a person), watch (a person)."]}]}, {"video": "kplbKz3_fZk.mkv", "clip": [1442, 1497], "conversations": [{"from": "human", "timestamps": 1472.0, "value": "What actions is the person in bbox [0.619, 0.408, 0.789, 0.985] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.619, 0.408, 0.789, 0.985]: stand, listen to (a person).", "From 1470.0s to 1471.0s, [0.597, 0.427, 0.761, 1.0]->[0.613, 0.409, 0.774, 0.993]: stand, listen to (a person)."]}, {"from": "human", "timestamps": 1497.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.003, 0.426, 0.295, 1.0]: watch (a person), stand; [0.23, 0.286, 0.327, 0.701]: watch (a person), stand; [0.283, 0.299, 0.774, 0.99]: watch (a person), drink, walk; [0.289, 0.225, 0.456, 0.995]: watch (a person), stand; [0.651, 0.405, 0.788, 0.877]: stand; [0.825, 0.004, 1.0, 1.0]: watch (a person), stand."}]}, {"video": "kplbKz3_fZk.mkv", "clip": [1471, 1520], "conversations": [{"from": "human", "timestamps": 1501.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.044, 0.032, 0.632, 0.93]: watch (a person), stand; [0.753, 0.026, 0.999, 0.89]: watch (a person), stand."}, {"from": "human", "timestamps": 1520.0, "value": "Describe the actions of the person at bbox [0.124, 0.003, 0.833, 0.992] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.124, 0.003, 0.833, 0.992]: stand, watch (a person).", "At 1519.0s, [0.638, 0.247, 0.905, 0.977]: stand, carry/hold (an object), watch (a person).", "At 1518.0s, [0.631, 0.227, 0.916, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "kplbKz3_fZk.mkv", "clip": [1510, 1560], "conversations": [{"from": "human", "timestamps": 1540.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.067, 0.549, 0.194, 0.995]: carry/hold (an object), stand, watch (a person), drink; [0.179, 0.711, 0.358, 0.994]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.188, 0.517, 0.299, 0.829]: listen to (a person), stand, watch (a person); [0.626, 0.353, 0.657, 0.564]: stand; [0.658, 0.399, 0.739, 0.761]: watch (a person), stand; [0.719, 0.414, 0.812, 0.818]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.875, 0.408, 0.946, 0.754]: watch (a person), stand, drink; [0.894, 0.396, 1.0, 0.931]: watch (a person), stand, drink."}, {"from": "human", "timestamps": 1560.0, "value": "What actions is the person in bbox [0.034, 0.085, 0.504, 0.972] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.034, 0.085, 0.504, 0.972]: stand, 
talk to (e.g., self, a person, a group).", "From 1552.0s to 1559.0s, [0.084, 0.285, 0.496, 0.994]->[0.069, 0.229, 0.498, 1.0]->[0.04, 0.189, 0.48, 0.97]->[0.019, 0.18, 0.479, 0.986]->[0.023, 0.158, 0.525, 0.969]->[0.037, 0.143, 0.495, 0.967]->[0.025, 0.138, 0.519, 0.987]->[0.037, 0.099, 0.528, 0.973]: stand, talk to (e.g., self, a person, a group).", "From 1550.0s to 1551.0s, [0.148, 0.418, 0.535, 0.997]->[0.133, 0.313, 0.466, 0.972]: stand."]}]}, {"video": "kplbKz3_fZk.mkv", "clip": [1533, 1589], "conversations": [{"from": "human", "timestamps": 1563.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.173, 0.012, 0.924, 0.964]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1589.0, "value": "Describe the actions of the person at bbox [0.221, 0.0, 0.869, 0.967] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.221, 0.0, 0.869, 0.967]: stand.", "From 1587.0s to 1588.0s, [0.136, 0.003, 0.889, 0.99]->[0.081, 0.011, 0.911, 0.969]: stand, listen to (a person)."]}]}, {"video": "kplbKz3_fZk.mkv", "clip": [1594, 1671], "conversations": [{"from": "human", "timestamps": 1624.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.004, 0.441, 0.212, 0.996]: watch (a person), stand, drink; [0.128, 0.43, 0.197, 0.828]: stand; [0.207, 0.241, 0.528, 0.979]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.842, 0.36, 0.99, 0.975]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1671.0, "value": "What actions is the person in bbox [0.46, 0.063, 1.0, 0.948] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.46, 0.063, 1.0, 0.948]: stand, listen to (a person), watch (a person).", "At 1670.0s, [0.418, 0.107, 1.0, 0.956]: stand, listen to (a person), watch (a person)."]}]}, {"video": "kplbKz3_fZk.mkv", "clip": [1667, 1724], "conversations": [{"from": "human", "timestamps": 1697.0, "value": "Describe the actions of the person at bbox [0.06, 0.012, 0.874, 0.963] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.06, 0.012, 0.874, 0.963]: stand, listen to (a person), watch (a person).", "From 1694.0s to 1696.0s, [0.031, 0.005, 0.649, 0.981]->[0.034, 0.023, 0.796, 0.955]->[0.037, 0.019, 0.767, 0.996]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1724.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.248, 0.0, 0.819, 0.948]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "kplbKz3_fZk.mkv", "clip": [1725, 1758], "conversations": [{"from": "human", "timestamps": 1755.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.037, 0.02, 0.628, 1.0]: listen to (a person), stand, watch (a person); [0.419, 0.006, 0.999, 0.991]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1758.0, "value": "What actions did the person at bbox [0.859, 0.025, 1.0, 0.746] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.859, 0.025, 1.0, 0.746]: stand, watch (a 
person).", "From 1747.0s to 1757.0s, [0.554, 0.002, 0.997, 0.983]->[0.553, 0.0, 0.999, 0.995]->[0.423, 0.003, 1.0, 0.984]->[0.443, 0.0, 0.991, 0.983]->[0.432, 0.0, 1.0, 0.988]->[0.439, 0.0, 1.0, 0.992]->[0.439, 0.001, 0.994, 0.987]->[0.405, 0.004, 1.0, 0.994]->[0.419, 0.006, 0.999, 0.991]->[0.423, 0.005, 1.0, 0.983]->[0.819, 0.028, 1.0, 0.882]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "l-jxh8gpxuY.mkv", "clip": [922, 953], "conversations": [{"from": "human", "timestamps": 952.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.001, 0.22, 0.107, 0.555]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.017, 0.182, 0.527, 0.981]: carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.442, 0.062, 1.0, 0.995]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 953.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.446, 0.045, 1.0, 1.0]?"}, {"from": "gpt", "value": ["In the current frame, [0.446, 0.045, 1.0, 1.0]: stand, listen to (a person), watch (a person).", "From 949.0s to 952.0s, [0.422, 0.006, 0.99, 0.984]->[0.476, 0.021, 0.997, 0.982]->[0.444, 0.051, 0.996, 1.0]->[0.442, 0.062, 1.0, 0.995]: stand, listen to (a person), watch (a person).", "From 947.0s to 948.0s, [0.493, 0.058, 1.0, 0.995]->[0.444, 0.017, 0.993, 0.981]: walk, listen to (a person), watch (a person).", "At 946.0s, [0.388, 0.093, 0.876, 0.994]: walk, listen to (a person).", "At 945.0s, [0.499, 0.157, 0.849, 0.984]: stand, listen to (a person), watch (a person)."]}]}, {"video": "l-jxh8gpxuY.mkv", "clip": [896, 978], "conversations": [{"from": "human", "timestamps": 926.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.059, 0.004, 0.715, 0.991]: listen to (a person), stand, talk to (e.g., self, a person, a group); [0.579, 0.083, 0.997, 0.996]: listen to (a person), stand."}, {"from": "human", "timestamps": 978.0, "value": "What actions is the person in bbox [0.198, 0.11, 0.717, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.198, 0.11, 0.717, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 977.0s, [0.209, 0.148, 0.652, 0.99]: stand, grab (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 974.0s to 975.0s, [0.19, 0.125, 0.575, 0.995]->[0.184, 0.123, 0.595, 0.989]: stand, listen to (a person), watch (a person).", "At 973.0s, [0.154, 0.117, 0.561, 0.999]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 971.0s to 972.0s, [0.201, 0.264, 0.572, 0.994]->[0.167, 0.23, 0.586, 0.985]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "l-jxh8gpxuY.mkv", "clip": [999, 1080], "conversations": [{"from": "human", "timestamps": 1029.0, "value": "What actions is the person in bbox [0.0, 0.172, 0.549, 0.989] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.172, 0.549, 0.989]: stand, watch (a person).", "At 1028.0s, [0.388, 0.211, 0.667, 0.997]: walk, watch (a person).", "At 1027.0s, [0.25, 0.008, 0.928, 1.0]: bend/bow (at the waist), watch (a person)."]}, {"from": "human", "timestamps": 1080.0, "value": "In the current frame, list each 
person's location and actions."}, {"from": "gpt", "value": "At now, [0.028, 0.048, 0.611, 0.985]: carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.461, 0.008, 0.999, 0.992]: listen to (a person), stand, watch (a person)."}]}, {"video": "l-jxh8gpxuY.mkv", "clip": [1028, 1110], "conversations": [{"from": "human", "timestamps": 1058.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.249, 0.377, 0.955]: carry/hold (an object), listen to (a person), walk; [0.708, 0.111, 1.0, 0.759]: watch (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1110.0, "value": "For the person at bbox [0.0, 0.158, 0.622, 0.992], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.158, 0.622, 0.992]: stand, listen to (a person), watch (a person).", "At 1109.0s, [0.004, 0.153, 0.622, 0.999]: stand, listen to (a person), watch (a person).", "From 1107.0s to 1108.0s, [0.062, 0.06, 0.791, 0.984]->[0.0, 0.083, 0.721, 0.985]: walk."]}]}, {"video": "l-jxh8gpxuY.mkv", "clip": [1058, 1089], "conversations": [{"from": "human", "timestamps": 1088.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.055, 0.099, 0.665, 0.978]: carry/hold (an object), stand, listen to (a person); [0.413, 0.003, 0.979, 0.993]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1089.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.452, 0.01, 0.975, 0.998]?"}, {"from": "gpt", "value": ["At Now, [0.452, 0.01, 0.975, 0.998]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1088.0s, [0.413, 0.003, 0.979, 0.993]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1084.0s to 1086.0s, [0.407, 0.009, 0.991, 1.0]->[0.471, 0.0, 0.968, 1.0]->[0.385, 0.0, 0.969, 1.0]: stand, listen to (a person), watch (a person).", "At 1083.0s, [0.541, 0.004, 1.0, 0.993]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1082.0s, [0.389, 0.002, 0.966, 1.0]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1078.0s to 1081.0s, [0.482, 0.0, 0.992, 0.979]->[0.376, 0.006, 0.979, 0.989]->[0.461, 0.008, 0.999, 0.992]->[0.434, 0.0, 0.976, 0.997]: stand, listen to (a person), watch (a person).", "At 1077.0s, [0.501, 0.0, 0.991, 0.978]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1075.0s to 1076.0s, [0.522, 0.013, 0.988, 0.984]->[0.506, 0.0, 0.996, 0.987]: stand, listen to (a person), watch (a person).", "From 1073.0s to 1074.0s, [0.491, 0.021, 0.986, 0.999]->[0.544, 0.023, 0.999, 0.99]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1072.0s, [0.498, 0.052, 0.988, 0.991]: stand, talk to (e.g., self, a person, a group).", "From 1069.0s to 1071.0s, [0.54, 0.103, 0.981, 0.991]->[0.401, 0.091, 0.989, 0.982]->[0.461, 0.065, 1.0, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1068.0s, [0.434, 0.147, 0.99, 0.994]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1067.0s, [0.434, 0.14, 0.995, 0.984]: walk, listen to (a person), talk to (e.g., self, a person, a group).", "At 1066.0s, [0.382, 0.089, 0.996, 0.97]: walk, listen to (a person)."]}]}, {"video": "l-jxh8gpxuY.mkv", "clip": [1130, 
1195], "conversations": [{"from": "human", "timestamps": 1160.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.003, 0.002, 0.292, 0.996]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.208, 0.038, 1.0, 0.981]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1195.0, "value": "What actions is the person in bbox [0.288, 0.142, 0.727, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.288, 0.142, 0.727, 1.0]: sit, smoke, talk to (e.g., self, a person, a group).", "At 1194.0s, [0.276, 0.131, 0.757, 0.991]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1193.0s, [0.148, 0.126, 0.856, 0.984]: bend/bow (at the waist), carry/hold (an object).", "At 1192.0s, [0.207, 0.0, 0.745, 0.983]: walk, carry/hold (an object).", "At 1191.0s, [0.0, 0.166, 0.805, 0.986]: get up.", "At 1190.0s, [0.016, 0.029, 0.935, 0.986]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "l-jxh8gpxuY.mkv", "clip": [1119, 1248], "conversations": [{"from": "human", "timestamps": 1149.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.004, 0.024, 0.337, 0.992]: walk; [0.442, 0.031, 1.0, 0.982]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1248.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.008, 0.458, 0.415, 0.999]?"}, {"from": "gpt", "value": ["In the current frame, [0.008, 0.458, 0.415, 0.999]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1247.0s, [0.0, 0.338, 0.44, 0.992]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1246.0s, [0.046, 0.325, 0.445, 0.998]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1245.0s, [0.028, 0.305, 0.392, 0.996]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1243.0s, [0.054, 0.29, 0.419, 0.952]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1242.0s, [0.16, 0.335, 0.495, 0.867]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1241.0s, [0.186, 0.15, 0.577, 0.917]: bend/bow (at the waist), carry/hold (an object), watch (a person)."]}]}, {"video": "l-jxh8gpxuY.mkv", "clip": [1256, 1360], "conversations": [{"from": "human", "timestamps": 1286.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.068, 0.006, 0.965, 0.99]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1360.0, "value": "What actions did the person at bbox [0.0, 0.073, 0.587, 0.964] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.073, 0.587, 0.964]: stand, watch (a person).", "At 1359.0s, [0.008, 0.048, 0.564, 0.999]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1358.0s, [0.053, 0.075, 0.58, 0.988]: stand, carry/hold (an object), give/serve (an object) to (a person), listen to (a person).", "At 1357.0s, [0.12, 0.131, 0.577, 0.994]: stand, carry/hold (an object), open (e.g., a window, a car door).", "At 1356.0s, 
[0.219, 0.12, 0.594, 0.988]: stand, carry/hold (an object).", "At 1355.0s, [0.223, 0.145, 0.532, 0.988]: stand, watch (a person).", "At 1354.0s, [0.273, 0.165, 0.58, 0.986]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1353.0s, [0.311, 0.183, 0.562, 0.936]: walk.", "At 1351.0s, [0.402, 0.167, 0.566, 0.744]: walk.", "At 1350.0s, [0.415, 0.185, 0.554, 0.696]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1349.0s, [0.462, 0.219, 0.578, 0.666]: walk, talk to (e.g., self, a person, a group).", "At 1348.0s, [0.494, 0.232, 0.588, 0.656]: walk, listen to (a person)."]}]}, {"video": "l-jxh8gpxuY.mkv", "clip": [1318, 1362], "conversations": [{"from": "human", "timestamps": 1348.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.494, 0.232, 0.588, 0.656]: listen to (a person), walk; [0.579, 0.227, 0.677, 0.653]: walk."}, {"from": "human", "timestamps": 1362.0, "value": "Describe the actions of the person at bbox [0.438, 0.017, 0.978, 0.976] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.438, 0.017, 0.978, 0.976]: stand, smoke, watch (a person).", "At 1361.0s, [0.285, 0.007, 1.0, 0.98]: stand, smoke."]}]}, {"video": "l-jxh8gpxuY.mkv", "clip": [1356, 1412], "conversations": [{"from": "human", "timestamps": 1386.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.014, 0.363, 0.4, 0.993]: sit, talk to (e.g., self, a person, a group); [0.519, 0.001, 0.913, 1.0]: listen to (a person), stand."}, {"from": "human", "timestamps": 1412.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.018, 0.017, 0.765, 0.972]?"}, {"from": "gpt", "value": ["At Now, [0.018, 0.017, 0.765, 0.972]: stand, listen to (a person), watch (a person).", "From 1410.0s to 1411.0s, [0.022, 0.049, 0.757, 0.98]->[0.025, 0.052, 0.745, 0.98]: stand, watch (a person).", "At 1409.0s, [0.023, 0.167, 0.671, 1.0]: get up, talk to (e.g., self, a person, a group), watch (a person).", "At 1408.0s, [0.01, 0.024, 0.685, 0.988]: sit, talk to (e.g., self, a person, a group).", "At 1407.0s, [0.017, 0.031, 0.661, 0.998]: stand.", "From 1403.0s to 1406.0s, [0.0, 0.005, 0.674, 1.0]->[0.014, 0.04, 0.667, 0.999]->[0.018, 0.034, 0.664, 0.991]->[0.015, 0.032, 0.656, 0.991]: stand, talk to (e.g., self, a person, a group).", "At 1402.0s, [0.009, 0.005, 0.701, 1.0]: stand.", "At 1401.0s, [0.003, 0.019, 0.701, 0.989]: stand, talk to (e.g., self, a person, a group).", "From 1399.0s to 1400.0s, [0.079, 0.111, 0.668, 0.993]->[0.015, 0.028, 0.682, 0.995]: sit, stand, talk to (e.g., self, a person, a group).", "From 1396.0s to 1398.0s, [0.002, 0.336, 0.386, 0.996]->[0.002, 0.33, 0.388, 0.998]->[0.034, 0.244, 0.499, 1.0]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "l-jxh8gpxuY.mkv", "clip": [1364, 1475], "conversations": [{"from": "human", "timestamps": 1394.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.003, 0.341, 0.384, 0.994]: watch (a person), sit, talk to (e.g., self, a person, a group), stand; [0.547, 0.0, 0.919, 0.994]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1475.0, "value": "What actions is the person in bbox [0.019, 0.085, 0.863, 0.978] doing in the current frame, and what were the person previous 
actions?"}, {"from": "gpt", "value": ["In the current frame, [0.019, 0.085, 0.863, 0.978]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1474.0s, [0.005, 0.091, 0.891, 0.983]: sit, talk to (e.g., self, a person, a group).", "At 1473.0s, [0.052, 0.089, 0.924, 0.989]: sit."]}]}, {"video": "l-jxh8gpxuY.mkv", "clip": [1439, 1481], "conversations": [{"from": "human", "timestamps": 1469.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.013, 0.002, 0.994, 0.979]: walk."}, {"from": "human", "timestamps": 1481.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.091, 0.033, 0.981, 0.978]?"}, {"from": "gpt", "value": ["At Now, [0.091, 0.033, 0.981, 0.978]: stand, talk to (e.g., self, a person, a group).", "At 1480.0s, [0.07, 0.016, 1.0, 0.981]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "l-jxh8gpxuY.mkv", "clip": [1486, 1530], "conversations": [{"from": "human", "timestamps": 1516.0, "value": "For the person at bbox [0.021, 0.116, 0.491, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.021, 0.116, 0.491, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1515.0s, [0.02, 0.123, 0.495, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1514.0s, [0.019, 0.131, 0.503, 0.999]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1530.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.068, 0.697, 0.989]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "l-jxh8gpxuY.mkv", "clip": [1489, 1608], "conversations": [{"from": "human", "timestamps": 1519.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.15, 0.026, 0.994, 0.973]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1608.0, "value": "What actions did the person at bbox [0.007, 0.283, 0.52, 0.982] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.007, 0.283, 0.52, 0.982]: stand, listen to (a person).", "At 1607.0s, [0.007, 0.294, 0.511, 0.983]: sit, listen to (a person).", "At 1606.0s, [0.014, 0.279, 0.514, 0.972]: stand, listen to (a person).", "At 1605.0s, [0.015, 0.289, 0.525, 0.977]: sit, listen to (a person).", "At 1604.0s, [0.016, 0.293, 0.531, 0.985]: sit, stand, listen to (a person).", "From 1601.0s to 1603.0s, [0.014, 0.306, 0.569, 0.981]->[0.005, 0.3, 0.549, 0.976]->[0.011, 0.291, 0.534, 0.986]: sit, smoke, listen to (a person).", "At 1600.0s, [0.005, 0.288, 0.566, 0.988]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1599.0s, [0.009, 0.291, 0.552, 0.979]: listen to (a person).", "From 1591.0s to 1598.0s, [0.003, 0.191, 0.714, 0.983]->[0.008, 0.192, 0.692, 0.987]->[0.001, 0.2, 0.677, 0.986]->[0.006, 0.204, 0.687, 0.987]->[0.0, 0.182, 0.671, 0.985]->[0.0, 0.19, 0.683, 0.989]->[0.004, 0.243, 0.582, 0.988]->[0.007, 0.286, 0.584, 0.984]: sit, listen to (a person).", "From 1589.0s to 1590.0s, [0.002, 0.241, 0.697, 0.975]->[0.0, 0.207, 0.715, 0.982]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "From 1579.0s to 1588.0s, [0.006, 0.216, 0.678, 0.987]->[0.001, 0.227, 0.622, 
0.974]->[0.003, 0.23, 0.668, 1.0]->[0.006, 0.24, 0.659, 0.974]->[0.011, 0.245, 0.671, 0.979]->[0.012, 0.257, 0.674, 0.985]->[0.007, 0.268, 0.678, 0.986]->[0.003, 0.264, 0.713, 0.98]->[0.007, 0.26, 0.682, 0.98]->[0.003, 0.256, 0.694, 0.981]: sit, listen to (a person).", "At 1578.0s, [0.002, 0.238, 0.672, 0.99]: sit, carry/hold (an object), listen to (a person).", "At 1577.0s, [0.027, 0.242, 0.638, 0.988]: sit, smoke, listen to (a person).", "At 1576.0s, [0.016, 0.278, 0.561, 0.974]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1575.0s, [0.016, 0.281, 0.552, 0.975]: sit, stand, carry/hold (an object), listen to (a person).", "At 1574.0s, [0.014, 0.288, 0.561, 0.977]: sit, listen to (a person).", "From 1572.0s to 1573.0s, [0.007, 0.313, 0.585, 0.976]->[0.011, 0.276, 0.565, 0.982]: sit, talk to (e.g., self, a person, a group).", "At 1571.0s, [0.013, 0.286, 0.586, 0.983]: sit, listen to (a person).", "At 1570.0s, [0.015, 0.29, 0.572, 0.98]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "From 1568.0s to 1569.0s, [0.015, 0.293, 0.562, 0.98]->[0.01, 0.297, 0.573, 0.989]: sit, talk to (e.g., self, a person, a group).", "At 1567.0s, [0.027, 0.263, 0.536, 0.988]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1566.0s, [0.015, 0.277, 0.518, 0.979]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1565.0s, [0.018, 0.304, 0.573, 0.988]: stand, talk to (e.g., self, a person, a group).", "At 1563.0s, [0.03, 0.325, 0.52, 0.999]: sit, talk to (e.g., self, a person, a group).", "At 1562.0s, [0.054, 0.315, 0.507, 0.991]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1561.0s, [0.047, 0.312, 0.467, 0.992]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "l-jxh8gpxuY.mkv", "clip": [1559, 1605], "conversations": [{"from": "human", "timestamps": 1589.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.551, 0.073, 0.967, 0.967]?"}, {"from": "gpt", "value": ["In the current frame, [0.551, 0.073, 0.967, 0.967]: stand, talk to (e.g., self, a person, a group).", "From 1584.0s to 1588.0s, [0.522, 0.076, 0.94, 0.963]->[0.503, 0.077, 0.949, 0.991]->[0.53, 0.093, 0.961, 0.965]->[0.523, 0.091, 0.959, 0.964]->[0.532, 0.077, 0.96, 0.97]: stand, talk to (e.g., self, a person, a group).", "From 1579.0s to 1583.0s, [0.432, 0.025, 0.79, 0.971]->[0.524, 0.036, 0.947, 0.967]->[0.497, 0.032, 0.92, 0.969]->[0.487, 0.069, 0.901, 0.96]->[0.514, 0.079, 0.922, 0.963]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1578.0s, [0.454, 0.0, 0.911, 0.975]: walk.", "At 1577.0s, [0.542, 0.003, 0.979, 0.983]: walk, listen to (a person).", "At 1576.0s, [0.585, 0.013, 1.0, 0.983]: stand, listen to (a person).", "From 1570.0s to 1575.0s, [0.534, 0.0, 0.998, 0.989]->[0.528, 0.0, 1.0, 1.0]->[0.52, 0.015, 1.0, 0.984]->[0.53, 0.0, 0.999, 0.987]->[0.513, 0.0, 0.997, 0.98]->[0.533, 0.005, 0.997, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1567.0s to 1569.0s, [0.53, 0.0, 1.0, 0.993]->[0.526, 0.009, 1.0, 0.982]->[0.531, 0.0, 1.0, 0.983]: stand, listen to (a person).", "From 1562.0s to 1566.0s, [0.494, 0.108, 0.981, 0.987]->[0.488, 0.072, 0.907, 0.998]->[0.476, 0.035, 0.971, 0.985]->[0.498, 0.012, 1.0, 0.99]->[0.494, 0.0, 0.991, 0.989]: stand, listen to (a person), watch (a person).", "From 1560.0s to 1561.0s, [0.534, 0.029, 0.987, 0.983]->[0.433, 0.069, 0.998, 0.997]: walk, listen to (a person), watch (a person).", "At 1559.0s, [0.426, 
0.0, 1.0, 0.991]: walk, watch (a person).", "At 1558.0s, [0.585, 0.016, 1.0, 1.0]: walk, listen to (a person).", "From 1555.0s to 1557.0s, [0.483, 0.0, 1.0, 0.991]->[0.495, 0.0, 1.0, 0.998]->[0.499, 0.002, 1.0, 0.99]: stand, listen to (a person), watch (a person).", "At 1554.0s, [0.544, 0.027, 1.0, 0.972]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1553.0s, [0.558, 0.01, 0.999, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1550.0s to 1552.0s, [0.232, 0.011, 1.0, 1.0]->[0.478, 0.011, 1.0, 0.99]->[0.474, 0.013, 0.986, 0.995]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1605.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.015, 0.289, 0.525, 0.977]: listen to (a person), sit; [0.506, 0.006, 0.994, 0.996]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "l-jxh8gpxuY.mkv", "clip": [1593, 1632], "conversations": [{"from": "human", "timestamps": 1623.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.55, 0.051, 0.989, 1.0]?"}, {"from": "gpt", "value": ["At Now, [0.55, 0.051, 0.989, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1619.0s to 1622.0s, [0.49, 0.069, 0.992, 0.995]->[0.536, 0.067, 0.994, 0.996]->[0.555, 0.066, 0.992, 0.988]->[0.556, 0.066, 0.983, 0.991]: stand, talk to (e.g., self, a person, a group).", "From 1616.0s to 1618.0s, [0.491, 0.002, 0.979, 1.0]->[0.451, 0.038, 0.986, 0.984]->[0.451, 0.071, 0.983, 0.994]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "From 1612.0s to 1615.0s, [0.497, 0.004, 0.989, 0.997]->[0.481, 0.0, 1.0, 0.995]->[0.499, 0.005, 0.985, 0.99]->[0.441, 0.003, 0.971, 0.993]: stand, talk to (e.g., self, a person, a group).", "At 1611.0s, [0.489, 0.0, 1.0, 0.991]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1610.0s, [0.481, 0.002, 0.978, 0.99]: stand, talk to (e.g., self, a person, a group).", "From 1600.0s to 1608.0s, [0.418, 0.002, 1.0, 0.994]->[0.53, 0.014, 0.981, 0.989]->[0.52, 0.0, 1.0, 1.0]->[0.509, 0.004, 1.0, 1.0]->[0.508, 0.004, 0.991, 0.991]->[0.506, 0.006, 0.994, 0.996]->[0.498, 0.009, 0.99, 0.996]->[0.498, 0.0, 0.987, 1.0]->[0.497, 0.004, 0.992, 0.99]: stand, talk to (e.g., self, a person, a group).", "At 1599.0s, [0.45, 0.0, 0.995, 0.989]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1597.0s to 1598.0s, [0.438, 0.034, 0.989, 0.996]->[0.399, 0.0, 0.944, 0.991]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 1591.0s to 1595.0s, [0.547, 0.036, 0.958, 0.975]->[0.553, 0.028, 0.972, 0.967]->[0.533, 0.019, 0.978, 0.979]->[0.535, 0.023, 0.966, 0.981]->[0.528, 0.009, 0.973, 0.969]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1590.0s, [0.516, 0.04, 0.955, 0.983]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "From 1584.0s to 1589.0s, [0.522, 0.076, 0.94, 0.963]->[0.503, 0.077, 0.949, 0.991]->[0.53, 0.093, 0.961, 0.965]->[0.523, 0.091, 0.959, 0.964]->[0.532, 0.077, 0.96, 0.97]->[0.551, 0.073, 0.967, 0.967]: stand, talk to (e.g., self, a person, a group).", "From 1579.0s to 1583.0s, [0.432, 0.025, 0.79, 0.971]->[0.524, 0.036, 0.947, 0.967]->[0.497, 0.032, 0.92, 0.969]->[0.487, 0.069, 0.901, 0.96]->[0.514, 0.079, 0.922, 0.963]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 
1578.0s, [0.454, 0.0, 0.911, 0.975]: walk.", "At 1577.0s, [0.542, 0.003, 0.979, 0.983]: walk, listen to (a person).", "At 1576.0s, [0.585, 0.013, 1.0, 0.983]: stand, listen to (a person).", "From 1570.0s to 1575.0s, [0.534, 0.0, 0.998, 0.989]->[0.528, 0.0, 1.0, 1.0]->[0.52, 0.015, 1.0, 0.984]->[0.53, 0.0, 0.999, 0.987]->[0.513, 0.0, 0.997, 0.98]->[0.533, 0.005, 0.997, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1567.0s to 1569.0s, [0.53, 0.0, 1.0, 0.993]->[0.526, 0.009, 1.0, 0.982]->[0.531, 0.0, 1.0, 0.983]: stand, listen to (a person).", "From 1562.0s to 1566.0s, [0.494, 0.108, 0.981, 0.987]->[0.488, 0.072, 0.907, 0.998]->[0.476, 0.035, 0.971, 0.985]->[0.498, 0.012, 1.0, 0.99]->[0.494, 0.0, 0.991, 0.989]: stand, listen to (a person), watch (a person).", "From 1560.0s to 1561.0s, [0.534, 0.029, 0.987, 0.983]->[0.433, 0.069, 0.998, 0.997]: walk, listen to (a person), watch (a person).", "At 1559.0s, [0.426, 0.0, 1.0, 0.991]: walk, watch (a person).", "At 1558.0s, [0.585, 0.016, 1.0, 1.0]: walk, listen to (a person).", "From 1555.0s to 1557.0s, [0.483, 0.0, 1.0, 0.991]->[0.495, 0.0, 1.0, 0.998]->[0.499, 0.002, 1.0, 0.99]: stand, listen to (a person), watch (a person).", "At 1554.0s, [0.544, 0.027, 1.0, 0.972]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1553.0s, [0.558, 0.01, 0.999, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1550.0s to 1552.0s, [0.232, 0.011, 1.0, 1.0]->[0.478, 0.011, 1.0, 0.99]->[0.474, 0.013, 0.986, 0.995]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1632.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.006, 0.455, 0.99]: listen to (a person), stand; [0.506, 0.023, 0.99, 0.999]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "l-jxh8gpxuY.mkv", "clip": [1604, 1727], "conversations": [{"from": "human", "timestamps": 1634.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.008, 0.454, 0.984]: listen to (a person), stand; [0.5, 0.018, 0.998, 0.995]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1727.0, "value": "Describe the actions of the person at bbox [0.316, 0.107, 0.577, 0.976] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.316, 0.107, 0.577, 0.976]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1724.0s to 1726.0s, [0.314, 0.111, 0.559, 0.946]->[0.326, 0.11, 0.58, 0.966]->[0.325, 0.107, 0.577, 0.971]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1723.0s, [0.35, 0.11, 0.62, 0.943]: stand, listen to (a person).", "From 1720.0s to 1722.0s, [0.409, 0.106, 0.672, 0.852]->[0.365, 0.099, 0.612, 0.882]->[0.315, 0.093, 0.581, 0.919]: walk, listen to (a person).", "At 1719.0s, [0.404, 0.125, 0.635, 0.567]: walk, listen to (a person), talk to (e.g., self, a person, a group).", "At 1718.0s, [0.365, 0.138, 0.634, 0.986]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1716.0s to 1717.0s, [0.319, 0.165, 0.568, 0.985]->[0.324, 0.148, 0.568, 0.989]: walk, talk to (e.g., self, a person, a group).", "From 1714.0s to 1715.0s, [0.026, 0.364, 0.443, 0.942]->[0.151, 0.139, 0.549, 0.721]: sit.", "At 1713.0s, [0.0, 0.055, 0.617, 0.978]: 
stand, carry/hold (an object), enter, talk to (e.g., self, a person, a group), watch (a person).", "From 1711.0s to 1712.0s, [0.214, 0.094, 0.7, 0.986]->[0.185, 0.092, 0.598, 0.986]: stand, open (e.g., a window, a car door), talk to (e.g., self, a person, a group).", "From 1709.0s to 1710.0s, [0.057, 0.128, 0.563, 0.989]->[0.253, 0.107, 0.846, 0.985]: walk, talk to (e.g., self, a person, a group).", "From 1707.0s to 1708.0s, [0.026, 0.027, 0.5, 1.0]->[0.011, 0.027, 0.519, 0.986]: stand, talk to (e.g., self, a person, a group).", "At 1706.0s, [0.014, 0.028, 0.506, 0.997]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1705.0s, [0.009, 0.013, 0.512, 0.997]: stand, listen to (a person).", "At 1704.0s, [0.022, 0.026, 0.508, 0.991]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1703.0s, [0.026, 0.015, 0.585, 0.99]: stand, carry/hold (an object), give/serve (an object) to (a person), listen to (a person), talk to (e.g., self, a person, a group).", "From 1700.0s to 1702.0s, [0.009, 0.054, 0.533, 0.98]->[0.01, 0.038, 0.576, 0.985]->[0.021, 0.037, 0.584, 0.995]: stand, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "At 1699.0s, [0.017, 0.043, 0.534, 0.992]: stand, carry/hold (an object), read, listen to (a person).", "From 1696.0s to 1698.0s, [0.004, 0.058, 0.41, 1.0]->[0.0, 0.06, 0.301, 0.99]->[0.011, 0.058, 0.442, 1.0]: stand, listen to (a person).", "At 1695.0s, [0.019, 0.049, 0.536, 1.0]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1694.0s, [0.003, 0.06, 0.481, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1693.0s, [0.146, 0.063, 0.668, 0.993]: walk, listen to (a person).", "From 1689.0s to 1691.0s, [0.153, 0.032, 0.578, 0.828]->[0.084, 0.045, 0.63, 1.0]->[0.152, 0.071, 0.578, 0.989]: walk, listen to (a person).", "At 1688.0s, [0.113, 0.082, 0.701, 0.706]: stand, walk, listen to (a person).", "At 1687.0s, [0.077, 0.079, 0.639, 0.621]: walk, listen to (a person).", "From 1682.0s to 1686.0s, [0.003, 0.119, 0.333, 0.817]->[0.0, 0.115, 0.428, 0.994]->[0.003, 0.099, 0.42, 0.976]->[0.013, 0.074, 0.426, 0.757]->[0.001, 0.042, 0.312, 0.678]: walk, listen to (a person), watch (a person).", "At 1681.0s, [0.008, 0.133, 0.392, 0.639]: walk, listen to (a person).", "From 1679.0s to 1680.0s, [0.004, 0.041, 0.396, 0.833]->[0.0, 0.09, 0.347, 0.922]: walk, talk to (e.g., self, a person, a group).", "At 1678.0s, [0.0, 0.068, 0.441, 0.867]: walk, listen to (a person), talk to (e.g., self, a person, a group).", "At 1677.0s, [0.019, 0.077, 0.466, 0.959]: walk, listen to (a person), watch (a person).", "From 1675.0s to 1676.0s, [0.01, 0.04, 0.393, 0.889]->[0.0, 0.066, 0.33, 1.0]: walk, listen to (a person)."]}]}, {"video": "l-jxh8gpxuY.mkv", "clip": [1666, 1723], "conversations": [{"from": "human", "timestamps": 1696.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.004, 0.058, 0.41, 1.0]: listen to (a person), stand; [0.536, 0.195, 0.972, 1.0]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1723.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.164, 0.192, 0.353, 0.888]?"}, {"from": "gpt", "value": ["In the current frame, [0.164, 0.192, 0.353, 0.888]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1722.0s, [0.143, 0.198, 0.341, 0.767]: walk, 
listen to (a person), talk to (e.g., self, a person, a group).", "From 1719.0s to 1721.0s, [0.167, 0.216, 0.364, 0.989]->[0.169, 0.206, 0.379, 0.985]->[0.155, 0.203, 0.331, 0.56]: walk, talk to (e.g., self, a person, a group).", "From 1716.0s to 1718.0s, [0.149, 0.242, 0.354, 0.996]->[0.15, 0.248, 0.355, 0.99]->[0.139, 0.218, 0.359, 0.986]: walk, listen to (a person)."]}]}, {"video": "l-jxh8gpxuY.mkv", "clip": [1713, 1760], "conversations": [{"from": "human", "timestamps": 1743.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.04, 0.017, 0.953, 1.0]: watch (a person), sit."}, {"from": "human", "timestamps": 1760.0, "value": "What actions did the person at bbox [0.036, 0.003, 0.966, 0.994] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.036, 0.003, 0.966, 0.994]: sit, read.", "From 1758.0s to 1759.0s, [0.008, 0.019, 0.896, 1.0]->[0.001, 0.016, 0.918, 1.0]: sit, talk to (e.g., self, a person, a group).", "At 1757.0s, [0.013, 0.019, 0.892, 1.0]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1756.0s, [0.0, 0.026, 0.907, 0.998]: sit, listen to (a person)."]}]}, {"video": "lT1zdTL-3SM.mkv", "clip": [878, 943], "conversations": [{"from": "human", "timestamps": 908.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.44, 0.131, 0.581, 0.4]?"}, {"from": "gpt", "value": ["In the current frame, [0.44, 0.131, 0.581, 0.4]: stand, watch (a person).", "From 905.0s to 907.0s, [0.438, 0.141, 0.597, 0.948]->[0.442, 0.138, 0.59, 0.912]->[0.43, 0.144, 0.603, 0.954]: stand, listen to (a person), watch (a person).", "At 904.0s, [0.433, 0.124, 0.595, 0.965]: stand, listen to (a person)."]}, {"from": "human", "timestamps": 943.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.002, 0.13, 0.347, 1.0]: listen to (a person), sit, watch (a person); [0.291, 0.071, 0.843, 0.942]: sit, talk to (e.g., self, a person, a group); [0.815, 0.029, 1.0, 0.705]: carry/hold (an object), sit, watch (a person), listen to (a person)."}]}, {"video": "lT1zdTL-3SM.mkv", "clip": [881, 990], "conversations": [{"from": "human", "timestamps": 911.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.469, 0.352, 0.623, 0.614]: sit, write."}, {"from": "human", "timestamps": 990.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.127, 0.069, 0.801, 0.979]?"}, {"from": "gpt", "value": ["In the current frame, [0.127, 0.069, 0.801, 0.979]: sit, carry/hold (an object).", "From 988.0s to 989.0s, [0.14, 0.083, 0.796, 0.964]->[0.129, 0.065, 0.847, 0.976]: sit, carry/hold (an object).", "At 987.0s, [0.153, 0.063, 0.862, 0.973]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 979.0s to 986.0s, [0.198, 0.119, 0.838, 0.961]->[0.191, 0.101, 0.826, 0.978]->[0.159, 0.097, 0.839, 0.965]->[0.185, 0.113, 0.796, 0.978]->[0.173, 0.103, 0.899, 0.983]->[0.16, 0.098, 0.819, 0.982]->[0.17, 0.081, 0.929, 0.989]->[0.165, 0.069, 0.89, 0.981]: sit, carry/hold (an object).", "At 978.0s, [0.206, 0.114, 0.776, 0.968]: sit, carry/hold (an object), write, talk to (e.g., self, a person, a group).", "From 976.0s to 977.0s, [0.245, 0.111, 0.891, 0.938]->[0.192, 0.116, 0.823, 0.956]: sit, carry/hold (an object), talk to (e.g., self, a person, a 
group).", "From 973.0s to 975.0s, [0.27, 0.083, 0.732, 0.927]->[0.249, 0.124, 0.808, 0.928]->[0.247, 0.159, 0.7, 0.93]: sit, carry/hold (an object).", "At 972.0s, [0.297, 0.182, 0.767, 0.896]: sit, carry/hold (an object), write."]}]}, {"video": "lT1zdTL-3SM.mkv", "clip": [998, 1068], "conversations": [{"from": "human", "timestamps": 1028.0, "value": "What actions did the person at bbox [0.606, 0.385, 0.939, 0.911] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.606, 0.385, 0.939, 0.911]: sit, listen to (a person), watch (a person).", "At 1027.0s, [0.714, 0.406, 0.828, 0.63]: stand."]}, {"from": "human", "timestamps": 1068.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.054, 0.197, 0.228, 0.96]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.108, 0.216, 0.49, 1.0]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.459, 0.353, 0.584, 0.661]: watch (a person), stand; [0.481, 0.454, 0.684, 0.923]: listen to (a person), sit, watch (a person); [0.492, 0.693, 0.689, 0.996]: lie/sleep; [0.708, 0.195, 0.975, 0.983]: watch (a person), stand, talk to (e.g., self, a person, a group), push (another person)."}]}, {"video": "lT1zdTL-3SM.mkv", "clip": [1203, 1242], "conversations": [{"from": "human", "timestamps": 1233.0, "value": "What actions is the person in bbox [0.455, 0.023, 1.0, 0.97] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.455, 0.023, 1.0, 0.97]: sit.", "At 1232.0s, [0.478, 0.025, 1.0, 0.984]: sit."]}, {"from": "human", "timestamps": 1242.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.353, 0.0, 0.981, 0.996]: sit."}]}, {"video": "lT1zdTL-3SM.mkv", "clip": [1252, 1288], "conversations": [{"from": "human", "timestamps": 1282.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.177, 0.032, 0.955, 0.995]?"}, {"from": "gpt", "value": ["At Now, [0.177, 0.032, 0.955, 0.995]: stand.", "At 1281.0s, [0.178, 0.047, 0.923, 0.976]: sit."]}, {"from": "human", "timestamps": 1288.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.09, 0.106, 0.92, 0.978]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "lT1zdTL-3SM.mkv", "clip": [1246, 1327], "conversations": [{"from": "human", "timestamps": 1276.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.155, 0.092, 0.973, 0.976]: watch (a person), sit."}, {"from": "human", "timestamps": 1327.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.335, 0.122, 0.957, 0.99]?"}, {"from": "gpt", "value": ["In the current frame, [0.335, 0.122, 0.957, 0.99]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1326.0s, [0.383, 0.131, 0.969, 0.978]: sit, talk to (e.g., self, a person, a group).", "At 1325.0s, [0.44, 0.097, 0.984, 0.967]: stand, talk to (e.g., self, a person, a group).", "At 1324.0s, [0.455, 0.103, 0.983, 0.976]: sit, talk to (e.g., self, a person, a group).", "At 1323.0s, [0.449, 0.097, 0.987, 0.974]: sit.", "At 1322.0s, [0.463, 0.086, 0.979, 1.0]: stand, listen to (a person).", "From 1318.0s to 1321.0s, [0.455, 0.081, 0.987, 0.991]->[0.478, 0.093, 
0.981, 0.995]->[0.458, 0.108, 0.987, 0.996]->[0.486, 0.108, 0.975, 0.984]: sit, talk to (e.g., self, a person, a group).", "At 1317.0s, [0.476, 0.204, 0.969, 0.965]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1316.0s, [0.467, 0.115, 0.986, 0.978]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "lT1zdTL-3SM.mkv", "clip": [1295, 1355], "conversations": [{"from": "human", "timestamps": 1325.0, "value": "What actions is the person in bbox [0.44, 0.097, 0.984, 0.967] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.44, 0.097, 0.984, 0.967]: stand, talk to (e.g., self, a person, a group).", "At 1324.0s, [0.455, 0.103, 0.983, 0.976]: sit, talk to (e.g., self, a person, a group).", "At 1323.0s, [0.449, 0.097, 0.987, 0.974]: sit.", "At 1322.0s, [0.463, 0.086, 0.979, 1.0]: stand, listen to (a person).", "From 1318.0s to 1321.0s, [0.455, 0.081, 0.987, 0.991]->[0.478, 0.093, 0.981, 0.995]->[0.458, 0.108, 0.987, 0.996]->[0.486, 0.108, 0.975, 0.984]: sit, talk to (e.g., self, a person, a group).", "At 1317.0s, [0.476, 0.204, 0.969, 0.965]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1316.0s, [0.467, 0.115, 0.986, 0.978]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1355.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.191, 0.009, 0.979, 0.987]: watch (a person), sit."}]}, {"video": "lT1zdTL-3SM.mkv", "clip": [1371, 1406], "conversations": [{"from": "human", "timestamps": 1401.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.125, 0.026, 0.863, 0.98]: watch (a person), stand."}, {"from": "human", "timestamps": 1406.0, "value": "What actions did the person at bbox [0.239, 0.005, 1.0, 0.979] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.239, 0.005, 1.0, 0.979]: sit.", "At 1405.0s, [0.162, 0.004, 1.0, 0.995]: sit, watch (a person).", "At 1404.0s, [0.156, 0.0, 0.996, 0.991]: stand, watch (a person)."]}]}, {"video": "lT1zdTL-3SM.mkv", "clip": [1362, 1448], "conversations": [{"from": "human", "timestamps": 1392.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.204, 0.261, 0.387, 0.678]: listen to (a person), sit, watch (a person); [0.521, 0.269, 0.718, 0.58]: listen to (a person), sit, watch (a person); [0.741, 0.269, 0.886, 0.627]: carry/hold (an object), sit, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1448.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.187, 0.323, 0.432, 0.756]?"}, {"from": "gpt", "value": ["In the current frame, [0.187, 0.323, 0.432, 0.756]: sit.", "At 1447.0s, [0.162, 0.326, 0.408, 0.73]: sit, listen to (a person).", "From 1444.0s to 1446.0s, [0.136, 0.337, 0.306, 0.699]->[0.157, 0.335, 0.4, 0.721]->[0.162, 0.322, 0.422, 0.726]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "lT1zdTL-3SM.mkv", "clip": [1546, 1587], "conversations": [{"from": "human", "timestamps": 1576.0, "value": "What actions is the person in bbox [0.342, 0.542, 0.436, 0.886] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.342, 0.542, 0.436, 0.886]: walk, carry/hold 
(an object).", "At 1575.0s, [0.373, 0.536, 0.508, 0.907]: walk, carry/hold (an object)."]}, {"from": "human", "timestamps": 1587.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.354, 0.537, 0.742, 0.979]: watch (a person), lie/sleep; [0.741, 0.298, 0.847, 0.661]: walk."}]}, {"video": "lT1zdTL-3SM.mkv", "clip": [1594, 1641], "conversations": [{"from": "human", "timestamps": 1624.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.169, 0.013, 0.457, 0.974]?"}, {"from": "gpt", "value": ["At Now, [0.169, 0.013, 0.457, 0.974]: walk, carry/hold (an object).", "At 1623.0s, [0.007, 0.088, 0.155, 0.975]: walk, carry/hold (an object)."]}, {"from": "human", "timestamps": 1641.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.017, 0.036, 1.0, 0.965]: sit."}]}, {"video": "lWXhqIAvarw.mkv", "clip": [890, 939], "conversations": [{"from": "human", "timestamps": 920.0, "value": "For the person at bbox [0.473, 0.243, 0.591, 0.438], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.473, 0.243, 0.591, 0.438]: run/jog, carry/hold (an object).", "From 915.0s to 919.0s, [0.336, 0.211, 0.582, 0.908]->[0.399, 0.231, 0.555, 0.561]->[0.467, 0.269, 0.622, 0.502]->[0.42, 0.246, 0.578, 0.834]->[0.443, 0.261, 0.619, 0.768]: run/jog, carry/hold (an object)."]}, {"from": "human", "timestamps": 939.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.262, 0.155, 0.441, 0.979]: listen to (a person), watch (a person), walk; [0.422, 0.147, 0.774, 0.999]: carry/hold (an object), write, listen to (a person), walk."}]}, {"video": "lWXhqIAvarw.mkv", "clip": [898, 1018], "conversations": [{"from": "human", "timestamps": 928.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.108, 0.154, 0.328, 0.977]: carry/hold (an object), walk; [0.399, 0.184, 0.591, 0.989]: walk; [0.554, 0.168, 0.694, 0.991]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1018.0, "value": "What actions is the person in bbox [0.024, 0.032, 0.733, 0.991] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.024, 0.032, 0.733, 0.991]: sit, listen to (a person).", "At 1017.0s, [0.007, 0.05, 0.745, 0.994]: sit, listen to (a person)."]}]}, {"video": "lWXhqIAvarw.mkv", "clip": [994, 1028], "conversations": [{"from": "human", "timestamps": 1024.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.056, 0.081, 0.453, 1.0]: listen to (a person), stand; [0.56, 0.1, 0.986, 0.999]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1028.0, "value": "Describe the actions of the person at bbox [0.025, 0.055, 0.737, 0.979] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.025, 0.055, 0.737, 0.979]: sit, talk to (e.g., self, a person, a group).", "At 1027.0s, [0.011, 0.032, 0.739, 0.987]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "lWXhqIAvarw.mkv", "clip": [1018, 1075], "conversations": [{"from": "human", "timestamps": 1048.0, "value": "Can you tell me the current and past actions 
of the person located in bbox [0.542, 0.085, 0.982, 0.995]?"}, {"from": "gpt", "value": ["In the current frame, [0.542, 0.085, 0.982, 0.995]: stand, talk to (e.g., self, a person, a group).", "From 1044.0s to 1047.0s, [0.553, 0.073, 0.987, 0.995]->[0.546, 0.074, 0.982, 0.989]->[0.544, 0.076, 0.983, 0.989]->[0.541, 0.07, 0.981, 0.993]: stand, talk to (e.g., self, a person, a group).", "At 1043.0s, [0.55, 0.086, 0.981, 0.997]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1075.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.308, 0.12, 0.486, 0.865]: talk to (e.g., self, a person, a group), walk."}]}, {"video": "lWXhqIAvarw.mkv", "clip": [1032, 1103], "conversations": [{"from": "human", "timestamps": 1062.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.187, 0.463, 0.991]: watch (a person), sit; [0.434, 0.14, 0.614, 0.759]: stand; [0.708, 0.083, 0.946, 0.768]: watch (a person), stand."}, {"from": "human", "timestamps": 1103.0, "value": "What actions did the person at bbox [0.343, 0.073, 0.978, 0.987] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.343, 0.073, 0.978, 0.987]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1102.0s, [0.398, 0.065, 0.97, 0.98]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1100.0s to 1101.0s, [0.438, 0.056, 0.954, 0.977]->[0.43, 0.071, 0.981, 0.976]: stand, talk to (e.g., self, a person, a group).", "From 1096.0s to 1099.0s, [0.496, 0.081, 0.714, 0.987]->[0.618, 0.032, 0.999, 0.98]->[0.558, 0.0, 1.0, 0.997]->[0.521, 0.0, 1.0, 1.0]: stand, listen to (a person), watch (a person).", "At 1095.0s, [0.565, 0.078, 0.775, 0.994]: walk, listen to (a person), watch (a person).", "At 1094.0s, [0.553, 0.064, 0.711, 0.995]: walk, talk to (e.g., self, a person, a group).", "At 1093.0s, [0.537, 0.079, 0.694, 0.989]: walk."]}]}, {"video": "lWXhqIAvarw.mkv", "clip": [1070, 1140], "conversations": [{"from": "human", "timestamps": 1100.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.051, 0.411, 0.99]: listen to (a person), stand, watch (a person); [0.438, 0.056, 0.954, 0.977]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1140.0, "value": "For the person at bbox [0.287, 0.114, 0.614, 0.99], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.287, 0.114, 0.614, 0.99]: stand, listen to (a person), watch (a person).", "At 1139.0s, [0.281, 0.087, 0.679, 0.975]: stand, listen to (a person), watch (a person).", "At 1138.0s, [0.265, 0.109, 0.621, 0.994]: stand, listen to (a person).", "From 1136.0s to 1137.0s, [0.611, 0.11, 0.884, 0.997]->[0.632, 0.106, 0.899, 0.96]: stand, watch (a person).", "From 1132.0s to 1135.0s, [0.503, 0.027, 0.828, 1.0]->[0.569, 0.087, 0.782, 0.99]->[0.597, 0.111, 0.808, 0.995]->[0.621, 0.088, 0.853, 1.0]: stand, kiss (a person).", "At 1131.0s, [0.579, 0.097, 0.781, 0.995]: stand, kiss (a person), watch (a person).", "At 1130.0s, [0.616, 0.093, 0.864, 0.982]: stand, watch (a person).", "At 1129.0s, [0.505, 0.068, 0.861, 0.992]: walk, carry/hold (an object), kiss (a person).", "From 1123.0s to 1128.0s, [0.477, 0.136, 0.768, 0.974]->[0.541, 
0.128, 0.943, 0.976]->[0.49, 0.08, 0.871, 0.975]->[0.543, 0.082, 0.752, 0.986]->[0.535, 0.081, 0.907, 0.965]->[0.6, 0.106, 0.924, 0.96]: walk, carry/hold (an object)."]}]}, {"video": "lWXhqIAvarw.mkv", "clip": [1132, 1171], "conversations": [{"from": "human", "timestamps": 1162.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.132, 0.062, 0.473, 0.986]: listen to (a person), stand, hug (a person); [0.506, 0.136, 0.688, 0.988]: stand."}, {"from": "human", "timestamps": 1171.0, "value": "What actions is the person in bbox [0.266, 0.041, 0.907, 0.997] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.266, 0.041, 0.907, 0.997]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1170.0s, [0.279, 0.01, 0.919, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1169.0s, [0.256, 0.067, 0.94, 0.987]: sit, talk to (e.g., self, a person, a group).", "From 1167.0s to 1168.0s, [0.485, 0.472, 0.704, 0.915]->[0.273, 0.049, 0.928, 0.974]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1166.0s, [0.499, 0.484, 0.763, 0.949]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1165.0s, [0.563, 0.516, 0.963, 0.983]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1164.0s, [0.599, 0.15, 0.99, 1.0]: walk, talk to (e.g., self, a person, a group).", "At 1163.0s, [0.358, 0.125, 0.64, 0.993]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1162.0s, [0.132, 0.062, 0.473, 0.986]: stand, hug (a person), listen to (a person)."]}]}, {"video": "lWXhqIAvarw.mkv", "clip": [1159, 1226], "conversations": [{"from": "human", "timestamps": 1189.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.076, 0.025, 0.291, 0.97]: watch (a person), stand; [0.38, 0.217, 0.488, 0.729]: carry/hold (an object), open (e.g., a window, a car door), walk; [0.487, 0.478, 0.787, 0.883]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1226.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.549, 0.339, 0.923, 0.95]?"}, {"from": "gpt", "value": ["At Now, [0.549, 0.339, 0.923, 0.95]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1225.0s, [0.598, 0.334, 0.951, 0.97]: sit, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "At 1224.0s, [0.553, 0.335, 0.937, 0.975]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1222.0s to 1223.0s, [0.575, 0.328, 0.935, 0.968]->[0.588, 0.334, 0.934, 0.971]: sit, carry/hold (an object), touch (an object).", "At 1221.0s, [0.537, 0.34, 0.94, 0.977]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1218.0s to 1220.0s, [0.426, 0.334, 0.938, 0.971]->[0.422, 0.326, 0.931, 0.97]->[0.426, 0.34, 0.934, 0.968]: sit, carry/hold (an object), give/serve (an object) to (a person), listen to (a person).", "At 1217.0s, [0.462, 0.344, 0.946, 0.97]: sit, carry/hold (an object), listen to (a person).", "At 1216.0s, [0.448, 0.342, 0.933, 0.973]: sit, carry/hold (an object), watch (a person).", "From 1212.0s to 1215.0s, [0.559, 0.335, 0.919, 0.967]->[0.562, 0.33, 0.927, 0.966]->[0.566, 0.326, 0.947, 0.976]->[0.611, 
0.331, 0.936, 0.976]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1211.0s, [0.633, 0.329, 0.914, 0.964]: sit, drink, touch (an object), watch (a person)."]}]}, {"video": "lWXhqIAvarw.mkv", "clip": [1216, 1253], "conversations": [{"from": "human", "timestamps": 1246.0, "value": "What actions is the person in bbox [0.458, 0.339, 0.932, 0.968] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.458, 0.339, 0.932, 0.968]: sit, carry/hold (an object), give/serve (an object) to (a person), listen to (a person).", "At 1245.0s, [0.449, 0.328, 0.93, 0.961]: sit, carry/hold (an object), give/serve (an object) to (a person), listen to (a person).", "From 1241.0s to 1244.0s, [0.537, 0.343, 0.921, 0.927]->[0.538, 0.349, 0.902, 0.928]->[0.538, 0.347, 0.909, 0.933]->[0.594, 0.335, 0.919, 0.955]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1240.0s, [0.534, 0.339, 0.925, 0.922]: sit, carry/hold (an object), put down, touch (an object), watch (a person).", "At 1239.0s, [0.602, 0.324, 0.932, 0.964]: sit, carry/hold (an object), watch (a person).", "At 1238.0s, [0.621, 0.321, 0.938, 0.962]: sit, drink, watch (a person).", "From 1234.0s to 1237.0s, [0.574, 0.34, 0.925, 0.962]->[0.551, 0.35, 0.912, 0.951]->[0.574, 0.345, 0.915, 0.978]->[0.592, 0.347, 0.915, 0.964]: sit, carry/hold (an object), watch (a person).", "At 1233.0s, [0.545, 0.355, 0.901, 0.918]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1232.0s, [0.545, 0.347, 0.901, 0.928]: sit, carry/hold (an object), watch (a person)."]}, {"from": "human", "timestamps": 1253.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.143, 0.375, 0.486, 0.98]: sit, touch (an object), talk to (e.g., self, a person, a group); [0.541, 0.342, 0.915, 0.927]: carry/hold (an object), sit, watch (a person), listen to (a person)."}]}, {"video": "lWXhqIAvarw.mkv", "clip": [1255, 1313], "conversations": [{"from": "human", "timestamps": 1285.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.048, 0.241, 0.565, 0.973]: stand, talk to (e.g., self, a person, a group); [0.53, 0.0, 1.0, 1.0]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1313.0, "value": "What actions is the person in bbox [0.364, 0.197, 0.555, 0.973] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.364, 0.197, 0.555, 0.973]: walk, carry/hold (an object), open (e.g., a window, a car door), listen to (a person), watch (a person).", "At 1312.0s, [0.414, 0.186, 0.636, 0.909]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1311.0s, [0.519, 0.159, 0.716, 0.991]: stand, carry/hold (an object), watch (a person).", "From 1307.0s to 1310.0s, [0.413, 0.175, 0.665, 0.986]->[0.372, 0.178, 0.623, 1.0]->[0.393, 0.204, 0.655, 0.977]->[0.446, 0.215, 0.687, 0.971]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1305.0s to 1306.0s, [0.408, 0.181, 0.68, 0.973]->[0.406, 0.168, 0.677, 0.983]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1304.0s, [0.279, 0.088, 0.776, 0.998]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a 
person).", "At 1303.0s, [0.255, 0.094, 0.75, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1299.0s to 1302.0s, [0.375, 0.151, 0.625, 0.978]->[0.386, 0.204, 0.639, 0.983]->[0.407, 0.189, 0.645, 1.0]->[0.254, 0.097, 0.758, 0.989]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1298.0s, [0.42, 0.217, 0.626, 0.966]: stand, carry/hold (an object), listen to (a person).", "At 1296.0s, [0.392, 0.202, 0.681, 0.983]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1295.0s, [0.231, 0.085, 0.738, 0.988]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1292.0s to 1294.0s, [0.228, 0.101, 0.757, 1.0]->[0.259, 0.086, 0.758, 1.0]->[0.243, 0.103, 0.778, 0.986]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "lWXhqIAvarw.mkv", "clip": [1237, 1332], "conversations": [{"from": "human", "timestamps": 1267.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.13, 0.374, 0.471, 0.977]: sit; [0.601, 0.343, 0.92, 0.962]: listen to (a person), sit, watch (a person), drink."}, {"from": "human", "timestamps": 1332.0, "value": "Describe the actions of the person at bbox [0.106, 0.129, 0.377, 0.578] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.106, 0.129, 0.377, 0.578]: sit, listen to (a person).", "At 1331.0s, [0.111, 0.127, 0.379, 0.586]: sit, listen to (a person).", "At 1330.0s, [0.121, 0.111, 0.404, 0.584]: sit, put down, listen to (a person), talk to (e.g., self, a person, a group).", "At 1329.0s, [0.11, 0.125, 0.442, 0.596]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1328.0s, [0.11, 0.123, 0.444, 0.592]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1327.0s, [0.112, 0.122, 0.423, 0.582]: sit, talk to (e.g., self, a person, a group).", "At 1326.0s, [0.124, 0.139, 0.426, 0.595]: sit, touch (an object).", "At 1325.0s, [0.122, 0.233, 0.427, 0.744]: sit, watch (a person).", "At 1324.0s, [0.229, 0.107, 0.503, 0.879]: stand, listen to (a person), watch (a person).", "At 1323.0s, [0.245, 0.225, 0.475, 0.981]: walk, listen to (a person)."]}]}, {"video": "lWXhqIAvarw.mkv", "clip": [1307, 1358], "conversations": [{"from": "human", "timestamps": 1337.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.101, 0.136, 0.372, 0.582]: sit, talk to (e.g., self, a person, a group); [0.52, 0.153, 0.945, 0.984]: listen to (a person), sit."}, {"from": "human", "timestamps": 1358.0, "value": "What actions did the person at bbox [0.447, 0.165, 0.937, 0.987] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.447, 0.165, 0.937, 0.987]: sit, listen to (a person), watch (a person).", "From 1353.0s to 1357.0s, [0.433, 0.165, 0.933, 0.978]->[0.437, 0.156, 0.933, 0.987]->[0.447, 0.15, 0.934, 0.991]->[0.497, 0.161, 0.931, 0.965]->[0.462, 0.176, 0.928, 0.975]: sit, listen to (a person), watch (a person).", "From 1349.0s to 1352.0s, [0.421, 0.172, 0.936, 0.978]->[0.363, 0.184, 0.927, 1.0]->[0.392, 0.182, 0.933, 1.0]->[0.406, 0.174, 0.937, 0.992]: sit, listen to (a person).", "From 1347.0s to 1348.0s, [0.494, 0.17, 0.941, 0.972]->[0.455, 0.176, 0.922, 0.977]: sit, listen to (a person), watch (a 
person).", "From 1345.0s to 1346.0s, [0.435, 0.15, 0.951, 0.976]->[0.456, 0.162, 0.948, 0.966]: sit, listen to (a person).", "From 1342.0s to 1344.0s, [0.445, 0.159, 0.95, 0.981]->[0.532, 0.133, 0.956, 1.0]->[0.391, 0.132, 0.951, 1.0]: sit, talk to (e.g., self, a person, a group).", "From 1340.0s to 1341.0s, [0.415, 0.151, 0.928, 1.0]->[0.442, 0.154, 0.931, 0.99]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1337.0s to 1339.0s, [0.52, 0.153, 0.945, 0.984]->[0.493, 0.164, 0.95, 0.982]->[0.489, 0.155, 0.934, 0.982]: sit, listen to (a person).", "At 1336.0s, [0.467, 0.162, 0.958, 0.988]: sit, listen to (a person), watch (a person).", "From 1334.0s to 1335.0s, [0.539, 0.161, 0.957, 0.97]->[0.556, 0.169, 0.95, 0.975]: sit, listen to (a person).", "From 1329.0s to 1333.0s, [0.423, 0.186, 0.963, 0.998]->[0.463, 0.149, 0.966, 0.994]->[0.497, 0.173, 0.952, 0.985]->[0.492, 0.148, 0.951, 0.994]->[0.448, 0.158, 0.959, 0.991]: sit, talk to (e.g., self, a person, a group).", "At 1328.0s, [0.468, 0.151, 0.948, 0.983]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1327.0s, [0.367, 0.135, 0.944, 1.0]: sit, listen to (a person).", "From 1324.0s to 1326.0s, [0.53, 0.477, 0.868, 0.98]->[0.448, 0.282, 0.982, 0.995]->[0.431, 0.128, 0.968, 1.0]: sit, talk to (e.g., self, a person, a group).", "At 1323.0s, [0.442, 0.118, 0.913, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1322.0s, [0.348, 0.092, 0.711, 1.0]: walk, listen to (a person).", "From 1319.0s to 1321.0s, [0.076, 0.093, 0.347, 0.992]->[0.097, 0.077, 0.38, 0.998]->[0.162, 0.078, 0.477, 0.987]: walk, talk to (e.g., self, a person, a group).", "From 1317.0s to 1318.0s, [0.073, 0.164, 0.431, 0.989]->[0.017, 0.109, 0.424, 0.994]: bend/bow (at the waist), listen to (a person), watch (a person).", "At 1316.0s, [0.083, 0.148, 0.416, 0.994]: stand, listen to (a person), watch (a person).", "At 1315.0s, [0.095, 0.133, 0.415, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1314.0s, [0.119, 0.05, 0.409, 0.998]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1292.0s to 1313.0s, [0.628, 0.07, 0.995, 0.985]->[0.619, 0.072, 0.975, 0.97]->[0.622, 0.095, 0.981, 0.981]->[0.616, 0.071, 0.979, 0.974]->[0.088, 0.046, 0.383, 0.999]->[0.094, 0.023, 0.377, 0.991]->[0.101, 0.026, 0.378, 0.989]->[0.107, 0.05, 0.368, 0.988]->[0.088, 0.082, 0.383, 0.985]->[0.091, 0.07, 0.396, 0.984]->[0.619, 0.066, 0.969, 0.968]->[0.625, 0.085, 0.976, 0.981]->[0.624, 0.091, 0.976, 0.98]->[0.097, 0.058, 0.409, 0.999]->[0.104, 0.041, 0.39, 0.995]->[0.089, 0.044, 0.38, 0.998]->[0.09, 0.048, 0.37, 0.985]->[0.099, 0.045, 0.373, 1.0]->[0.099, 0.049, 0.37, 0.984]->[0.1, 0.042, 0.384, 0.999]->[0.092, 0.04, 0.375, 0.984]->[0.092, 0.037, 0.378, 0.988]: stand, listen to (a person), watch (a person)."]}]}, {"video": "lWXhqIAvarw.mkv", "clip": [1399, 1443], "conversations": [{"from": "human", "timestamps": 1429.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.094, 0.048, 0.884, 0.984]?"}, {"from": "gpt", "value": ["At Now, [0.094, 0.048, 0.884, 0.984]: stand, talk to (e.g., self, a person, a group).", "At 1428.0s, [0.086, 0.053, 0.847, 1.0]: sit, stand, talk to (e.g., self, a person, a group).", "At 1427.0s, [0.0, 0.08, 0.737, 1.0]: stand, close (e.g., a door, a box).", "At 1426.0s, [0.338, 0.026, 0.915, 0.991]: walk, open (e.g., a window, a car door).", "At 1425.0s, [0.338, 0.001, 0.73, 0.981]: walk, open (e.g., a window, a 
car door), watch (a person)."]}, {"from": "human", "timestamps": 1443.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.306, 0.097, 0.959, 0.989]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "lWXhqIAvarw.mkv", "clip": [1433, 1491], "conversations": [{"from": "human", "timestamps": 1463.0, "value": "What actions is the person in bbox [0.723, 0.478, 0.969, 0.952] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.723, 0.478, 0.969, 0.952]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1462.0s, [0.666, 0.25, 0.892, 0.981]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1461.0s, [0.68, 0.134, 0.868, 0.959]: stand, listen to (a person).", "From 1458.0s to 1460.0s, [0.712, 0.126, 0.916, 0.962]->[0.703, 0.11, 0.901, 0.97]->[0.696, 0.125, 0.898, 0.958]: stand, listen to (a person), watch (a person).", "From 1455.0s to 1457.0s, [0.684, 0.151, 0.88, 0.97]->[0.709, 0.139, 0.908, 0.975]->[0.726, 0.123, 0.924, 0.965]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1491.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.268, 0.139, 0.395, 0.974]: carry/hold (an object), enter, open (e.g., a window, a car door), walk."}]}, {"video": "lWXhqIAvarw.mkv", "clip": [1517, 1577], "conversations": [{"from": "human", "timestamps": 1547.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.408, 0.067, 0.846, 0.973]: carry/hold (an object), sit, smoke."}, {"from": "human", "timestamps": 1577.0, "value": "For the person at bbox [0.296, 0.007, 0.99, 0.989], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.296, 0.007, 0.99, 0.989]: stand.", "At 1576.0s, [0.307, 0.004, 1.0, 0.976]: stand."]}]}, {"video": "lWXhqIAvarw.mkv", "clip": [1595, 1664], "conversations": [{"from": "human", "timestamps": 1625.0, "value": "Describe the actions of the person at bbox [0.064, 0.016, 0.992, 0.987] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.064, 0.016, 0.992, 0.987]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1624.0s, [0.227, 0.0, 0.998, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1623.0s, [0.26, 0.005, 1.0, 0.995]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1622.0s, [0.203, 0.0, 1.0, 0.989]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1621.0s, [0.223, 0.0, 0.99, 1.0]: sit, listen to (a person), watch (a person).", "From 1619.0s to 1620.0s, [0.617, 0.453, 1.0, 1.0]->[0.638, 0.193, 0.997, 0.985]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1618.0s, [0.652, 0.2, 0.999, 0.995]: stand, talk to (e.g., self, a person, a group).", "At 1617.0s, [0.661, 0.15, 0.991, 0.996]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1616.0s, [0.608, 0.227, 0.994, 0.99]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1615.0s, [0.657, 0.221, 0.993, 0.994]: stand, hand shake, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1614.0s, [0.574, 0.222, 0.993, 0.99]: stand, listen to (a person), watch (a person).", 
"At 1613.0s, [0.342, 0.0, 0.788, 0.99]: stand, watch (a person).", "At 1612.0s, [0.501, 0.192, 0.913, 0.99]: sit."]}, {"from": "human", "timestamps": 1664.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.012, 0.0, 0.972, 0.985]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "lWXhqIAvarw.mkv", "clip": [1601, 1691], "conversations": [{"from": "human", "timestamps": 1631.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.012, 0.0, 0.972, 0.974]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1691.0, "value": "Describe the actions of the person at bbox [0.545, 0.057, 0.998, 0.981] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.545, 0.057, 0.998, 0.981]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1689.0s to 1690.0s, [0.57, 0.048, 0.944, 0.984]->[0.526, 0.045, 0.943, 0.978]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "lWXhqIAvarw.mkv", "clip": [1671, 1737], "conversations": [{"from": "human", "timestamps": 1701.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.002, 0.195, 0.858]: listen to (a person), sit, watch (a person); [0.196, 0.012, 1.0, 0.978]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1737.0, "value": "For the person at bbox [0.197, 0.015, 1.0, 0.985], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.197, 0.015, 1.0, 0.985]: sit, talk to (e.g., self, a person, a group).", "From 1735.0s to 1736.0s, [0.17, 0.02, 0.998, 0.97]->[0.183, 0.017, 1.0, 0.974]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1734.0s, [0.692, 0.0, 0.997, 0.995]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "lWXhqIAvarw.mkv", "clip": [1718, 1768], "conversations": [{"from": "human", "timestamps": 1748.0, "value": "Describe the actions of the person at bbox [0.014, 0.0, 0.822, 0.999] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.014, 0.0, 0.822, 0.999]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1747.0s, [0.011, 0.0, 0.745, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1745.0s to 1746.0s, [0.005, 0.0, 0.754, 1.0]->[0.006, 0.015, 0.891, 1.0]: sit, talk to (e.g., self, a person, a group).", "At 1744.0s, [0.0, 0.007, 0.231, 0.905]: sit, listen to (a person).", "From 1742.0s to 1743.0s, [0.0, 0.011, 0.231, 0.887]->[0.0, 0.006, 0.236, 0.949]: sit, listen to (a person), watch (a person).", "From 1740.0s to 1741.0s, [0.0, 0.0, 0.254, 0.951]->[0.0, 0.006, 0.243, 0.924]: sit, watch (a person).", "At 1738.0s, [0.0, 0.008, 0.253, 0.921]: sit, listen to (a person), watch (a person).", "At 1737.0s, [0.0, 0.008, 0.243, 0.846]: stand, listen to (a person), watch (a person).", "At 1736.0s, [0.0, 0.008, 0.244, 0.877]: sit, listen to (a person), watch (a person).", "At 1734.0s, [0.012, 0.0, 0.777, 1.0]: sit, listen to (a person), watch (a person).", "At 1733.0s, [0.0, 0.017, 0.968, 0.993]: sit, stand, listen to (a person), watch (a person).", "From 1730.0s to 1732.0s, [0.0, 0.009, 0.243, 0.921]->[0.015, 
0.0, 0.784, 1.0]->[0.017, 0.0, 0.751, 1.0]: sit, listen to (a person), watch (a person).", "At 1729.0s, [0.0, 0.013, 0.243, 0.875]: listen to (a person), watch (a person).", "From 1727.0s to 1728.0s, [0.0, 0.009, 0.238, 0.869]->[0.0, 0.009, 0.25, 0.906]: sit, listen to (a person), watch (a person).", "From 1725.0s to 1726.0s, [0.002, 0.015, 0.923, 0.991]->[0.011, 0.013, 0.863, 0.996]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1724.0s, [0.018, 0.0, 0.849, 1.0]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1723.0s, [0.0, 0.013, 0.251, 0.908]: sit, stand, listen to (a person), watch (a person).", "From 1721.0s to 1722.0s, [0.0, 0.007, 0.257, 0.955]->[0.0, 0.016, 0.259, 0.955]: sit, listen to (a person), watch (a person).", "At 1720.0s, [0.0, 0.014, 0.248, 0.92]: sit, listen to (a person).", "At 1719.0s, [0.0, 0.011, 0.237, 0.898]: stand, listen to (a person), watch (a person).", "At 1718.0s, [0.0, 0.02, 0.247, 0.917]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1768.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.006, 0.004, 0.82, 1.0]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "mfsbYdLx9wE.mkv", "clip": [891, 947], "conversations": [{"from": "human", "timestamps": 921.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.218, 0.164, 0.49, 0.786]: listen to (a person), stand; [0.41, 0.133, 0.846, 0.969]: carry/hold (an object), stand, read."}, {"from": "human", "timestamps": 947.0, "value": "For the person at bbox [0.494, 0.196, 0.768, 0.857], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.494, 0.196, 0.768, 0.857]: stand, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "At 946.0s, [0.518, 0.198, 0.797, 0.778]: stand, carry/hold (an object), read.", "At 945.0s, [0.536, 0.189, 0.827, 0.74]: stand, read.", "At 944.0s, [0.494, 0.169, 0.793, 0.774]: stand, read, talk to (e.g., self, a person, a group).", "At 943.0s, [0.511, 0.148, 0.826, 0.751]: stand, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "At 942.0s, [0.496, 0.105, 0.851, 0.953]: stand, carry/hold (an object), read, touch (an object), talk to (e.g., self, a person, a group).", "At 941.0s, [0.467, 0.112, 0.774, 0.95]: stand, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "From 938.0s to 940.0s, [0.4, 0.096, 0.832, 0.96]->[0.403, 0.085, 0.781, 0.955]->[0.271, 0.067, 0.737, 0.942]: stand, carry/hold (an object), read.", "At 937.0s, [0.429, 0.098, 0.828, 0.957]: stand, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "From 935.0s to 936.0s, [0.425, 0.12, 0.855, 0.981]->[0.42, 0.1, 0.826, 0.969]: stand, read.", "At 934.0s, [0.39, 0.13, 0.85, 0.922]: stand, read, listen to (a person).", "At 933.0s, [0.457, 0.105, 0.855, 0.97]: stand, read.", "At 932.0s, [0.449, 0.118, 0.864, 0.964]: stand, carry/hold (an object), read.", "At 931.0s, [0.467, 0.13, 0.854, 0.987]: stand, read.", "At 930.0s, [0.43, 0.125, 0.854, 0.973]: stand, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "At 929.0s, [0.41, 0.127, 0.857, 0.971]: stand, carry/hold (an object), read.", "From 927.0s to 928.0s, [0.397, 0.127, 0.836, 0.961]->[0.417, 0.13, 0.85, 0.954]: stand, read.", "From 
925.0s to 926.0s, [0.417, 0.126, 0.842, 0.962]->[0.422, 0.131, 0.839, 0.953]: stand, carry/hold (an object), read.", "At 924.0s, [0.419, 0.129, 0.846, 0.959]: stand, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "At 923.0s, [0.445, 0.105, 0.844, 0.963]: stand, carry/hold (an object), read.", "At 922.0s, [0.433, 0.122, 0.843, 0.965]: stand, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "From 920.0s to 921.0s, [0.393, 0.131, 0.845, 0.955]->[0.41, 0.133, 0.846, 0.969]: stand, carry/hold (an object), read.", "From 916.0s to 919.0s, [0.396, 0.171, 0.82, 0.941]->[0.414, 0.144, 0.826, 0.967]->[0.402, 0.135, 0.835, 0.975]->[0.469, 0.123, 0.865, 0.963]: stand, read.", "From 914.0s to 915.0s, [0.389, 0.159, 0.828, 0.969]->[0.396, 0.159, 0.833, 0.959]: stand, carry/hold (an object), read.", "From 912.0s to 913.0s, [0.402, 0.156, 0.824, 0.97]->[0.399, 0.158, 0.828, 0.966]: stand, read, talk to (e.g., self, a person, a group).", "At 911.0s, [0.4, 0.157, 0.857, 0.964]: stand, carry/hold (an object), read.", "At 910.0s, [0.424, 0.124, 0.861, 0.964]: stand, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "From 907.0s to 909.0s, [0.503, 0.176, 0.896, 0.984]->[0.452, 0.14, 0.872, 0.972]->[0.441, 0.138, 0.874, 0.976]: stand, carry/hold (an object), read.", "At 906.0s, [0.526, 0.201, 0.903, 0.982]: stand, carry/hold (an object).", "From 903.0s to 905.0s, [0.455, 0.14, 0.876, 0.976]->[0.473, 0.111, 0.868, 0.976]->[0.493, 0.055, 0.892, 0.959]: stand, carry/hold (an object), listen to (a person).", "At 902.0s, [0.436, 0.124, 0.861, 0.954]: stand, carry/hold (an object), listen to (a person), take (an object) from (a person), watch (a person)."]}]}, {"video": "mfsbYdLx9wE.mkv", "clip": [893, 1018], "conversations": [{"from": "human", "timestamps": 923.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.214, 0.153, 0.57, 0.934]: listen to (a person), stand; [0.445, 0.105, 0.844, 0.963]: carry/hold (an object), stand, read."}, {"from": "human", "timestamps": 1018.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.084, 0.455, 0.321, 0.86]?"}, {"from": "gpt", "value": ["In the current frame, [0.084, 0.455, 0.321, 0.86]: sit, listen to (a person).", "At 1017.0s, [0.099, 0.444, 0.309, 0.858]: sit, listen to (a person).", "From 1013.0s to 1016.0s, [0.078, 0.447, 0.313, 0.881]->[0.076, 0.445, 0.298, 0.879]->[0.075, 0.452, 0.305, 0.885]->[0.069, 0.438, 0.31, 0.888]: sit, listen to (a person), watch (a person).", "From 1010.0s to 1012.0s, [0.08, 0.435, 0.31, 0.881]->[0.083, 0.449, 0.299, 0.875]->[0.074, 0.451, 0.303, 0.886]: sit, listen to (a person).", "At 1009.0s, [0.075, 0.439, 0.319, 0.892]: sit, talk to (e.g., self, a person, a group).", "From 1007.0s to 1008.0s, [0.091, 0.458, 0.319, 0.855]->[0.088, 0.477, 0.323, 0.861]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1006.0s, [0.085, 0.431, 0.314, 0.874]: sit, talk to (e.g., self, a person, a group).", "At 1005.0s, [0.077, 0.441, 0.323, 0.891]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1004.0s, [0.067, 0.038, 0.779, 0.978]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "mfsbYdLx9wE.mkv", "clip": [964, 1010], "conversations": [{"from": "human", "timestamps": 994.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the 
current frame, [0.087, 0.449, 0.325, 0.873]: listen to (a person), sit, watch (a person), talk to (e.g., self, a person, a group); [0.574, 0.145, 0.892, 0.656]: read, stand."}, {"from": "human", "timestamps": 1010.0, "value": "Describe the actions of the person at bbox [0.613, 0.111, 0.877, 0.636] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.613, 0.111, 0.877, 0.636]: stand, talk to (e.g., self, a person, a group).", "From 1007.0s to 1009.0s, [0.637, 0.114, 0.903, 0.72]->[0.622, 0.112, 0.891, 0.649]->[0.618, 0.119, 0.887, 0.661]: stand, listen to (a person).", "At 1006.0s, [0.627, 0.11, 0.904, 0.685]: stand, listen to (a person), watch (a person).", "At 1005.0s, [0.606, 0.102, 0.894, 0.64]: stand, listen to (a person)."]}]}, {"video": "mfsbYdLx9wE.mkv", "clip": [1033, 1075], "conversations": [{"from": "human", "timestamps": 1063.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.326, 0.247, 0.89, 0.984]: stand, answer phone."}, {"from": "human", "timestamps": 1075.0, "value": "For the person at bbox [0.181, 0.246, 0.656, 0.994], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.181, 0.246, 0.656, 0.994]: stand, answer phone.", "At 1074.0s, [0.175, 0.244, 0.878, 0.968]: stand, answer phone.", "At 1073.0s, [0.127, 0.257, 0.82, 0.932]: stand, answer phone, touch (an object).", "From 1059.0s to 1072.0s, [0.268, 0.256, 0.886, 0.983]->[0.234, 0.281, 0.862, 0.985]->[0.249, 0.218, 0.889, 0.989]->[0.317, 0.279, 0.89, 0.997]->[0.326, 0.247, 0.89, 0.984]->[0.34, 0.262, 0.856, 0.998]->[0.336, 0.204, 0.858, 0.988]->[0.301, 0.25, 0.879, 0.999]->[0.276, 0.262, 0.858, 1.0]->[0.297, 0.258, 0.854, 0.993]->[0.303, 0.319, 0.89, 0.971]->[0.247, 0.287, 0.894, 0.986]->[0.214, 0.271, 0.818, 0.965]->[0.137, 0.28, 0.834, 0.953]: stand, answer phone.", "At 1058.0s, [0.288, 0.209, 0.853, 0.978]: stand, answer phone, touch (an object).", "From 1056.0s to 1057.0s, [0.329, 0.188, 0.858, 0.979]->[0.351, 0.225, 0.873, 1.0]: stand, answer phone.", "At 1055.0s, [0.32, 0.228, 0.831, 1.0]: stand, answer phone, talk to (e.g., self, a person, a group).", "At 1054.0s, [0.277, 0.184, 0.79, 0.976]: sit, answer phone.", "At 1053.0s, [0.286, 0.196, 0.72, 0.997]: sit, answer phone, carry/hold (an object).", "At 1052.0s, [0.244, 0.217, 0.649, 0.996]: sit, answer phone.", "At 1051.0s, [0.261, 0.176, 0.613, 0.996]: sit, answer phone, hand wave, talk to (e.g., self, a person, a group).", "At 1050.0s, [0.236, 0.221, 0.558, 0.978]: sit, answer phone, touch (an object).", "From 1048.0s to 1049.0s, [0.208, 0.184, 0.497, 0.871]->[0.199, 0.19, 0.517, 0.96]: sit, answer phone."]}]}, {"video": "mfsbYdLx9wE.mkv", "clip": [1089, 1134], "conversations": [{"from": "human", "timestamps": 1119.0, "value": "For the person at bbox [0.135, 0.229, 0.856, 0.96], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.135, 0.229, 0.856, 0.96]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1117.0s to 1118.0s, [0.171, 0.367, 0.478, 0.927]->[0.186, 0.375, 0.446, 0.904]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1116.0s, [0.175, 0.381, 0.478, 0.983]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1134.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": 
"In the current frame, [0.26, 0.199, 0.814, 0.964]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "mfsbYdLx9wE.mkv", "clip": [1117, 1170], "conversations": [{"from": "human", "timestamps": 1147.0, "value": "Describe the actions of the person at bbox [0.138, 0.051, 0.838, 0.971] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.138, 0.051, 0.838, 0.971]: sit, talk to (e.g., self, a person, a group).", "At 1146.0s, [0.106, 0.043, 0.888, 1.0]: sit, sing to (e.g., self, a person, a group), talk to (e.g., self, a person, a group).", "From 1140.0s to 1145.0s, [0.121, 0.051, 0.735, 0.993]->[0.104, 0.061, 0.815, 0.986]->[0.141, 0.052, 0.705, 0.983]->[0.171, 0.051, 0.828, 0.989]->[0.193, 0.053, 0.852, 0.997]->[0.153, 0.057, 0.873, 0.979]: sit, talk to (e.g., self, a person, a group).", "At 1139.0s, [0.158, 0.125, 0.765, 0.99]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1134.0s to 1138.0s, [0.26, 0.199, 0.814, 0.964]->[0.169, 0.171, 0.742, 0.973]->[0.165, 0.155, 0.751, 0.995]->[0.209, 0.135, 0.883, 0.982]->[0.197, 0.086, 0.816, 0.97]: sit, talk to (e.g., self, a person, a group).", "At 1133.0s, [0.336, 0.203, 0.795, 0.936]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1131.0s to 1132.0s, [0.425, 0.263, 0.678, 0.837]->[0.367, 0.219, 0.784, 0.897]: sit, talk to (e.g., self, a person, a group).", "At 1130.0s, [0.434, 0.278, 0.734, 0.837]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1128.0s to 1129.0s, [0.497, 0.338, 0.801, 0.903]->[0.479, 0.298, 0.679, 0.772]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1170.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.079, 0.26, 0.915, 0.99]: stand, answer phone."}]}, {"video": "mfsbYdLx9wE.mkv", "clip": [1127, 1243], "conversations": [{"from": "human", "timestamps": 1157.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.283, 0.051, 0.896, 0.977]: watch (a person), get up, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1243.0, "value": "What actions is the person in bbox [0.081, 0.433, 0.651, 0.992] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.081, 0.433, 0.651, 0.992]: bend/bow (at the waist), sit, watch (a person).", "At 1242.0s, [0.094, 0.448, 0.653, 0.998]: bend/bow (at the waist), talk to (e.g., self, a person, a group)."]}]}, {"video": "mfsbYdLx9wE.mkv", "clip": [1203, 1246], "conversations": [{"from": "human", "timestamps": 1233.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.13, 0.455, 0.436, 1.0]: carry/hold (an object), sit, watch (a person), listen to (a person); [0.341, 0.41, 0.741, 0.911]: listen to (a person), sit, touch (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1246.0, "value": "What actions did the person at bbox [0.079, 0.394, 0.628, 0.992] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.079, 0.394, 0.628, 0.992]: sit, watch (a person).", "At 1245.0s, [0.086, 0.419, 0.657, 0.992]: sit, listen to (a person), watch (a person).", "At 1244.0s, [0.093, 0.427, 0.658, 0.992]: bend/bow (at the waist), listen to (a person), watch (a person).", "At 
1243.0s, [0.081, 0.433, 0.651, 0.992]: bend/bow (at the waist), sit, watch (a person).", "At 1242.0s, [0.094, 0.448, 0.653, 0.998]: bend/bow (at the waist), talk to (e.g., self, a person, a group)."]}]}, {"video": "mfsbYdLx9wE.mkv", "clip": [1358, 1421], "conversations": [{"from": "human", "timestamps": 1388.0, "value": "Describe the actions of the person at bbox [0.319, 0.128, 0.859, 0.944] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.319, 0.128, 0.859, 0.944]: sit, talk to (e.g., self, a person, a group).", "From 1386.0s to 1387.0s, [0.325, 0.109, 0.893, 0.964]->[0.335, 0.167, 0.842, 0.957]: sit, talk to (e.g., self, a person, a group).", "From 1383.0s to 1385.0s, [0.344, 0.151, 0.824, 0.983]->[0.383, 0.081, 0.897, 0.99]->[0.38, 0.089, 0.901, 0.98]: sit, listen to (a person).", "At 1382.0s, [0.333, 0.09, 0.901, 0.969]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1381.0s, [0.309, 0.1, 0.932, 0.978]: sit, talk to (e.g., self, a person, a group).", "From 1378.0s to 1380.0s, [0.294, 0.028, 0.893, 0.979]->[0.28, 0.143, 0.894, 0.975]->[0.257, 0.093, 0.886, 0.97]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1377.0s, [0.274, 0.032, 0.898, 0.98]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1375.0s to 1376.0s, [0.257, 0.019, 0.901, 0.945]->[0.335, 0.007, 0.889, 0.967]: sit, talk to (e.g., self, a person, a group).", "At 1374.0s, [0.257, 0.028, 0.912, 0.969]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1373.0s, [0.278, 0.003, 0.912, 1.0]: sit, talk to (e.g., self, a person, a group).", "At 1372.0s, [0.343, 0.0, 0.885, 0.992]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1371.0s, [0.297, 0.003, 0.902, 0.986]: stand, listen to (a person), watch (a person).", "At 1370.0s, [0.253, 0.04, 0.92, 0.986]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1369.0s, [0.298, 0.0, 0.911, 0.963]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1421.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.065, 0.373, 0.572, 0.996]: listen to (a person), sit, watch (a person); [0.197, 0.115, 0.842, 0.97]: watch (a person), stand, touch (an object), talk to (e.g., self, a person, a group)."}]}, {"video": "mfsbYdLx9wE.mkv", "clip": [1410, 1454], "conversations": [{"from": "human", "timestamps": 1440.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.151, 0.052, 0.75, 0.965]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1454.0, "value": "What actions is the person in bbox [0.378, 0.414, 0.729, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.378, 0.414, 0.729, 1.0]: sit, listen to (a person).", "From 1449.0s to 1453.0s, [0.157, 0.177, 0.722, 0.99]->[0.178, 0.329, 0.701, 1.0]->[0.28, 0.404, 0.671, 0.988]->[0.302, 0.419, 0.649, 0.989]->[0.38, 0.429, 0.714, 1.0]: sit, listen to (a person).", "From 1438.0s to 1448.0s, [0.113, 0.075, 0.723, 0.961]->[0.114, 0.129, 0.752, 0.982]->[0.151, 0.052, 0.75, 0.965]->[0.12, 0.107, 0.741, 1.0]->[0.153, 0.031, 0.799, 0.978]->[0.107, 0.079, 0.762, 1.0]->[0.127, 0.082, 0.787, 0.983]->[0.153, 0.052, 0.756, 0.989]->[0.108, 0.047, 
0.789, 1.0]->[0.131, 0.05, 0.777, 0.985]->[0.128, 0.102, 0.741, 0.975]: sit, talk to (e.g., self, a person, a group).", "At 1437.0s, [0.095, 0.083, 0.747, 0.963]: sit.", "From 1429.0s to 1436.0s, [0.142, 0.124, 0.773, 0.98]->[0.124, 0.086, 0.85, 0.976]->[0.1, 0.056, 0.75, 0.975]->[0.078, 0.054, 0.726, 0.966]->[0.096, 0.07, 0.78, 0.99]->[0.113, 0.078, 0.736, 0.975]->[0.112, 0.072, 0.732, 0.973]->[0.109, 0.056, 0.766, 0.967]: sit, talk to (e.g., self, a person, a group).", "At 1428.0s, [0.071, 0.224, 0.607, 0.98]: sit.", "At 1427.0s, [0.112, 0.455, 0.462, 0.996]: sit, touch (an object), listen to (a person).", "At 1426.0s, [0.096, 0.458, 0.447, 0.973]: sit, touch (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1425.0s, [0.121, 0.381, 0.603, 0.998]: sit, touch (an object), watch (a person)."]}]}, {"video": "mfsbYdLx9wE.mkv", "clip": [1505, 1546], "conversations": [{"from": "human", "timestamps": 1535.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.297, 0.135, 0.528, 0.937]: carry/hold (an object), walk; [0.504, 0.19, 0.734, 0.615]: walk."}, {"from": "human", "timestamps": 1546.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.453, 0.228, 0.729, 0.802]?"}, {"from": "gpt", "value": ["In the current frame, [0.453, 0.228, 0.729, 0.802]: walk, carry/hold (an object).", "At 1545.0s, [0.474, 0.164, 0.723, 0.86]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1543.0s to 1544.0s, [0.48, 0.164, 0.717, 0.861]->[0.489, 0.18, 0.72, 0.852]: stand, carry/hold (an object), watch (a person).", "From 1541.0s to 1542.0s, [0.497, 0.191, 0.725, 0.846]->[0.486, 0.172, 0.722, 0.847]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1540.0s, [0.49, 0.201, 0.728, 0.858]: stand, walk, carry/hold (an object).", "At 1539.0s, [0.503, 0.18, 0.719, 0.849]: stand, carry/hold (an object), watch (a person).", "At 1538.0s, [0.488, 0.191, 0.736, 0.865]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "mfsbYdLx9wE.mkv", "clip": [1513, 1586], "conversations": [{"from": "human", "timestamps": 1543.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.235, 0.152, 0.491, 0.965]: carry/hold (an object), stand, watch (a person); [0.48, 0.164, 0.717, 0.861]: carry/hold (an object), stand, watch (a person)."}, {"from": "human", "timestamps": 1586.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.658, 0.189, 0.903, 0.929]?"}, {"from": "gpt", "value": ["In the current frame, [0.658, 0.189, 0.903, 0.929]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1585.0s, [0.668, 0.178, 0.906, 0.972]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1584.0s, [0.306, 0.094, 0.9, 0.996]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1583.0s, [0.367, 0.138, 0.906, 0.992]: stand, carry/hold (an object)."]}]}, {"video": "mfsbYdLx9wE.mkv", "clip": [1542, 1582], "conversations": [{"from": "human", "timestamps": 1572.0, "value": "Describe the actions of the person at bbox [0.64, 0.181, 0.907, 0.975] in the current frame and the previous frames."}, {"from": 
"gpt", "value": ["At Now, [0.64, 0.181, 0.907, 0.975]: stand, carry/hold (an object), watch (a person).", "At 1571.0s, [0.678, 0.339, 0.891, 0.979]: walk, carry/hold (an object).", "At 1570.0s, [0.6, 0.189, 0.835, 0.823]: walk, carry/hold (an object), watch (a person).", "At 1569.0s, [0.485, 0.165, 0.83, 0.783]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1568.0s, [0.492, 0.155, 0.79, 0.788]: stand, carry/hold (an object), take (an object) from (a person), watch (a person).", "At 1567.0s, [0.521, 0.179, 0.868, 0.781]: stand, carry/hold (an object), watch (a person).", "At 1566.0s, [0.437, 0.133, 0.89, 0.791]: stand, touch (an object), watch (a person).", "From 1563.0s to 1564.0s, [0.421, 0.19, 0.843, 0.766]->[0.456, 0.201, 0.852, 0.77]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1562.0s, [0.498, 0.18, 0.841, 0.789]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1561.0s, [0.502, 0.17, 0.853, 0.776]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1560.0s, [0.502, 0.174, 0.847, 0.785]: stand, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1559.0s, [0.503, 0.181, 0.826, 0.779]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1557.0s to 1558.0s, [0.561, 0.167, 0.896, 0.83]->[0.506, 0.177, 0.872, 0.792]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1556.0s, [0.554, 0.161, 0.895, 0.831]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1555.0s, [0.544, 0.171, 0.883, 0.845]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1554.0s, [0.547, 0.19, 0.883, 0.79]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1553.0s, [0.515, 0.158, 0.85, 0.748]: stand, touch (an object), listen to (a person), watch (a person).", "At 1552.0s, [0.45, 0.182, 0.78, 0.682]: stand, listen to (a person), watch (a person).", "At 1551.0s, [0.5, 0.229, 0.817, 0.663]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1550.0s, [0.579, 0.16, 0.842, 0.673]: stand, listen to (a person), watch (a person).", "From 1548.0s to 1549.0s, [0.555, 0.184, 0.768, 0.685]->[0.497, 0.175, 0.831, 0.66]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1547.0s, [0.508, 0.182, 0.83, 0.731]: stand, listen to (a person), watch (a person).", "At 1546.0s, [0.453, 0.228, 0.729, 0.802]: walk, carry/hold (an object).", "At 1545.0s, [0.474, 0.164, 0.723, 0.86]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1543.0s to 1544.0s, [0.48, 0.164, 0.717, 0.861]->[0.489, 0.18, 0.72, 0.852]: stand, carry/hold (an object), watch (a person).", "From 1541.0s to 1542.0s, [0.497, 0.191, 0.725, 0.846]->[0.486, 0.172, 0.722, 0.847]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1540.0s, [0.49, 0.201, 0.728, 0.858]: stand, walk, carry/hold (an object).", "At 1539.0s, [0.503, 0.18, 0.719, 0.849]: stand, carry/hold (an object), watch (a person).", "At 1538.0s, [0.488, 0.191, 0.736, 0.865]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1582.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the 
current frame, [0.063, 0.041, 0.651, 0.989]: watch (a person), stand."}]}, {"video": "mfsbYdLx9wE.mkv", "clip": [1593, 1665], "conversations": [{"from": "human", "timestamps": 1623.0, "value": "What actions did the person at bbox [0.082, 0.028, 0.407, 0.983] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.082, 0.028, 0.407, 0.983]: stand, listen to (a person), watch (a person).", "At 1622.0s, [0.12, 0.049, 0.43, 0.974]: stand, point to (an object), touch (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1621.0s, [0.11, 0.048, 0.397, 0.957]: stand, point to (an object), talk to (e.g., self, a person, a group).", "From 1618.0s to 1620.0s, [0.081, 0.04, 0.429, 0.975]->[0.081, 0.017, 0.421, 0.975]->[0.089, 0.034, 0.412, 0.984]: stand, listen to (a person), watch (a person).", "From 1616.0s to 1617.0s, [0.365, 0.004, 0.901, 0.99]->[0.357, 0.0, 0.906, 0.992]: stand.", "At 1614.0s, [0.317, 0.004, 0.904, 0.994]: stand."]}, {"from": "human", "timestamps": 1665.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.363, 0.129, 0.922, 0.999]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "mfsbYdLx9wE.mkv", "clip": [1600, 1736], "conversations": [{"from": "human", "timestamps": 1630.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.06, 0.039, 0.332, 0.979]: stand; [0.368, 0.158, 0.826, 0.965]: carry/hold (an object), stand, watch (a person)."}, {"from": "human", "timestamps": 1736.0, "value": "Describe the actions of the person at bbox [0.435, 0.162, 0.847, 0.99] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.435, 0.162, 0.847, 0.99]: stand, lift/pick up, push (an object).", "At 1735.0s, [0.488, 0.158, 0.907, 0.99]: stand, touch (an object).", "At 1734.0s, [0.314, 0.173, 0.717, 0.992]: stand, carry/hold (an object)."]}]}, {"video": "mfsbYdLx9wE.mkv", "clip": [1672, 1718], "conversations": [{"from": "human", "timestamps": 1702.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.172, 0.169, 0.369, 0.596]: walk."}, {"from": "human", "timestamps": 1718.0, "value": "Describe the actions of the person at bbox [0.171, 0.102, 0.64, 0.983] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.171, 0.102, 0.64, 0.983]: stand, touch (an object).", "At 1717.0s, [0.163, 0.104, 0.621, 0.95]: stand, carry/hold (an object)."]}]}, {"video": "mfsbYdLx9wE.mkv", "clip": [1714, 1755], "conversations": [{"from": "human", "timestamps": 1744.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.46, 0.181, 0.857, 0.994]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1755.0, "value": "Describe the actions of the person at bbox [0.358, 0.205, 0.712, 0.972] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.358, 0.205, 0.712, 0.972]: bend/bow (at the waist), carry/hold (an object).", "At 1754.0s, [0.38, 0.22, 0.689, 0.954]: carry/hold (an object)."]}]}, {"video": "miB-wo2PfLI.mkv", "clip": [949, 1013], "conversations": [{"from": "human", "timestamps": 979.0, "value": "What actions is each person performing, and where are they positioned in the 
current frame?"}, {"from": "gpt", "value": "In the current frame, [0.004, 0.33, 0.518, 0.996]: listen to (a person), sit, touch (an object), watch (a person); [0.085, 0.261, 0.404, 0.814]: listen to (a person), sit, touch (an object), watch (a person); [0.359, 0.233, 0.748, 0.811]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.632, 0.268, 0.885, 0.78]: listen to (a person), sit, watch (a person); [0.77, 0.295, 0.994, 0.956]: listen to (a person), sit, touch (an object), watch (a person)."}, {"from": "human", "timestamps": 1013.0, "value": "What actions is the person in bbox [0.016, 0.32, 0.39, 0.99] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.016, 0.32, 0.39, 0.99]: sit, touch (an object), listen to (a person), watch (a person).", "At 1012.0s, [0.013, 0.309, 0.379, 0.992]: touch (an object), listen to (a person), watch (a person).", "At 1011.0s, [0.015, 0.271, 0.399, 0.992]: stand, touch (an object), listen to (a person).", "From 1008.0s to 1010.0s, [0.015, 0.33, 0.395, 0.982]->[0.013, 0.274, 0.396, 0.992]->[0.016, 0.274, 0.361, 0.983]: sit, touch (an object), listen to (a person), watch (a person).", "At 1007.0s, [0.019, 0.34, 0.524, 1.0]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1005.0s to 1006.0s, [0.014, 0.456, 0.383, 0.998]->[0.022, 0.245, 0.425, 1.0]: sit, touch (an object), listen to (a person), watch (a person)."]}]}, {"video": "miB-wo2PfLI.mkv", "clip": [998, 1058], "conversations": [{"from": "human", "timestamps": 1028.0, "value": "Describe the actions of the person at bbox [0.339, 0.178, 0.96, 0.978] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.339, 0.178, 0.96, 0.978]: sit, touch (an object).", "At 1027.0s, [0.436, 0.447, 0.947, 0.996]: sit, touch (an object), watch (a person)."]}, {"from": "human", "timestamps": 1058.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.007, 0.438, 0.205, 0.988]: listen to (a person), sit; [0.092, 0.277, 0.301, 0.895]: listen to (a person), stand, watch (a person); [0.252, 0.513, 0.645, 0.99]: listen to (a person), sit, touch (an object), watch (a person); [0.383, 0.409, 0.718, 0.775]: watch (a person), sit, touch (an object), talk to (e.g., self, a person, a group); [0.623, 0.243, 0.866, 0.705]: listen to (a person), stand, touch (an object); [0.739, 0.246, 0.992, 0.984]: listen to (a person), sit, watch (a person)."}]}, {"video": "miB-wo2PfLI.mkv", "clip": [1051, 1100], "conversations": [{"from": "human", "timestamps": 1081.0, "value": "What actions did the person at bbox [0.114, 0.102, 0.733, 0.99] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.114, 0.102, 0.733, 0.99]: stand, talk to (e.g., self, a person, a group).", "From 1076.0s to 1080.0s, [0.287, 0.219, 0.851, 0.993]->[0.119, 0.14, 0.766, 0.997]->[0.106, 0.13, 0.745, 0.991]->[0.09, 0.124, 0.752, 0.995]->[0.081, 0.11, 0.757, 0.987]: stand, talk to (e.g., self, a person, a group).", "At 1075.0s, [0.217, 0.115, 0.959, 0.987]: touch (an object), get up, talk to (e.g., self, a person, a group).", "At 1074.0s, [0.159, 0.158, 1.0, 1.0]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1073.0s, [0.18, 0.157, 0.871, 0.988]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1100.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", 
"value": "At now, [0.232, 0.145, 0.949, 0.982]: sit, touch (an object), talk to (e.g., self, a person, a group)."}]}, {"video": "miB-wo2PfLI.mkv", "clip": [1152, 1253], "conversations": [{"from": "human", "timestamps": 1182.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.453, 0.563, 0.471, 0.627]: walk; [0.496, 0.557, 0.514, 0.626]: hand wave, run/jog."}, {"from": "human", "timestamps": 1253.0, "value": "What actions did the person at bbox [0.778, 0.106, 0.988, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.778, 0.106, 0.988, 1.0]: stand, listen to (a person), watch (a person).", "From 1248.0s to 1252.0s, [0.764, 0.2, 0.989, 0.974]->[0.727, 0.159, 0.981, 0.989]->[0.68, 0.151, 0.988, 0.983]->[0.652, 0.161, 0.978, 0.998]->[0.675, 0.117, 0.977, 0.99]: stand, listen to (a person), watch (a person).", "At 1247.0s, [0.82, 0.413, 0.988, 0.971]: bend/bow (at the waist), listen to (a person), watch (a person)."]}]}, {"video": "miB-wo2PfLI.mkv", "clip": [1170, 1203], "conversations": [{"from": "human", "timestamps": 1200.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.013, 0.271, 0.23, 0.981]: walk; [0.558, 0.156, 0.811, 0.987]: hand wave, stand, talk to (e.g., self, a person, a group); [0.756, 0.247, 0.987, 0.989]: hand wave, listen to (a person), walk."}, {"from": "human", "timestamps": 1203.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.513, 0.185, 0.762, 0.968]?"}, {"from": "gpt", "value": ["At Now, [0.513, 0.185, 0.762, 0.968]: stand.", "At 1202.0s, [0.524, 0.141, 0.771, 0.977]: stand.", "At 1201.0s, [0.532, 0.169, 0.785, 0.981]: stand, hand wave.", "At 1200.0s, [0.558, 0.156, 0.811, 0.987]: stand, hand wave, talk to (e.g., self, a person, a group).", "At 1199.0s, [0.567, 0.124, 0.82, 0.994]: stand.", "At 1198.0s, [0.571, 0.172, 0.778, 0.928]: walk, dress/put on clothing.", "At 1197.0s, [0.108, 0.578, 0.185, 0.796]: walk.", "At 1196.0s, [0.155, 0.573, 0.219, 0.797]: stand, watch (a person).", "At 1195.0s, [0.2, 0.574, 0.291, 0.788]: stand.", "At 1194.0s, [0.235, 0.572, 0.3, 0.781]: walk.", "At 1193.0s, [0.234, 0.571, 0.289, 0.785]: stand.", "At 1192.0s, [0.255, 0.563, 0.304, 0.787]: walk.", "At 1191.0s, [0.118, 0.578, 0.183, 0.79]: stand, carry/hold (an object), watch (a person).", "At 1190.0s, [0.121, 0.573, 0.172, 0.781]: stand.", "From 1187.0s to 1189.0s, [0.259, 0.551, 0.288, 0.642]->[0.16, 0.549, 0.205, 0.706]->[0.118, 0.559, 0.182, 0.776]: walk."]}]}, {"video": "miB-wo2PfLI.mkv", "clip": [1238, 1276], "conversations": [{"from": "human", "timestamps": 1268.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.013, 0.122, 0.17, 0.775]?"}, {"from": "gpt", "value": ["At Now, [0.013, 0.122, 0.17, 0.775]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1267.0s, [0.019, 0.125, 0.194, 0.766]: walk, carry/hold (an object), listen to (a person).", "At 1266.0s, [0.054, 0.103, 0.326, 0.753]: walk, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1276.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.29, 0.399, 0.608, 0.747]: listen to (a person), sit, watch (a person); [0.295, 0.185, 0.478, 0.592]: watch (a person), stand; [0.556, 0.342, 0.794, 0.993]: listen 
to (a person), sit, hug (a person), watch (a person); [0.63, 0.187, 0.755, 0.505]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "miB-wo2PfLI.mkv", "clip": [1252, 1376], "conversations": [{"from": "human", "timestamps": 1282.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.259, 0.406, 0.537, 0.764]: listen to (a person), sit, watch (a person); [0.303, 0.195, 0.554, 0.594]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.506, 0.394, 0.666, 0.746]: listen to (a person), sit, hug (a person), watch (a person); [0.623, 0.18, 0.742, 0.483]: listen to (a person), stand, watch (a person); [0.643, 0.121, 0.983, 0.977]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1376.0, "value": "What actions did the person at bbox [0.211, 0.017, 1.0, 0.984] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.211, 0.017, 1.0, 0.984]: sit, ride (e.g., a bike, a car, a horse).", "At 1375.0s, [0.151, 0.0, 1.0, 0.98]: sit, carry/hold (an object), ride (e.g., a bike, a car, a horse).", "At 1374.0s, [0.265, 0.009, 0.984, 0.983]: sit, carry/hold (an object)."]}]}, {"video": "miB-wo2PfLI.mkv", "clip": [1381, 1433], "conversations": [{"from": "human", "timestamps": 1411.0, "value": "What actions is the person in bbox [0.811, 0.524, 0.879, 0.684] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.811, 0.524, 0.879, 0.684]: sit, ride (e.g., a bike, a car, a horse).", "At 1410.0s, [0.802, 0.54, 0.881, 0.705]: sit, ride (e.g., a bike, a car, a horse)."]}, {"from": "human", "timestamps": 1433.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.231, 0.417, 0.464, 1.0]: carry/hold (an object), sit; [0.603, 0.126, 0.993, 1.0]: walk."}]}, {"video": "miB-wo2PfLI.mkv", "clip": [1391, 1488], "conversations": [{"from": "human", "timestamps": 1421.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.278, 0.408, 0.368, 0.574]: ride (e.g., a bike, a car, a horse), sit; [0.284, 0.513, 0.58, 0.885]: ride (e.g., a bike, a car, a horse), sit, talk to (e.g., self, a person, a group); [0.433, 0.378, 0.614, 0.639]: ride (e.g., a bike, a car, a horse), sit, watch (a person), listen to (a person), talk to (e.g., self, a person, a group); [0.587, 0.301, 0.815, 0.853]: listen to (a person), sit, drive (e.g., a car, a truck)."}, {"from": "human", "timestamps": 1488.0, "value": "What actions did the person at bbox [0.767, 0.242, 0.99, 0.92] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.767, 0.242, 0.99, 0.92]: run/jog.", "From 1485.0s to 1487.0s, [0.621, 0.289, 0.76, 0.695]->[0.659, 0.314, 0.822, 0.852]->[0.674, 0.306, 0.894, 0.993]: run/jog.", "From 1483.0s to 1484.0s, [0.785, 0.27, 0.852, 0.476]->[0.666, 0.269, 0.768, 0.605]: run/jog, watch (a person)."]}]}, {"video": "miB-wo2PfLI.mkv", "clip": [1474, 1544], "conversations": [{"from": "human", "timestamps": 1504.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.008, 0.339, 0.306, 0.988]: stand, touch (an object); [0.155, 0.151, 0.472, 0.983]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.446, 0.226, 0.891, 1.0]: carry/hold (an object), bend/bow (at the waist)."}, {"from": "human", 
"timestamps": 1544.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.335, 0.418, 0.581, 0.882]?"}, {"from": "gpt", "value": ["At Now, [0.335, 0.418, 0.581, 0.882]: stand, listen to (a person).", "At 1543.0s, [0.349, 0.417, 0.562, 0.887]: stand, listen to (a person).", "At 1542.0s, [0.344, 0.424, 0.569, 0.867]: stand, watch (a person)."]}]}, {"video": "miB-wo2PfLI.mkv", "clip": [1541, 1605], "conversations": [{"from": "human", "timestamps": 1571.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.373, 0.355, 0.512, 0.826]?"}, {"from": "gpt", "value": ["In the current frame, [0.373, 0.355, 0.512, 0.826]: walk, carry/hold (an object).", "At 1570.0s, [0.383, 0.366, 0.486, 0.577]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1569.0s, [0.322, 0.354, 0.472, 0.551]: walk, touch (an object), listen to (a person).", "At 1568.0s, [0.294, 0.344, 0.403, 0.547]: walk.", "At 1567.0s, [0.278, 0.351, 0.372, 0.602]: stand."]}, {"from": "human", "timestamps": 1605.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.472, 0.338, 0.671, 0.863]: watch (a person), talk to (e.g., self, a person, a group), walk; [0.601, 0.366, 0.672, 0.72]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.663, 0.395, 0.759, 0.777]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.74, 0.398, 0.842, 0.787]: listen to (a person), stand, watch (a person); [0.847, 0.381, 0.902, 0.508]: carry/hold (an object), walk."}]}, {"video": "miB-wo2PfLI.mkv", "clip": [1615, 1655], "conversations": [{"from": "human", "timestamps": 1645.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.183, 0.415, 0.358, 0.728]: sit, touch (an object), talk to (e.g., self, a person, a group); [0.302, 0.141, 0.518, 0.652]: carry/hold (an object), stand, listen to (a person); [0.402, 0.402, 0.588, 0.671]: listen to (a person), sit, touch (an object); [0.742, 0.409, 0.949, 0.67]: listen to (a person), sit, touch (an object)."}, {"from": "human", "timestamps": 1655.0, "value": "For the person at bbox [0.748, 0.396, 0.95, 0.661], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.748, 0.396, 0.95, 0.661]: sit, touch (an object), listen to (a person).", "At 1654.0s, [0.743, 0.393, 0.946, 0.663]: touch (an object), listen to (a person).", "At 1653.0s, [0.74, 0.388, 0.957, 0.665]: sit, touch (an object), listen to (a person), watch (a person).", "At 1652.0s, [0.742, 0.379, 0.948, 0.666]: sit, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1649.0s to 1651.0s, [0.746, 0.401, 0.951, 0.667]->[0.74, 0.389, 0.946, 0.67]->[0.739, 0.377, 0.948, 0.667]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1647.0s to 1648.0s, [0.746, 0.402, 0.948, 0.664]->[0.748, 0.406, 0.95, 0.666]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1646.0s, [0.748, 0.405, 0.948, 0.661]: stand, touch (an object).", "From 1643.0s to 1645.0s, [0.745, 0.402, 0.95, 0.668]->[0.743, 0.405, 0.951, 0.661]->[0.742, 0.409, 0.949, 0.67]: sit, touch (an object), listen to (a person).", "At 1642.0s, [0.74, 0.4, 0.956, 0.659]: sit, touch (an object).", "At 1641.0s, [0.747, 0.397, 
0.953, 0.654]: sit, touch (an object), listen to (a person).", "At 1640.0s, [0.802, 0.388, 0.982, 0.675]: sit, touch (an object)."]}]}, {"video": "miB-wo2PfLI.mkv", "clip": [1646, 1683], "conversations": [{"from": "human", "timestamps": 1676.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.168, 0.411, 0.356, 0.708]: listen to (a person), sit, touch (an object), drink; [0.277, 0.261, 0.47, 0.66]: talk to (e.g., self, a person, a group), bend/bow (at the waist); [0.41, 0.397, 0.587, 0.668]: listen to (a person), sit, touch (an object); [0.744, 0.406, 0.946, 0.677]: listen to (a person), sit, touch (an object)."}, {"from": "human", "timestamps": 1683.0, "value": "What actions did the person at bbox [0.191, 0.167, 0.996, 0.971] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.191, 0.167, 0.996, 0.971]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1682.0s, [0.19, 0.256, 0.979, 0.981]: sit, talk to (e.g., self, a person, a group).", "From 1679.0s to 1681.0s, [0.331, 0.309, 0.96, 0.987]->[0.447, 0.308, 0.99, 0.987]->[0.335, 0.273, 0.969, 0.987]: sit, listen to (a person).", "At 1678.0s, [0.41, 0.253, 0.987, 0.985]: sit, listen to (a person), watch (a person).", "From 1673.0s to 1677.0s, [0.425, 0.392, 0.587, 0.671]->[0.422, 0.393, 0.587, 0.672]->[0.417, 0.398, 0.587, 0.669]->[0.41, 0.397, 0.587, 0.668]->[0.311, 0.205, 0.984, 0.976]: sit, touch (an object), listen to (a person).", "At 1672.0s, [0.416, 0.394, 0.586, 0.67]: sit, touch (an object).", "From 1668.0s to 1671.0s, [0.393, 0.384, 0.593, 0.669]->[0.387, 0.389, 0.591, 0.67]->[0.422, 0.393, 0.59, 0.673]->[0.396, 0.392, 0.59, 0.674]: sit, touch (an object), listen to (a person)."]}]}, {"video": "miB-wo2PfLI.mkv", "clip": [1662, 1696], "conversations": [{"from": "human", "timestamps": 1692.0, "value": "For the person at bbox [0.29, 0.338, 0.5, 0.957], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.29, 0.338, 0.5, 0.957]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1690.0s to 1691.0s, [0.095, 0.231, 0.514, 0.982]->[0.24, 0.323, 0.502, 0.938]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1696.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.305, 0.356, 0.482, 0.903]: carry/hold (an object), listen to (a person), walk; [0.518, 0.401, 0.697, 0.946]: watch (a person), stand, touch (an object), talk to (e.g., self, a person, a group)."}]}, {"video": "mkcDANJjDcM.mkv", "clip": [899, 931], "conversations": [{"from": "human", "timestamps": 929.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.042, 0.041, 0.65, 0.982]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.537, 0.18, 1.0, 0.989]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 931.0, "value": "What actions is the person in bbox [0.006, 0.08, 0.664, 0.98] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.006, 0.08, 0.664, 0.98]: stand, talk to (e.g., self, a person, a group).", "At 930.0s, [0.04, 0.046, 0.659, 
0.979]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 928.0s to 929.0s, [0.053, 0.04, 0.655, 0.968]->[0.042, 0.041, 0.65, 0.982]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 927.0s, [0.046, 0.039, 0.635, 0.979]: stand, listen to (a person), watch (a person)."]}]}, {"video": "mkcDANJjDcM.mkv", "clip": [898, 987], "conversations": [{"from": "human", "timestamps": 928.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.053, 0.04, 0.655, 0.968]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.464, 0.176, 0.996, 0.975]: listen to (a person), stand, watch (a person); [0.491, 0.304, 0.642, 0.767]: sit."}, {"from": "human", "timestamps": 987.0, "value": "What actions is the person in bbox [0.071, 0.0, 0.768, 0.983] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.071, 0.0, 0.768, 0.983]: stand, watch (a person).", "From 985.0s to 986.0s, [0.163, 0.0, 0.857, 0.99]->[0.085, 0.0, 0.746, 0.992]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 984.0s, [0.2, 0.044, 0.839, 0.989]: walk, listen to (a person), talk to (e.g., self, a person, a group).", "From 982.0s to 983.0s, [0.331, 0.057, 0.928, 0.981]->[0.396, 0.008, 1.0, 0.99]: walk, talk to (e.g., self, a person, a group).", "From 980.0s to 981.0s, [0.258, 0.022, 0.711, 0.988]->[0.062, 0.032, 0.608, 0.981]: walk, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "mkcDANJjDcM.mkv", "clip": [1026, 1079], "conversations": [{"from": "human", "timestamps": 1056.0, "value": "Describe the actions of the person at bbox [0.347, 0.075, 0.807, 0.989] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.347, 0.075, 0.807, 0.989]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1055.0s, [0.373, 0.052, 0.753, 0.975]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1079.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.163, 0.198, 0.272, 0.739]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.276, 0.233, 0.364, 0.74]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.356, 0.05, 0.607, 0.962]: carry/hold (an object), stand, watch (a person), answer phone."}]}, {"video": "mkcDANJjDcM.mkv", "clip": [992, 1130], "conversations": [{"from": "human", "timestamps": 1022.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.0, 0.009, 0.367, 0.981]: listen to (a person), stand, watch (a person); [0.225, 0.089, 0.976, 0.985]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1130.0, "value": "For the person at bbox [0.271, 0.157, 0.516, 0.975], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.271, 0.157, 0.516, 0.975]: stand, touch (an object), listen to (a person), watch (a person).", "At 1129.0s, [0.273, 0.175, 0.518, 0.989]: stand, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "At 1128.0s, [0.106, 
0.047, 0.74, 0.993]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1126.0s to 1127.0s, [0.113, 0.061, 0.734, 1.0]->[0.093, 0.055, 0.745, 0.98]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1125.0s, [0.152, 0.137, 0.482, 0.964]: stand, watch (a person).", "From 1122.0s to 1124.0s, [0.159, 0.153, 0.458, 0.989]->[0.165, 0.144, 0.486, 0.982]->[0.178, 0.149, 0.473, 0.98]: stand, listen to (a person), watch (a person)."]}]}, {"video": "mkcDANJjDcM.mkv", "clip": [1056, 1107], "conversations": [{"from": "human", "timestamps": 1086.0, "value": "What actions is the person in bbox [0.487, 0.04, 0.86, 0.987] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.487, 0.04, 0.86, 0.987]: stand, answer phone.", "At 1085.0s, [0.425, 0.184, 0.85, 0.981]: stand, answer phone, talk to (e.g., self, a person, a group), watch (a person).", "At 1084.0s, [0.633, 0.002, 1.0, 0.969]: stand, answer phone, listen to (a person), watch (a person).", "At 1082.0s, [0.299, 0.157, 0.909, 0.967]: stand, answer phone, carry/hold (an object), give/serve (an object) to (a person), watch (a person).", "At 1081.0s, [0.378, 0.127, 0.742, 0.974]: stand, answer phone, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1107.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.227, 0.064, 0.533, 0.984]: touch (an object), watch (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.616, 0.008, 0.914, 1.0]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "mkcDANJjDcM.mkv", "clip": [1118, 1188], "conversations": [{"from": "human", "timestamps": 1148.0, "value": "What actions did the person at bbox [0.496, 0.041, 0.94, 0.982] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.496, 0.041, 0.94, 0.982]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1143.0s to 1147.0s, [0.415, 0.031, 0.918, 0.991]->[0.465, 0.038, 0.923, 0.985]->[0.46, 0.04, 0.906, 0.986]->[0.463, 0.039, 0.911, 0.979]->[0.473, 0.044, 0.928, 0.98]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1142.0s, [0.624, 0.0, 1.0, 0.993]: stand, watch (a person).", "At 1141.0s, [0.618, 0.0, 1.0, 1.0]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1140.0s, [0.63, 0.0, 1.0, 0.999]: stand, listen to (a person), watch (a person).", "At 1139.0s, [0.608, 0.0, 1.0, 1.0]: stand, watch (a person).", "From 1137.0s to 1138.0s, [0.635, 0.0, 1.0, 0.99]->[0.611, 0.0, 1.0, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1136.0s, [0.625, 0.0, 1.0, 0.992]: stand, listen to (a person), watch (a person).", "From 1132.0s to 1135.0s, [0.621, 0.009, 1.0, 0.996]->[0.628, 0.0, 1.0, 0.998]->[0.621, 0.0, 1.0, 1.0]->[0.442, 0.033, 0.894, 0.979]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1130.0s to 1131.0s, [0.508, 0.025, 0.81, 0.979]->[0.518, 0.039, 0.797, 0.985]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1128.0s to 1129.0s, [0.563, 0.0, 1.0, 0.99]->[0.519, 0.045, 0.76, 0.985]: stand, text on/look at a cellphone, talk to (e.g., self, a person, a group).", "At 1127.0s, 
[0.581, 0.011, 1.0, 1.0]: stand, text on/look at a cellphone, listen to (a person), watch (a person).", "At 1126.0s, [0.545, 0.0, 1.0, 0.996]: stand, text on/look at a cellphone, listen to (a person)."]}, {"from": "human", "timestamps": 1188.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.266, 0.294, 0.98]: watch (a person), listen to (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.243, 0.172, 0.573, 0.981]: listen to (a person), stand, watch (a person); [0.467, 0.018, 0.993, 0.98]: listen to (a person), stand, watch (a person)."}]}, {"video": "mkcDANJjDcM.mkv", "clip": [1189, 1250], "conversations": [{"from": "human", "timestamps": 1219.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.065, 0.036, 0.287, 0.783]: watch (a person), stand; [0.416, 0.122, 0.733, 0.992]: watch (a person), walk; [0.679, 0.14, 0.841, 0.994]: carry/hold (an object), listen to (a person), walk."}, {"from": "human", "timestamps": 1250.0, "value": "What actions did the person at bbox [0.621, 0.107, 0.782, 0.985] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.621, 0.107, 0.782, 0.985]: stand.", "At 1249.0s, [0.627, 0.022, 0.885, 0.995]: walk."]}]}, {"video": "mkcDANJjDcM.mkv", "clip": [1262, 1343], "conversations": [{"from": "human", "timestamps": 1292.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.507, 0.082, 0.687, 0.878]: stand, answer phone; [0.709, 0.322, 0.756, 0.59]: watch (a person), stand, talk to (e.g., self, a person, a group), drink."}, {"from": "human", "timestamps": 1343.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.58, 0.251, 0.787, 0.98]?"}, {"from": "gpt", "value": ["In the current frame, [0.58, 0.251, 0.787, 0.98]: stand, listen to (a person), watch (a person).", "From 1340.0s to 1342.0s, [0.556, 0.257, 0.788, 0.982]->[0.573, 0.262, 0.786, 0.975]->[0.585, 0.26, 0.789, 0.983]: stand, listen to (a person), watch (a person)."]}]}, {"video": "mkcDANJjDcM.mkv", "clip": [1298, 1359], "conversations": [{"from": "human", "timestamps": 1328.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.361, 0.368, 0.469, 0.917]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1359.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.576, 0.03, 1.0, 0.964]?"}, {"from": "gpt", "value": ["At Now, [0.576, 0.03, 1.0, 0.964]: stand, listen to (a person), watch (a person).", "From 1357.0s to 1358.0s, [0.452, 0.002, 0.997, 0.99]->[0.542, 0.034, 0.998, 0.947]: stand, listen to (a person), watch (a person).", "From 1355.0s to 1356.0s, [0.466, 0.085, 0.705, 0.991]->[0.15, 0.137, 0.384, 0.982]: walk.", "At 1354.0s, [0.439, 0.073, 0.784, 0.99]: walk, talk to (e.g., self, a person, a group).", "At 1353.0s, [0.419, 0.058, 0.786, 0.991]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1352.0s, [0.405, 0.037, 0.775, 0.98]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1348.0s to 1351.0s, [0.396, 0.061, 0.784, 0.98]->[0.405, 0.067, 0.766, 0.992]->[0.412, 0.054, 0.779, 0.988]->[0.406, 0.056, 0.776, 0.991]: stand, talk to (e.g., self, a person, a group), 
watch (a person).", "From 1346.0s to 1347.0s, [0.426, 0.065, 0.72, 0.978]->[0.414, 0.073, 0.748, 0.987]: stand, hand shake, talk to (e.g., self, a person, a group), watch (a person).", "At 1345.0s, [0.386, 0.091, 0.689, 0.999]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1344.0s, [0.399, 0.1, 0.655, 0.993]: stand, listen to (a person), watch (a person).", "From 1342.0s to 1343.0s, [0.415, 0.079, 0.703, 0.981]->[0.396, 0.101, 0.656, 0.981]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1340.0s to 1341.0s, [0.387, 0.091, 0.627, 0.997]->[0.385, 0.149, 0.665, 0.905]: walk, listen to (a person), watch (a person)."]}]}, {"video": "mkcDANJjDcM.mkv", "clip": [1383, 1436], "conversations": [{"from": "human", "timestamps": 1413.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.024, 0.001, 0.553, 0.955]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.406, 0.033, 0.828, 0.993]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1436.0, "value": "Describe the actions of the person at bbox [0.209, 0.07, 0.409, 0.994] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.209, 0.07, 0.409, 0.994]: walk.", "From 1433.0s to 1435.0s, [0.121, 0.134, 0.245, 0.729]->[0.172, 0.123, 0.295, 0.772]->[0.205, 0.099, 0.367, 0.857]: walk.", "From 1431.0s to 1432.0s, [0.102, 0.144, 0.182, 0.713]->[0.114, 0.14, 0.188, 0.717]: stand, watch (a person)."]}]}, {"video": "mkcDANJjDcM.mkv", "clip": [1385, 1495], "conversations": [{"from": "human", "timestamps": 1415.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.197, 0.027, 0.739, 0.975]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.536, 0.006, 1.0, 0.962]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1495.0, "value": "For the person at bbox [0.183, 0.042, 0.769, 0.858], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.183, 0.042, 0.769, 0.858]: sit, answer phone.", "From 1493.0s to 1494.0s, [0.116, 0.019, 0.713, 0.939]->[0.125, 0.015, 0.733, 0.926]: sit, answer phone."]}]}, {"video": "mkcDANJjDcM.mkv", "clip": [1412, 1457], "conversations": [{"from": "human", "timestamps": 1442.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.446, 0.172, 0.59, 0.948]?"}, {"from": "gpt", "value": ["At Now, [0.446, 0.172, 0.59, 0.948]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1441.0s, [0.446, 0.168, 0.586, 0.962]: stand, watch (a person).", "At 1440.0s, [0.437, 0.174, 0.578, 0.955]: stand, carry/hold (an object), watch (a person)."]}, {"from": "human", "timestamps": 1457.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.268, 0.046, 0.433, 0.941]: carry/hold (an object), watch (a person), talk to (e.g., self, a person, a group), walk; [0.417, 0.048, 0.635, 0.996]: carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), walk."}]}, {"video": "mkcDANJjDcM.mkv", "clip": [1494, 1567], "conversations": [{"from": "human", "timestamps": 1524.0, "value": "Where is each person positioned, and what actions are they performing in the 
current frame?"}, {"from": "gpt", "value": "In the current frame, [0.223, 0.068, 0.548, 0.98]: stand, answer phone."}, {"from": "human", "timestamps": 1567.0, "value": "What actions is the person in bbox [0.626, 0.04, 1.0, 0.985] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.626, 0.04, 1.0, 0.985]: stand, listen to (a person).", "At 1566.0s, [0.617, 0.052, 1.0, 0.992]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "From 1564.0s to 1565.0s, [0.61, 0.007, 1.0, 0.999]->[0.593, 0.063, 1.0, 0.986]: stand, listen to (a person).", "From 1562.0s to 1563.0s, [0.337, 0.072, 0.884, 0.979]->[0.338, 0.069, 0.883, 0.978]: stand, watch (a person)."]}]}, {"video": "mkcDANJjDcM.mkv", "clip": [1554, 1601], "conversations": [{"from": "human", "timestamps": 1584.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.017, 0.0, 0.682, 0.989]: listen to (a person), stand, watch (a person); [0.642, 0.187, 1.0, 0.995]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1601.0, "value": "What actions did the person at bbox [0.403, 0.076, 0.88, 0.972] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.403, 0.076, 0.88, 0.972]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1594.0s to 1600.0s, [0.394, 0.028, 0.88, 0.978]->[0.387, 0.062, 0.85, 0.98]->[0.397, 0.072, 0.858, 0.985]->[0.384, 0.054, 0.861, 0.983]->[0.379, 0.05, 0.877, 0.979]->[0.39, 0.076, 0.876, 0.987]->[0.329, 0.075, 0.841, 0.974]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "mkcDANJjDcM.mkv", "clip": [1588, 1647], "conversations": [{"from": "human", "timestamps": 1618.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.012, 0.0, 0.668, 0.995]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.629, 0.197, 1.0, 0.986]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1647.0, "value": "What actions did the person at bbox [0.0, 0.04, 0.408, 0.952] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.04, 0.408, 0.952]: stand, listen to (a person), watch (a person).", "From 1643.0s to 1646.0s, [0.007, 0.019, 0.374, 0.957]->[0.012, 0.017, 0.425, 0.951]->[0.01, 0.009, 0.427, 0.948]->[0.008, 0.065, 0.421, 0.938]: stand, listen to (a person), watch (a person).", "From 1636.0s to 1638.0s, [0.027, 0.018, 0.713, 0.983]->[0.018, 0.002, 0.551, 0.987]->[0.023, 0.0, 0.587, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "mkcDANJjDcM.mkv", "clip": [1639, 1726], "conversations": [{"from": "human", "timestamps": 1669.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.0, 0.617, 0.984]: watch (a person), stand; [0.489, 0.004, 1.0, 0.99]: watch (a person), stand."}, {"from": "human", "timestamps": 1726.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.014, 0.041, 0.587, 0.987]?"}, {"from": "gpt", "value": ["At Now, [0.014, 0.041, 0.587, 0.987]: walk, talk to (e.g., self, a person, a group).", "At 1725.0s, [0.049, 0.045, 0.661, 0.988]: walk, talk to (e.g., self, a person, a group).", "At 1724.0s, [0.121, 0.017, 0.726, 0.987]: 
walk, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "mkcDANJjDcM.mkv", "clip": [1681, 1733], "conversations": [{"from": "human", "timestamps": 1711.0, "value": "What actions is the person in bbox [0.02, 0.0, 0.45, 0.953] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.02, 0.0, 0.45, 0.953]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1710.0s, [0.0, 0.0, 0.583, 0.983]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1733.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.207, 0.127, 0.734, 0.969]: watch (a person), fight/hit (a person), push (another person), walk; [0.554, 0.203, 0.762, 0.948]: fight/hit (a person), grab (a person), walk."}]}, {"video": "mkcDANJjDcM.mkv", "clip": [1737, 1791], "conversations": [{"from": "human", "timestamps": 1767.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.184, 0.442, 0.467, 0.876]: sit."}, {"from": "human", "timestamps": 1791.0, "value": "What actions is the person in bbox [0.111, 0.031, 0.69, 0.984] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.111, 0.031, 0.69, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1790.0s, [0.111, 0.032, 0.695, 0.985]: stand, talk to (e.g., self, a person, a group).", "From 1788.0s to 1789.0s, [0.109, 0.033, 0.696, 0.981]->[0.1, 0.029, 0.694, 0.985]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "oD_wxyTHJ2I.mp4", "clip": [904, 955], "conversations": [{"from": "human", "timestamps": 934.0, "value": "What actions did the person at bbox [0.027, 0.087, 0.827, 0.991] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.027, 0.087, 0.827, 0.991]: stand, dig.", "At 933.0s, [0.029, 0.067, 0.866, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 955.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.124, 0.112, 0.801, 0.99]: carry/hold (an object), stand; [0.838, 0.798, 0.971, 0.999]: carry/hold (an object), lift/pick up, bend/bow (at the waist)."}]}, {"video": "oD_wxyTHJ2I.mp4", "clip": [1016, 1111], "conversations": [{"from": "human", "timestamps": 1046.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.079, 0.013, 0.826, 0.976]: sit, drive (e.g., a car, a truck)."}, {"from": "human", "timestamps": 1111.0, "value": "For the person at bbox [0.561, 0.208, 0.881, 0.944], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.561, 0.208, 0.881, 0.944]: stand, watch (a person).", "At 1110.0s, [0.314, 0.174, 0.597, 0.971]: walk, carry/hold (an object), touch (an object), watch (a person)."]}]}, {"video": "oD_wxyTHJ2I.mp4", "clip": [1125, 1193], "conversations": [{"from": "human", "timestamps": 1155.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.003, 0.567, 0.137, 0.92]: sit; [0.013, 0.555, 0.367, 0.999]: listen to (a person), sit; [0.062, 0.133, 0.949, 1.0]: sit, hug (a person)."}, {"from": "human", 
"timestamps": 1193.0, "value": "What actions is the person in bbox [0.091, 0.076, 0.556, 0.861] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.091, 0.076, 0.556, 0.861]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1192.0s, [0.16, 0.092, 0.54, 0.795]: sit, talk to (e.g., self, a person, a group).", "At 1191.0s, [0.086, 0.07, 0.519, 0.787]: sit."]}]}, {"video": "oD_wxyTHJ2I.mp4", "clip": [1164, 1234], "conversations": [{"from": "human", "timestamps": 1194.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.147, 0.1, 0.54, 0.788]: watch (a person), sit; [0.418, 0.279, 0.985, 0.983]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1234.0, "value": "For the person at bbox [0.51, 0.327, 0.958, 0.856], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.51, 0.327, 0.958, 0.856]: sit, watch (a person).", "From 1232.0s to 1233.0s, [0.521, 0.534, 0.843, 0.994]->[0.503, 0.297, 0.917, 0.845]: sit."]}]}, {"video": "oD_wxyTHJ2I.mp4", "clip": [1186, 1252], "conversations": [{"from": "human", "timestamps": 1216.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.213, 0.409, 0.46, 0.673]: carry/hold (an object), stand, give/serve (an object) to (a person), watch (a person); [0.487, 0.293, 0.69, 0.853]: watch (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group), take (an object) from (a person); [0.666, 0.35, 0.805, 0.956]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1252.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.399, 0.346, 0.568, 0.826]?"}, {"from": "gpt", "value": ["In the current frame, [0.399, 0.346, 0.568, 0.826]: sit, listen to (a person), watch (a person).", "From 1248.0s to 1251.0s, [0.426, 0.358, 0.612, 0.886]->[0.396, 0.339, 0.567, 0.843]->[0.423, 0.36, 0.614, 0.861]->[0.423, 0.35, 0.604, 0.848]: sit, listen to (a person), watch (a person)."]}]}, {"video": "oD_wxyTHJ2I.mp4", "clip": [1269, 1303], "conversations": [{"from": "human", "timestamps": 1299.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.151, 0.1, 0.489, 1.0]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1303.0, "value": "Describe the actions of the person at bbox [0.15, 0.112, 0.509, 0.999] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.15, 0.112, 0.509, 0.999]: sit, talk to (e.g., self, a person, a group).", "From 1299.0s to 1302.0s, [0.151, 0.1, 0.489, 1.0]->[0.156, 0.104, 0.51, 0.992]->[0.15, 0.096, 0.494, 1.0]->[0.149, 0.094, 0.507, 1.0]: sit, talk to (e.g., self, a person, a group).", "At 1298.0s, [0.155, 0.099, 0.524, 0.995]: sit.", "At 1297.0s, [0.143, 0.108, 0.502, 1.0]: sit, listen to (a person)."]}]}, {"video": "oD_wxyTHJ2I.mp4", "clip": [1258, 1373], "conversations": [{"from": "human", "timestamps": 1288.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.141, 0.131, 0.505, 1.0]: listen to (a person), sit."}, {"from": "human", "timestamps": 1373.0, 
"value": "What actions is the person in bbox [0.472, 0.225, 0.993, 0.993] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.472, 0.225, 0.993, 0.993]: sit, listen to (a person), watch (a person).", "At 1372.0s, [0.462, 0.221, 0.985, 0.999]: sit, listen to (a person), watch (a person).", "From 1370.0s to 1371.0s, [0.456, 0.218, 0.995, 1.0]->[0.471, 0.219, 0.992, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1368.0s to 1369.0s, [0.453, 0.24, 0.988, 1.0]->[0.46, 0.231, 0.979, 1.0]: sit, talk to (e.g., self, a person, a group).", "From 1366.0s to 1367.0s, [0.464, 0.242, 1.0, 0.992]->[0.448, 0.243, 0.993, 0.998]: sit, listen to (a person).", "From 1359.0s to 1365.0s, [0.515, 0.234, 0.992, 0.97]->[0.506, 0.254, 0.996, 0.99]->[0.516, 0.265, 1.0, 0.989]->[0.5, 0.273, 1.0, 0.992]->[0.498, 0.275, 1.0, 0.978]->[0.485, 0.275, 1.0, 0.993]->[0.469, 0.252, 1.0, 0.989]: sit, listen to (a person), watch (a person)."]}]}, {"video": "oD_wxyTHJ2I.mp4", "clip": [1328, 1361], "conversations": [{"from": "human", "timestamps": 1358.0, "value": "For the person at bbox [0.34, 0.219, 0.787, 0.989], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.34, 0.219, 0.787, 0.989]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1355.0s to 1357.0s, [0.436, 0.21, 0.959, 0.996]->[0.336, 0.202, 0.799, 0.998]->[0.336, 0.215, 0.789, 0.982]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1354.0s, [0.432, 0.205, 0.973, 0.991]: talk to (e.g., self, a person, a group), watch (a person).", "At 1353.0s, [0.429, 0.203, 0.973, 0.985]: sit, stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1352.0s, [0.443, 0.192, 0.957, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1351.0s, [0.4, 0.197, 0.983, 1.0]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1361.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.008, 0.077, 0.613, 0.976]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.516, 0.265, 1.0, 0.989]: listen to (a person), sit, watch (a person)."}]}, {"video": "oD_wxyTHJ2I.mp4", "clip": [1356, 1406], "conversations": [{"from": "human", "timestamps": 1386.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.001, 0.039, 0.558, 0.971]: watch (a person), sit, talk to (e.g., self, a person, a group), stand; [0.502, 0.189, 1.0, 0.988]: listen to (a person), sit, watch (a person), stand."}, {"from": "human", "timestamps": 1406.0, "value": "What actions did the person at bbox [0.304, 0.164, 0.799, 0.997] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.304, 0.164, 0.799, 0.997]: sit, listen to (a person), watch (a person).", "From 1393.0s to 1405.0s, [0.461, 0.2, 0.994, 0.984]->[0.466, 0.209, 1.0, 0.989]->[0.445, 0.211, 1.0, 0.996]->[0.45, 0.209, 0.994, 1.0]->[0.451, 0.221, 0.996, 0.994]->[0.459, 0.199, 0.991, 0.981]->[0.467, 0.212, 0.998, 1.0]->[0.469, 0.225, 1.0, 1.0]->[0.456, 0.241, 1.0, 0.987]->[0.477, 0.225, 1.0, 0.99]->[0.496, 0.235, 1.0, 0.995]->[0.473, 0.255, 1.0, 0.988]->[0.471, 0.26, 0.999, 0.992]: sit, listen to (a person), watch (a person).", "At 1392.0s, [0.453, 0.205, 0.989, 1.0]: sit, listen to (a 
person).", "At 1391.0s, [0.459, 0.199, 1.0, 0.988]: stand, listen to (a person).", "At 1390.0s, [0.445, 0.206, 0.991, 1.0]: listen to (a person).", "From 1388.0s to 1389.0s, [0.474, 0.184, 0.998, 0.989]->[0.467, 0.206, 0.998, 0.992]: sit, listen to (a person).", "At 1387.0s, [0.504, 0.189, 1.0, 0.986]: sit, listen to (a person), watch (a person).", "At 1386.0s, [0.502, 0.189, 1.0, 0.988]: sit, stand, listen to (a person), watch (a person).", "From 1380.0s to 1385.0s, [0.318, 0.219, 0.786, 0.991]->[0.29, 0.228, 0.781, 0.986]->[0.48, 0.209, 1.0, 0.981]->[0.497, 0.197, 1.0, 0.992]->[0.508, 0.201, 1.0, 0.985]->[0.499, 0.209, 1.0, 0.991]: sit, listen to (a person), watch (a person).", "From 1377.0s to 1379.0s, [0.313, 0.213, 0.775, 0.993]->[0.337, 0.212, 0.78, 0.987]->[0.301, 0.219, 0.764, 0.989]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1375.0s to 1376.0s, [0.478, 0.215, 0.99, 0.997]->[0.468, 0.223, 0.988, 0.997]: sit, listen to (a person), watch (a person).", "At 1374.0s, [0.48, 0.228, 0.987, 0.996]: stand, listen to (a person), watch (a person).", "From 1372.0s to 1373.0s, [0.462, 0.221, 0.985, 0.999]->[0.472, 0.225, 0.993, 0.993]: sit, listen to (a person), watch (a person).", "From 1370.0s to 1371.0s, [0.456, 0.218, 0.995, 1.0]->[0.471, 0.219, 0.992, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1368.0s to 1369.0s, [0.453, 0.24, 0.988, 1.0]->[0.46, 0.231, 0.979, 1.0]: sit, talk to (e.g., self, a person, a group).", "From 1366.0s to 1367.0s, [0.464, 0.242, 1.0, 0.992]->[0.448, 0.243, 0.993, 0.998]: sit, listen to (a person).", "From 1359.0s to 1365.0s, [0.515, 0.234, 0.992, 0.97]->[0.506, 0.254, 0.996, 0.99]->[0.516, 0.265, 1.0, 0.989]->[0.5, 0.273, 1.0, 0.992]->[0.498, 0.275, 1.0, 0.978]->[0.485, 0.275, 1.0, 0.993]->[0.469, 0.252, 1.0, 0.989]: sit, listen to (a person), watch (a person)."]}]}, {"video": "oD_wxyTHJ2I.mp4", "clip": [1358, 1479], "conversations": [{"from": "human", "timestamps": 1388.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.003, 0.033, 0.567, 0.972]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.474, 0.184, 0.998, 0.989]: listen to (a person), sit."}, {"from": "human", "timestamps": 1479.0, "value": "What actions is the person in bbox [0.23, 0.101, 0.97, 0.988] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.23, 0.101, 0.97, 0.988]: sit, listen to (a person), watch (a person).", "From 1469.0s to 1478.0s, [0.216, 0.1, 0.968, 0.99]->[0.203, 0.109, 1.0, 0.979]->[0.216, 0.103, 0.98, 0.993]->[0.203, 0.102, 0.977, 0.982]->[0.192, 0.108, 0.968, 0.978]->[0.205, 0.1, 0.975, 0.99]->[0.202, 0.091, 0.977, 0.979]->[0.207, 0.097, 0.975, 0.985]->[0.239, 0.084, 0.966, 0.972]->[0.19, 0.089, 0.972, 0.991]: sit, listen to (a person), watch (a person).", "At 1468.0s, [0.218, 0.096, 0.971, 0.978]: sit, watch (a person).", "From 1464.0s to 1467.0s, [0.187, 0.097, 0.97, 0.983]->[0.209, 0.087, 0.973, 0.982]->[0.207, 0.089, 0.973, 0.994]->[0.221, 0.087, 0.977, 0.984]: sit, listen to (a person), watch (a person).", "From 1462.0s to 1463.0s, [0.187, 0.102, 0.966, 0.981]->[0.216, 0.099, 0.971, 0.977]: sit, watch (a person).", "From 1460.0s to 1461.0s, [0.192, 0.094, 0.972, 0.981]->[0.193, 0.1, 0.971, 0.982]: sit, listen to (a person), watch (a person).", "At 1459.0s, [0.256, 0.078, 0.954, 0.978]: listen to (a person), watch (a person).", "From 1452.0s to 1458.0s, 
[0.221, 0.077, 0.972, 0.988]->[0.219, 0.083, 0.968, 0.987]->[0.183, 0.082, 0.978, 0.986]->[0.185, 0.081, 0.966, 0.986]->[0.181, 0.09, 0.958, 0.983]->[0.184, 0.087, 0.963, 0.987]->[0.18, 0.084, 0.968, 0.986]: sit, listen to (a person), watch (a person).", "At 1451.0s, [0.212, 0.09, 0.964, 0.986]: stand, listen to (a person), watch (a person).", "At 1450.0s, [0.243, 0.076, 0.974, 0.987]: sit, watch (a person).", "At 1449.0s, [0.21, 0.095, 0.97, 0.979]: stand, listen to (a person), watch (a person).", "From 1438.0s to 1448.0s, [0.271, 0.062, 0.979, 0.989]->[0.313, 0.052, 0.979, 0.978]->[0.282, 0.058, 0.97, 0.98]->[0.265, 0.078, 0.958, 0.992]->[0.299, 0.09, 0.967, 0.984]->[0.221, 0.092, 0.957, 0.986]->[0.23, 0.1, 0.972, 0.975]->[0.225, 0.104, 0.971, 0.98]->[0.199, 0.103, 0.964, 0.98]->[0.198, 0.103, 0.963, 0.989]->[0.201, 0.096, 0.959, 0.983]: sit, listen to (a person), watch (a person).", "At 1437.0s, [0.306, 0.083, 0.983, 0.985]: stand, listen to (a person), watch (a person).", "From 1423.0s to 1436.0s, [0.295, 0.129, 0.87, 0.987]->[0.289, 0.138, 0.872, 0.996]->[0.299, 0.141, 0.888, 0.991]->[0.282, 0.133, 0.9, 0.992]->[0.301, 0.127, 0.906, 0.995]->[0.312, 0.106, 0.908, 0.981]->[0.264, 0.136, 0.92, 0.994]->[0.3, 0.113, 0.914, 1.0]->[0.291, 0.102, 0.934, 0.987]->[0.301, 0.078, 0.954, 1.0]->[0.298, 0.091, 0.951, 0.992]->[0.317, 0.08, 0.976, 0.992]->[0.301, 0.088, 0.968, 0.984]->[0.302, 0.087, 0.984, 0.984]: sit, listen to (a person), watch (a person).", "At 1422.0s, [0.323, 0.115, 0.872, 0.988]: stand, listen to (a person), watch (a person).", "At 1421.0s, [0.286, 0.144, 0.859, 1.0]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1415.0s to 1420.0s, [0.269, 0.153, 0.824, 0.996]->[0.269, 0.147, 0.834, 0.989]->[0.281, 0.158, 0.832, 0.995]->[0.283, 0.14, 0.842, 0.984]->[0.293, 0.138, 0.845, 0.998]->[0.289, 0.13, 0.86, 0.991]: sit, listen to (a person), watch (a person).", "At 1414.0s, [0.261, 0.154, 0.82, 0.996]: sit, listen to (a person).", "From 1393.0s to 1413.0s, [0.461, 0.2, 0.994, 0.984]->[0.466, 0.209, 1.0, 0.989]->[0.445, 0.211, 1.0, 0.996]->[0.45, 0.209, 0.994, 1.0]->[0.451, 0.221, 0.996, 0.994]->[0.459, 0.199, 0.991, 0.981]->[0.467, 0.212, 0.998, 1.0]->[0.469, 0.225, 1.0, 1.0]->[0.456, 0.241, 1.0, 0.987]->[0.477, 0.225, 1.0, 0.99]->[0.496, 0.235, 1.0, 0.995]->[0.473, 0.255, 1.0, 0.988]->[0.471, 0.26, 0.999, 0.992]->[0.304, 0.164, 0.799, 0.997]->[0.301, 0.155, 0.814, 0.993]->[0.295, 0.161, 0.797, 1.0]->[0.293, 0.163, 0.797, 0.996]->[0.297, 0.159, 0.803, 1.0]->[0.279, 0.144, 0.819, 0.99]->[0.273, 0.156, 0.804, 0.989]->[0.281, 0.135, 0.825, 0.99]: sit, listen to (a person), watch (a person).", "At 1392.0s, [0.453, 0.205, 0.989, 1.0]: sit, listen to (a person).", "At 1391.0s, [0.459, 0.199, 1.0, 0.988]: stand, listen to (a person).", "At 1390.0s, [0.445, 0.206, 0.991, 1.0]: listen to (a person).", "From 1388.0s to 1389.0s, [0.474, 0.184, 0.998, 0.989]->[0.467, 0.206, 0.998, 0.992]: sit, listen to (a person).", "At 1387.0s, [0.504, 0.189, 1.0, 0.986]: sit, listen to (a person), watch (a person).", "At 1386.0s, [0.502, 0.189, 1.0, 0.988]: sit, stand, listen to (a person), watch (a person).", "From 1380.0s to 1385.0s, [0.318, 0.219, 0.786, 0.991]->[0.29, 0.228, 0.781, 0.986]->[0.48, 0.209, 1.0, 0.981]->[0.497, 0.197, 1.0, 0.992]->[0.508, 0.201, 1.0, 0.985]->[0.499, 0.209, 1.0, 0.991]: sit, listen to (a person), watch (a person).", "From 1377.0s to 1379.0s, [0.313, 0.213, 0.775, 0.993]->[0.337, 0.212, 0.78, 0.987]->[0.301, 0.219, 0.764, 0.989]: sit, 
talk to (e.g., self, a person, a group), watch (a person).", "From 1375.0s to 1376.0s, [0.478, 0.215, 0.99, 0.997]->[0.468, 0.223, 0.988, 0.997]: sit, listen to (a person), watch (a person).", "At 1374.0s, [0.48, 0.228, 0.987, 0.996]: stand, listen to (a person), watch (a person).", "From 1372.0s to 1373.0s, [0.462, 0.221, 0.985, 0.999]->[0.472, 0.225, 0.993, 0.993]: sit, listen to (a person), watch (a person).", "From 1370.0s to 1371.0s, [0.456, 0.218, 0.995, 1.0]->[0.471, 0.219, 0.992, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1368.0s to 1369.0s, [0.453, 0.24, 0.988, 1.0]->[0.46, 0.231, 0.979, 1.0]: sit, talk to (e.g., self, a person, a group).", "From 1366.0s to 1367.0s, [0.464, 0.242, 1.0, 0.992]->[0.448, 0.243, 0.993, 0.998]: sit, listen to (a person).", "From 1359.0s to 1365.0s, [0.515, 0.234, 0.992, 0.97]->[0.506, 0.254, 0.996, 0.99]->[0.516, 0.265, 1.0, 0.989]->[0.5, 0.273, 1.0, 0.992]->[0.498, 0.275, 1.0, 0.978]->[0.485, 0.275, 1.0, 0.993]->[0.469, 0.252, 1.0, 0.989]: sit, listen to (a person), watch (a person)."]}]}, {"video": "oD_wxyTHJ2I.mp4", "clip": [1410, 1450], "conversations": [{"from": "human", "timestamps": 1440.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.023, 0.0, 0.447, 0.954]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.282, 0.058, 0.97, 0.98]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1450.0, "value": "For the person at bbox [0.003, 0.006, 0.41, 0.998], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.003, 0.006, 0.41, 0.998]: sit, watch (a person).", "From 1437.0s to 1449.0s, [0.012, 0.005, 0.448, 0.974]->[0.02, 0.0, 0.492, 0.975]->[0.044, 0.003, 0.512, 0.987]->[0.023, 0.0, 0.447, 0.954]->[0.029, 0.0, 0.458, 0.969]->[0.02, 0.014, 0.486, 0.982]->[0.034, 0.008, 0.496, 1.0]->[0.0, 0.004, 0.393, 0.996]->[0.001, 0.0, 0.412, 0.996]->[0.006, 0.055, 0.357, 0.967]->[0.002, 0.02, 0.357, 0.973]->[0.004, 0.004, 0.403, 0.996]->[0.0, 0.037, 0.355, 0.98]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1436.0s, [0.031, 0.003, 0.47, 0.977]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1435.0s, [0.037, 0.002, 0.487, 0.977]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1432.0s to 1434.0s, [0.042, 0.0, 0.487, 0.975]->[0.047, 0.0, 0.513, 0.974]->[0.0, 0.0, 0.439, 0.974]: sit, talk to (e.g., self, a person, a group).", "At 1431.0s, [0.026, 0.0, 0.467, 0.979]: stand, talk to (e.g., self, a person, a group).", "At 1430.0s, [0.039, 0.0, 0.496, 0.977]: sit, talk to (e.g., self, a person, a group).", "From 1427.0s to 1429.0s, [0.0, 0.026, 0.412, 0.966]->[0.0, 0.025, 0.41, 0.991]->[0.034, 0.0, 0.484, 0.98]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1417.0s to 1426.0s, [0.03, 0.004, 0.451, 0.985]->[0.019, 0.006, 0.429, 0.982]->[0.04, 0.012, 0.453, 0.992]->[0.038, 0.006, 0.442, 0.984]->[0.022, 0.004, 0.45, 0.971]->[0.0, 0.014, 0.415, 0.979]->[0.0, 0.016, 0.417, 0.998]->[0.0, 0.001, 0.418, 0.973]->[0.002, 0.0, 0.423, 0.971]->[0.028, 0.003, 0.464, 0.981]: sit, talk to (e.g., self, a person, a group).", "From 1414.0s to 1416.0s, [0.04, 0.036, 0.465, 0.985]->[0.0, 0.015, 0.371, 1.0]->[0.0, 0.023, 0.379, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1413.0s, [0.011, 0.011, 0.41, 0.98]: sit, talk to (e.g., self, a person, 
a group).", "From 1410.0s to 1412.0s, [0.014, 0.02, 0.427, 0.984]->[0.027, 0.017, 0.433, 0.997]->[0.0, 0.006, 0.375, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1406.0s to 1409.0s, [0.008, 0.025, 0.427, 0.988]->[0.006, 0.012, 0.421, 0.985]->[0.015, 0.02, 0.422, 0.98]->[0.019, 0.011, 0.441, 0.993]: sit, talk to (e.g., self, a person, a group).", "From 1404.0s to 1405.0s, [0.012, 0.065, 0.579, 0.973]->[0.0, 0.078, 0.582, 0.977]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1403.0s, [0.0, 0.067, 0.604, 0.981]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1401.0s to 1402.0s, [0.0, 0.055, 0.578, 1.0]->[0.003, 0.055, 0.596, 0.978]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1400.0s, [0.0, 0.059, 0.588, 0.972]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1397.0s to 1399.0s, [0.0, 0.057, 0.553, 1.0]->[0.0, 0.051, 0.563, 0.972]->[0.013, 0.065, 0.586, 0.973]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1396.0s, [0.0, 0.046, 0.546, 0.971]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1390.0s to 1395.0s, [0.0, 0.034, 0.527, 0.964]->[0.0, 0.026, 0.533, 0.97]->[0.006, 0.025, 0.536, 0.967]->[0.003, 0.025, 0.546, 0.977]->[0.005, 0.022, 0.54, 0.979]->[0.0, 0.027, 0.539, 0.977]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1389.0s, [0.0, 0.027, 0.554, 0.965]: sit, watch (a person).", "From 1387.0s to 1388.0s, [0.0, 0.027, 0.625, 0.971]->[0.003, 0.033, 0.567, 0.972]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1386.0s, [0.001, 0.039, 0.558, 0.971]: sit, stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1384.0s to 1385.0s, [0.011, 0.035, 0.57, 0.975]->[0.016, 0.04, 0.597, 0.97]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1383.0s, [0.0, 0.043, 0.609, 0.972]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1382.0s, [0.0, 0.033, 0.633, 0.975]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1381.0s, [0.0, 0.058, 0.374, 0.97]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1379.0s to 1380.0s, [0.0, 0.053, 0.348, 0.967]->[0.0, 0.064, 0.378, 0.98]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1377.0s to 1378.0s, [0.0, 0.056, 0.383, 0.964]->[0.0, 0.051, 0.381, 0.971]: sit, listen to (a person), watch (a person).", "At 1376.0s, [0.002, 0.037, 0.582, 0.979]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1372.0s to 1375.0s, [0.002, 0.028, 0.572, 0.974]->[0.007, 0.043, 0.595, 0.977]->[0.0, 0.048, 0.602, 0.973]->[0.001, 0.041, 0.585, 0.981]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1368.0s to 1371.0s, [0.011, 0.066, 0.577, 0.974]->[0.003, 0.035, 0.573, 0.983]->[0.007, 0.04, 0.561, 0.985]->[0.008, 0.05, 0.555, 0.968]: sit, listen to (a person), watch (a person).", "From 1365.0s to 1367.0s, [0.005, 0.112, 0.542, 0.973]->[0.007, 0.104, 0.582, 0.985]->[0.016, 0.07, 0.548, 0.98]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1363.0s to 1364.0s, [0.0, 0.106, 0.581, 0.977]->[0.005, 0.102, 0.582, 0.977]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1360.0s to 1362.0s, [0.0, 0.07, 0.598, 0.973]->[0.008, 0.077, 0.613, 0.976]->[0.0, 0.098, 0.59, 1.0]: sit, talk to (e.g., self, a person, a group), watch 
(a person).", "At 1359.0s, [0.0, 0.07, 0.604, 0.978]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1354.0s to 1358.0s, [0.0, 0.006, 0.537, 0.974]->[0.01, 0.018, 0.557, 0.977]->[0.0, 0.072, 0.405, 0.979]->[0.0, 0.062, 0.42, 1.0]->[0.0, 0.069, 0.409, 0.979]: sit, listen to (a person), watch (a person).", "At 1353.0s, [0.001, 0.008, 0.549, 0.98]: sit, stand, listen to (a person), watch (a person).", "At 1352.0s, [0.0, 0.017, 0.58, 0.978]: sit, listen to (a person), watch (a person).", "At 1351.0s, [0.001, 0.006, 0.488, 0.986]: sit, watch (a person).", "At 1350.0s, [0.0, 0.0, 0.529, 0.987]: sit, stand, listen to (a person), watch (a person).", "From 1347.0s to 1349.0s, [0.002, 0.0, 0.476, 1.0]->[0.003, 0.004, 0.486, 0.982]->[0.005, 0.007, 0.482, 0.984]: sit, listen to (a person), watch (a person).", "At 1346.0s, [0.0, 0.004, 0.525, 0.989]: stand, listen to (a person), watch (a person).", "From 1344.0s to 1345.0s, [0.008, 0.0, 0.547, 0.992]->[0.0, 0.0, 0.484, 0.98]: sit, listen to (a person), watch (a person).", "At 1343.0s, [0.0, 0.011, 0.515, 0.975]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1342.0s, [0.006, 0.097, 0.455, 0.997]: sit, watch (a person).", "From 1334.0s to 1341.0s, [0.0, 0.101, 0.408, 0.954]->[0.003, 0.109, 0.45, 0.975]->[0.007, 0.101, 0.479, 0.978]->[0.009, 0.102, 0.478, 0.988]->[0.001, 0.102, 0.461, 0.984]->[0.0, 0.096, 0.462, 0.975]->[0.0, 0.092, 0.465, 0.986]->[0.005, 0.097, 0.471, 0.986]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1332.0s to 1333.0s, [0.0, 0.106, 0.485, 0.952]->[0.0, 0.114, 0.425, 0.968]: sit.", "At 1331.0s, [0.033, 0.031, 0.499, 1.0]: sit, turn (e.g., a screwdriver), watch (a person).", "At 1330.0s, [0.025, 0.0, 0.501, 1.0]: sit, turn (e.g., a screwdriver).", "At 1329.0s, [0.028, 0.0, 0.518, 0.988]: sit, carry/hold (an object), turn (e.g., a screwdriver).", "From 1326.0s to 1328.0s, [0.0, 0.101, 0.373, 1.0]->[0.0, 0.072, 0.346, 0.985]->[0.0, 0.044, 0.328, 0.958]: sit, watch (a person).", "At 1325.0s, [0.0, 0.043, 0.452, 0.971]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1323.0s to 1324.0s, [0.0, 0.015, 0.202, 1.0]->[0.003, 0.057, 0.45, 0.996]: sit."]}]}, {"video": "oD_wxyTHJ2I.mp4", "clip": [1477, 1550], "conversations": [{"from": "human", "timestamps": 1507.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.001, 0.004, 0.396, 0.993]: sit, kiss (a person); [0.18, 0.11, 0.913, 0.99]: watch (a person), sit."}, {"from": "human", "timestamps": 1550.0, "value": "Describe the actions of the person at bbox [0.144, 0.16, 0.716, 0.992] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.144, 0.16, 0.716, 0.992]: stand.", "From 1547.0s to 1549.0s, [0.133, 0.163, 0.702, 1.0]->[0.116, 0.17, 0.69, 0.987]->[0.146, 0.164, 0.699, 1.0]: stand."]}]}, {"video": "oD_wxyTHJ2I.mp4", "clip": [1538, 1573], "conversations": [{"from": "human", "timestamps": 1568.0, "value": "For the person at bbox [0.344, 0.312, 0.515, 0.882], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.344, 0.312, 0.515, 0.882]: carry/hold (an object), jump/leap.", "At 1567.0s, [0.307, 0.188, 0.451, 0.805]: carry/hold (an object), jump/leap.", "At 1566.0s, [0.319, 0.28, 0.499, 0.886]: stand, carry/hold (an object).", "From 1564.0s to 1565.0s, [0.306, 
0.266, 0.501, 0.887]->[0.311, 0.263, 0.504, 0.88]: stand, carry/hold (an object), push (an object)."]}, {"from": "human", "timestamps": 1573.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.203, 0.113, 0.948, 0.985]: watch (a person), sit."}]}, {"video": "oD_wxyTHJ2I.mp4", "clip": [1632, 1711], "conversations": [{"from": "human", "timestamps": 1662.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.478, 0.202, 0.803, 0.981]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1711.0, "value": "Describe the actions of the person at bbox [0.373, 0.139, 0.863, 0.977] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.373, 0.139, 0.863, 0.977]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1710.0s, [0.397, 0.167, 0.976, 0.992]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1706.0s to 1709.0s, [0.413, 0.176, 0.782, 0.981]->[0.331, 0.187, 0.775, 0.996]->[0.297, 0.239, 0.719, 1.0]->[0.395, 0.28, 0.851, 0.994]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1705.0s, [0.513, 0.424, 0.966, 1.0]: bend/bow (at the waist), carry/hold (an object), listen to (a person)."]}]}, {"video": "oD_wxyTHJ2I.mp4", "clip": [1659, 1707], "conversations": [{"from": "human", "timestamps": 1689.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.158, 0.206, 0.68, 1.0]: carry/hold (an object), sit, touch (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1707.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.331, 0.187, 0.775, 0.996]?"}, {"from": "gpt", "value": ["In the current frame, [0.331, 0.187, 0.775, 0.996]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1706.0s, [0.413, 0.176, 0.782, 0.981]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1705.0s, [0.513, 0.424, 0.966, 1.0]: bend/bow (at the waist), carry/hold (an object), listen to (a person)."]}]}, {"video": "oITFHwzfw_k.mkv", "clip": [905, 975], "conversations": [{"from": "human", "timestamps": 935.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.231, 0.014, 0.684, 0.976]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 975.0, "value": "Describe the actions of the person at bbox [0.266, 0.136, 0.954, 0.976] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.266, 0.136, 0.954, 0.976]: sit, touch (an object), watch (a person).", "At 974.0s, [0.281, 0.071, 0.975, 0.985]: sit, carry/hold (an object), touch (an object), watch (a person).", "At 973.0s, [0.482, 0.264, 0.973, 1.0]: sit, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "At 972.0s, [0.477, 0.258, 0.975, 0.994]: sit, carry/hold (an object), touch (an object), watch (a person)."]}]}, {"video": "oITFHwzfw_k.mkv", "clip": [954, 1019], "conversations": [{"from": "human", "timestamps": 984.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": 
"gpt", "value": "In the current frame, [0.0, 0.016, 0.259, 0.996]: sit, watch (a person), give/serve (an object) to (a person), carry/hold (an object), talk to (e.g., self, a person, a group); [0.201, 0.099, 0.455, 0.938]: listen to (a person), stand, watch (a person); [0.346, 0.037, 0.96, 0.993]: sit, drink, listen to (a person), watch (a person), carry/hold (an object); [0.647, 0.029, 0.998, 0.515]: stand."}, {"from": "human", "timestamps": 1019.0, "value": "What actions did the person at bbox [0.043, 0.145, 0.611, 0.952] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.043, 0.145, 0.611, 0.952]: sit, dance, watch (a person).", "At 1018.0s, [0.026, 0.123, 0.633, 0.963]: sit, listen to (a person), watch (a person)."]}]}, {"video": "oITFHwzfw_k.mkv", "clip": [1056, 1093], "conversations": [{"from": "human", "timestamps": 1086.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.021, 0.378, 0.461, 0.999]: sit, talk to (e.g., self, a person, a group), drink; [0.384, 0.024, 0.723, 0.989]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.626, 0.043, 0.885, 1.0]: listen to (a person), stand, watch (a person); [0.789, 0.197, 0.954, 0.988]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1093.0, "value": "What actions is the person in bbox [0.0, 0.492, 0.267, 0.991] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.492, 0.267, 0.991]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1091.0s to 1092.0s, [0.0, 0.515, 0.284, 0.991]->[0.0, 0.493, 0.276, 0.996]: sit, carry/hold (an object), watch (a person).", "At 1090.0s, [0.0, 0.516, 0.299, 0.99]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1089.0s, [0.0, 0.526, 0.29, 0.987]: sit, carry/hold (an object), listen to (a person).", "At 1088.0s, [0.09, 0.396, 0.512, 0.984]: sit, drink, listen to (a person).", "At 1087.0s, [0.053, 0.349, 0.502, 0.988]: sit, carry/hold (an object), drink, talk to (e.g., self, a person, a group), watch (a person).", "At 1086.0s, [0.021, 0.378, 0.461, 0.999]: sit, drink, talk to (e.g., self, a person, a group).", "From 1084.0s to 1085.0s, [0.062, 0.303, 0.462, 0.975]->[0.061, 0.38, 0.491, 0.993]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1083.0s, [0.073, 0.293, 0.484, 0.976]: sit, carry/hold (an object), drink, talk to (e.g., self, a person, a group), watch (a person).", "At 1082.0s, [0.069, 0.302, 0.501, 0.968]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "oITFHwzfw_k.mkv", "clip": [1120, 1182], "conversations": [{"from": "human", "timestamps": 1150.0, "value": "What actions is the person in bbox [0.114, 0.104, 0.823, 0.996] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.114, 0.104, 0.823, 0.996]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1149.0s, [0.099, 0.106, 0.772, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1182.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.1, 0.164, 0.425, 0.987]: carry/hold (an object), walk; [0.439, 0.406, 0.813, 0.986]: watch (a person), sit; [0.53, 
0.233, 0.692, 0.608]: watch (a person), stand; [0.854, 0.187, 0.999, 0.983]: walk."}]}, {"video": "oITFHwzfw_k.mkv", "clip": [1207, 1247], "conversations": [{"from": "human", "timestamps": 1237.0, "value": "What actions is the person in bbox [0.399, 0.361, 0.679, 0.871] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.399, 0.361, 0.679, 0.871]: sit, listen to (a person), watch (a person).", "At 1236.0s, [0.411, 0.375, 0.667, 0.843]: sit, watch (a person).", "From 1234.0s to 1235.0s, [0.381, 0.415, 0.676, 0.902]->[0.387, 0.423, 0.655, 0.901]: sit, listen to (a person)."]}, {"from": "human", "timestamps": 1247.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.017, 0.06, 0.878, 1.0]: listen to (a person), sit, watch (a person)."}]}, {"video": "oITFHwzfw_k.mkv", "clip": [1252, 1290], "conversations": [{"from": "human", "timestamps": 1282.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.071, 0.08, 0.864, 1.0]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1290.0, "value": "For the person at bbox [0.472, 0.169, 0.977, 0.975], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.472, 0.169, 0.977, 0.975]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1289.0s, [0.434, 0.157, 0.978, 0.981]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1288.0s, [0.445, 0.167, 0.973, 0.982]: sit, carry/hold (an object), listen to (a person)."]}]}, {"video": "oITFHwzfw_k.mkv", "clip": [1298, 1370], "conversations": [{"from": "human", "timestamps": 1328.0, "value": "What actions is the person in bbox [0.437, 0.183, 0.977, 0.985] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.437, 0.183, 0.977, 0.985]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1326.0s to 1327.0s, [0.405, 0.181, 0.99, 0.98]->[0.422, 0.174, 0.975, 0.985]: sit, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1370.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.173, 0.205, 0.608, 0.962]: watch (a person), listen to (a person), bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group); [0.583, 0.142, 0.846, 0.776]: watch (a person), stand, hand shake, talk to (e.g., self, a person, a group)."}]}, {"video": "oITFHwzfw_k.mkv", "clip": [1367, 1430], "conversations": [{"from": "human", "timestamps": 1397.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.087, 0.283, 0.34, 0.751]: listen to (a person), sit, watch (a person); [0.82, 0.136, 0.977, 0.657]: watch (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1430.0, "value": "For the person at bbox [0.162, 0.103, 0.975, 0.992], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.162, 0.103, 0.975, 0.992]: sit.", "At 1429.0s, [0.435, 0.381, 0.794, 1.0]: sit, carry/hold (an object), 
listen to (a person).", "At 1428.0s, [0.464, 0.383, 0.796, 0.999]: sit, carry/hold (an object).", "At 1427.0s, [0.407, 0.374, 0.748, 1.0]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1424.0s to 1426.0s, [0.291, 0.382, 0.504, 0.993]->[0.399, 0.361, 0.663, 0.995]->[0.406, 0.363, 0.706, 0.996]: sit, carry/hold (an object)."]}]}, {"video": "oITFHwzfw_k.mkv", "clip": [1429, 1465], "conversations": [{"from": "human", "timestamps": 1459.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.035, 0.051, 0.863, 0.952]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1465.0, "value": "What actions did the person at bbox [0.0, 0.029, 0.767, 0.976] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.029, 0.767, 0.976]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1464.0s, [0.0, 0.166, 0.873, 0.982]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1463.0s, [0.127, 0.06, 0.919, 0.988]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "oITFHwzfw_k.mkv", "clip": [1631, 1689], "conversations": [{"from": "human", "timestamps": 1661.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.037, 1.0, 1.0]: sit, eat."}, {"from": "human", "timestamps": 1689.0, "value": "What actions did the person at bbox [0.218, 0.063, 1.0, 0.988] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.218, 0.063, 1.0, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1688.0s, [0.215, 0.022, 1.0, 0.996]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "oITFHwzfw_k.mkv", "clip": [1684, 1723], "conversations": [{"from": "human", "timestamps": 1714.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.186, 0.036, 0.981, 0.987]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1723.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.208, 0.027, 0.968, 0.988]?"}, {"from": "gpt", "value": ["In the current frame, [0.208, 0.027, 0.968, 0.988]: stand.", "At 1722.0s, [0.236, 0.022, 1.0, 0.981]: stand, listen to (a person), watch (a person).", "At 1721.0s, [0.227, 0.039, 1.0, 0.976]: sit, stand, watch (a person)."]}]}, {"video": "oITFHwzfw_k.mkv", "clip": [1740, 1797], "conversations": [{"from": "human", "timestamps": 1770.0, "value": "What actions is the person in bbox [0.055, 0.004, 0.946, 0.985] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.055, 0.004, 0.946, 0.985]: stand, answer phone.", "From 1766.0s to 1769.0s, [0.06, 0.018, 0.914, 0.975]->[0.003, 0.003, 0.937, 1.0]->[0.02, 0.0, 0.885, 0.989]->[0.079, 0.0, 0.936, 0.999]: stand, answer phone."]}, {"from": "human", "timestamps": 1797.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.259, 0.986, 0.967]: lie/sleep; [0.624, 0.274, 0.966, 0.707]: lie/sleep."}]}, {"video": "oWhvucAskhk.mkv", "clip": [967, 1001], "conversations": [{"from": "human", "timestamps": 997.0, "value": "For the person at bbox [0.003, 
0.091, 0.281, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.003, 0.091, 0.281, 1.0]: walk, listen to (a person), watch (a person).", "At 996.0s, [0.0, 0.15, 0.378, 1.0]: walk, listen to (a person), watch (a person).", "At 995.0s, [0.0, 0.138, 0.327, 0.986]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 994.0s, [0.004, 0.147, 0.342, 0.98]: walk, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1001.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.169, 0.385, 0.628, 0.997]: talk to (e.g., self, a person, a group), walk; [0.648, 0.17, 0.984, 0.993]: listen to (a person), talk to (e.g., self, a person, a group), walk."}]}, {"video": "oWhvucAskhk.mkv", "clip": [1003, 1056], "conversations": [{"from": "human", "timestamps": 1033.0, "value": "Describe the actions of the person at bbox [0.658, 0.459, 0.731, 0.711] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.658, 0.459, 0.731, 0.711]: walk, talk to (e.g., self, a person, a group).", "At 1032.0s, [0.678, 0.452, 0.751, 0.703]: walk, talk to (e.g., self, a person, a group).", "At 1031.0s, [0.683, 0.447, 0.733, 0.648]: stand, carry/hold (an object)."]}, {"from": "human", "timestamps": 1056.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.19, 0.004, 0.68, 0.948]: carry/hold (an object), stand, throw."}]}, {"video": "oWhvucAskhk.mkv", "clip": [1079, 1124], "conversations": [{"from": "human", "timestamps": 1109.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.106, 0.063, 0.813, 0.985]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1124.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.441, 0.008, 1.0, 0.995]?"}, {"from": "gpt", "value": ["In the current frame, [0.441, 0.008, 1.0, 0.995]: stand, talk to (e.g., self, a person, a group).", "At 1123.0s, [0.0, 0.002, 0.66, 0.994]: stand, talk to (e.g., self, a person, a group).", "At 1122.0s, [0.0, 0.0, 0.378, 0.979]: stand."]}]}, {"video": "oWhvucAskhk.mkv", "clip": [1124, 1158], "conversations": [{"from": "human", "timestamps": 1154.0, "value": "For the person at bbox [0.0, 0.172, 0.424, 0.95], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.172, 0.424, 0.95]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1153.0s, [0.0, 0.232, 0.332, 0.982]: walk, carry/hold (an object), watch (a person).", "From 1150.0s to 1152.0s, [0.002, 0.301, 0.198, 0.93]->[0.135, 0.27, 0.373, 0.994]->[0.111, 0.238, 0.443, 0.973]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1158.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.149, 0.533, 0.992]: watch (a person), give/serve (an object) to (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.358, 0.154, 0.777, 0.821]: listen to (a person), stand; [0.59, 0.071, 0.997, 0.973]: carry/hold (an object), stand, listen to (a person)."}]}, {"video": "oWhvucAskhk.mkv", "clip": [1180, 
1227], "conversations": [{"from": "human", "timestamps": 1210.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.004, 0.402, 0.136, 0.995]: listen to (a person), walk; [0.168, 0.427, 0.309, 0.814]: carry/hold (an object), listen to (a person), watch (a person), walk; [0.398, 0.395, 0.466, 0.681]: listen to (a person), stand, climb (e.g., a mountain); [0.459, 0.42, 0.528, 0.674]: listen to (a person), stand, touch (an object); [0.554, 0.42, 0.605, 0.615]: carry/hold (an object), stand, listen to (a person); [0.874, 0.421, 0.982, 0.766]: listen to (a person), bend/bow (at the waist)."}, {"from": "human", "timestamps": 1227.0, "value": "What actions did the person at bbox [0.468, 0.249, 0.972, 0.985] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.468, 0.249, 0.972, 0.985]: bend/bow (at the waist), carry/hold (an object), watch (a person).", "At 1226.0s, [0.468, 0.249, 0.944, 0.986]: stand, carry/hold (an object).", "At 1225.0s, [0.541, 0.148, 0.968, 0.992]: stand, carry/hold (an object), watch (a person).", "At 1224.0s, [0.577, 0.186, 0.988, 0.982]: bend/bow (at the waist), carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "oWhvucAskhk.mkv", "clip": [1263, 1312], "conversations": [{"from": "human", "timestamps": 1293.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.001, 0.319, 0.152, 0.961]?"}, {"from": "gpt", "value": ["At Now, [0.001, 0.319, 0.152, 0.961]: stand, listen to (a person), watch (a person).", "At 1292.0s, [0.0, 0.278, 0.184, 0.986]: stand, smoke, listen to (a person), watch (a person).", "At 1291.0s, [0.0, 0.24, 0.237, 0.964]: dance, smoke, listen to (a person)."]}, {"from": "human", "timestamps": 1312.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.281, 0.027, 1.0, 0.995]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "oWhvucAskhk.mkv", "clip": [1281, 1370], "conversations": [{"from": "human", "timestamps": 1311.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.0, 0.17, 0.159, 0.802]: listen to (a person), watch (a person), walk; [0.177, 0.177, 0.308, 0.617]: listen to (a person), stand, watch (a person); [0.377, 0.203, 0.49, 0.582]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.477, 0.138, 0.64, 0.821]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.637, 0.183, 0.737, 0.554]: listen to (a person), stand, watch (a person); [0.721, 0.19, 0.863, 0.651]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.881, 0.176, 1.0, 0.656]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1370.0, "value": "Describe the actions of the person at bbox [0.878, 0.046, 0.998, 0.988] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.878, 0.046, 0.998, 0.988]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1369.0s, [0.763, 0.075, 0.998, 0.968]: walk, watch (a person)."]}]}, {"video": "oWhvucAskhk.mkv", "clip": [1295, 1332], "conversations": [{"from": "human", "timestamps": 1325.0, "value": "What actions is the person in bbox [0.851, 0.172, 0.987, 0.69] 
doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.851, 0.172, 0.987, 0.69]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1323.0s to 1324.0s, [0.856, 0.178, 0.988, 0.683]->[0.848, 0.172, 0.998, 0.693]: stand, listen to (a person), watch (a person).", "At 1322.0s, [0.853, 0.177, 0.995, 0.698]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1321.0s, [0.857, 0.187, 0.995, 0.702]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1320.0s, [0.867, 0.185, 0.995, 0.699]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1332.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.193, 0.192, 0.665]: watch (a person), sit; [0.142, 0.509, 0.342, 0.985]: listen to (a person), stand, touch (an object), watch (a person); [0.264, 0.057, 0.964, 0.987]: carry/hold (an object), stand, watch (a person)."}]}, {"video": "oWhvucAskhk.mkv", "clip": [1350, 1416], "conversations": [{"from": "human", "timestamps": 1380.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.158, 0.381, 0.811]?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.158, 0.381, 0.811]: stand, watch (a person).", "From 1378.0s to 1379.0s, [0.429, 0.244, 0.622, 0.787]->[0.298, 0.059, 0.623, 0.443]: jump/leap."]}, {"from": "human", "timestamps": 1416.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.193, 0.14, 0.807]: carry/hold (an object), stand, listen to (a person); [0.141, 0.174, 0.318, 0.733]: listen to (a person), stand, watch (a person); [0.296, 0.169, 0.44, 0.742]: listen to (a person), stand, watch (a person); [0.437, 0.182, 0.594, 0.731]: listen to (a person), stand; [0.576, 0.179, 0.703, 0.733]: listen to (a person), stand, watch (a person); [0.579, 0.071, 0.968, 0.996]: carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.848, 0.117, 0.998, 0.914]: listen to (a person), stand, watch (a person)."}]}, {"video": "oWhvucAskhk.mkv", "clip": [1389, 1481], "conversations": [{"from": "human", "timestamps": 1419.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.002, 0.172, 0.168, 0.807]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.168, 0.178, 0.356, 0.758]: listen to (a person), stand, watch (a person); [0.316, 0.176, 0.482, 0.771]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.524, 0.072, 0.776, 0.962]: carry/hold (an object), watch (a person), walk; [0.843, 0.08, 0.997, 0.986]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1481.0, "value": "What actions did the person at bbox [0.088, 0.088, 0.868, 0.981] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.088, 0.088, 0.868, 0.981]: stand, talk to (e.g., self, a person, a group).", "From 1479.0s to 1480.0s, [0.098, 0.093, 0.835, 0.974]->[0.093, 0.074, 0.897, 0.984]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1478.0s, [0.027, 0.038, 0.805, 0.992]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch 
(a person)."]}]}, {"video": "oWhvucAskhk.mkv", "clip": [1477, 1616], "conversations": [{"from": "human", "timestamps": 1507.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.004, 0.262, 0.131, 0.51]: read, sit."}, {"from": "human", "timestamps": 1616.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.712, 0.405, 0.762, 0.557]?"}, {"from": "gpt", "value": ["At Now, [0.712, 0.405, 0.762, 0.557]: walk, carry/hold (an object).", "At 1615.0s, [0.76, 0.405, 0.804, 0.551]: walk, carry/hold (an object).", "At 1614.0s, [0.806, 0.383, 0.847, 0.549]: walk, carry/hold (an object), watch (a person)."]}]}, {"video": "oWhvucAskhk.mkv", "clip": [1529, 1582], "conversations": [{"from": "human", "timestamps": 1559.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.535, 0.239, 0.759, 0.795]: carry/hold (an object), sit."}, {"from": "human", "timestamps": 1582.0, "value": "Describe the actions of the person at bbox [0.067, 0.0, 0.912, 0.999] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.067, 0.0, 0.912, 0.999]: stand, watch (a person).", "At 1581.0s, [0.098, 0.0, 0.934, 0.979]: stand, listen to (a person), watch (a person)."]}]}, {"video": "oWhvucAskhk.mkv", "clip": [1624, 1712], "conversations": [{"from": "human", "timestamps": 1654.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.015, 0.213, 0.197, 0.746]: listen to (a person), stand, watch (a person); [0.161, 0.18, 0.265, 0.634]: listen to (a person), stand, watch (a person); [0.212, 0.252, 0.433, 0.926]: stand; [0.289, 0.16, 0.397, 0.39]: watch (a person), stand; [0.453, 0.165, 0.526, 0.359]: listen to (a person), stand, watch (a person); [0.463, 0.183, 0.62, 0.751]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.671, 0.23, 0.805, 0.707]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.791, 0.077, 0.864, 0.412]: listen to (a person), stand, watch (a person); [0.873, 0.269, 0.995, 0.757]: watch (a person), stand."}, {"from": "human", "timestamps": 1712.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.006, 0.101, 0.385, 0.986]?"}, {"from": "gpt", "value": ["At Now, [0.006, 0.101, 0.385, 0.986]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1709.0s to 1711.0s, [0.002, 0.05, 0.355, 1.0]->[0.098, 0.063, 0.509, 1.0]->[0.029, 0.075, 0.401, 0.978]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "oq_bufAhyl8.mkv", "clip": [876, 920], "conversations": [{"from": "human", "timestamps": 906.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.514, 0.0, 1.0, 0.974]?"}, {"from": "gpt", "value": ["At Now, [0.514, 0.0, 1.0, 0.974]: stand, listen to (a person).", "At 905.0s, [0.486, 0.0, 1.0, 0.995]: stand, grab (a person), listen to (a person), watch (a person).", "At 904.0s, [0.496, 0.003, 1.0, 0.979]: stand, listen to (a person)."]}, {"from": "human", "timestamps": 920.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.053, 0.267, 0.648, 0.985]: stand, talk to (e.g., self, a person, a group), 
answer phone; [0.489, 0.0, 1.0, 0.987]: listen to (a person), stand."}]}, {"video": "oq_bufAhyl8.mkv", "clip": [906, 997], "conversations": [{"from": "human", "timestamps": 936.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.391, 0.022, 1.0, 0.986]: watch (a person), stand, answer phone."}, {"from": "human", "timestamps": 997.0, "value": "What actions is the person in bbox [0.025, 0.079, 0.426, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.025, 0.079, 0.426, 1.0]: stand, listen to (a person), watch (a person).", "From 994.0s to 996.0s, [0.006, 0.089, 0.345, 0.922]->[0.002, 0.087, 0.353, 1.0]->[0.004, 0.072, 0.323, 0.988]: stand, listen to (a person), watch (a person)."]}]}, {"video": "oq_bufAhyl8.mkv", "clip": [1024, 1080], "conversations": [{"from": "human", "timestamps": 1054.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.006, 0.216, 0.256, 0.677]: carry/hold (an object), stand; [0.369, 0.248, 0.65, 0.592]: listen to (a person), stand, eat; [0.591, 0.224, 0.998, 0.991]: stand."}, {"from": "human", "timestamps": 1080.0, "value": "What actions is the person in bbox [0.0, 0.115, 0.471, 0.981] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.115, 0.471, 0.981]: sit, listen to (a person).", "From 1078.0s to 1079.0s, [0.0, 0.096, 0.492, 0.976]->[0.0, 0.092, 0.47, 1.0]: sit, listen to (a person)."]}]}, {"video": "oq_bufAhyl8.mkv", "clip": [1144, 1244], "conversations": [{"from": "human", "timestamps": 1174.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.168, 0.02, 0.995, 0.982]: stand."}, {"from": "human", "timestamps": 1244.0, "value": "What actions did the person at bbox [0.429, 0.087, 0.838, 0.967] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.429, 0.087, 0.838, 0.967]: stand, listen to (a person), watch (a person).", "At 1243.0s, [0.417, 0.063, 0.822, 0.978]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1242.0s, [0.414, 0.036, 0.81, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1241.0s, [0.433, 0.07, 0.799, 0.971]: stand, grab (a person), listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "oq_bufAhyl8.mkv", "clip": [1177, 1257], "conversations": [{"from": "human", "timestamps": 1207.0, "value": "What actions did the person at bbox [0.319, 0.012, 0.967, 0.99] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.319, 0.012, 0.967, 0.99]: stand.", "From 1202.0s to 1205.0s, [0.329, 0.015, 0.951, 0.986]->[0.355, 0.01, 0.955, 0.986]->[0.316, 0.022, 0.991, 0.979]->[0.352, 0.023, 0.998, 0.97]: stand."]}, {"from": "human", "timestamps": 1257.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.015, 0.16, 0.966]: carry/hold (an object), stand, read, listen to (a person); [0.371, 0.064, 0.648, 0.99]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "oq_bufAhyl8.mkv", "clip": [1260, 1297], "conversations": [{"from": "human", "timestamps": 1290.0, "value": "Can you tell me the current 
and past actions of the person located in bbox [0.316, 0.036, 0.954, 0.984]?"}, {"from": "gpt", "value": ["In the current frame, [0.316, 0.036, 0.954, 0.984]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1289.0s, [0.321, 0.039, 0.952, 0.989]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1286.0s to 1288.0s, [0.314, 0.025, 0.94, 0.971]->[0.305, 0.033, 0.937, 0.971]->[0.311, 0.032, 0.944, 0.974]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1297.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.029, 0.001, 0.941, 0.983]: sit, answer phone."}]}, {"video": "oq_bufAhyl8.mkv", "clip": [1289, 1378], "conversations": [{"from": "human", "timestamps": 1319.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.219, 0.164, 0.42, 0.678]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1378.0, "value": "What actions is the person in bbox [0.067, 0.189, 0.559, 0.978] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.067, 0.189, 0.559, 0.978]: walk, talk to (e.g., self, a person, a group).", "At 1377.0s, [0.133, 0.162, 0.566, 0.984]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1376.0s, [0.104, 0.156, 0.462, 0.998]: walk, carry/hold (an object)."]}]}, {"video": "oq_bufAhyl8.mkv", "clip": [1350, 1381], "conversations": [{"from": "human", "timestamps": 1380.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.514, 0.322, 0.872, 1.0]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1381.0, "value": "What actions is the person in bbox [0.483, 0.326, 0.844, 0.992] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.483, 0.326, 0.844, 0.992]: stand, listen to (a person), watch (a person).", "At 1380.0s, [0.514, 0.322, 0.872, 1.0]: stand, listen to (a person), watch (a person)."]}]}, {"video": "oq_bufAhyl8.mkv", "clip": [1490, 1555], "conversations": [{"from": "human", "timestamps": 1520.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.001, 0.35, 0.974]: listen to (a person), stand; [0.312, 0.107, 0.612, 0.803]: listen to (a person), stand, watch (a person); [0.506, 0.058, 1.0, 0.98]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.509, 0.215, 0.622, 0.476]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1555.0, "value": "Describe the actions of the person at bbox [0.414, 0.18, 0.989, 0.968] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.414, 0.18, 0.989, 0.968]: stand, listen to (a person), watch (a person).", "From 1553.0s to 1554.0s, [0.489, 0.149, 0.984, 0.972]->[0.614, 0.212, 1.0, 0.986]: stand, listen to (a person), watch (a person).", "At 1552.0s, [0.393, 0.053, 0.992, 0.993]: walk, watch (a person).", "At 1550.0s, [0.286, 0.087, 0.942, 1.0]: walk.", "At 1549.0s, [0.207, 0.029, 1.0, 0.983]: stand."]}]}, {"video": "oq_bufAhyl8.mkv", "clip": [1517, 1562], 
"conversations": [{"from": "human", "timestamps": 1547.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.056, 0.421, 0.992]: listen to (a person), stand, watch (a person); [0.414, 0.233, 0.974, 0.987]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1562.0, "value": "For the person at bbox [0.253, 0.087, 0.88, 0.997], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.253, 0.087, 0.88, 0.997]: walk, talk to (e.g., self, a person, a group).", "From 1559.0s to 1560.0s, [0.0, 0.095, 0.554, 0.992]->[0.275, 0.046, 0.992, 0.995]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1558.0s, [0.0, 0.063, 0.498, 0.976]: walk, talk to (e.g., self, a person, a group).", "At 1557.0s, [0.0, 0.042, 0.456, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "oq_bufAhyl8.mkv", "clip": [1537, 1572], "conversations": [{"from": "human", "timestamps": 1567.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.185, 0.065, 0.667, 0.949]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1572.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.746, 0.111, 0.902, 0.95]?"}, {"from": "gpt", "value": ["In the current frame, [0.746, 0.111, 0.902, 0.95]: stand, listen to (a person).", "At 1571.0s, [0.746, 0.109, 0.919, 0.951]: stand, listen to (a person).", "From 1569.0s to 1570.0s, [0.757, 0.078, 0.917, 0.959]->[0.752, 0.099, 0.909, 0.961]: stand, listen to (a person), watch (a person)."]}]}, {"video": "oq_bufAhyl8.mkv", "clip": [1634, 1699], "conversations": [{"from": "human", "timestamps": 1664.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.052, 0.515, 0.987]: listen to (a person), watch (a person), bend/bow (at the waist)."}, {"from": "human", "timestamps": 1699.0, "value": "What actions is the person in bbox [0.231, 0.02, 0.973, 0.991] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.231, 0.02, 0.973, 0.991]: stand, talk to (e.g., self, a person, a group).", "From 1696.0s to 1698.0s, [0.272, 0.047, 1.0, 0.99]->[0.18, 0.031, 0.992, 0.985]->[0.235, 0.044, 1.0, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1695.0s, [0.224, 0.017, 0.999, 0.966]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1693.0s to 1694.0s, [0.486, 0.121, 1.0, 0.992]->[0.441, 0.086, 0.995, 0.98]: sit.", "At 1692.0s, [0.464, 0.102, 0.977, 0.996]: sit, listen to (a person).", "At 1691.0s, [0.509, 0.085, 0.977, 0.976]: stand, watch (a person)."]}]}, {"video": "pGP_oIdKmRY.mkv", "clip": [896, 957], "conversations": [{"from": "human", "timestamps": 926.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.154, 0.066, 0.649, 0.986]?"}, {"from": "gpt", "value": ["At Now, [0.154, 0.066, 0.649, 0.986]: stand, watch (a person).", "At 925.0s, [0.372, 0.025, 0.912, 0.973]: stand, watch (a person)."]}, {"from": "human", "timestamps": 957.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": 
"gpt", "value": "In the current frame, [0.016, 0.205, 0.439, 0.981]: carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.391, 0.116, 0.926, 0.987]: stand."}]}, {"video": "pGP_oIdKmRY.mkv", "clip": [915, 1004], "conversations": [{"from": "human", "timestamps": 945.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.043, 0.141, 0.475, 0.997]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.383, 0.134, 0.877, 0.985]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1004.0, "value": "What actions is the person in bbox [0.162, 0.093, 0.873, 0.982] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.162, 0.093, 0.873, 0.982]: sit.", "At 1003.0s, [0.171, 0.083, 0.86, 0.996]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1002.0s, [0.168, 0.09, 0.88, 0.991]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "pGP_oIdKmRY.mkv", "clip": [1021, 1056], "conversations": [{"from": "human", "timestamps": 1051.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.322, 0.04, 0.813, 0.979]?"}, {"from": "gpt", "value": ["In the current frame, [0.322, 0.04, 0.813, 0.979]: get up, listen to (a person), watch (a person).", "At 1050.0s, [0.588, 0.174, 1.0, 0.994]: get up.", "From 1046.0s to 1049.0s, [0.366, 0.157, 0.859, 0.984]->[0.38, 0.154, 0.859, 0.998]->[0.407, 0.169, 0.852, 0.979]->[0.368, 0.156, 0.845, 0.993]: sit, touch (an object).", "At 1045.0s, [0.36, 0.161, 0.864, 0.994]: sit, touch (an object), listen to (a person)."]}, {"from": "human", "timestamps": 1056.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.404, 0.151, 0.845, 0.987]: stand."}]}, {"video": "pGP_oIdKmRY.mkv", "clip": [1237, 1288], "conversations": [{"from": "human", "timestamps": 1267.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.014, 0.082, 0.739, 0.941]: lie/sleep; [0.588, 0.236, 0.977, 0.945]: watch (a person), stand."}, {"from": "human", "timestamps": 1288.0, "value": "Describe the actions of the person at bbox [0.035, 0.027, 0.851, 0.919] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.035, 0.027, 0.851, 0.919]: bend/bow (at the waist).", "At 1287.0s, [0.077, 0.022, 0.929, 0.844]: bend/bow (at the waist).", "At 1286.0s, [0.04, 0.013, 0.916, 0.853]: bend/bow (at the waist), lie/sleep.", "At 1285.0s, [0.04, 0.005, 0.939, 0.863]: bend/bow (at the waist)."]}]}, {"video": "pGP_oIdKmRY.mkv", "clip": [1287, 1359], "conversations": [{"from": "human", "timestamps": 1317.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.353, 0.314, 0.406, 0.509]: stand."}, {"from": "human", "timestamps": 1359.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.43, 0.0, 0.998, 0.976]?"}, {"from": "gpt", "value": ["At Now, [0.43, 0.0, 0.998, 0.976]: walk, talk to (e.g., self, a person, a group).", "At 1358.0s, [0.402, 0.022, 0.987, 0.957]: walk, listen to (a person), watch (a person).", "At 1357.0s, [0.374, 0.186, 0.957, 0.993]: walk, carry/hold (an object), listen to (a 
person), watch (a person).", "At 1356.0s, [0.404, 0.148, 0.885, 1.0]: walk, carry/hold (an object), listen to (a person).", "At 1355.0s, [0.429, 0.3, 0.87, 0.995]: walk, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "pGP_oIdKmRY.mkv", "clip": [1361, 1401], "conversations": [{"from": "human", "timestamps": 1391.0, "value": "For the person at bbox [0.363, 0.32, 0.502, 0.776], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.363, 0.32, 0.502, 0.776]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1390.0s, [0.361, 0.319, 0.499, 0.786]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1401.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.207, 0.189, 0.475, 0.926]: carry/hold (an object), stand, watch (a person), walk; [0.616, 0.366, 0.869, 0.873]: carry/hold (an object), sit, watch (a person)."}]}, {"video": "pGP_oIdKmRY.mkv", "clip": [1412, 1497], "conversations": [{"from": "human", "timestamps": 1442.0, "value": "What actions is the person in bbox [0.513, 0.219, 0.864, 0.988] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.513, 0.219, 0.864, 0.988]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1441.0s, [0.522, 0.225, 0.864, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1440.0s, [0.503, 0.217, 0.868, 0.996]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1497.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.037, 0.0, 0.847, 0.961]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.73, 0.288, 0.989, 0.981]: listen to (a person), stand, watch (a person)."}]}, {"video": "pGP_oIdKmRY.mkv", "clip": [1483, 1514], "conversations": [{"from": "human", "timestamps": 1513.0, "value": "What actions did the person at bbox [0.693, 0.143, 0.984, 0.54] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.693, 0.143, 0.984, 0.54]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1512.0s, [0.698, 0.141, 0.945, 0.484]: stand, watch (a person).", "At 1511.0s, [0.678, 0.149, 0.889, 0.398]: stand.", "At 1510.0s, [0.04, 0.146, 0.634, 0.981]: stand, carry/hold (an object), turn (e.g., a screwdriver).", "At 1509.0s, [0.048, 0.141, 0.539, 0.965]: stand, carry/hold (an object)."]}, {"from": "human", "timestamps": 1514.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.109, 0.114, 0.541, 0.986]: watch (a person), stand."}]}, {"video": "pGP_oIdKmRY.mkv", "clip": [1474, 1596], "conversations": [{"from": "human", "timestamps": 1504.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.049, 0.036, 0.931, 1.0]: stand, talk to (e.g., self, a person, a group); [0.824, 0.37, 0.997, 0.956]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1596.0, "value": "For the person at bbox [0.06, 0.02, 0.98, 0.992], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.06, 0.02, 
0.98, 0.992]: stand, talk to (e.g., self, a person, a group).", "From 1594.0s to 1595.0s, [0.107, 0.038, 0.966, 0.982]->[0.069, 0.017, 0.967, 0.99]: stand, talk to (e.g., self, a person, a group).", "From 1592.0s to 1593.0s, [0.001, 0.006, 0.937, 0.983]->[0.034, 0.0, 0.965, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "pGP_oIdKmRY.mkv", "clip": [1529, 1573], "conversations": [{"from": "human", "timestamps": 1559.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.036, 0.017, 0.94, 0.956]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.843, 0.437, 0.993, 0.995]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1573.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.011, 0.029, 0.792, 0.969]?"}, {"from": "gpt", "value": ["In the current frame, [0.011, 0.029, 0.792, 0.969]: stand, listen to (a person).", "At 1572.0s, [0.007, 0.0, 0.784, 0.975]: stand.", "At 1571.0s, [0.014, 0.024, 0.76, 0.97]: stand, watch (a person)."]}]}, {"video": "pGP_oIdKmRY.mkv", "clip": [1636, 1734], "conversations": [{"from": "human", "timestamps": 1666.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.172, 0.001, 0.923, 0.979]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1734.0, "value": "What actions did the person at bbox [0.0, 0.004, 0.806, 0.983] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.004, 0.806, 0.983]: stand, talk to (e.g., self, a person, a group).", "At 1733.0s, [0.0, 0.0, 0.769, 0.983]: stand, talk to (e.g., self, a person, a group).", "From 1731.0s to 1732.0s, [0.025, 0.0, 0.864, 0.99]->[0.029, 0.005, 0.876, 0.964]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "pGP_oIdKmRY.mkv", "clip": [1698, 1729], "conversations": [{"from": "human", "timestamps": 1728.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.261, 0.418, 0.346, 0.627]?"}, {"from": "gpt", "value": ["At Now, [0.261, 0.418, 0.346, 0.627]: stand, touch (an object).", "From 1726.0s to 1727.0s, [0.257, 0.419, 0.348, 0.631]->[0.259, 0.419, 0.344, 0.638]: stand, touch (an object).", "At 1725.0s, [0.26, 0.419, 0.36, 0.62]: stand, carry/hold (an object)."]}, {"from": "human", "timestamps": 1729.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.408, 0.096, 0.746, 0.885]: stand, touch (an object)."}]}, {"video": "pGP_oIdKmRY.mkv", "clip": [1742, 1774], "conversations": [{"from": "human", "timestamps": 1772.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.019, 0.015, 0.471, 0.959]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.33, 0.026, 0.999, 0.966]: listen to (a person), stand."}, {"from": "human", "timestamps": 1774.0, "value": "Describe the actions of the person at bbox [0.016, 0.0, 0.47, 0.637] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.016, 0.0, 0.47, 0.637]: stand, watch (a person).", "From 1769.0s to 1773.0s, [0.0, 0.098, 0.44, 0.967]->[0.013, 0.052, 0.596, 0.968]->[0.0, 0.032, 0.459, 0.98]->[0.019, 0.015, 0.471, 
0.959]->[0.013, 0.007, 0.41, 0.72]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "phVLLTMzmKk.mkv", "clip": [906, 993], "conversations": [{"from": "human", "timestamps": 936.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.071, 0.401, 0.264, 0.85]: carry/hold (an object), sit, talk to (e.g., self, a person, a group), listen to (a person); [0.259, 0.375, 0.419, 0.644]: carry/hold (an object), sit, listen to (a person); [0.4, 0.357, 0.57, 0.76]: carry/hold (an object), sit, listen to (a person); [0.543, 0.304, 0.676, 0.739]: listen to (a person), sit, watch (a person); [0.655, 0.372, 0.894, 0.864]: listen to (a person), sit, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 993.0, "value": "What actions is the person in bbox [0.027, 0.022, 0.973, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.027, 0.022, 0.973, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 991.0s to 992.0s, [0.002, 0.075, 0.916, 0.989]->[0.005, 0.052, 0.995, 0.972]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "phVLLTMzmKk.mkv", "clip": [1013, 1078], "conversations": [{"from": "human", "timestamps": 1043.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.133, 0.329, 0.354, 0.997]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.377, 0.327, 0.55, 0.56]: listen to (a person), sit, watch (a person); [0.59, 0.327, 0.732, 0.626]: listen to (a person), sit, watch (a person); [0.622, 0.318, 0.831, 0.829]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1078.0, "value": "For the person at bbox [0.014, 0.034, 0.868, 0.99], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.014, 0.034, 0.868, 0.99]: sit, talk to (e.g., self, a person, a group).", "From 1076.0s to 1077.0s, [0.056, 0.013, 0.883, 1.0]->[0.003, 0.054, 0.9, 0.982]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "phVLLTMzmKk.mkv", "clip": [1017, 1126], "conversations": [{"from": "human", "timestamps": 1047.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.008, 0.048, 0.649, 0.993]: listen to (a person), sit, watch (a person); [0.36, 0.121, 0.865, 0.992]: sit."}, {"from": "human", "timestamps": 1126.0, "value": "For the person at bbox [0.021, 0.062, 0.926, 0.999], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.021, 0.062, 0.926, 0.999]: sit.", "From 1124.0s to 1125.0s, [0.0, 0.137, 0.652, 0.988]->[0.013, 0.085, 0.801, 0.983]: sit.", "From 1122.0s to 1123.0s, [0.0, 0.269, 0.485, 0.983]->[0.005, 0.128, 0.554, 0.976]: sit, watch (a person).", "At 1121.0s, [0.0, 0.313, 0.448, 0.95]: sit."]}]}, {"video": "phVLLTMzmKk.mkv", "clip": [1082, 1120], "conversations": [{"from": "human", "timestamps": 1112.0, "value": "For the person at bbox [0.135, 0.333, 0.352, 0.99], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.135, 0.333, 0.352, 0.99]: sit, listen to (a person), watch (a person).", "From 1101.0s to 1111.0s, [0.139, 
0.329, 0.349, 1.0]->[0.138, 0.333, 0.348, 1.0]->[0.138, 0.334, 0.347, 1.0]->[0.138, 0.333, 0.348, 1.0]->[0.139, 0.328, 0.348, 1.0]->[0.137, 0.318, 0.35, 1.0]->[0.139, 0.321, 0.352, 0.993]->[0.137, 0.321, 0.348, 1.0]->[0.137, 0.328, 0.354, 0.991]->[0.138, 0.329, 0.353, 0.992]->[0.136, 0.33, 0.352, 0.995]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1120.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.032, 0.266, 0.686, 0.989]: sit."}]}, {"video": "phVLLTMzmKk.mkv", "clip": [1244, 1293], "conversations": [{"from": "human", "timestamps": 1274.0, "value": "What actions did the person at bbox [0.096, 0.365, 0.283, 0.981] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.096, 0.365, 0.283, 0.981]: walk.", "From 1272.0s to 1273.0s, [0.222, 0.289, 0.451, 0.992]->[0.079, 0.297, 0.262, 0.957]: walk."]}, {"from": "human", "timestamps": 1293.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.316, 0.274, 0.421, 0.683]: carry/hold (an object), walk; [0.443, 0.29, 0.532, 0.653]: carry/hold (an object), walk; [0.489, 0.288, 0.593, 0.651]: carry/hold (an object), walk; [0.591, 0.269, 0.686, 0.653]: carry/hold (an object), walk; [0.673, 0.262, 0.788, 0.638]: carry/hold (an object), walk."}]}, {"video": "phVLLTMzmKk.mkv", "clip": [1395, 1491], "conversations": [{"from": "human", "timestamps": 1425.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.122, 0.131, 0.442, 0.998]: carry/hold (an object), talk to (e.g., self, a person, a group), walk; [0.616, 0.182, 0.741, 0.469]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1491.0, "value": "Describe the actions of the person at bbox [0.293, 0.059, 0.991, 0.959] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.293, 0.059, 0.991, 0.959]: sit, talk to (e.g., self, a person, a group).", "At 1490.0s, [0.24, 0.057, 0.953, 0.946]: sit, talk to (e.g., self, a person, a group).", "From 1488.0s to 1489.0s, [0.254, 0.054, 0.946, 0.932]->[0.248, 0.07, 0.953, 0.934]: sit.", "From 1474.0s to 1487.0s, [0.234, 0.068, 0.998, 0.957]->[0.239, 0.058, 0.962, 0.817]->[0.208, 0.062, 0.903, 0.936]->[0.215, 0.06, 0.904, 0.941]->[0.206, 0.045, 0.916, 0.938]->[0.21, 0.049, 0.95, 0.921]->[0.196, 0.059, 0.973, 0.936]->[0.273, 0.058, 0.953, 0.867]->[0.229, 0.078, 0.957, 0.914]->[0.231, 0.082, 0.937, 0.879]->[0.222, 0.066, 0.959, 0.95]->[0.246, 0.059, 0.932, 0.913]->[0.237, 0.065, 0.953, 0.906]->[0.259, 0.065, 0.931, 0.909]: sit, talk to (e.g., self, a person, a group).", "At 1473.0s, [0.224, 0.071, 0.972, 0.944]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1467.0s to 1472.0s, [0.244, 0.069, 0.931, 0.929]->[0.254, 0.03, 0.982, 0.942]->[0.23, 0.158, 0.831, 0.889]->[0.276, 0.036, 0.931, 0.893]->[0.362, 0.043, 0.922, 0.864]->[0.251, 0.06, 0.947, 0.876]: sit, talk to (e.g., self, a person, a group).", "From 1462.0s to 1466.0s, [0.259, 0.077, 0.808, 0.798]->[0.228, 0.028, 0.869, 0.833]->[0.233, 0.029, 0.871, 0.816]->[0.266, 0.045, 0.831, 0.804]->[0.236, 0.051, 0.833, 0.821]: sit.", "At 1461.0s, [0.251, 0.05, 0.842, 0.815]: sit, carry/hold (an object).", "At 1460.0s, [0.077, 0.038, 0.845, 0.822]: sit, carry/hold (an object), close (e.g., a 
door, a box).", "From 1458.0s to 1459.0s, [0.098, 0.064, 0.879, 0.847]->[0.093, 0.044, 0.871, 0.887]: sit, carry/hold (an object).", "At 1457.0s, [0.085, 0.059, 0.928, 0.761]: sit, carry/hold (an object), drink.", "At 1456.0s, [0.44, 0.439, 0.659, 0.778]: sit, drink.", "From 1454.0s to 1455.0s, [0.443, 0.466, 0.66, 0.775]->[0.455, 0.449, 0.662, 0.778]: sit, carry/hold (an object).", "At 1453.0s, [0.462, 0.473, 0.658, 0.761]: sit, carry/hold (an object), open (e.g., a window, a car door).", "At 1452.0s, [0.454, 0.471, 0.658, 0.77]: sit, carry/hold (an object), turn (e.g., a screwdriver).", "At 1451.0s, [0.445, 0.455, 0.655, 0.772]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1445.0s to 1450.0s, [0.005, 0.009, 0.828, 0.994]->[0.0, 0.026, 1.0, 0.997]->[0.0, 0.004, 0.508, 1.0]->[0.0, 0.027, 0.952, 0.987]->[0.437, 0.453, 0.654, 0.76]->[0.445, 0.479, 0.65, 0.761]: sit, eat.", "From 1443.0s to 1444.0s, [0.121, 0.0, 0.984, 0.989]->[0.0, 0.015, 1.0, 0.998]: sit.", "From 1441.0s to 1442.0s, [0.011, 0.002, 0.796, 1.0]->[0.009, 0.0, 0.96, 1.0]: sit, talk to (e.g., self, a person, a group).", "At 1440.0s, [0.0, 0.016, 0.778, 1.0]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "phVLLTMzmKk.mkv", "clip": [1418, 1489], "conversations": [{"from": "human", "timestamps": 1448.0, "value": "What actions did the person at bbox [0.0, 0.027, 0.952, 0.987] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.027, 0.952, 0.987]: sit, eat.", "From 1445.0s to 1447.0s, [0.005, 0.009, 0.828, 0.994]->[0.0, 0.026, 1.0, 0.997]->[0.0, 0.004, 0.508, 1.0]: sit, eat.", "From 1443.0s to 1444.0s, [0.121, 0.0, 0.984, 0.989]->[0.0, 0.015, 1.0, 0.998]: sit.", "From 1441.0s to 1442.0s, [0.011, 0.002, 0.796, 1.0]->[0.009, 0.0, 0.96, 1.0]: sit, talk to (e.g., self, a person, a group).", "At 1440.0s, [0.0, 0.016, 0.778, 1.0]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1489.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.248, 0.07, 0.953, 0.934]: sit."}]}, {"video": "phVLLTMzmKk.mkv", "clip": [1471, 1514], "conversations": [{"from": "human", "timestamps": 1501.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.256, 0.075, 0.919, 0.907]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1514.0, "value": "Describe the actions of the person at bbox [0.252, 0.078, 0.995, 0.897] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.252, 0.078, 0.995, 0.897]: sit, talk to (e.g., self, a person, a group).", "From 1499.0s to 1513.0s, [0.24, 0.05, 0.952, 0.931]->[0.258, 0.08, 0.91, 0.912]->[0.256, 0.075, 0.919, 0.907]->[0.308, 0.062, 0.991, 0.763]->[0.263, 0.04, 0.985, 0.961]->[0.269, 0.033, 0.961, 0.92]->[0.243, 0.031, 1.0, 0.896]->[0.258, 0.024, 0.962, 0.922]->[0.255, 0.036, 0.953, 0.923]->[0.261, 0.054, 0.953, 0.902]->[0.258, 0.073, 0.933, 0.933]->[0.242, 0.05, 1.0, 0.961]->[0.276, 0.022, 0.993, 0.923]->[0.268, 0.041, 0.905, 0.878]->[0.26, 0.058, 0.859, 0.922]: sit, talk to (e.g., self, a person, a group).", "At 1498.0s, [0.255, 0.031, 0.978, 0.945]: bend/bow (at the waist), sit, talk to (e.g., self, a person, a group).", "From 1495.0s to 1497.0s, [0.259, 0.052, 1.0, 0.954]->[0.236, 0.048, 0.939, 0.876]->[0.236, 0.045, 0.831, 0.787]: 
sit, talk to (e.g., self, a person, a group).", "From 1492.0s to 1494.0s, [0.257, 0.046, 0.966, 0.903]->[0.235, 0.043, 0.984, 0.957]->[0.216, 0.033, 0.985, 0.953]: sit.", "From 1490.0s to 1491.0s, [0.24, 0.057, 0.953, 0.946]->[0.293, 0.059, 0.991, 0.959]: sit, talk to (e.g., self, a person, a group).", "From 1488.0s to 1489.0s, [0.254, 0.054, 0.946, 0.932]->[0.248, 0.07, 0.953, 0.934]: sit.", "From 1474.0s to 1487.0s, [0.234, 0.068, 0.998, 0.957]->[0.239, 0.058, 0.962, 0.817]->[0.208, 0.062, 0.903, 0.936]->[0.215, 0.06, 0.904, 0.941]->[0.206, 0.045, 0.916, 0.938]->[0.21, 0.049, 0.95, 0.921]->[0.196, 0.059, 0.973, 0.936]->[0.273, 0.058, 0.953, 0.867]->[0.229, 0.078, 0.957, 0.914]->[0.231, 0.082, 0.937, 0.879]->[0.222, 0.066, 0.959, 0.95]->[0.246, 0.059, 0.932, 0.913]->[0.237, 0.065, 0.953, 0.906]->[0.259, 0.065, 0.931, 0.909]: sit, talk to (e.g., self, a person, a group).", "At 1473.0s, [0.224, 0.071, 0.972, 0.944]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1467.0s to 1472.0s, [0.244, 0.069, 0.931, 0.929]->[0.254, 0.03, 0.982, 0.942]->[0.23, 0.158, 0.831, 0.889]->[0.276, 0.036, 0.931, 0.893]->[0.362, 0.043, 0.922, 0.864]->[0.251, 0.06, 0.947, 0.876]: sit, talk to (e.g., self, a person, a group).", "From 1462.0s to 1466.0s, [0.259, 0.077, 0.808, 0.798]->[0.228, 0.028, 0.869, 0.833]->[0.233, 0.029, 0.871, 0.816]->[0.266, 0.045, 0.831, 0.804]->[0.236, 0.051, 0.833, 0.821]: sit.", "At 1461.0s, [0.251, 0.05, 0.842, 0.815]: sit, carry/hold (an object).", "At 1460.0s, [0.077, 0.038, 0.845, 0.822]: sit, carry/hold (an object), close (e.g., a door, a box).", "From 1458.0s to 1459.0s, [0.098, 0.064, 0.879, 0.847]->[0.093, 0.044, 0.871, 0.887]: sit, carry/hold (an object).", "At 1457.0s, [0.085, 0.059, 0.928, 0.761]: sit, carry/hold (an object), drink.", "At 1456.0s, [0.44, 0.439, 0.659, 0.778]: sit, drink.", "From 1454.0s to 1455.0s, [0.443, 0.466, 0.66, 0.775]->[0.455, 0.449, 0.662, 0.778]: sit, carry/hold (an object).", "At 1453.0s, [0.462, 0.473, 0.658, 0.761]: sit, carry/hold (an object), open (e.g., a window, a car door).", "At 1452.0s, [0.454, 0.471, 0.658, 0.77]: sit, carry/hold (an object), turn (e.g., a screwdriver).", "At 1451.0s, [0.445, 0.455, 0.655, 0.772]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1445.0s to 1450.0s, [0.005, 0.009, 0.828, 0.994]->[0.0, 0.026, 1.0, 0.997]->[0.0, 0.004, 0.508, 1.0]->[0.0, 0.027, 0.952, 0.987]->[0.437, 0.453, 0.654, 0.76]->[0.445, 0.479, 0.65, 0.761]: sit, eat.", "From 1443.0s to 1444.0s, [0.121, 0.0, 0.984, 0.989]->[0.0, 0.015, 1.0, 0.998]: sit.", "From 1441.0s to 1442.0s, [0.011, 0.002, 0.796, 1.0]->[0.009, 0.0, 0.96, 1.0]: sit, talk to (e.g., self, a person, a group).", "At 1440.0s, [0.0, 0.016, 0.778, 1.0]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "phVLLTMzmKk.mkv", "clip": [1513, 1587], "conversations": [{"from": "human", "timestamps": 1543.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.007, 0.052, 0.942, 1.0]: listen to (a person), stand."}, {"from": "human", "timestamps": 1587.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.06, 0.05, 0.914, 1.0]?"}, {"from": "gpt", "value": ["In the current frame, [0.06, 0.05, 0.914, 1.0]: sit, talk to (e.g., self, a person, a group).", "At 1586.0s, [0.012, 0.01, 0.911, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1585.0s, [0.074, 0.1, 0.973, 1.0]: 
sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "phVLLTMzmKk.mkv", "clip": [1570, 1607], "conversations": [{"from": "human", "timestamps": 1600.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.165, 0.172, 0.337, 0.842]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.543, 0.394, 0.771, 0.812]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1607.0, "value": "Describe the actions of the person at bbox [0.53, 0.324, 0.672, 0.773] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.53, 0.324, 0.672, 0.773]: walk, carry/hold (an object).", "From 1603.0s to 1606.0s, [0.5, 0.057, 0.887, 0.982]->[0.484, 0.16, 0.75, 0.922]->[0.491, 0.225, 0.694, 0.873]->[0.504, 0.303, 0.677, 0.83]: walk, carry/hold (an object)."]}]}, {"video": "phVLLTMzmKk.mkv", "clip": [1595, 1647], "conversations": [{"from": "human", "timestamps": 1625.0, "value": "For the person at bbox [0.256, 0.123, 0.474, 0.773], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.256, 0.123, 0.474, 0.773]: stand, watch (a person).", "At 1624.0s, [0.281, 0.122, 0.47, 0.769]: stand, watch (a person).", "From 1620.0s to 1623.0s, [0.295, 0.052, 0.437, 0.836]->[0.28, 0.028, 0.465, 0.816]->[0.297, 0.083, 0.47, 0.786]->[0.248, 0.11, 0.46, 0.773]: walk."]}, {"from": "human", "timestamps": 1647.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.004, 0.134, 0.502, 0.987]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.411, 0.146, 0.998, 0.974]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "phVLLTMzmKk.mkv", "clip": [1589, 1709], "conversations": [{"from": "human", "timestamps": 1619.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.278, 0.323, 0.874, 0.998]: sit."}, {"from": "human", "timestamps": 1709.0, "value": "What actions is the person in bbox [0.087, 0.101, 0.872, 0.971] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.087, 0.101, 0.872, 0.971]: sit, talk to (e.g., self, a person, a group).", "From 1706.0s to 1708.0s, [0.019, 0.068, 0.917, 0.973]->[0.067, 0.048, 0.901, 0.973]->[0.037, 0.038, 0.962, 0.976]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "phVLLTMzmKk.mkv", "clip": [1725, 1775], "conversations": [{"from": "human", "timestamps": 1755.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.072, 0.147, 0.582, 0.999]: sit, talk to (e.g., self, a person, a group); [0.436, 0.148, 0.915, 0.957]: carry/hold (an object), sit, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1775.0, "value": "Describe the actions of the person at bbox [0.074, 0.09, 0.966, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.074, 0.09, 0.966, 1.0]: sit.", "At 1774.0s, [0.092, 0.1, 0.939, 1.0]: sit, listen to (a person)."]}]}, {"video": "phrYEKv0rmw.mkv", "clip": [906, 956], "conversations": [{"from": "human", "timestamps": 936.0, "value": "Describe the actions of the person at bbox [0.314, 
0.033, 0.952, 0.984] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.314, 0.033, 0.952, 0.984]: stand, listen to (a person).", "At 935.0s, [0.338, 0.021, 0.945, 0.991]: stand, listen to (a person), watch (a person).", "At 934.0s, [0.309, 0.022, 0.947, 0.994]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 932.0s to 933.0s, [0.333, 0.0, 0.9, 0.991]->[0.326, 0.012, 0.946, 0.994]: stand, listen to (a person), watch (a person).", "From 930.0s to 931.0s, [0.233, 0.0, 0.84, 1.0]->[0.315, 0.0, 0.876, 0.994]: stand, listen to (a person).", "From 927.0s to 929.0s, [0.525, 0.032, 0.866, 1.0]->[0.545, 0.054, 0.887, 1.0]->[0.587, 0.069, 0.95, 1.0]: walk, carry/hold (an object), listen to (a person).", "From 925.0s to 926.0s, [0.492, 0.027, 0.921, 1.0]->[0.494, 0.029, 0.876, 1.0]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 924.0s, [0.433, 0.032, 0.89, 0.992]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 923.0s, [0.402, 0.054, 0.907, 0.998]: walk, carry/hold (an object), listen to (a person).", "From 921.0s to 922.0s, [0.389, 0.11, 0.952, 0.998]->[0.378, 0.042, 0.926, 0.991]: walk, carry/hold (an object), hand shake, listen to (a person).", "At 920.0s, [0.371, 0.171, 0.99, 0.99]: walk, carry/hold (an object), hand shake, listen to (a person), watch (a person).", "At 919.0s, [0.42, 0.114, 0.99, 0.98]: stand, carry/hold (an object), hand shake, listen to (a person), watch (a person).", "At 918.0s, [0.697, 0.092, 0.996, 0.981]: stand, hand shake, watch (a person).", "At 917.0s, [0.71, 0.064, 1.0, 0.976]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 956.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.131, 0.128, 0.421, 0.986]: carry/hold (an object), listen to (a person), watch (a person), walk; [0.383, 0.164, 0.571, 0.983]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.716, 0.169, 0.887, 0.621]: carry/hold (an object), watch (a person), talk to (e.g., self, a person, a group), walk; [0.72, 0.472, 0.995, 0.98]: carry/hold (an object), sit, write, listen to (a person)."}]}, {"video": "phrYEKv0rmw.mkv", "clip": [901, 988], "conversations": [{"from": "human", "timestamps": 931.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.205, 0.402, 0.989]: stand, talk to (e.g., self, a person, a group); [0.315, 0.0, 0.876, 0.994]: listen to (a person), stand."}, {"from": "human", "timestamps": 988.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.169, 0.034, 0.865, 0.981]?"}, {"from": "gpt", "value": ["In the current frame, [0.169, 0.034, 0.865, 0.981]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 987.0s, [0.208, 0.033, 0.864, 0.981]: stand, carry/hold (an object), read.", "From 983.0s to 986.0s, [0.206, 0.051, 0.862, 0.978]->[0.2, 0.046, 0.864, 0.978]->[0.204, 0.038, 0.856, 0.97]->[0.234, 0.039, 0.863, 0.971]: stand, read.", "At 982.0s, [0.237, 0.047, 0.872, 0.957]: stand, carry/hold (an object), read.", "At 981.0s, [0.206, 0.035, 0.869, 0.943]: stand, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "At 980.0s, [0.208, 0.045, 0.866, 0.966]: stand, read, talk to 
(e.g., self, a person, a group).", "From 978.0s to 979.0s, [0.521, 0.104, 0.865, 0.981]->[0.21, 0.043, 0.865, 0.972]: sit, carry/hold (an object), read, talk to (e.g., self, a person, a group)."]}]}, {"video": "phrYEKv0rmw.mkv", "clip": [967, 1007], "conversations": [{"from": "human", "timestamps": 997.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.283, 0.062, 0.584, 0.999]: carry/hold (an object), sit, listen to (a person); [0.534, 0.373, 0.932, 0.959]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1007.0, "value": "For the person at bbox [0.098, 0.226, 0.654, 0.975], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.098, 0.226, 0.654, 0.975]: bend/bow (at the waist), write, listen to (a person).", "At 1006.0s, [0.365, 0.195, 0.72, 0.772]: bend/bow (at the waist), write, talk to (e.g., self, a person, a group)."]}]}, {"video": "phrYEKv0rmw.mkv", "clip": [1024, 1097], "conversations": [{"from": "human", "timestamps": 1054.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.713, 0.126, 1.0, 0.863]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1097.0, "value": "For the person at bbox [0.077, 0.115, 0.48, 0.974], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.077, 0.115, 0.48, 0.974]: stand, write, listen to (a person).", "At 1096.0s, [0.084, 0.115, 0.482, 0.974]: sit, carry/hold (an object).", "At 1095.0s, [0.129, 0.073, 0.496, 1.0]: stand, carry/hold (an object)."]}]}, {"video": "phrYEKv0rmw.mkv", "clip": [1169, 1215], "conversations": [{"from": "human", "timestamps": 1199.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.017, 0.142, 0.315, 0.651]: watch (a person), sit, sail boat; [0.371, 0.083, 0.691, 0.58]: stand, kiss (a person), hug (a person), sail boat."}, {"from": "human", "timestamps": 1215.0, "value": "For the person at bbox [0.746, 0.317, 0.837, 0.48], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.746, 0.317, 0.837, 0.48]: stand, listen to (a person).", "At 1214.0s, [0.749, 0.321, 0.845, 0.471]: stand, touch (an object), listen to (a person), watch (a person).", "At 1213.0s, [0.737, 0.319, 0.839, 0.47]: stand, talk to (e.g., self, a person, a group).", "At 1212.0s, [0.737, 0.337, 0.824, 0.478]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "phrYEKv0rmw.mkv", "clip": [1178, 1258], "conversations": [{"from": "human", "timestamps": 1208.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.055, 0.038, 0.766, 0.94]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1258.0, "value": "What actions did the person at bbox [0.585, 0.074, 0.979, 0.896] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.585, 0.074, 0.979, 0.896]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1257.0s, [0.565, 0.025, 0.923, 0.925]: stand, carry/hold (an object), talk to (e.g., self, a 
person, a group).", "At 1256.0s, [0.489, 0.051, 0.929, 0.955]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1255.0s, [0.521, 0.063, 0.892, 0.902]: stand, carry/hold (an object), listen to (a person).", "At 1254.0s, [0.538, 0.048, 0.882, 0.877]: stand, listen to (a person).", "From 1250.0s to 1253.0s, [0.494, 0.108, 0.888, 0.904]->[0.502, 0.11, 0.911, 0.887]->[0.514, 0.099, 0.898, 0.885]->[0.53, 0.064, 0.917, 0.911]: stand, carry/hold (an object), listen to (a person).", "From 1247.0s to 1249.0s, [0.517, 0.1, 0.9, 0.891]->[0.495, 0.113, 0.899, 0.905]->[0.513, 0.072, 0.892, 0.905]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1246.0s, [0.471, 0.083, 0.909, 0.853]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1242.0s to 1245.0s, [0.509, 0.095, 0.914, 0.998]->[0.554, 0.072, 0.954, 0.969]->[0.604, 0.204, 0.831, 0.813]->[0.513, 0.044, 0.979, 0.895]: walk, carry/hold (an object), listen to (a person).", "At 1241.0s, [0.499, 0.037, 0.889, 0.986]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1240.0s, [0.479, 0.031, 0.901, 0.971]: walk, carry/hold (an object), listen to (a person), watch (a person).", "From 1238.0s to 1239.0s, [0.508, 0.064, 0.921, 0.997]->[0.553, 0.004, 0.939, 0.997]: walk, carry/hold (an object), listen to (a person).", "At 1237.0s, [0.348, 0.099, 0.668, 1.0]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1236.0s, [0.348, 0.11, 0.69, 0.987]: walk, carry/hold (an object)."]}]}, {"video": "phrYEKv0rmw.mkv", "clip": [1230, 1277], "conversations": [{"from": "human", "timestamps": 1260.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.205, 0.643, 0.48, 0.993]: watch (a person), walk; [0.488, 0.651, 0.958, 0.992]: stand, pull (an object)."}, {"from": "human", "timestamps": 1277.0, "value": "Describe the actions of the person at bbox [0.182, 0.072, 0.467, 0.858] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.182, 0.072, 0.467, 0.858]: stand, touch (an object), listen to (a person).", "From 1275.0s to 1276.0s, [0.199, 0.073, 0.463, 0.811]->[0.18, 0.067, 0.464, 0.815]: stand, touch (an object), listen to (a person), watch (a person).", "At 1274.0s, [0.224, 0.078, 0.495, 0.808]: stand, carry/hold (an object), touch (an object), listen to (a person).", "At 1273.0s, [0.202, 0.054, 0.488, 0.802]: stand, touch (an object), listen to (a person).", "At 1272.0s, [0.241, 0.041, 0.535, 0.756]: walk, watch (a person).", "At 1271.0s, [0.232, 0.089, 0.478, 0.73]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1270.0s, [0.228, 0.14, 0.461, 0.727]: stand, walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1269.0s, [0.202, 0.159, 0.399, 0.701]: walk, carry/hold (an object), listen to (a person), watch (a person).", "From 1267.0s to 1268.0s, [0.209, 0.263, 0.387, 0.666]->[0.143, 0.221, 0.352, 0.614]: walk, carry/hold (an object), watch (a person)."]}]}, {"video": "phrYEKv0rmw.mkv", "clip": [1281, 1363], "conversations": [{"from": "human", "timestamps": 1311.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.048, 0.043, 0.374, 0.884]: listen to (a person), stand, touch (an object), watch (a person); [0.441, 0.055, 0.75, 
0.789]: carry/hold (an object), stand, watch (a person); [0.5, 0.431, 0.987, 0.996]: carry/hold (an object), sit."}, {"from": "human", "timestamps": 1363.0, "value": "What actions did the person at bbox [0.133, 0.124, 0.458, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.133, 0.124, 0.458, 1.0]: stand, push (another person), watch (a person).", "At 1362.0s, [0.034, 0.064, 0.389, 0.97]: walk, push (another person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "phrYEKv0rmw.mkv", "clip": [1295, 1337], "conversations": [{"from": "human", "timestamps": 1325.0, "value": "What actions is the person in bbox [0.596, 0.446, 0.957, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.596, 0.446, 0.957, 1.0]: sit, touch (an object), listen to (a person), watch (a person).", "At 1324.0s, [0.522, 0.438, 0.961, 0.991]: sit, touch (an object), listen to (a person), watch (a person).", "At 1323.0s, [0.542, 0.47, 0.962, 0.998]: sit, touch (an object), watch (a person).", "From 1321.0s to 1322.0s, [0.601, 0.428, 0.991, 0.998]->[0.553, 0.433, 0.973, 0.998]: sit, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1337.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.002, 0.057, 0.206, 0.936]: listen to (a person), stand, touch (an object), watch (a person); [0.171, 0.191, 0.524, 0.892]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.556, 0.493, 0.99, 0.998]: carry/hold (an object), sit, listen to (a person); [0.577, 0.092, 0.894, 0.807]: carry/hold (an object), stand, listen to (a person)."}]}, {"video": "phrYEKv0rmw.mkv", "clip": [1364, 1428], "conversations": [{"from": "human", "timestamps": 1394.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.166, 0.475, 0.363, 0.783]: sit; [0.618, 0.493, 0.779, 0.715]: carry/hold (an object), sit, sail boat, talk to (e.g., self, a person, a group); [0.721, 0.493, 0.85, 0.682]: listen to (a person), sit, sail boat."}, {"from": "human", "timestamps": 1428.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.125, 0.225, 0.534, 0.903]?"}, {"from": "gpt", "value": ["At Now, [0.125, 0.225, 0.534, 0.903]: sit, sail boat, listen to (a person), watch (a person).", "At 1427.0s, [0.178, 0.21, 0.54, 0.77]: sit, sail boat, listen to (a person), watch (a person).", "At 1426.0s, [0.141, 0.22, 0.532, 0.791]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1425.0s, [0.147, 0.229, 0.494, 0.844]: sit, sail boat, listen to (a person), watch (a person).", "At 1424.0s, [0.177, 0.235, 0.534, 0.848]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1423.0s, [0.178, 0.253, 0.534, 0.868]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1422.0s, [0.156, 0.257, 0.55, 0.8]: sit, carry/hold (an object), sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1421.0s, [0.136, 0.234, 0.555, 0.881]: sit, carry/hold (an object), sail boat, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1420.0s, [0.122, 0.244, 0.509, 0.977]: sit, carry/hold (an object), sail boat, listen to (a person).", "At 1419.0s, [0.112, 
0.22, 0.499, 0.851]: sit, carry/hold (an object), listen to (a person).", "At 1418.0s, [0.139, 0.217, 0.497, 0.855]: sit, carry/hold (an object), sail boat, talk to (e.g., self, a person, a group).", "From 1414.0s to 1417.0s, [0.108, 0.292, 0.508, 0.985]->[0.097, 0.29, 0.512, 0.979]->[0.073, 0.276, 0.513, 0.958]->[0.093, 0.261, 0.488, 0.978]: sit, carry/hold (an object), sail boat.", "From 1412.0s to 1413.0s, [0.114, 0.322, 0.55, 0.987]->[0.108, 0.324, 0.547, 0.982]: sit, carry/hold (an object), sail boat, watch (a person).", "At 1411.0s, [0.099, 0.291, 0.561, 0.988]: sit, carry/hold (an object), watch (a person).", "From 1409.0s to 1410.0s, [0.063, 0.264, 0.516, 0.965]->[0.092, 0.294, 0.554, 0.978]: sit, carry/hold (an object), sail boat, listen to (a person), watch (a person).", "At 1408.0s, [0.08, 0.25, 0.521, 0.904]: sit, carry/hold (an object), sail boat, listen to (a person).", "At 1407.0s, [0.065, 0.279, 0.559, 0.976]: sit, carry/hold (an object), listen to (a person).", "At 1406.0s, [0.114, 0.294, 0.564, 0.902]: sit, carry/hold (an object), sail boat.", "From 1404.0s to 1405.0s, [0.115, 0.276, 0.613, 0.976]->[0.118, 0.295, 0.589, 0.996]: sit, carry/hold (an object), sail boat, watch (a person).", "At 1403.0s, [0.626, 0.48, 0.765, 0.691]: sit, carry/hold (an object), row boat, watch (a person).", "From 1401.0s to 1402.0s, [0.621, 0.481, 0.78, 0.698]->[0.63, 0.48, 0.777, 0.697]: sit, sail boat, watch (a person).", "At 1400.0s, [0.636, 0.49, 0.804, 0.688]: sit, sail boat."]}]}, {"video": "phrYEKv0rmw.mkv", "clip": [1383, 1457], "conversations": [{"from": "human", "timestamps": 1413.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.108, 0.324, 0.547, 0.982]: carry/hold (an object), sit, sail boat, watch (a person); [0.478, 0.274, 0.873, 0.936]: sit, sail boat; [0.698, 0.045, 0.998, 0.591]: carry/hold (an object), sit, sail boat, watch (a person)."}, {"from": "human", "timestamps": 1457.0, "value": "Describe the actions of the person at bbox [0.527, 0.023, 0.999, 0.992] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.527, 0.023, 0.999, 0.992]: sit, sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1456.0s, [0.5, 0.002, 0.991, 0.973]: sit, sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1455.0s, [0.534, 0.023, 1.0, 0.991]: sit, ride (e.g., a bike, a car, a horse), sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1454.0s, [0.526, 0.013, 0.993, 0.992]: sit, listen to (a person), watch (a person).", "From 1450.0s to 1453.0s, [0.524, 0.028, 1.0, 0.986]->[0.525, 0.018, 0.986, 0.973]->[0.554, 0.016, 1.0, 1.0]->[0.528, 0.028, 0.998, 0.98]: sit, sail boat, listen to (a person), watch (a person).", "From 1448.0s to 1449.0s, [0.493, 0.05, 0.981, 0.976]->[0.494, 0.046, 0.99, 0.99]: sit, listen to (a person).", "At 1447.0s, [0.537, 0.05, 0.992, 0.973]: sit, sail boat, listen to (a person), watch (a person).", "At 1446.0s, [0.551, 0.009, 0.992, 0.975]: sit, sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1445.0s, [0.528, 0.048, 1.0, 0.97]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1444.0s, [0.444, 0.075, 1.0, 0.977]: sit, listen to (a person).", "At 1443.0s, [0.39, 0.094, 0.995, 0.971]: sit, sail boat, listen to (a person).", "At 1442.0s, [0.395, 0.266, 0.799, 0.881]: sit, carry/hold (an object), sail boat, talk to (e.g., self, a person, a group).", "At 
1441.0s, [0.381, 0.251, 0.81, 0.891]: sit, carry/hold (an object), sail boat, listen to (a person), talk to (e.g., self, a person, a group).", "At 1440.0s, [0.394, 0.274, 0.82, 0.865]: sit, carry/hold (an object), sail boat, listen to (a person).", "From 1438.0s to 1439.0s, [0.329, 0.284, 0.819, 0.868]->[0.411, 0.278, 0.814, 0.89]: sit, carry/hold (an object), listen to (a person).", "At 1437.0s, [0.454, 0.256, 0.815, 0.88]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1436.0s, [0.433, 0.245, 0.805, 0.856]: sit, talk to (e.g., self, a person, a group).", "From 1434.0s to 1435.0s, [0.523, 0.203, 0.895, 0.834]->[0.407, 0.143, 0.884, 0.861]: sit, sail boat, talk to (e.g., self, a person, a group).", "At 1433.0s, [0.658, 0.207, 0.926, 0.838]: sit, sail boat, listen to (a person).", "At 1432.0s, [0.521, 0.261, 0.858, 0.854]: sit, sail boat, watch (a person).", "At 1431.0s, [0.52, 0.257, 0.859, 0.838]: sit, sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1430.0s, [0.529, 0.224, 0.868, 0.813]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1429.0s, [0.477, 0.251, 0.841, 0.796]: sit, sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1428.0s, [0.474, 0.245, 0.824, 0.815]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1427.0s, [0.473, 0.239, 0.825, 0.774]: sit, carry/hold (an object), sail boat, talk to (e.g., self, a person, a group), watch (a person).", "From 1424.0s to 1426.0s, [0.485, 0.266, 0.846, 0.803]->[0.496, 0.247, 0.819, 0.799]->[0.476, 0.228, 0.823, 0.776]: sit, carry/hold (an object), sail boat, talk to (e.g., self, a person, a group).", "At 1423.0s, [0.527, 0.27, 0.843, 0.787]: sit, carry/hold (an object), sail boat, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1422.0s, [0.513, 0.27, 0.849, 0.838]: sit, carry/hold (an object), sail boat, listen to (a person), watch (a person).", "At 1421.0s, [0.489, 0.268, 0.831, 0.816]: sit, carry/hold (an object), sail boat, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1419.0s to 1420.0s, [0.524, 0.203, 0.858, 0.723]->[0.484, 0.261, 0.82, 0.822]: sit, carry/hold (an object), sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1418.0s, [0.466, 0.241, 0.86, 0.847]: sit, carry/hold (an object), sail boat, watch (a person).", "At 1417.0s, [0.444, 0.264, 0.845, 0.879]: sit, ride (e.g., a bike, a car, a horse), watch (a person)."]}]}, {"video": "phrYEKv0rmw.mkv", "clip": [1430, 1475], "conversations": [{"from": "human", "timestamps": 1460.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.052, 0.045, 0.597, 0.945]: watch (a person), sit, sail boat, talk to (e.g., self, a person, a group); [0.555, 0.025, 1.0, 0.981]: listen to (a person), sit, sail boat, watch (a person)."}, {"from": "human", "timestamps": 1475.0, "value": "For the person at bbox [0.563, 0.025, 1.0, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.563, 0.025, 1.0, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1474.0s, [0.552, 0.023, 0.997, 0.989]: sit, sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1473.0s, [0.562, 0.025, 0.993, 0.984]: sit, sail boat, listen to (a person), watch (a 
person).", "At 1472.0s, [0.547, 0.024, 0.999, 0.981]: sit, sail boat, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1471.0s, [0.561, 0.032, 0.983, 0.982]: sit, listen to (a person), watch (a person).", "At 1470.0s, [0.542, 0.029, 0.999, 0.982]: sit, sail boat, listen to (a person), watch (a person).", "At 1469.0s, [0.561, 0.031, 1.0, 0.981]: sit, listen to (a person), watch (a person).", "At 1468.0s, [0.587, 0.033, 1.0, 0.968]: sit, sail boat, listen to (a person), watch (a person).", "At 1467.0s, [0.574, 0.03, 1.0, 0.989]: sit, listen to (a person), watch (a person).", "From 1463.0s to 1466.0s, [0.526, 0.014, 0.993, 0.982]->[0.564, 0.023, 1.0, 0.984]->[0.55, 0.026, 1.0, 0.989]->[0.536, 0.035, 0.997, 0.997]: sit, sail boat, listen to (a person), watch (a person).", "At 1462.0s, [0.558, 0.027, 0.997, 0.992]: sit, listen to (a person), watch (a person).", "From 1460.0s to 1461.0s, [0.555, 0.025, 1.0, 0.981]->[0.561, 0.026, 0.989, 0.981]: sit, sail boat, listen to (a person), watch (a person).", "From 1458.0s to 1459.0s, [0.537, 0.022, 0.996, 0.985]->[0.563, 0.031, 0.995, 0.986]: sit, listen to (a person), watch (a person).", "From 1456.0s to 1457.0s, [0.5, 0.002, 0.991, 0.973]->[0.527, 0.023, 0.999, 0.992]: sit, sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1455.0s, [0.534, 0.023, 1.0, 0.991]: sit, ride (e.g., a bike, a car, a horse), sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1454.0s, [0.526, 0.013, 0.993, 0.992]: sit, listen to (a person), watch (a person).", "From 1450.0s to 1453.0s, [0.524, 0.028, 1.0, 0.986]->[0.525, 0.018, 0.986, 0.973]->[0.554, 0.016, 1.0, 1.0]->[0.528, 0.028, 0.998, 0.98]: sit, sail boat, listen to (a person), watch (a person).", "From 1448.0s to 1449.0s, [0.493, 0.05, 0.981, 0.976]->[0.494, 0.046, 0.99, 0.99]: sit, listen to (a person).", "At 1447.0s, [0.537, 0.05, 0.992, 0.973]: sit, sail boat, listen to (a person), watch (a person).", "At 1446.0s, [0.551, 0.009, 0.992, 0.975]: sit, sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1445.0s, [0.528, 0.048, 1.0, 0.97]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1444.0s, [0.444, 0.075, 1.0, 0.977]: sit, listen to (a person).", "At 1443.0s, [0.39, 0.094, 0.995, 0.971]: sit, sail boat, listen to (a person).", "At 1442.0s, [0.395, 0.266, 0.799, 0.881]: sit, carry/hold (an object), sail boat, talk to (e.g., self, a person, a group).", "At 1441.0s, [0.381, 0.251, 0.81, 0.891]: sit, carry/hold (an object), sail boat, listen to (a person), talk to (e.g., self, a person, a group).", "At 1440.0s, [0.394, 0.274, 0.82, 0.865]: sit, carry/hold (an object), sail boat, listen to (a person).", "From 1438.0s to 1439.0s, [0.329, 0.284, 0.819, 0.868]->[0.411, 0.278, 0.814, 0.89]: sit, carry/hold (an object), listen to (a person).", "At 1437.0s, [0.454, 0.256, 0.815, 0.88]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1436.0s, [0.433, 0.245, 0.805, 0.856]: sit, talk to (e.g., self, a person, a group).", "From 1434.0s to 1435.0s, [0.523, 0.203, 0.895, 0.834]->[0.407, 0.143, 0.884, 0.861]: sit, sail boat, talk to (e.g., self, a person, a group).", "At 1433.0s, [0.658, 0.207, 0.926, 0.838]: sit, sail boat, listen to (a person).", "At 1432.0s, [0.521, 0.261, 0.858, 0.854]: sit, sail boat, watch (a person).", "At 1431.0s, [0.52, 0.257, 0.859, 0.838]: sit, sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1430.0s, [0.529, 0.224, 
0.868, 0.813]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1429.0s, [0.477, 0.251, 0.841, 0.796]: sit, sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1428.0s, [0.474, 0.245, 0.824, 0.815]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1427.0s, [0.473, 0.239, 0.825, 0.774]: sit, carry/hold (an object), sail boat, talk to (e.g., self, a person, a group), watch (a person).", "From 1424.0s to 1426.0s, [0.485, 0.266, 0.846, 0.803]->[0.496, 0.247, 0.819, 0.799]->[0.476, 0.228, 0.823, 0.776]: sit, carry/hold (an object), sail boat, talk to (e.g., self, a person, a group).", "At 1423.0s, [0.527, 0.27, 0.843, 0.787]: sit, carry/hold (an object), sail boat, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1422.0s, [0.513, 0.27, 0.849, 0.838]: sit, carry/hold (an object), sail boat, listen to (a person), watch (a person).", "At 1421.0s, [0.489, 0.268, 0.831, 0.816]: sit, carry/hold (an object), sail boat, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1419.0s to 1420.0s, [0.524, 0.203, 0.858, 0.723]->[0.484, 0.261, 0.82, 0.822]: sit, carry/hold (an object), sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1418.0s, [0.466, 0.241, 0.86, 0.847]: sit, carry/hold (an object), sail boat, watch (a person).", "At 1417.0s, [0.444, 0.264, 0.845, 0.879]: sit, ride (e.g., a bike, a car, a horse), watch (a person)."]}]}, {"video": "phrYEKv0rmw.mkv", "clip": [1471, 1524], "conversations": [{"from": "human", "timestamps": 1501.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.449, 0.23, 0.807, 0.875]?"}, {"from": "gpt", "value": ["At Now, [0.449, 0.23, 0.807, 0.875]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1500.0s, [0.484, 0.242, 0.825, 0.866]: sit, carry/hold (an object).", "At 1499.0s, [0.485, 0.243, 0.848, 0.894]: sit, carry/hold (an object), sail boat, listen to (a person).", "At 1498.0s, [0.492, 0.258, 0.836, 0.869]: sit, carry/hold (an object), sail boat.", "At 1497.0s, [0.522, 0.021, 1.0, 0.974]: sit, sail boat.", "At 1496.0s, [0.487, 0.0, 0.986, 0.985]: sit, sail boat, watch (a person).", "At 1495.0s, [0.4, 0.011, 0.995, 0.99]: sit, watch (a person).", "At 1494.0s, [0.425, 0.017, 1.0, 0.986]: sit, sail boat, watch (a person).", "At 1493.0s, [0.454, 0.017, 1.0, 0.992]: sit, listen to (a person), watch (a person).", "At 1492.0s, [0.455, 0.026, 1.0, 0.993]: sit, ride (e.g., a bike, a car, a horse), listen to (a person), watch (a person).", "At 1491.0s, [0.443, 0.019, 1.0, 0.991]: sit, sail boat, listen to (a person), watch (a person).", "At 1490.0s, [0.416, 0.013, 1.0, 0.988]: sit, listen to (a person), watch (a person).", "At 1489.0s, [0.436, 0.01, 1.0, 0.98]: sit, stand, listen to (a person), watch (a person).", "At 1488.0s, [0.42, 0.003, 1.0, 0.988]: sit, sail boat, listen to (a person), watch (a person).", "At 1487.0s, [0.402, 0.022, 1.0, 0.988]: sit, listen to (a person), watch (a person).", "At 1486.0s, [0.412, 0.028, 1.0, 0.979]: stand, sail boat, listen to (a person), watch (a person).", "From 1484.0s to 1485.0s, [0.308, 0.025, 0.996, 0.991]->[0.431, 0.026, 1.0, 0.985]: sit, sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1483.0s, [0.372, 0.029, 0.994, 0.978]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1482.0s, [0.503, 0.03, 0.988, 0.987]: 
sit, sail boat, listen to (a person), watch (a person).", "From 1478.0s to 1481.0s, [0.486, 0.035, 0.993, 0.973]->[0.505, 0.044, 0.989, 0.982]->[0.48, 0.045, 0.986, 0.981]->[0.495, 0.035, 0.988, 0.983]: sit, listen to (a person), watch (a person).", "At 1477.0s, [0.522, 0.045, 0.995, 0.972]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1476.0s, [0.523, 0.017, 0.996, 1.0]: sit, sail boat, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1475.0s, [0.563, 0.025, 1.0, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1474.0s, [0.552, 0.023, 0.997, 0.989]: sit, sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1473.0s, [0.562, 0.025, 0.993, 0.984]: sit, sail boat, listen to (a person), watch (a person).", "At 1472.0s, [0.547, 0.024, 0.999, 0.981]: sit, sail boat, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1471.0s, [0.561, 0.032, 0.983, 0.982]: sit, listen to (a person), watch (a person).", "At 1470.0s, [0.542, 0.029, 0.999, 0.982]: sit, sail boat, listen to (a person), watch (a person).", "At 1469.0s, [0.561, 0.031, 1.0, 0.981]: sit, listen to (a person), watch (a person).", "At 1468.0s, [0.587, 0.033, 1.0, 0.968]: sit, sail boat, listen to (a person), watch (a person).", "At 1467.0s, [0.574, 0.03, 1.0, 0.989]: sit, listen to (a person), watch (a person).", "From 1463.0s to 1466.0s, [0.526, 0.014, 0.993, 0.982]->[0.564, 0.023, 1.0, 0.984]->[0.55, 0.026, 1.0, 0.989]->[0.536, 0.035, 0.997, 0.997]: sit, sail boat, listen to (a person), watch (a person).", "At 1462.0s, [0.558, 0.027, 0.997, 0.992]: sit, listen to (a person), watch (a person).", "From 1460.0s to 1461.0s, [0.555, 0.025, 1.0, 0.981]->[0.561, 0.026, 0.989, 0.981]: sit, sail boat, listen to (a person), watch (a person).", "From 1458.0s to 1459.0s, [0.537, 0.022, 0.996, 0.985]->[0.563, 0.031, 0.995, 0.986]: sit, listen to (a person), watch (a person).", "From 1456.0s to 1457.0s, [0.5, 0.002, 0.991, 0.973]->[0.527, 0.023, 0.999, 0.992]: sit, sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1455.0s, [0.534, 0.023, 1.0, 0.991]: sit, ride (e.g., a bike, a car, a horse), sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1454.0s, [0.526, 0.013, 0.993, 0.992]: sit, listen to (a person), watch (a person).", "From 1450.0s to 1453.0s, [0.524, 0.028, 1.0, 0.986]->[0.525, 0.018, 0.986, 0.973]->[0.554, 0.016, 1.0, 1.0]->[0.528, 0.028, 0.998, 0.98]: sit, sail boat, listen to (a person), watch (a person).", "From 1448.0s to 1449.0s, [0.493, 0.05, 0.981, 0.976]->[0.494, 0.046, 0.99, 0.99]: sit, listen to (a person).", "At 1447.0s, [0.537, 0.05, 0.992, 0.973]: sit, sail boat, listen to (a person), watch (a person).", "At 1446.0s, [0.551, 0.009, 0.992, 0.975]: sit, sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1445.0s, [0.528, 0.048, 1.0, 0.97]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1444.0s, [0.444, 0.075, 1.0, 0.977]: sit, listen to (a person).", "At 1443.0s, [0.39, 0.094, 0.995, 0.971]: sit, sail boat, listen to (a person).", "At 1442.0s, [0.395, 0.266, 0.799, 0.881]: sit, carry/hold (an object), sail boat, talk to (e.g., self, a person, a group).", "At 1441.0s, [0.381, 0.251, 0.81, 0.891]: sit, carry/hold (an object), sail boat, listen to (a person), talk to (e.g., self, a person, a group).", "At 1440.0s, [0.394, 0.274, 0.82, 0.865]: sit, carry/hold (an object), 
sail boat, listen to (a person).", "From 1438.0s to 1439.0s, [0.329, 0.284, 0.819, 0.868]->[0.411, 0.278, 0.814, 0.89]: sit, carry/hold (an object), listen to (a person).", "At 1437.0s, [0.454, 0.256, 0.815, 0.88]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1436.0s, [0.433, 0.245, 0.805, 0.856]: sit, talk to (e.g., self, a person, a group).", "From 1434.0s to 1435.0s, [0.523, 0.203, 0.895, 0.834]->[0.407, 0.143, 0.884, 0.861]: sit, sail boat, talk to (e.g., self, a person, a group).", "At 1433.0s, [0.658, 0.207, 0.926, 0.838]: sit, sail boat, listen to (a person).", "At 1432.0s, [0.521, 0.261, 0.858, 0.854]: sit, sail boat, watch (a person).", "At 1431.0s, [0.52, 0.257, 0.859, 0.838]: sit, sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1430.0s, [0.529, 0.224, 0.868, 0.813]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1429.0s, [0.477, 0.251, 0.841, 0.796]: sit, sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1428.0s, [0.474, 0.245, 0.824, 0.815]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1427.0s, [0.473, 0.239, 0.825, 0.774]: sit, carry/hold (an object), sail boat, talk to (e.g., self, a person, a group), watch (a person).", "From 1424.0s to 1426.0s, [0.485, 0.266, 0.846, 0.803]->[0.496, 0.247, 0.819, 0.799]->[0.476, 0.228, 0.823, 0.776]: sit, carry/hold (an object), sail boat, talk to (e.g., self, a person, a group).", "At 1423.0s, [0.527, 0.27, 0.843, 0.787]: sit, carry/hold (an object), sail boat, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1422.0s, [0.513, 0.27, 0.849, 0.838]: sit, carry/hold (an object), sail boat, listen to (a person), watch (a person).", "At 1421.0s, [0.489, 0.268, 0.831, 0.816]: sit, carry/hold (an object), sail boat, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1419.0s to 1420.0s, [0.524, 0.203, 0.858, 0.723]->[0.484, 0.261, 0.82, 0.822]: sit, carry/hold (an object), sail boat, talk to (e.g., self, a person, a group), watch (a person).", "At 1418.0s, [0.466, 0.241, 0.86, 0.847]: sit, carry/hold (an object), sail boat, watch (a person).", "At 1417.0s, [0.444, 0.264, 0.845, 0.879]: sit, ride (e.g., a bike, a car, a horse), watch (a person)."]}, {"from": "human", "timestamps": 1524.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.067, 0.089, 0.535, 0.978]: sit, watch (a person), listen to (a person), carry/hold (an object), talk to (e.g., self, a person, a group); [0.484, 0.027, 0.972, 0.972]: listen to (a person), sit."}]}, {"video": "phrYEKv0rmw.mkv", "clip": [1552, 1601], "conversations": [{"from": "human", "timestamps": 1582.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.484, 0.365, 0.665, 0.765]: carry/hold (an object), bend/bow (at the waist); [0.865, 0.194, 0.993, 0.649]: carry/hold (an object), stand, sail boat, watch (a person)."}, {"from": "human", "timestamps": 1601.0, "value": "What actions did the person at bbox [0.328, 0.146, 0.993, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.328, 0.146, 0.993, 1.0]: stand, grab (a person), talk to (e.g., self, a person, a group).", "At 1600.0s, [0.314, 0.125, 0.956, 0.981]: stand, talk to (e.g., self, a person, a group).", "From 1592.0s to 1599.0s, [0.394, 
0.112, 0.986, 0.992]->[0.393, 0.121, 0.983, 0.99]->[0.407, 0.116, 0.989, 0.989]->[0.384, 0.13, 1.0, 0.989]->[0.393, 0.121, 0.99, 0.982]->[0.347, 0.118, 0.98, 0.987]->[0.334, 0.119, 0.976, 0.993]->[0.319, 0.12, 0.987, 0.994]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1591.0s, [0.737, 0.035, 0.959, 0.908]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1590.0s, [0.736, 0.04, 0.956, 0.943]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1588.0s to 1589.0s, [0.716, 0.046, 0.951, 0.916]->[0.723, 0.055, 0.948, 0.925]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1587.0s, [0.583, 0.077, 0.871, 0.766]: walk, carry/hold (an object), listen to (a person).", "At 1586.0s, [0.552, 0.077, 0.819, 0.853]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1584.0s to 1585.0s, [0.422, 0.092, 0.693, 0.853]->[0.463, 0.086, 0.682, 0.717]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1583.0s, [0.446, 0.146, 0.696, 0.779]: carry/hold (an object), get up, watch (a person).", "At 1582.0s, [0.484, 0.365, 0.665, 0.765]: bend/bow (at the waist), carry/hold (an object)."]}]}, {"video": "phrYEKv0rmw.mkv", "clip": [1604, 1668], "conversations": [{"from": "human", "timestamps": 1634.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.098, 0.488, 0.292, 0.968]: carry/hold (an object), walk; [0.727, 0.327, 0.954, 0.992]: walk; [0.854, 0.185, 0.917, 0.333]: stand."}, {"from": "human", "timestamps": 1668.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.66, 0.56, 0.987, 0.997]?"}, {"from": "gpt", "value": ["At Now, [0.66, 0.56, 0.987, 0.997]: stand, carry/hold (an object).", "At 1667.0s, [0.57, 0.501, 0.903, 0.989]: stand, carry/hold (an object), turn (e.g., a screwdriver)."]}]}, {"video": "phrYEKv0rmw.mkv", "clip": [1629, 1729], "conversations": [{"from": "human", "timestamps": 1659.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.079, 0.07, 0.714, 0.984]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1729.0, "value": "What actions is the person in bbox [0.015, 0.072, 0.628, 0.938] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.015, 0.072, 0.628, 0.938]: sit, ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group).", "At 1728.0s, [0.321, 0.043, 0.923, 0.993]: stand, watch (a person)."]}]}, {"video": "phrYEKv0rmw.mkv", "clip": [1686, 1727], "conversations": [{"from": "human", "timestamps": 1716.0, "value": "What actions did the person at bbox [0.579, 0.288, 0.978, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.579, 0.288, 0.978, 1.0]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1715.0s, [0.565, 0.315, 0.924, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1714.0s, [0.525, 0.383, 0.721, 0.97]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1713.0s, [0.558, 0.28, 0.915, 0.989]: stand, carry/hold (an 
object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1711.0s to 1712.0s, [0.565, 0.326, 0.9, 0.993]->[0.56, 0.371, 0.913, 0.995]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1710.0s, [0.313, 0.33, 0.687, 0.993]: stand, carry/hold (an object), hand shake, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1727.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.093, 0.637, 0.917]: sit; [0.68, 0.389, 0.998, 0.972]: watch (a person), stand, touch (an object)."}]}, {"video": "phrYEKv0rmw.mkv", "clip": [1708, 1745], "conversations": [{"from": "human", "timestamps": 1738.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.021, 0.127, 0.5, 0.985]: carry/hold (an object), sit, ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group); [0.341, 0.108, 0.711, 1.0]: ride (e.g., a bike, a car, a horse), sit, listen to (a person)."}, {"from": "human", "timestamps": 1745.0, "value": "What actions did the person at bbox [0.085, 0.114, 0.524, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.085, 0.114, 0.524, 1.0]: sit, ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group).", "At 1744.0s, [0.079, 0.136, 0.594, 0.986]: sit, ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group).", "At 1743.0s, [0.096, 0.111, 0.564, 1.0]: sit, carry/hold (an object), ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group).", "From 1741.0s to 1742.0s, [0.025, 0.114, 0.537, 0.978]->[0.096, 0.117, 0.553, 0.98]: sit, ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group).", "At 1740.0s, [0.059, 0.137, 0.591, 0.997]: sit, carry/hold (an object), ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group).", "At 1739.0s, [0.083, 0.15, 0.614, 0.996]: sit, ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group).", "At 1738.0s, [0.021, 0.127, 0.5, 0.985]: sit, carry/hold (an object), ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group).", "At 1737.0s, [0.028, 0.134, 0.607, 0.998]: sit, ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group).", "At 1736.0s, [0.008, 0.08, 0.554, 0.996]: sit, carry/hold (an object), ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group), watch (a person).", "At 1735.0s, [0.046, 0.083, 0.615, 0.986]: sit, ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group).", "At 1734.0s, [0.01, 0.073, 0.518, 0.985]: sit, carry/hold (an object), ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group).", "From 1731.0s to 1733.0s, [0.075, 0.104, 0.615, 0.99]->[0.099, 0.119, 0.644, 0.973]->[0.009, 0.053, 0.657, 0.998]: sit, carry/hold (an object), ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group), watch (a person).", "At 1730.0s, [0.073, 0.076, 0.645, 0.97]: sit, carry/hold (an object), ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group).", "At 1729.0s, [0.015, 0.072, 0.628, 0.938]: sit, ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group).", "At 1728.0s, [0.321, 0.043, 0.923, 0.993]: stand, watch (a person)."]}]}, {"video": "piYxcrMxVPw.mkv", "clip": [874, 922], 
"conversations": [{"from": "human", "timestamps": 904.0, "value": "For the person at bbox [0.174, 0.093, 0.448, 0.99], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.174, 0.093, 0.448, 0.99]: stand, listen to (a person), watch (a person).", "At 903.0s, [0.217, 0.053, 0.488, 0.989]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 922.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.05, 0.091, 0.691, 0.98]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "piYxcrMxVPw.mkv", "clip": [934, 975], "conversations": [{"from": "human", "timestamps": 964.0, "value": "What actions did the person at bbox [0.529, 0.046, 0.983, 0.957] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.529, 0.046, 0.983, 0.957]: sit, ride (e.g., a bike, a car, a horse).", "At 963.0s, [0.529, 0.066, 0.986, 0.952]: sit, ride (e.g., a bike, a car, a horse), watch (a person).", "At 962.0s, [0.49, 0.079, 0.962, 0.954]: sit, ride (e.g., a bike, a car, a horse), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 975.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.308, 0.046, 0.921, 0.955]: ride (e.g., a bike, a car, a horse), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "piYxcrMxVPw.mkv", "clip": [1007, 1066], "conversations": [{"from": "human", "timestamps": 1037.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.018, 0.048, 0.954, 0.958]?"}, {"from": "gpt", "value": ["In the current frame, [0.018, 0.048, 0.954, 0.958]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1028.0s to 1036.0s, [0.007, 0.031, 0.992, 0.956]->[0.013, 0.02, 0.984, 0.962]->[0.0, 0.027, 0.975, 0.946]->[0.0, 0.048, 0.991, 0.966]->[0.003, 0.029, 0.973, 0.956]->[0.009, 0.048, 0.986, 0.964]->[0.004, 0.033, 0.889, 0.963]->[0.002, 0.044, 0.953, 0.968]->[0.012, 0.051, 0.85, 0.972]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1027.0s, [0.006, 0.053, 1.0, 0.979]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1066.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.019, 0.054, 0.896, 0.95]: watch (a person), sit, touch (an object), talk to (e.g., self, a person, a group)."}]}, {"video": "piYxcrMxVPw.mkv", "clip": [1144, 1185], "conversations": [{"from": "human", "timestamps": 1174.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.03, 0.024, 0.805, 1.0]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1185.0, "value": "What actions is the person in bbox [0.25, 0.072, 0.733, 0.984] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.25, 0.072, 0.733, 0.984]: sit.", "At 1184.0s, [0.296, 0.029, 0.865, 0.984]: sit.", "At 1183.0s, [0.13, 0.035, 0.762, 0.98]: sit, touch (an object), listen to (a person)."]}]}, {"video": "piYxcrMxVPw.mkv", "clip": [1112, 1238], "conversations": [{"from": "human", "timestamps": 1142.0, "value": "Where is 
each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.024, 0.128, 0.59, 0.991]: sit."}, {"from": "human", "timestamps": 1238.0, "value": "What actions did the person at bbox [0.0, 0.108, 0.365, 0.986] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.108, 0.365, 0.986]: sit.", "At 1237.0s, [0.004, 0.103, 0.442, 0.988]: sit.", "At 1236.0s, [0.0, 0.174, 0.414, 0.998]: stand.", "At 1235.0s, [0.0, 0.098, 0.528, 0.999]: dress/put on clothing.", "At 1234.0s, [0.0, 0.068, 0.749, 0.962]: carry/hold (an object).", "From 1231.0s to 1233.0s, [0.014, 0.05, 0.686, 0.971]->[0.001, 0.008, 0.863, 0.971]->[0.012, 0.008, 0.746, 0.985]: stand, carry/hold (an object).", "From 1229.0s to 1230.0s, [0.0, 0.092, 0.667, 0.984]->[0.0, 0.08, 0.69, 0.977]: stand.", "At 1228.0s, [0.005, 0.036, 0.668, 0.979]: sit, talk to (e.g., self, a person, a group).", "From 1226.0s to 1227.0s, [0.021, 0.045, 0.785, 0.977]->[0.024, 0.046, 0.647, 0.978]: sit, carry/hold (an object)."]}]}, {"video": "piYxcrMxVPw.mkv", "clip": [1185, 1222], "conversations": [{"from": "human", "timestamps": 1215.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.012, 0.037, 0.711, 0.973]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1222.0, "value": "Describe the actions of the person at bbox [0.103, 0.031, 0.542, 0.986] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.103, 0.031, 0.542, 0.986]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1220.0s to 1221.0s, [0.094, 0.055, 0.541, 0.987]->[0.089, 0.04, 0.524, 0.98]: sit."]}]}, {"video": "piYxcrMxVPw.mkv", "clip": [1432, 1475], "conversations": [{"from": "human", "timestamps": 1462.0, "value": "For the person at bbox [0.107, 0.014, 0.744, 0.984], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.107, 0.014, 0.744, 0.984]: sit, hand clap, watch (a person).", "At 1461.0s, [0.131, 0.019, 0.788, 0.973]: sit, hand clap, watch (a person)."]}, {"from": "human", "timestamps": 1475.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.367, 0.06, 0.809, 0.971]: carry/hold (an object), stand, watch (a person)."}]}, {"video": "piYxcrMxVPw.mkv", "clip": [1521, 1552], "conversations": [{"from": "human", "timestamps": 1551.0, "value": "What actions is the person in bbox [0.415, 0.045, 0.791, 0.98] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.415, 0.045, 0.791, 0.98]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1549.0s to 1550.0s, [0.381, 0.069, 0.707, 0.916]->[0.35, 0.059, 0.786, 0.962]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1547.0s to 1548.0s, [0.357, 0.068, 0.775, 0.965]->[0.355, 0.061, 0.788, 0.962]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1552.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.368, 0.073, 0.781, 0.967]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "piYxcrMxVPw.mkv", "clip": [1512, 
1585], "conversations": [{"from": "human", "timestamps": 1542.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.144, 0.583, 0.224, 0.747]: listen to (a person), sit, watch (a person); [0.635, 0.506, 0.708, 0.667]: listen to (a person), sit, watch (a person); [0.68, 0.544, 0.83, 0.787]: listen to (a person), sit, watch (a person); [0.841, 0.596, 0.972, 0.785]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1585.0, "value": "What actions did the person at bbox [0.311, 0.096, 0.719, 0.958] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.311, 0.096, 0.719, 0.958]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1584.0s, [0.35, 0.07, 0.787, 0.967]: stand, carry/hold (an object), watch (a person)."]}]}, {"video": "piYxcrMxVPw.mkv", "clip": [1615, 1674], "conversations": [{"from": "human", "timestamps": 1645.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.0, 0.416, 0.978]: listen to (a person), stand; [0.294, 0.331, 0.689, 1.0]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.681, 0.106, 0.999, 1.0]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1674.0, "value": "For the person at bbox [0.195, 0.305, 0.785, 0.984], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.195, 0.305, 0.785, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1673.0s, [0.204, 0.366, 0.631, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1668.0s to 1672.0s, [0.7, 0.421, 0.872, 0.982]->[0.709, 0.433, 0.889, 0.991]->[0.706, 0.419, 0.896, 0.986]->[0.155, 0.381, 0.468, 0.988]->[0.187, 0.379, 0.477, 1.0]: stand, listen to (a person), watch (a person)."]}]}, {"video": "piYxcrMxVPw.mkv", "clip": [1649, 1698], "conversations": [{"from": "human", "timestamps": 1679.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.163, 0.271, 0.432, 0.999]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.458, 0.038, 0.883, 0.996]: listen to (a person), stand."}, {"from": "human", "timestamps": 1698.0, "value": "What actions is the person in bbox [0.521, 0.111, 0.97, 0.982] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.521, 0.111, 0.97, 0.982]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1696.0s to 1697.0s, [0.455, 0.01, 0.903, 0.99]->[0.429, 0.093, 0.915, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1695.0s, [0.639, 0.277, 0.965, 0.997]: stand, listen to (a person), watch (a person).", "At 1694.0s, [0.615, 0.248, 0.936, 0.992]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1692.0s to 1693.0s, [0.679, 0.186, 0.987, 0.985]->[0.699, 0.194, 0.997, 0.984]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1688.0s to 1691.0s, [0.644, 0.15, 0.97, 0.989]->[0.656, 0.17, 0.986, 0.954]->[0.669, 0.154, 0.983, 0.977]->[0.678, 0.157, 0.989, 0.979]: stand, listen to (a person), watch (a person).", "At 1687.0s, [0.66, 0.171, 1.0, 0.988]: stand, watch (a person)."]}]}, {"video": "piYxcrMxVPw.mkv", "clip": 
[1746, 1788], "conversations": [{"from": "human", "timestamps": 1776.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.221, 0.638, 0.246, 0.746]: run/jog."}, {"from": "human", "timestamps": 1788.0, "value": "For the person at bbox [0.079, 0.574, 0.146, 0.824], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.079, 0.574, 0.146, 0.824]: run/jog.", "At 1787.0s, [0.054, 0.579, 0.11, 0.785]: run/jog."]}]}, {"video": "pieVIsGmLsc.mkv", "clip": [905, 944], "conversations": [{"from": "human", "timestamps": 935.0, "value": "What actions did the person at bbox [0.225, 0.272, 0.419, 0.706] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.225, 0.272, 0.419, 0.706]: stand, carry/hold (an object), listen to (a person).", "From 933.0s to 934.0s, [0.201, 0.233, 0.442, 0.559]->[0.22, 0.296, 0.43, 0.663]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 932.0s, [0.119, 0.188, 0.313, 0.472]: stand, carry/hold (an object), watch (a person)."]}, {"from": "human", "timestamps": 944.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.11, 0.209, 0.502, 0.909]: walk."}]}, {"video": "pieVIsGmLsc.mkv", "clip": [900, 981], "conversations": [{"from": "human", "timestamps": 930.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.016, 0.336, 0.197, 0.896]: carry/hold (an object), walk; [0.119, 0.106, 0.355, 0.904]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 981.0, "value": "For the person at bbox [0.366, 0.277, 0.591, 0.887], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.366, 0.277, 0.591, 0.887]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 980.0s, [0.368, 0.265, 0.582, 0.885]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 977.0s to 979.0s, [0.204, 0.153, 0.58, 0.906]->[0.365, 0.273, 0.624, 0.896]->[0.352, 0.281, 0.602, 0.865]: sit, listen to (a person), watch (a person)."]}]}, {"video": "pieVIsGmLsc.mkv", "clip": [943, 978], "conversations": [{"from": "human", "timestamps": 973.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.2, 0.43, 0.316, 0.794]?"}, {"from": "gpt", "value": ["At Now, [0.2, 0.43, 0.316, 0.794]: sit.", "At 972.0s, [0.196, 0.435, 0.31, 0.857]: sit.", "At 971.0s, [0.218, 0.384, 0.393, 0.879]: sit, listen to (a person).", "At 970.0s, [0.183, 0.432, 0.339, 0.87]: sit."]}, {"from": "human", "timestamps": 978.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.06, 0.165, 0.393, 0.75]: watch (a person), sit; [0.365, 0.273, 0.624, 0.896]: listen to (a person), sit, watch (a person); [0.599, 0.217, 0.951, 0.908]: listen to (a person), sit, watch (a person)."}]}, {"video": "pieVIsGmLsc.mkv", "clip": [1027, 1074], "conversations": [{"from": "human", "timestamps": 1057.0, "value": "Describe the actions of the person at bbox [0.1, 0.159, 0.371, 0.903] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.1, 0.159, 0.371, 0.903]: stand, listen to (a person), watch (a person).", "At 1056.0s, [0.188, 0.144, 0.452, 0.8]: walk, talk to (e.g., 
self, a person, a group), watch (a person).", "At 1055.0s, [0.278, 0.135, 0.52, 0.882]: walk, talk to (e.g., self, a person, a group).", "At 1054.0s, [0.336, 0.152, 0.552, 0.889]: walk, dress/put on clothing.", "At 1053.0s, [0.187, 0.125, 0.475, 0.89]: walk, dress/put on clothing, listen to (a person)."]}, {"from": "human", "timestamps": 1074.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.151, 0.457, 0.606, 0.886]: lie/sleep; [0.393, 0.47, 0.903, 0.898]: lie/sleep, talk to (e.g., self, a person, a group)."}]}, {"video": "pieVIsGmLsc.mkv", "clip": [1066, 1102], "conversations": [{"from": "human", "timestamps": 1096.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.078, 0.261, 0.519, 0.816]: listen to (a person), lie/sleep, watch (a person); [0.414, 0.157, 0.948, 0.906]: watch (a person), lie/sleep, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1102.0, "value": "What actions is the person in bbox [0.324, 0.119, 0.782, 0.904] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.324, 0.119, 0.782, 0.904]: stand, talk to (e.g., self, a person, a group).", "At 1101.0s, [0.343, 0.122, 0.789, 0.888]: stand, talk to (e.g., self, a person, a group).", "At 1100.0s, [0.164, 0.109, 0.796, 0.901]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1099.0s, [0.222, 0.236, 0.822, 0.893]: bend/bow (at the waist), open (e.g., a window, a car door)."]}]}, {"video": "pieVIsGmLsc.mkv", "clip": [1125, 1191], "conversations": [{"from": "human", "timestamps": 1155.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.106, 0.533, 0.344, 0.875]: watch (a person), stand, touch (an object); [0.441, 0.358, 0.522, 0.683]: watch (a person), stand; [0.599, 0.229, 0.713, 0.613]: carry/hold (an object), watch (a person), walk."}, {"from": "human", "timestamps": 1191.0, "value": "For the person at bbox [0.34, 0.138, 0.544, 0.817], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.34, 0.138, 0.544, 0.817]: stand, carry/hold (an object).", "From 1186.0s to 1190.0s, [0.163, 0.148, 0.493, 0.888]->[0.176, 0.127, 0.522, 0.887]->[0.17, 0.125, 0.512, 0.898]->[0.343, 0.202, 0.557, 0.825]->[0.325, 0.146, 0.565, 0.816]: stand, carry/hold (an object).", "At 1185.0s, [0.148, 0.099, 0.391, 0.899]: walk, carry/hold (an object).", "At 1184.0s, [0.249, 0.141, 0.488, 0.865]: walk.", "At 1183.0s, [0.426, 0.125, 0.601, 0.791]: walk, watch (a person)."]}]}, {"video": "pieVIsGmLsc.mkv", "clip": [1184, 1229], "conversations": [{"from": "human", "timestamps": 1214.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.141, 0.243, 0.336, 0.587]: listen to (a person), touch (an object), bend/bow (at the waist); [0.346, 0.173, 0.51, 0.532]: sit, watch (a person), carry/hold (an object), talk to (e.g., self, a person, a group), take (an object) from (a person)."}, {"from": "human", "timestamps": 1229.0, "value": "What actions did the person at bbox [0.045, 0.114, 0.846, 0.908] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.045, 0.114, 0.846, 0.908]: stand, talk to (e.g., self, a person, a group).", "At 1228.0s, [0.082, 0.095, 0.856, 
0.898]: stand, carry/hold (an object), eat, talk to (e.g., self, a person, a group).", "At 1227.0s, [0.079, 0.097, 0.861, 0.9]: stand, drink, talk to (e.g., self, a person, a group).", "At 1226.0s, [0.132, 0.111, 0.811, 0.885]: stand, drink."]}]}, {"video": "pieVIsGmLsc.mkv", "clip": [1395, 1435], "conversations": [{"from": "human", "timestamps": 1425.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.271, 0.187, 0.646, 0.858]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1435.0, "value": "For the person at bbox [0.133, 0.127, 0.477, 0.887], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.133, 0.127, 0.477, 0.887]: sit, listen to (a person), watch (a person).", "At 1434.0s, [0.123, 0.145, 0.485, 0.892]: sit, listen to (a person), watch (a person).", "At 1433.0s, [0.126, 0.147, 0.467, 0.877]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1432.0s, [0.129, 0.143, 0.465, 0.876]: sit.", "At 1431.0s, [0.122, 0.142, 0.498, 0.882]: sit, watch (a person)."]}]}, {"video": "pieVIsGmLsc.mkv", "clip": [1375, 1462], "conversations": [{"from": "human", "timestamps": 1405.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.076, 0.181, 0.186, 0.68]: listen to (a person), stand, watch (a person); [0.153, 0.229, 0.319, 0.776]: listen to (a person), stand, watch (a person); [0.263, 0.116, 0.457, 0.716]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1462.0, "value": "What actions did the person at bbox [0.394, 0.151, 0.958, 0.908] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.394, 0.151, 0.958, 0.908]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1460.0s to 1461.0s, [0.274, 0.17, 0.899, 0.909]->[0.405, 0.101, 0.955, 0.915]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1459.0s, [0.33, 0.269, 0.921, 0.928]: carry/hold (an object), crouch/kneel."]}]}, {"video": "pieVIsGmLsc.mkv", "clip": [1414, 1461], "conversations": [{"from": "human", "timestamps": 1444.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.118, 0.171, 0.482, 0.867]: listen to (a person), sit, watch (a person), talk to (e.g., self, a person, a group); [0.43, 0.249, 0.919, 0.922]: listen to (a person), sit, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1461.0, "value": "What actions is the person in bbox [0.405, 0.101, 0.955, 0.915] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.405, 0.101, 0.955, 0.915]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1460.0s, [0.274, 0.17, 0.899, 0.909]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1459.0s, [0.33, 0.269, 0.921, 0.928]: carry/hold (an object), crouch/kneel."]}]}, {"video": "pieVIsGmLsc.mkv", "clip": [1552, 1600], "conversations": [{"from": "human", "timestamps": 1582.0, "value": "In the current frame, list each person's location and actions."}, {"from": 
"gpt", "value": "In the current frame, [0.169, 0.185, 0.312, 0.711]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.423, 0.226, 0.614, 0.623]: watch (a person), sit, give/serve (an object) to (a person), talk to (e.g., self, a person, a group); [0.491, 0.224, 0.833, 0.694]: listen to (a person), sit."}, {"from": "human", "timestamps": 1600.0, "value": "What actions is the person in bbox [0.395, 0.394, 0.472, 0.588] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.395, 0.394, 0.472, 0.588]: walk.", "At 1599.0s, [0.461, 0.417, 0.545, 0.61]: walk."]}]}, {"video": "pieVIsGmLsc.mkv", "clip": [1674, 1728], "conversations": [{"from": "human", "timestamps": 1704.0, "value": "Describe the actions of the person at bbox [0.069, 0.113, 0.612, 0.9] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.069, 0.113, 0.612, 0.9]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1703.0s, [0.166, 0.126, 0.582, 0.899]: stand, carry/hold (an object), watch (a person).", "At 1702.0s, [0.159, 0.117, 0.592, 0.892]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1728.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.22, 0.169, 0.525, 0.915]: carry/hold (an object), talk to (e.g., self, a person, a group), walk; [0.524, 0.271, 0.758, 0.896]: carry/hold (an object), listen to (a person), walk."}]}, {"video": "pieVIsGmLsc.mkv", "clip": [1722, 1774], "conversations": [{"from": "human", "timestamps": 1752.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.063, 0.121, 0.889, 0.954]: sit, bend/bow (at the waist)."}, {"from": "human", "timestamps": 1774.0, "value": "For the person at bbox [0.222, 0.115, 0.947, 0.97], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.222, 0.115, 0.947, 0.97]: sit, stand.", "At 1773.0s, [0.219, 0.095, 0.975, 0.958]: stand.", "At 1772.0s, [0.206, 0.134, 0.957, 0.954]: sit.", "From 1770.0s to 1771.0s, [0.543, 0.141, 0.721, 0.869]->[0.52, 0.259, 0.673, 0.57]: sit, talk to (e.g., self, a person, a group).", "From 1768.0s to 1769.0s, [0.505, 0.265, 0.708, 0.865]->[0.491, 0.254, 0.709, 0.881]: sit.", "At 1767.0s, [0.454, 0.24, 0.711, 0.835]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1766.0s, [0.464, 0.253, 0.705, 0.847]: sit, talk to (e.g., self, a person, a group).", "At 1765.0s, [0.56, 0.255, 0.712, 0.855]: sit, watch (a person)."]}]}, {"video": "plkJ45_-pMk.mp4", "clip": [907, 946], "conversations": [{"from": "human", "timestamps": 937.0, "value": "What actions did the person at bbox [0.221, 0.012, 0.693, 0.767] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.221, 0.012, 0.693, 0.767]: sit.", "At 936.0s, [0.17, 0.006, 0.703, 0.736]: sit, touch (an object).", "At 935.0s, [0.162, 0.012, 0.67, 0.749]: sit.", "At 934.0s, [0.162, 0.005, 0.675, 0.756]: sit, watch (e.g., TV).", "At 933.0s, [0.182, 0.014, 0.819, 0.786]: sit."]}, {"from": "human", "timestamps": 946.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.266, 0.0, 0.868, 0.797]: sit."}]}, {"video": "plkJ45_-pMk.mp4", "clip": [938, 979], "conversations": 
[{"from": "human", "timestamps": 968.0, "value": "For the person at bbox [0.086, 0.0, 0.975, 0.99], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.086, 0.0, 0.975, 0.99]: sit.", "At 967.0s, [0.032, 0.167, 0.918, 1.0]: sit.", "At 966.0s, [0.273, 0.005, 1.0, 0.986]: sit, talk to (e.g., self, a person, a group).", "At 965.0s, [0.158, 0.032, 0.934, 0.991]: sit.", "At 964.0s, [0.017, 0.0, 1.0, 0.999]: sit, talk to (e.g., self, a person, a group).", "At 963.0s, [0.017, 0.012, 0.972, 0.947]: lie/sleep."]}, {"from": "human", "timestamps": 979.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.021, 0.016, 0.689, 0.987]: walk."}]}, {"video": "plkJ45_-pMk.mp4", "clip": [1043, 1138], "conversations": [{"from": "human", "timestamps": 1073.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.075, 0.49, 1.0]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1138.0, "value": "For the person at bbox [0.253, 0.012, 0.984, 0.966], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.253, 0.012, 0.984, 0.966]: sit, listen to (a person), watch (a person).", "At 1137.0s, [0.26, 0.045, 1.0, 0.974]: sit, listen to (a person)."]}]}, {"video": "plkJ45_-pMk.mp4", "clip": [1072, 1118], "conversations": [{"from": "human", "timestamps": 1102.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.003, 0.033, 0.383, 0.941]: stand; [0.271, 0.134, 0.98, 1.0]: stand."}, {"from": "human", "timestamps": 1118.0, "value": "What actions did the person at bbox [0.673, 0.014, 0.991, 0.967] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.673, 0.014, 0.991, 0.967]: sit, push (an object), touch (an object), listen to (a person), watch (a person).", "At 1117.0s, [0.663, 0.011, 0.99, 1.0]: sit, carry/hold (an object), touch (an object), listen to (a person)."]}]}, {"video": "plkJ45_-pMk.mp4", "clip": [1148, 1179], "conversations": [{"from": "human", "timestamps": 1178.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.425, 0.437, 0.5, 0.695]: listen to (a person), sit, watch (a person), talk to (e.g., self, a person, a group); [0.56, 0.42, 0.657, 0.671]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1179.0, "value": "Describe the actions of the person at bbox [0.427, 0.435, 0.499, 0.696] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.427, 0.435, 0.499, 0.696]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1177.0s to 1178.0s, [0.425, 0.434, 0.492, 0.696]->[0.425, 0.437, 0.5, 0.695]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1176.0s, [0.43, 0.439, 0.491, 0.69]: sit, listen to (a person), watch (a person)."]}]}, {"video": "plkJ45_-pMk.mp4", "clip": [1116, 1246], "conversations": [{"from": "human", "timestamps": 1146.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.032, 0.025, 0.853, 0.986]: sit, talk to (e.g., self, a person, a group)."}, 
{"from": "human", "timestamps": 1246.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.059, 0.007, 0.885, 1.0]?"}, {"from": "gpt", "value": ["At Now, [0.059, 0.007, 0.885, 1.0]: sit, talk to (e.g., self, a person, a group).", "At 1245.0s, [0.059, 0.005, 0.874, 0.995]: sit, talk to (e.g., self, a person, a group).", "At 1244.0s, [0.07, 0.01, 0.874, 0.995]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "plkJ45_-pMk.mp4", "clip": [1172, 1204], "conversations": [{"from": "human", "timestamps": 1202.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.235, 0.014, 0.952, 0.976]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1204.0, "value": "For the person at bbox [0.079, 0.0, 0.864, 0.992], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.079, 0.0, 0.864, 0.992]: sit, listen to (a person).", "At 1203.0s, [0.067, 0.004, 0.891, 0.994]: sit."]}]}, {"video": "plkJ45_-pMk.mp4", "clip": [1277, 1318], "conversations": [{"from": "human", "timestamps": 1307.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.03, 0.075, 0.762, 0.978]: stand."}, {"from": "human", "timestamps": 1318.0, "value": "What actions is the person in bbox [0.123, 0.085, 1.0, 0.993] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.123, 0.085, 1.0, 0.993]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1317.0s, [0.132, 0.092, 0.985, 1.0]: sit, stand, talk to (e.g., self, a person, a group).", "From 1314.0s to 1316.0s, [0.152, 0.114, 0.951, 0.997]->[0.145, 0.114, 0.976, 1.0]->[0.178, 0.104, 0.977, 0.996]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1313.0s, [0.152, 0.133, 0.96, 1.0]: sit, stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1310.0s to 1312.0s, [0.159, 0.123, 0.993, 0.997]->[0.168, 0.11, 0.977, 1.0]->[0.142, 0.111, 1.0, 0.994]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1309.0s, [0.156, 0.138, 0.974, 1.0]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "plkJ45_-pMk.mp4", "clip": [1252, 1348], "conversations": [{"from": "human", "timestamps": 1282.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.096, 0.044, 0.252, 0.852]: listen to (a person), stand; [0.545, 0.296, 0.969, 0.955]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1348.0, "value": "What actions did the person at bbox [0.16, 0.084, 0.971, 0.983] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.16, 0.084, 0.971, 0.983]: sit, talk to (e.g., self, a person, a group).", "At 1347.0s, [0.174, 0.07, 0.962, 0.997]: sit, talk to (e.g., self, a person, a group).", "At 1346.0s, [0.155, 0.082, 0.955, 0.974]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "plkJ45_-pMk.mp4", "clip": [1427, 1466], "conversations": [{"from": "human", "timestamps": 1457.0, "value": "Describe the actions of the person at bbox [0.0, 0.0, 0.622, 0.986] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.0, 0.0, 0.622, 0.986]: stand.", 
"From 1455.0s to 1456.0s, [0.0, 0.0, 0.469, 0.99]->[0.0, 0.0, 0.604, 0.987]: stand.", "From 1453.0s to 1454.0s, [0.098, 0.005, 0.537, 0.985]->[0.0, 0.0, 0.493, 0.978]: stand, enter."]}, {"from": "human", "timestamps": 1466.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.003, 0.304, 0.311, 0.99]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.618, 0.017, 1.0, 1.0]: listen to (a person), stand, watch (a person)."}]}, {"video": "plkJ45_-pMk.mp4", "clip": [1499, 1547], "conversations": [{"from": "human", "timestamps": 1529.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.097, 0.007, 0.893, 1.0]?"}, {"from": "gpt", "value": ["In the current frame, [0.097, 0.007, 0.893, 1.0]: sit, talk to (e.g., self, a person, a group).", "At 1528.0s, [0.147, 0.018, 0.878, 0.981]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1547.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.244, 0.024, 1.0, 0.975]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "plkJ45_-pMk.mp4", "clip": [1513, 1567], "conversations": [{"from": "human", "timestamps": 1543.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.273, 0.045, 0.957, 0.98]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1567.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.587, 0.18, 0.902, 0.501]?"}, {"from": "gpt", "value": ["At Now, [0.587, 0.18, 0.902, 0.501]: sit, listen to (a person), watch (a person).", "At 1566.0s, [0.662, 0.17, 0.894, 0.492]: sit, listen to (a person), watch (a person)."]}]}, {"video": "plkJ45_-pMk.mp4", "clip": [1634, 1667], "conversations": [{"from": "human", "timestamps": 1664.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.129, 0.069, 0.979, 1.0]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1667.0, "value": "What actions did the person at bbox [0.061, 0.12, 1.0, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.061, 0.12, 1.0, 1.0]: sit, talk to (e.g., self, a person, a group).", "From 1664.0s to 1666.0s, [0.129, 0.069, 0.979, 1.0]->[0.15, 0.117, 1.0, 1.0]->[0.106, 0.097, 1.0, 0.996]: sit, talk to (e.g., self, a person, a group).", "At 1663.0s, [0.044, 0.084, 1.0, 0.978]: sit, listen to (a person).", "From 1660.0s to 1662.0s, [0.418, 0.039, 1.0, 0.987]->[0.071, 0.055, 0.988, 0.998]->[0.09, 0.069, 0.985, 0.99]: sit."]}]}, {"video": "plkJ45_-pMk.mp4", "clip": [1625, 1736], "conversations": [{"from": "human", "timestamps": 1655.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.112, 0.083, 0.963, 1.0]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1736.0, "value": "Describe the actions of the person at bbox [0.183, 0.076, 0.945, 0.994] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.183, 0.076, 0.945, 0.994]: stand, talk to (e.g., self, a person, a group).", "At 1735.0s, [0.226, 0.046, 0.934, 0.989]: stand, talk to (e.g., self, a person, a group).", "At 1734.0s, [0.723, 0.271, 0.863, 
0.677]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1733.0s, [0.672, 0.29, 0.814, 0.761]: bend/bow (at the waist), carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1732.0s, [0.68, 0.28, 0.857, 0.918]: walk, watch (a person)."]}]}, {"video": "plkJ45_-pMk.mp4", "clip": [1700, 1737], "conversations": [{"from": "human", "timestamps": 1730.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.24, 0.407, 0.41, 0.667]: listen to (a person), sit, watch (a person); [0.441, 0.357, 0.548, 0.559]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1737.0, "value": "Describe the actions of the person at bbox [0.22, 0.071, 0.934, 0.994] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.22, 0.071, 0.934, 0.994]: stand, talk to (e.g., self, a person, a group).", "From 1735.0s to 1736.0s, [0.226, 0.046, 0.934, 0.989]->[0.183, 0.076, 0.945, 0.994]: stand, talk to (e.g., self, a person, a group).", "At 1734.0s, [0.723, 0.271, 0.863, 0.677]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1733.0s, [0.672, 0.29, 0.814, 0.761]: bend/bow (at the waist), carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1732.0s, [0.68, 0.28, 0.857, 0.918]: walk, watch (a person)."]}]}, {"video": "qBUu7cy-5Iw.mp4", "clip": [889, 992], "conversations": [{"from": "human", "timestamps": 919.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.137, 0.19, 0.475, 0.985]: walk; [0.379, 0.115, 0.62, 0.982]: walk; [0.654, 0.127, 0.882, 0.985]: watch (a person), stand."}, {"from": "human", "timestamps": 992.0, "value": "For the person at bbox [0.234, 0.092, 0.933, 0.983], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.234, 0.092, 0.933, 0.983]: stand, talk to (e.g., self, a person, a group).", "At 991.0s, [0.245, 0.103, 0.924, 0.982]: sit, talk to (e.g., self, a person, a group).", "At 990.0s, [0.648, 0.022, 0.984, 0.98]: sit, listen to (a person), watch (a person).", "From 988.0s to 989.0s, [0.534, 0.016, 0.986, 1.0]->[0.555, 0.028, 0.985, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 987.0s, [0.233, 0.009, 0.941, 0.985]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 985.0s to 986.0s, [0.22, 0.0, 0.922, 0.993]->[0.224, 0.003, 0.927, 0.99]: stand, talk to (e.g., self, a person, a group).", "At 984.0s, [0.227, 0.007, 0.912, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 982.0s to 983.0s, [0.25, 0.015, 0.931, 0.989]->[0.24, 0.011, 0.905, 0.991]: stand, talk to (e.g., self, a person, a group).", "At 981.0s, [0.247, 0.005, 0.923, 0.996]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "qBUu7cy-5Iw.mp4", "clip": [945, 989], "conversations": [{"from": "human", "timestamps": 975.0, "value": "What actions is the person in bbox [0.68, 0.0, 0.991, 0.982] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.68, 0.0, 0.991, 0.982]: stand, listen to (a person), watch (a person).", "At 974.0s, [0.689, 0.0, 0.994, 0.975]: sit, listen to (a person), watch (a 
person).", "From 971.0s to 973.0s, [0.7, 0.0, 0.985, 0.991]->[0.664, 0.0, 0.985, 0.98]->[0.655, 0.016, 0.992, 0.998]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 989.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.039, 0.088, 0.598, 0.997]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.555, 0.028, 0.985, 1.0]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "qBUu7cy-5Iw.mp4", "clip": [1001, 1077], "conversations": [{"from": "human", "timestamps": 1031.0, "value": "For the person at bbox [0.085, 0.027, 1.0, 0.998], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.085, 0.027, 1.0, 0.998]: stand.", "At 1030.0s, [0.091, 0.008, 1.0, 1.0]: stand, listen to (a person).", "From 1026.0s to 1029.0s, [0.597, 0.054, 0.997, 0.997]->[0.564, 0.054, 0.994, 1.0]->[0.597, 0.048, 1.0, 1.0]->[0.604, 0.038, 1.0, 0.996]: stand, listen to (a person), watch (a person).", "At 1025.0s, [0.206, 0.057, 0.919, 0.982]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1020.0s to 1024.0s, [0.221, 0.031, 0.908, 0.988]->[0.217, 0.036, 0.907, 0.98]->[0.219, 0.035, 0.915, 0.987]->[0.216, 0.025, 0.921, 0.993]->[0.213, 0.045, 0.894, 0.975]: stand, talk to (e.g., self, a person, a group).", "At 1019.0s, [0.235, 0.035, 0.921, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1018.0s, [0.26, 0.05, 0.914, 0.985]: sit, talk to (e.g., self, a person, a group).", "From 1015.0s to 1017.0s, [0.258, 0.058, 0.916, 0.994]->[0.24, 0.046, 0.918, 0.985]->[0.258, 0.033, 0.928, 0.984]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1077.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.265, 0.029, 0.966, 1.0]: talk to (e.g., self, a person, a group), walk."}]}, {"video": "qBUu7cy-5Iw.mp4", "clip": [1009, 1139], "conversations": [{"from": "human", "timestamps": 1039.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.189, 0.016, 0.572, 0.995]: listen to (a person), walk; [0.631, 0.398, 0.975, 0.987]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1139.0, "value": "For the person at bbox [0.504, 0.403, 0.692, 0.993], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.504, 0.403, 0.692, 0.993]: stand, listen to (a person).", "From 1135.0s to 1138.0s, [0.663, 0.427, 0.935, 0.997]->[0.658, 0.433, 0.925, 0.998]->[0.665, 0.455, 0.918, 0.995]->[0.653, 0.443, 0.906, 0.993]: stand, listen to (a person).", "From 1133.0s to 1134.0s, [0.673, 0.416, 0.957, 1.0]->[0.67, 0.424, 0.957, 1.0]: stand, listen to (a person), watch (a person)."]}]}, {"video": "qBUu7cy-5Iw.mp4", "clip": [1067, 1119], "conversations": [{"from": "human", "timestamps": 1097.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.03, 0.655, 0.986]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.561, 0.229, 0.995, 1.0]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1119.0, "value": "What actions is the person in bbox 
[0.002, 0.018, 0.63, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.002, 0.018, 0.63, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1118.0s, [0.001, 0.088, 0.624, 0.994]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1110.0s to 1117.0s, [0.058, 0.006, 0.744, 1.0]->[0.006, 0.026, 0.63, 0.979]->[0.013, 0.033, 0.652, 0.987]->[0.004, 0.042, 0.665, 0.979]->[0.002, 0.035, 0.636, 0.985]->[0.005, 0.045, 0.635, 0.985]->[0.003, 0.05, 0.636, 0.984]->[0.006, 0.095, 0.624, 0.987]: stand, talk to (e.g., self, a person, a group).", "From 1104.0s to 1109.0s, [0.06, 0.052, 0.517, 0.985]->[0.021, 0.085, 0.577, 0.993]->[0.0, 0.103, 0.444, 0.992]->[0.009, 0.059, 0.509, 0.981]->[0.065, 0.053, 0.594, 0.984]->[0.005, 0.038, 0.581, 0.995]: walk, talk to (e.g., self, a person, a group).", "From 1102.0s to 1103.0s, [0.015, 0.025, 0.636, 0.984]->[0.053, 0.063, 0.702, 0.991]: stand, talk to (e.g., self, a person, a group).", "From 1097.0s to 1101.0s, [0.0, 0.03, 0.655, 0.986]->[0.0, 0.036, 0.65, 0.978]->[0.0, 0.027, 0.657, 0.974]->[0.0, 0.021, 0.621, 0.984]->[0.0, 0.027, 0.631, 0.989]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1094.0s to 1096.0s, [0.001, 0.034, 0.615, 0.987]->[0.002, 0.034, 0.643, 0.993]->[0.0, 0.031, 0.629, 0.986]: stand, talk to (e.g., self, a person, a group).", "From 1089.0s to 1093.0s, [0.03, 0.044, 0.631, 0.995]->[0.0, 0.03, 0.612, 0.981]->[0.0, 0.017, 0.573, 0.993]->[0.0, 0.04, 0.585, 0.993]->[0.0, 0.031, 0.636, 0.976]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1086.0s to 1088.0s, [0.011, 0.027, 0.595, 0.969]->[0.0, 0.024, 0.43, 0.97]->[0.0, 0.032, 0.485, 0.956]: stand, listen to (a person), watch (a person).", "At 1085.0s, [0.006, 0.0, 0.516, 0.991]: stand, listen to (a person).", "At 1084.0s, [0.004, 0.001, 0.51, 0.983]: stand.", "From 1079.0s to 1083.0s, [0.024, 0.006, 0.501, 0.981]->[0.033, 0.022, 0.481, 0.987]->[0.001, 0.005, 0.468, 0.987]->[0.011, 0.061, 0.579, 0.995]->[0.0, 0.02, 0.437, 0.965]: stand, listen to (a person), watch (a person).", "At 1078.0s, [0.029, 0.0, 0.484, 0.977]: stand, listen to (a person)."]}]}, {"video": "qBUu7cy-5Iw.mp4", "clip": [1116, 1188], "conversations": [{"from": "human", "timestamps": 1146.0, "value": "Describe the actions of the person at bbox [0.506, 0.18, 0.796, 0.986] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.506, 0.18, 0.796, 0.986]: walk, listen to (a person), watch (a person).", "At 1145.0s, [0.098, 0.082, 0.417, 0.991]: walk, watch (a person).", "From 1143.0s to 1144.0s, [0.076, 0.099, 0.323, 0.964]->[0.051, 0.094, 0.304, 0.943]: stand, watch (a person).", "From 1140.0s to 1142.0s, [0.375, 0.291, 0.633, 0.997]->[0.297, 0.258, 0.556, 0.991]->[0.19, 0.16, 0.43, 0.956]: stand.", "From 1135.0s to 1139.0s, [0.663, 0.427, 0.935, 0.997]->[0.658, 0.433, 0.925, 0.998]->[0.665, 0.455, 0.918, 0.995]->[0.653, 0.443, 0.906, 0.993]->[0.504, 0.403, 0.692, 0.993]: stand, listen to (a person).", "From 1133.0s to 1134.0s, [0.673, 0.416, 0.957, 1.0]->[0.67, 0.424, 0.957, 1.0]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1188.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.121, 0.298, 0.465, 0.969]: listen to (a person), stand, hug (a person), watch (a person); [0.317, 0.038, 0.837, 0.983]: watch (a person), stand, hug (a 
person), talk to (e.g., self, a person, a group)."}]}, {"video": "qBUu7cy-5Iw.mp4", "clip": [1169, 1214], "conversations": [{"from": "human", "timestamps": 1199.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.293, 0.122, 0.997, 0.994]: walk."}, {"from": "human", "timestamps": 1214.0, "value": "Describe the actions of the person at bbox [0.438, 0.285, 0.623, 0.749] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.438, 0.285, 0.623, 0.749]: stand, listen to (a person), watch (a person).", "At 1213.0s, [0.425, 0.273, 0.592, 0.661]: stand, watch (a person).", "At 1212.0s, [0.391, 0.251, 0.56, 0.778]: stand, listen to (a person)."]}]}, {"video": "qBUu7cy-5Iw.mp4", "clip": [1187, 1242], "conversations": [{"from": "human", "timestamps": 1217.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.436, 0.065, 0.854, 0.987]?"}, {"from": "gpt", "value": ["At Now, [0.436, 0.065, 0.854, 0.987]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1214.0s to 1216.0s, [0.409, 0.048, 0.88, 0.995]->[0.375, 0.06, 0.854, 0.982]->[0.406, 0.034, 0.844, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1242.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.202, 0.094, 0.507, 0.987]: walk."}]}, {"video": "qBUu7cy-5Iw.mp4", "clip": [1236, 1269], "conversations": [{"from": "human", "timestamps": 1266.0, "value": "Describe the actions of the person at bbox [0.034, 0.198, 0.563, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.034, 0.198, 0.563, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1261.0s to 1265.0s, [0.066, 0.194, 0.574, 0.984]->[0.059, 0.192, 0.568, 0.985]->[0.071, 0.2, 0.645, 0.979]->[0.064, 0.201, 0.58, 0.985]->[0.062, 0.2, 0.582, 0.984]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1269.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.17, 0.032, 0.916, 0.996]: carry/hold (an object), sit, read."}]}, {"video": "qBUu7cy-5Iw.mp4", "clip": [1243, 1355], "conversations": [{"from": "human", "timestamps": 1273.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.162, 0.063, 0.948, 0.989]: carry/hold (an object), sit, read."}, {"from": "human", "timestamps": 1355.0, "value": "What actions did the person at bbox [0.009, 0.363, 0.392, 0.997] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.009, 0.363, 0.392, 0.997]: sit, read, listen to (a person).", "From 1353.0s to 1354.0s, [0.01, 0.364, 0.408, 1.0]->[0.005, 0.357, 0.411, 0.989]: sit, read, listen to (a person).", "At 1352.0s, [0.01, 0.357, 0.397, 0.995]: sit, carry/hold (an object), read, listen to (a person).", "At 1351.0s, [0.012, 0.356, 0.479, 1.0]: sit, read, listen to (a person).", "From 1349.0s to 1350.0s, [0.011, 0.357, 0.423, 1.0]->[0.013, 0.355, 0.407, 1.0]: sit, carry/hold (an object), read, listen to (a person).", "From 1347.0s to 1348.0s, [0.011, 0.347, 0.41, 0.991]->[0.011, 0.35, 0.401, 0.995]: sit, read, listen to (a person).", "At 1346.0s, [0.014, 
0.353, 0.413, 1.0]: sit, carry/hold (an object), read, listen to (a person).", "At 1345.0s, [0.013, 0.338, 0.395, 0.994]: sit, read, listen to (a person).", "At 1344.0s, [0.017, 0.349, 0.48, 1.0]: sit, carry/hold (an object), read, listen to (a person).", "At 1343.0s, [0.055, 0.059, 0.972, 0.962]: sit, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "From 1341.0s to 1342.0s, [0.066, 0.049, 0.943, 0.978]->[0.058, 0.044, 0.967, 0.997]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1339.0s to 1340.0s, [0.013, 0.326, 0.418, 0.995]->[0.013, 0.332, 0.43, 0.993]: sit, carry/hold (an object), listen to (a person).", "At 1338.0s, [0.012, 0.328, 0.399, 0.992]: sit, carry/hold (an object), read, listen to (a person).", "From 1334.0s to 1337.0s, [0.059, 0.409, 0.479, 0.984]->[0.074, 0.381, 0.469, 1.0]->[0.02, 0.325, 0.433, 0.989]->[0.014, 0.323, 0.4, 0.992]: sit, carry/hold (an object), listen to (a person).", "At 1333.0s, [0.058, 0.415, 0.469, 1.0]: sit, carry/hold (an object), watch (a person).", "At 1332.0s, [0.055, 0.419, 0.459, 0.989]: sit, carry/hold (an object), catch (an object).", "At 1331.0s, [0.062, 0.408, 0.441, 0.983]: sit, watch (a person).", "At 1330.0s, [0.052, 0.398, 0.437, 0.984]: sit, listen to (a person)."]}]}, {"video": "qBUu7cy-5Iw.mp4", "clip": [1322, 1373], "conversations": [{"from": "human", "timestamps": 1352.0, "value": "Describe the actions of the person at bbox [0.01, 0.357, 0.397, 0.995] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.01, 0.357, 0.397, 0.995]: sit, carry/hold (an object), read, listen to (a person).", "At 1351.0s, [0.012, 0.356, 0.479, 1.0]: sit, read, listen to (a person).", "From 1349.0s to 1350.0s, [0.011, 0.357, 0.423, 1.0]->[0.013, 0.355, 0.407, 1.0]: sit, carry/hold (an object), read, listen to (a person).", "From 1347.0s to 1348.0s, [0.011, 0.347, 0.41, 0.991]->[0.011, 0.35, 0.401, 0.995]: sit, read, listen to (a person).", "At 1346.0s, [0.014, 0.353, 0.413, 1.0]: sit, carry/hold (an object), read, listen to (a person).", "At 1345.0s, [0.013, 0.338, 0.395, 0.994]: sit, read, listen to (a person).", "At 1344.0s, [0.017, 0.349, 0.48, 1.0]: sit, carry/hold (an object), read, listen to (a person).", "At 1343.0s, [0.055, 0.059, 0.972, 0.962]: sit, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "From 1341.0s to 1342.0s, [0.066, 0.049, 0.943, 0.978]->[0.058, 0.044, 0.967, 0.997]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1339.0s to 1340.0s, [0.013, 0.326, 0.418, 0.995]->[0.013, 0.332, 0.43, 0.993]: sit, carry/hold (an object), listen to (a person).", "At 1338.0s, [0.012, 0.328, 0.399, 0.992]: sit, carry/hold (an object), read, listen to (a person).", "From 1334.0s to 1337.0s, [0.059, 0.409, 0.479, 0.984]->[0.074, 0.381, 0.469, 1.0]->[0.02, 0.325, 0.433, 0.989]->[0.014, 0.323, 0.4, 0.992]: sit, carry/hold (an object), listen to (a person).", "At 1333.0s, [0.058, 0.415, 0.469, 1.0]: sit, carry/hold (an object), watch (a person).", "At 1332.0s, [0.055, 0.419, 0.459, 0.989]: sit, carry/hold (an object), catch (an object).", "At 1331.0s, [0.062, 0.408, 0.441, 0.983]: sit, watch (a person).", "At 1330.0s, [0.052, 0.398, 0.437, 0.984]: sit, listen to (a person)."]}, {"from": "human", "timestamps": 1373.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.006, 0.065, 1.0, 0.977]: sit, talk to (e.g., self, a person, a 
group)."}]}, {"video": "qBUu7cy-5Iw.mp4", "clip": [1350, 1477], "conversations": [{"from": "human", "timestamps": 1380.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.058, 0.37, 0.454, 0.983]: read, sit, listen to (a person); [0.621, 0.122, 0.886, 0.986]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1477.0, "value": "Describe the actions of the person at bbox [0.17, 0.103, 0.435, 0.532] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.17, 0.103, 0.435, 0.532]: sit, listen to (a person), watch (a person).", "From 1454.0s to 1476.0s, [0.273, 0.064, 0.482, 0.559]->[0.214, 0.069, 0.449, 0.664]->[0.197, 0.057, 0.533, 0.705]->[0.186, 0.053, 0.557, 0.671]->[0.155, 0.051, 0.534, 0.688]->[0.142, 0.052, 0.529, 0.663]->[0.124, 0.057, 0.513, 0.688]->[0.111, 0.061, 0.503, 0.614]->[0.153, 0.081, 0.479, 0.58]->[0.103, 0.091, 0.425, 0.602]->[0.165, 0.107, 0.444, 0.549]->[0.157, 0.11, 0.453, 0.534]->[0.143, 0.103, 0.419, 0.56]->[0.117, 0.089, 0.442, 0.57]->[0.169, 0.104, 0.46, 0.536]->[0.101, 0.11, 0.432, 0.573]->[0.161, 0.106, 0.44, 0.531]->[0.16, 0.102, 0.42, 0.547]->[0.119, 0.102, 0.422, 0.569]->[0.137, 0.096, 0.448, 0.563]->[0.156, 0.1, 0.455, 0.539]->[0.12, 0.101, 0.462, 0.579]->[0.168, 0.112, 0.442, 0.524]: sit, listen to (a person), watch (a person).", "At 1453.0s, [0.349, 0.058, 0.452, 0.47]: sit, listen to (a person)."]}]}, {"video": "qBUu7cy-5Iw.mp4", "clip": [1503, 1556], "conversations": [{"from": "human", "timestamps": 1533.0, "value": "What actions is the person in bbox [0.037, 0.055, 0.857, 0.993] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.037, 0.055, 0.857, 0.993]: sit, carry/hold (an object), drink.", "At 1532.0s, [0.032, 0.032, 0.92, 0.978]: sit, carry/hold (an object), watch (a person).", "At 1531.0s, [0.072, 0.024, 0.927, 0.978]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1530.0s, [0.183, 0.028, 0.797, 1.0]: stand."]}, {"from": "human", "timestamps": 1556.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.197, 0.14, 0.553, 0.555]: listen to (a person), sit, watch (a person); [0.442, 0.254, 0.632, 0.505]: listen to (a person), lie/sleep, watch (a person)."}]}, {"video": "qBUu7cy-5Iw.mp4", "clip": [1520, 1571], "conversations": [{"from": "human", "timestamps": 1550.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.043, 0.036, 0.962, 0.968]: watch (a person), lie/sleep, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1571.0, "value": "Describe the actions of the person at bbox [0.442, 0.324, 0.74, 0.727] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.442, 0.324, 0.74, 0.727]: talk to (e.g., self, a person, a group), lie/sleep, watch (a person).", "From 1564.0s to 1570.0s, [0.462, 0.286, 0.68, 0.597]->[0.414, 0.28, 0.695, 0.636]->[0.385, 0.284, 0.696, 0.654]->[0.397, 0.299, 0.699, 0.676]->[0.431, 0.301, 0.702, 0.674]->[0.389, 0.306, 0.744, 0.707]->[0.432, 0.302, 0.722, 0.717]: talk to (e.g., self, a person, a group), lie/sleep, watch (a person).", "At 1563.0s, [0.452, 0.28, 0.67, 0.586]: listen to (a person), lie/sleep, watch (a 
person).", "From 1559.0s to 1562.0s, [0.391, 0.267, 0.637, 0.556]->[0.44, 0.279, 0.655, 0.555]->[0.418, 0.277, 0.653, 0.574]->[0.397, 0.283, 0.649, 0.598]: talk to (e.g., self, a person, a group), lie/sleep, watch (a person).", "At 1558.0s, [0.441, 0.272, 0.64, 0.528]: talk to (e.g., self, a person, a group), lie/sleep.", "At 1557.0s, [0.447, 0.268, 0.633, 0.513]: talk to (e.g., self, a person, a group), lie/sleep, watch (a person).", "At 1556.0s, [0.442, 0.254, 0.632, 0.505]: listen to (a person), lie/sleep, watch (a person).", "From 1554.0s to 1555.0s, [0.433, 0.266, 0.637, 0.496]->[0.444, 0.263, 0.623, 0.497]: talk to (e.g., self, a person, a group), lie/sleep, watch (a person)."]}]}, {"video": "qBUu7cy-5Iw.mp4", "clip": [1640, 1671], "conversations": [{"from": "human", "timestamps": 1670.0, "value": "What actions is the person in bbox [0.002, 0.012, 0.786, 0.981] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.002, 0.012, 0.786, 0.981]: stand.", "At 1668.0s, [0.009, 0.018, 0.772, 0.997]: stand."]}, {"from": "human", "timestamps": 1671.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.001, 0.014, 0.594, 0.99]: stand."}]}, {"video": "qBUu7cy-5Iw.mp4", "clip": [1589, 1701], "conversations": [{"from": "human", "timestamps": 1619.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.042, 0.008, 0.799, 0.986]: listen to (a person), sit."}, {"from": "human", "timestamps": 1701.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.54, 0.0, 0.99, 0.991]?"}, {"from": "gpt", "value": ["In the current frame, [0.54, 0.0, 0.99, 0.991]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1698.0s to 1700.0s, [0.456, 0.04, 0.877, 1.0]->[0.469, 0.011, 0.919, 0.975]->[0.526, 0.006, 0.984, 0.984]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "qBUu7cy-5Iw.mp4", "clip": [1661, 1696], "conversations": [{"from": "human", "timestamps": 1691.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.328, 0.123, 0.568, 0.706]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1696.0, "value": "For the person at bbox [0.411, 0.207, 0.873, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.411, 0.207, 0.873, 1.0]: bend/bow (at the waist), lift/pick up, listen to (a person).", "At 1695.0s, [0.434, 0.198, 0.98, 0.989]: bend/bow (at the waist), put down, listen to (a person).", "At 1694.0s, [0.458, 0.1, 0.926, 1.0]: stand.", "From 1692.0s to 1693.0s, [0.362, 0.093, 0.782, 1.0]->[0.399, 0.085, 0.756, 0.982]: stand, carry/hold (an object)."]}]}, {"video": "qBUu7cy-5Iw.mp4", "clip": [1735, 1774], "conversations": [{"from": "human", "timestamps": 1765.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.123, 0.0, 0.942, 0.974]?"}, {"from": "gpt", "value": ["At Now, [0.123, 0.0, 0.942, 0.974]: stand, talk to (e.g., self, a person, a group).", "At 1764.0s, [0.116, 0.0, 0.927, 0.969]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1763.0s, [0.144, 0.0, 0.91, 0.986]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1774.0, 
"value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.168, 0.084, 0.458, 0.934]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "qpoWHELxL-4.mp4", "clip": [888, 932], "conversations": [{"from": "human", "timestamps": 918.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.244, 0.0, 1.0, 0.986]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 932.0, "value": "Describe the actions of the person at bbox [0.249, 0.0, 0.98, 0.99] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.249, 0.0, 0.98, 0.99]: sit, stand, talk to (e.g., self, a person, a group).", "From 930.0s to 931.0s, [0.224, 0.0, 1.0, 0.986]->[0.208, 0.01, 0.978, 0.972]: sit, talk to (e.g., self, a person, a group).", "At 929.0s, [0.241, 0.0, 0.995, 0.981]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 928.0s, [0.249, 0.004, 0.976, 0.974]: sit, talk to (e.g., self, a person, a group).", "At 927.0s, [0.233, 0.0, 0.998, 0.986]: sit, stand, talk to (e.g., self, a person, a group).", "From 925.0s to 926.0s, [0.226, 0.003, 0.994, 0.987]->[0.243, 0.0, 0.993, 0.99]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "qpoWHELxL-4.mp4", "clip": [881, 992], "conversations": [{"from": "human", "timestamps": 911.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.032, 0.356, 0.993]: listen to (a person), sit; [0.02, 0.09, 0.423, 0.978]: listen to (a person), sit; [0.42, 0.124, 0.791, 0.988]: listen to (a person), sit, watch (a person); [0.755, 0.148, 1.0, 1.0]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 992.0, "value": "For the person at bbox [0.067, 0.0, 0.96, 0.991], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.067, 0.0, 0.96, 0.991]: stand, talk to (e.g., self, a person, a group).", "At 991.0s, [0.048, 0.0, 0.941, 0.991]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "qpoWHELxL-4.mp4", "clip": [1009, 1089], "conversations": [{"from": "human", "timestamps": 1039.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.057, 0.195, 0.433, 0.988]: sit, eat, talk to (e.g., self, a person, a group); [0.496, 0.048, 0.992, 0.982]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1089.0, "value": "For the person at bbox [0.266, 0.029, 0.995, 0.966], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.266, 0.029, 0.995, 0.966]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1088.0s, [0.256, 0.009, 1.0, 0.99]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1087.0s, [0.514, 0.044, 0.875, 0.983]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "qpoWHELxL-4.mp4", "clip": [1063, 1121], "conversations": [{"from": "human", "timestamps": 1093.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.218, 0.0, 0.982, 0.991]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1121.0, 
"value": "What actions is the person in bbox [0.062, 0.057, 0.542, 0.974] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.062, 0.057, 0.542, 0.974]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1120.0s, [0.058, 0.061, 0.555, 0.979]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1119.0s, [0.065, 0.065, 0.55, 0.981]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "qpoWHELxL-4.mp4", "clip": [1115, 1196], "conversations": [{"from": "human", "timestamps": 1145.0, "value": "What actions is the person in bbox [0.307, 0.31, 0.419, 0.883] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.307, 0.31, 0.419, 0.883]: sit, listen to (a person), watch (a person).", "From 1141.0s to 1144.0s, [0.309, 0.313, 0.422, 0.847]->[0.309, 0.31, 0.419, 0.877]->[0.307, 0.312, 0.417, 0.869]->[0.307, 0.302, 0.418, 0.876]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1196.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.249, 0.031, 0.915, 0.987]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "qpoWHELxL-4.mp4", "clip": [1143, 1254], "conversations": [{"from": "human", "timestamps": 1173.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.018, 0.036, 0.573, 0.991]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1254.0, "value": "For the person at bbox [0.487, 0.093, 0.753, 0.807], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.487, 0.093, 0.753, 0.807]: sit, listen to (a person).", "At 1253.0s, [0.508, 0.106, 0.749, 0.817]: sit, listen to (a person).", "At 1252.0s, [0.502, 0.075, 0.741, 0.814]: sit, listen to (a person), watch (a person)."]}]}, {"video": "qpoWHELxL-4.mp4", "clip": [1257, 1324], "conversations": [{"from": "human", "timestamps": 1287.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.008, 0.26, 0.957]: listen to (a person), stand, watch (a person); [0.343, 0.026, 0.883, 0.982]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1324.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.0, 0.861, 0.988]?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.0, 0.861, 0.988]: sit, listen to (a person), watch (a person).", "At 1323.0s, [0.05, 0.011, 0.887, 0.987]: sit, listen to (a person), watch (a person)."]}]}, {"video": "qpoWHELxL-4.mp4", "clip": [1321, 1355], "conversations": [{"from": "human", "timestamps": 1351.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.025, 0.597, 0.593, 0.987]: lie/sleep; [0.276, 0.443, 0.749, 0.832]: lie/sleep; [0.501, 0.164, 0.678, 0.495]: carry/hold (an object), sit; [0.597, 0.106, 0.822, 0.479]: sit."}, {"from": "human", "timestamps": 1355.0, "value": "Describe the actions of the person at bbox [0.071, 0.037, 0.763, 0.982] in the 
current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.071, 0.037, 0.763, 0.982]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1354.0s, [0.123, 0.016, 0.754, 0.995]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1353.0s, [0.503, 0.167, 0.682, 0.481]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1352.0s, [0.503, 0.173, 0.602, 0.483]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1350.0s to 1351.0s, [0.497, 0.171, 0.592, 0.476]->[0.501, 0.164, 0.678, 0.495]: sit, carry/hold (an object).", "From 1348.0s to 1349.0s, [0.073, 0.009, 0.888, 0.995]->[0.106, 0.001, 0.91, 0.991]: sit, watch (a person)."]}]}, {"video": "qpoWHELxL-4.mp4", "clip": [1394, 1450], "conversations": [{"from": "human", "timestamps": 1424.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.241, 0.004, 0.978, 0.994]: lie/sleep."}, {"from": "human", "timestamps": 1450.0, "value": "Describe the actions of the person at bbox [0.443, 0.368, 0.676, 0.976] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.443, 0.368, 0.676, 0.976]: sit, carry/hold (an object).", "At 1449.0s, [0.445, 0.368, 0.666, 0.97]: carry/hold (an object), crouch/kneel."]}]}, {"video": "qpoWHELxL-4.mp4", "clip": [1485, 1543], "conversations": [{"from": "human", "timestamps": 1515.0, "value": "For the person at bbox [0.206, 0.0, 1.0, 0.97], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.206, 0.0, 1.0, 0.97]: stand, talk to (e.g., self, a person, a group).", "At 1514.0s, [0.192, 0.0, 0.998, 0.971]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1543.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.002, 0.017, 0.424, 1.0]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "qpoWHELxL-4.mp4", "clip": [1508, 1583], "conversations": [{"from": "human", "timestamps": 1538.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.333, 0.723, 0.626, 0.923]: lie/sleep; [0.348, 0.463, 0.435, 0.811]: watch (a person), crouch/kneel; [0.399, 0.468, 0.491, 0.753]: sit; [0.464, 0.228, 0.552, 0.743]: watch (a person), bend/bow (at the waist)."}, {"from": "human", "timestamps": 1583.0, "value": "What actions did the person at bbox [0.229, 0.227, 0.545, 0.903] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.229, 0.227, 0.545, 0.903]: sit, carry/hold (an object), listen to (a person).", "From 1579.0s to 1582.0s, [0.244, 0.235, 0.552, 0.935]->[0.242, 0.262, 0.551, 0.917]->[0.246, 0.273, 0.555, 0.945]->[0.245, 0.248, 0.556, 0.928]: sit, carry/hold (an object), listen to (a person).", "At 1578.0s, [0.026, 0.001, 0.715, 0.963]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1577.0s, [0.023, 0.0, 0.727, 0.971]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1576.0s, [0.027, 0.0, 0.721, 0.98]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "qpoWHELxL-4.mp4", "clip": [1566, 1599], "conversations": [{"from": "human", "timestamps": 1596.0, "value": "Can you tell me the current and past actions of 
the person located in bbox [0.24, 0.211, 0.548, 0.918]?"}, {"from": "gpt", "value": ["In the current frame, [0.24, 0.211, 0.548, 0.918]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1595.0s, [0.246, 0.205, 0.549, 0.917]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1593.0s to 1594.0s, [0.0, 0.0, 0.669, 0.98]->[0.0, 0.0, 0.67, 0.984]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1592.0s, [0.01, 0.0, 0.657, 0.986]: sit, stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1591.0s, [0.02, 0.011, 0.656, 0.962]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1599.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.183, 0.222, 0.354, 0.671]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.24, 0.218, 0.55, 0.929]: sit, watch (a person), listen to (a person), carry/hold (an object), talk to (e.g., self, a person, a group); [0.645, 0.033, 0.871, 0.973]: listen to (a person), sit."}]}, {"video": "qpoWHELxL-4.mp4", "clip": [1603, 1658], "conversations": [{"from": "human", "timestamps": 1633.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.209, 0.068, 0.678, 0.972]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.728, 0.002, 0.997, 0.998]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1658.0, "value": "Describe the actions of the person at bbox [0.063, 0.266, 0.207, 0.613] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.063, 0.266, 0.207, 0.613]: sit, watch (a person).", "At 1657.0s, [0.066, 0.295, 0.207, 0.617]: sit, listen to (a person), watch (a person)."]}]}, {"video": "qpoWHELxL-4.mp4", "clip": [1610, 1708], "conversations": [{"from": "human", "timestamps": 1640.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.063, 0.0, 0.674, 1.0]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1708.0, "value": "What actions did the person at bbox [0.0, 0.003, 0.909, 0.964] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.003, 0.909, 0.964]: stand, carry/hold (an object).", "From 1706.0s to 1707.0s, [0.069, 0.008, 0.913, 0.982]->[0.026, 0.005, 0.837, 0.964]: stand, carry/hold (an object)."]}]}, {"video": "qpoWHELxL-4.mp4", "clip": [1681, 1720], "conversations": [{"from": "human", "timestamps": 1711.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.398, 0.012, 0.998, 0.998]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1720.0, "value": "For the person at bbox [0.01, 0.02, 0.447, 0.976], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.01, 0.02, 0.447, 0.976]: stand, carry/hold (an object), listen to (a person).", "From 1717.0s to 1719.0s, [0.236, 0.215, 0.362, 0.904]->[0.014, 0.015, 0.429, 0.98]->[0.016, 0.038, 0.459, 0.986]: stand, carry/hold (an object), listen to (a person).", "From 1715.0s to 1716.0s, [0.213, 0.211, 0.365, 0.904]->[0.232, 0.211, 0.36, 0.902]: stand, carry/hold (an object).", 
"From 1711.0s to 1714.0s, [0.398, 0.012, 0.998, 0.998]->[0.502, 0.01, 0.997, 0.98]->[0.429, 0.006, 0.927, 0.97]->[0.223, 0.211, 0.363, 0.907]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1710.0s, [0.442, 0.0, 1.0, 0.983]: stand, carry/hold (an object), cut, talk to (e.g., self, a person, a group).", "From 1706.0s to 1708.0s, [0.069, 0.008, 0.913, 0.982]->[0.026, 0.005, 0.837, 0.964]->[0.0, 0.003, 0.909, 0.964]: stand, carry/hold (an object)."]}]}, {"video": "qpoWHELxL-4.mp4", "clip": [1719, 1794], "conversations": [{"from": "human", "timestamps": 1749.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.035, 0.002, 0.571, 0.971]: stand, talk to (e.g., self, a person, a group); [0.537, 0.0, 1.0, 0.978]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1794.0, "value": "What actions did the person at bbox [0.329, 0.0, 0.997, 0.992] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.329, 0.0, 0.997, 0.992]: sit.", "At 1793.0s, [0.334, 0.004, 0.985, 0.991]: sit.", "From 1790.0s to 1792.0s, [0.265, 0.0, 0.97, 0.971]->[0.328, 0.0, 0.98, 0.975]->[0.283, 0.0, 0.976, 0.982]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "qrkff49p4E4.mp4", "clip": [892, 951], "conversations": [{"from": "human", "timestamps": 922.0, "value": "Describe the actions of the person at bbox [0.416, 0.163, 0.671, 0.988] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.416, 0.163, 0.671, 0.988]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 921.0s, [0.434, 0.193, 0.671, 0.988]: walk, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 951.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.006, 0.008, 0.881, 0.979]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "qrkff49p4E4.mp4", "clip": [888, 1004], "conversations": [{"from": "human", "timestamps": 918.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.057, 0.276, 0.248, 0.976]: watch (a person), stand; [0.374, 0.333, 0.498, 0.933]: carry/hold (an object), stand, watch (a person); [0.489, 0.292, 0.691, 0.994]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.699, 0.09, 0.99, 0.984]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1004.0, "value": "What actions is the person in bbox [0.0, 0.0, 0.983, 0.965] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.0, 0.983, 0.965]: stand, talk to (e.g., self, a person, a group).", "From 996.0s to 1003.0s, [0.016, 0.008, 1.0, 0.983]->[0.011, 0.006, 1.0, 0.983]->[0.018, 0.009, 1.0, 0.989]->[0.0, 0.005, 1.0, 0.968]->[0.013, 0.005, 0.976, 0.994]->[0.019, 0.023, 1.0, 0.972]->[0.009, 0.009, 1.0, 0.968]->[0.018, 0.0, 0.998, 0.97]: stand, talk to (e.g., self, a person, a group).", "From 986.0s to 995.0s, [0.0, 0.0, 1.0, 1.0]->[0.0, 0.004, 0.988, 1.0]->[0.065, 0.012, 0.928, 0.988]->[0.013, 0.026, 0.734, 0.99]->[0.013, 0.02, 0.751, 0.97]->[0.0, 0.028, 0.91, 0.99]->[0.0, 0.019, 0.871, 0.981]->[0.011, 0.0, 0.852, 0.986]->[0.0, 0.03, 0.877, 0.982]->[0.013, 0.0, 0.958, 0.99]: stand, carry/hold (an object), 
talk to (e.g., self, a person, a group).", "From 981.0s to 985.0s, [0.03, 0.013, 0.96, 0.986]->[0.025, 0.019, 0.944, 0.987]->[0.01, 0.015, 0.978, 0.978]->[0.0, 0.018, 0.969, 0.96]->[0.0, 0.017, 0.981, 0.966]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "qrkff49p4E4.mp4", "clip": [951, 987], "conversations": [{"from": "human", "timestamps": 981.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.03, 0.013, 0.96, 0.986]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 987.0, "value": "What actions did the person at bbox [0.0, 0.004, 0.988, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.004, 0.988, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 986.0s, [0.0, 0.0, 1.0, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 981.0s to 985.0s, [0.03, 0.013, 0.96, 0.986]->[0.025, 0.019, 0.944, 0.987]->[0.01, 0.015, 0.978, 0.978]->[0.0, 0.018, 0.969, 0.96]->[0.0, 0.017, 0.981, 0.966]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "qrkff49p4E4.mp4", "clip": [1016, 1072], "conversations": [{"from": "human", "timestamps": 1046.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.012, 0.022, 0.941, 0.985]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1072.0, "value": "Describe the actions of the person at bbox [0.0, 0.0, 0.993, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.0, 0.993, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1071.0s, [0.016, 0.011, 0.922, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1069.0s to 1070.0s, [0.015, 0.028, 0.857, 0.982]->[0.011, 0.041, 0.918, 0.991]: stand, talk to (e.g., self, a person, a group).", "From 1061.0s to 1068.0s, [0.029, 0.005, 0.894, 0.997]->[0.02, 0.045, 0.859, 0.99]->[0.0, 0.019, 0.91, 0.991]->[0.023, 0.014, 0.918, 0.992]->[0.023, 0.025, 0.91, 0.994]->[0.016, 0.012, 0.815, 1.0]->[0.019, 0.007, 0.832, 0.99]->[0.007, 0.021, 0.959, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1060.0s, [0.021, 0.0, 1.0, 0.974]: stand."]}]}, {"video": "qrkff49p4E4.mp4", "clip": [1019, 1108], "conversations": [{"from": "human", "timestamps": 1049.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.01, 0.018, 0.861, 0.987]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1108.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.009, 0.008, 0.934, 0.991]?"}, {"from": "gpt", "value": ["At Now, [0.009, 0.008, 0.934, 0.991]: stand, talk to (e.g., self, a person, a group).", "From 1085.0s to 1107.0s, [0.0, 0.004, 0.975, 1.0]->[0.012, 0.016, 0.95, 0.979]->[0.002, 0.052, 0.971, 0.979]->[0.016, 0.009, 0.954, 0.974]->[0.015, 0.03, 0.941, 0.974]->[0.0, 0.0, 0.947, 0.991]->[0.036, 0.116, 0.948, 0.982]->[0.026, 0.12, 0.929, 0.974]->[0.001, 0.035, 0.959, 1.0]->[0.0, 0.006, 0.938, 0.98]->[0.008, 0.015, 0.958, 0.977]->[0.005, 0.0, 0.979, 0.998]->[0.002, 0.021, 0.956, 0.984]->[0.016, 0.007, 0.998, 0.979]->[0.0, 0.017, 0.94, 1.0]->[0.0, 0.008, 0.922, 0.977]->[0.0, 0.036, 0.955, 1.0]->[0.0, 0.005, 0.95, 0.999]->[0.015, 
0.017, 0.978, 0.986]->[0.009, 0.025, 0.941, 0.997]->[0.017, 0.003, 1.0, 0.987]->[0.0, 0.028, 0.966, 0.974]->[0.0, 0.043, 0.964, 0.973]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "qrkff49p4E4.mp4", "clip": [1067, 1106], "conversations": [{"from": "human", "timestamps": 1097.0, "value": "What actions did the person at bbox [0.002, 0.021, 0.956, 0.984] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.002, 0.021, 0.956, 0.984]: stand, talk to (e.g., self, a person, a group).", "From 1085.0s to 1096.0s, [0.0, 0.004, 0.975, 1.0]->[0.012, 0.016, 0.95, 0.979]->[0.002, 0.052, 0.971, 0.979]->[0.016, 0.009, 0.954, 0.974]->[0.015, 0.03, 0.941, 0.974]->[0.0, 0.0, 0.947, 0.991]->[0.036, 0.116, 0.948, 0.982]->[0.026, 0.12, 0.929, 0.974]->[0.001, 0.035, 0.959, 1.0]->[0.0, 0.006, 0.938, 0.98]->[0.008, 0.015, 0.958, 0.977]->[0.005, 0.0, 0.979, 0.998]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1106.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.028, 0.966, 0.974]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "qrkff49p4E4.mp4", "clip": [1113, 1198], "conversations": [{"from": "human", "timestamps": 1143.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.017, 0.005, 0.949, 0.984]?"}, {"from": "gpt", "value": ["At Now, [0.017, 0.005, 0.949, 0.984]: stand, talk to (e.g., self, a person, a group).", "From 1131.0s to 1142.0s, [0.004, 0.011, 0.934, 0.977]->[0.001, 0.018, 0.936, 0.979]->[0.018, 0.012, 0.943, 0.981]->[0.0, 0.003, 0.899, 0.98]->[0.0, 0.0, 0.922, 0.992]->[0.003, 0.002, 0.966, 0.981]->[0.006, 0.012, 0.914, 0.977]->[0.01, 0.014, 0.926, 0.976]->[0.012, 0.005, 0.953, 0.968]->[0.005, 0.008, 0.963, 0.984]->[0.014, 0.02, 0.921, 0.971]->[0.001, 0.018, 0.929, 0.973]: stand, talk to (e.g., self, a person, a group).", "At 1130.0s, [0.001, 0.011, 0.94, 0.975]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1198.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.048, 0.044, 1.0, 0.974]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "qrkff49p4E4.mp4", "clip": [1127, 1231], "conversations": [{"from": "human", "timestamps": 1157.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.009, 0.965, 0.973]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1231.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.254, 0.171, 0.582, 0.986]?"}, {"from": "gpt", "value": ["In the current frame, [0.254, 0.171, 0.582, 0.986]: walk.", "At 1230.0s, [0.145, 0.169, 0.641, 1.0]: walk, talk to (e.g., self, a person, a group).", "At 1229.0s, [0.227, 0.136, 0.614, 0.987]: stand, talk to (e.g., self, a person, a group).", "At 1228.0s, [0.223, 0.101, 0.601, 0.999]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1227.0s, [0.197, 0.09, 0.563, 0.999]: stand, listen to (a person).", "At 1226.0s, [0.206, 0.095, 0.526, 0.97]: stand, listen to (a person), watch (a person).", "At 1225.0s, [0.21, 0.096, 0.52, 0.98]: stand, listen to (a person).", "From 1223.0s to 1224.0s, [0.234, 0.098, 0.542, 0.97]->[0.228, 0.09, 0.553, 0.984]: stand, listen to (a person), 
watch (a person).", "From 1221.0s to 1222.0s, [0.234, 0.095, 0.564, 0.966]->[0.235, 0.094, 0.574, 0.996]: stand, talk to (e.g., self, a person, a group).", "At 1220.0s, [0.235, 0.094, 0.595, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1219.0s, [0.22, 0.104, 0.578, 0.987]: stand, talk to (e.g., self, a person, a group).", "From 1217.0s to 1218.0s, [0.213, 0.127, 0.609, 0.995]->[0.192, 0.09, 0.528, 0.956]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1216.0s, [0.266, 0.137, 0.624, 0.989]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1215.0s, [0.397, 0.162, 0.713, 0.992]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1214.0s, [0.476, 0.143, 0.733, 0.925]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "qrkff49p4E4.mp4", "clip": [1251, 1300], "conversations": [{"from": "human", "timestamps": 1281.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.324, 0.085, 0.993, 0.967]: sit."}, {"from": "human", "timestamps": 1300.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.234, 0.057, 0.707, 1.0]?"}, {"from": "gpt", "value": ["At Now, [0.234, 0.057, 0.707, 1.0]: stand.", "At 1299.0s, [0.246, 0.067, 0.712, 0.984]: stand."]}]}, {"video": "qrkff49p4E4.mp4", "clip": [1282, 1322], "conversations": [{"from": "human", "timestamps": 1312.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.255, 0.115, 0.867, 0.992]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1322.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.073, 0.238, 0.405, 1.0]?"}, {"from": "gpt", "value": ["At Now, [0.073, 0.238, 0.405, 1.0]: stand, listen to (a person), watch (a person).", "At 1321.0s, [0.0, 0.278, 0.176, 0.991]: stand, listen to (a person), watch (a person)."]}]}, {"video": "qrkff49p4E4.mp4", "clip": [1309, 1348], "conversations": [{"from": "human", "timestamps": 1339.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.117, 0.633, 0.971]: listen to (a person), stand; [0.477, 0.062, 1.0, 0.991]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1348.0, "value": "What actions is the person in bbox [0.01, 0.053, 0.775, 0.974] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.01, 0.053, 0.775, 0.974]: stand, watch (a person).", "At 1347.0s, [0.0, 0.044, 0.697, 0.969]: stand.", "From 1337.0s to 1346.0s, [0.0, 0.165, 0.617, 0.968]->[0.0, 0.142, 0.599, 0.993]->[0.0, 0.117, 0.633, 0.971]->[0.004, 0.12, 0.643, 0.963]->[0.0, 0.077, 0.629, 0.949]->[0.0, 0.058, 0.605, 0.964]->[0.0, 0.052, 0.624, 0.953]->[0.0, 0.063, 0.688, 0.941]->[0.0, 0.046, 0.705, 0.951]->[0.007, 0.042, 0.7, 0.942]: stand, listen to (a person).", "From 1335.0s to 1336.0s, [0.005, 0.176, 0.607, 0.966]->[0.0, 0.185, 0.623, 0.972]: stand, talk to (e.g., self, a person, a group).", "At 1334.0s, [0.0, 0.184, 0.59, 0.964]: stand, listen to (a person), talk to (e.g., self, a person, 
a group).", "From 1326.0s to 1333.0s, [0.126, 0.149, 0.481, 0.991]->[0.125, 0.141, 0.516, 0.996]->[0.114, 0.141, 0.544, 0.987]->[0.099, 0.155, 0.552, 0.984]->[0.077, 0.158, 0.547, 0.975]->[0.054, 0.171, 0.562, 0.974]->[0.03, 0.171, 0.554, 0.976]->[0.022, 0.196, 0.552, 0.973]: stand, listen to (a person).", "From 1321.0s to 1325.0s, [0.0, 0.278, 0.176, 0.991]->[0.073, 0.238, 0.405, 1.0]->[0.139, 0.222, 0.466, 0.989]->[0.148, 0.201, 0.49, 1.0]->[0.14, 0.168, 0.496, 1.0]: stand, listen to (a person), watch (a person)."]}]}, {"video": "qrkff49p4E4.mp4", "clip": [1363, 1405], "conversations": [{"from": "human", "timestamps": 1393.0, "value": "What actions did the person at bbox [0.0, 0.184, 0.482, 0.984] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.184, 0.482, 0.984]: stand, listen to (a person), watch (a person).", "At 1392.0s, [0.0, 0.198, 0.372, 0.977]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1405.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.046, 0.1, 0.679, 0.977]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.716, 0.01, 1.0, 0.399]: listen to (a person), stand."}]}, {"video": "qrkff49p4E4.mp4", "clip": [1596, 1656], "conversations": [{"from": "human", "timestamps": 1626.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.349, 0.434, 0.995]: listen to (a person), sit, stand; [0.344, 0.066, 0.78, 0.975]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.531, 0.009, 0.999, 0.994]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1656.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.463, 0.223, 0.681, 0.98]?"}, {"from": "gpt", "value": ["At Now, [0.463, 0.223, 0.681, 0.98]: walk, listen to (a person), watch (a person).", "From 1654.0s to 1655.0s, [0.399, 0.149, 0.587, 0.928]->[0.396, 0.171, 0.606, 0.983]: walk, listen to (a person), watch (a person).", "At 1653.0s, [0.395, 0.181, 0.568, 0.89]: stand, listen to (a person), watch (a person).", "At 1652.0s, [0.417, 0.167, 0.563, 0.897]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "qrkff49p4E4.mp4", "clip": [1622, 1679], "conversations": [{"from": "human", "timestamps": 1652.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.13, 0.268, 0.286, 0.953]: listen to (a person), stand, talk to (e.g., self, a person, a group); [0.331, 0.26, 0.454, 0.911]: listen to (a person), stand, watch (a person); [0.417, 0.167, 0.563, 0.897]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1679.0, "value": "What actions is the person in bbox [0.162, 0.32, 0.293, 0.807] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.162, 0.32, 0.293, 0.807]: stand, carry/hold (an object).", "At 1678.0s, [0.089, 0.346, 0.259, 0.774]: bend/bow (at the waist), carry/hold (an object)."]}]}, {"video": "qrkff49p4E4.mp4", "clip": [1694, 1729], "conversations": [{"from": "human", "timestamps": 1724.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the 
current frame, [0.102, 0.235, 0.395, 0.99]: listen to (a person), stand; [0.451, 0.216, 0.671, 0.752]: talk to (e.g., self, a person, a group), walk; [0.736, 0.265, 0.957, 0.875]: carry/hold (an object), listen to (a person), watch (a person), walk."}, {"from": "human", "timestamps": 1729.0, "value": "What actions is the person in bbox [0.47, 0.231, 0.633, 0.777] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.47, 0.231, 0.633, 0.777]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1727.0s to 1728.0s, [0.485, 0.231, 0.662, 0.671]->[0.484, 0.238, 0.685, 0.742]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1726.0s, [0.463, 0.224, 0.641, 0.666]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1725.0s, [0.467, 0.237, 0.673, 0.658]: stand, talk to (e.g., self, a person, a group).", "At 1724.0s, [0.451, 0.216, 0.671, 0.752]: walk, talk to (e.g., self, a person, a group).", "At 1723.0s, [0.455, 0.177, 0.611, 0.916]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1722.0s, [0.401, 0.156, 0.617, 0.951]: walk, talk to (e.g., self, a person, a group).", "At 1721.0s, [0.397, 0.096, 0.617, 0.94]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1720.0s, [0.357, 0.161, 0.549, 0.993]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1719.0s, [0.224, 0.157, 0.511, 0.98]: walk, talk to (e.g., self, a person, a group).", "At 1718.0s, [0.202, 0.097, 0.426, 0.973]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 1716.0s to 1717.0s, [0.275, 0.135, 0.55, 0.967]->[0.2, 0.121, 0.513, 0.951]: walk, touch (an object), talk to (e.g., self, a person, a group).", "At 1715.0s, [0.325, 0.112, 0.512, 0.813]: walk, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "qsTqtWVVSLM.mkv", "clip": [922, 954], "conversations": [{"from": "human", "timestamps": 952.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.166, 0.054, 1.0, 0.977]?"}, {"from": "gpt", "value": ["In the current frame, [0.166, 0.054, 1.0, 0.977]: sit, listen to (a person).", "From 949.0s to 951.0s, [0.303, 0.047, 0.92, 0.979]->[0.31, 0.041, 0.929, 0.985]->[0.299, 0.028, 0.906, 0.988]: sit, listen to (a person), watch (a person).", "At 948.0s, [0.282, 0.07, 0.953, 0.997]: sit, listen to (a person).", "At 947.0s, [0.776, 0.044, 1.0, 1.0]: sit, touch (an object), listen to (a person).", "At 946.0s, [0.777, 0.041, 1.0, 1.0]: sit, listen to (a person)."]}, {"from": "human", "timestamps": 954.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.122, 0.0, 0.816, 0.939]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.738, 0.005, 1.0, 0.995]: listen to (a person), sit."}]}, {"video": "qsTqtWVVSLM.mkv", "clip": [896, 1002], "conversations": [{"from": "human", "timestamps": 926.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.369, 0.042, 0.897, 0.983]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1002.0, "value": "Describe the actions of the person at bbox [0.527, 0.113, 0.885, 0.984] in the current frame and the previous frames."}, {"from": 
"gpt", "value": ["At Now, [0.527, 0.113, 0.885, 0.984]: sit, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1001.0s, [0.499, 0.152, 0.933, 0.974]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1000.0s, [0.262, 0.165, 0.878, 0.98]: sit, listen to (a person).", "At 999.0s, [0.23, 0.072, 0.847, 0.953]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "qsTqtWVVSLM.mkv", "clip": [939, 986], "conversations": [{"from": "human", "timestamps": 969.0, "value": "For the person at bbox [0.577, 0.0, 0.989, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.577, 0.0, 0.989, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 965.0s to 968.0s, [0.246, 0.022, 0.936, 0.989]->[0.288, 0.025, 0.951, 0.995]->[0.288, 0.033, 0.959, 1.0]->[0.279, 0.035, 0.947, 0.983]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 964.0s, [0.26, 0.044, 0.901, 0.987]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 956.0s to 963.0s, [0.291, 0.033, 0.901, 0.98]->[0.264, 0.058, 0.886, 0.984]->[0.28, 0.058, 0.896, 0.981]->[0.639, 0.013, 1.0, 1.0]->[0.63, 0.015, 1.0, 1.0]->[0.627, 0.005, 1.0, 1.0]->[0.622, 0.012, 1.0, 1.0]->[0.627, 0.007, 1.0, 1.0]: sit, listen to (a person), watch (a person).", "From 952.0s to 955.0s, [0.166, 0.054, 1.0, 0.977]->[0.766, 0.014, 1.0, 0.906]->[0.738, 0.005, 1.0, 0.995]->[0.244, 0.038, 0.956, 1.0]: sit, listen to (a person).", "From 949.0s to 951.0s, [0.303, 0.047, 0.92, 0.979]->[0.31, 0.041, 0.929, 0.985]->[0.299, 0.028, 0.906, 0.988]: sit, listen to (a person), watch (a person).", "At 948.0s, [0.282, 0.07, 0.953, 0.997]: sit, listen to (a person).", "At 947.0s, [0.776, 0.044, 1.0, 1.0]: sit, touch (an object), listen to (a person).", "At 946.0s, [0.777, 0.041, 1.0, 1.0]: sit, listen to (a person)."]}, {"from": "human", "timestamps": 986.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.0, 0.268, 0.256, 0.98]: listen to (a person), sit; [0.384, 0.059, 0.965, 1.0]: carry/hold (an object), talk to (e.g., self, a person, a group), touch (an object), bend/bow (at the waist)."}]}, {"video": "qsTqtWVVSLM.mkv", "clip": [1038, 1073], "conversations": [{"from": "human", "timestamps": 1068.0, "value": "Describe the actions of the person at bbox [0.469, 0.058, 0.891, 0.977] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.469, 0.058, 0.891, 0.977]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1067.0s, [0.466, 0.05, 0.893, 0.976]: sit, listen to (a person), watch (a person).", "At 1066.0s, [0.459, 0.046, 0.896, 0.98]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1064.0s to 1065.0s, [0.482, 0.062, 0.915, 0.982]->[0.427, 0.062, 0.913, 0.971]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1063.0s, [0.496, 0.059, 0.912, 0.982]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1073.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.148, 0.003, 0.759, 0.975]: watch (a person), sit, talk to (e.g., self, a person, a group), 
drink."}]}, {"video": "qsTqtWVVSLM.mkv", "clip": [1047, 1098], "conversations": [{"from": "human", "timestamps": 1077.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.115, 0.046, 0.538, 0.969]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.506, 0.076, 0.906, 0.972]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1098.0, "value": "What actions is the person in bbox [0.0, 0.02, 0.348, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.02, 0.348, 1.0]: sit, listen to (a person), watch (a person).", "From 1094.0s to 1097.0s, [0.016, 0.023, 0.424, 0.992]->[0.001, 0.028, 0.369, 0.998]->[0.009, 0.036, 0.387, 0.993]->[0.011, 0.052, 0.407, 0.979]: sit, listen to (a person), watch (a person).", "At 1093.0s, [0.149, 0.0, 0.653, 0.964]: sit, ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group), watch (a person).", "From 1086.0s to 1092.0s, [0.0, 0.039, 0.336, 0.996]->[0.006, 0.036, 0.386, 0.984]->[0.001, 0.043, 0.308, 0.986]->[0.16, 0.0, 0.647, 0.956]->[0.172, 0.007, 0.656, 0.953]->[0.155, 0.0, 0.643, 0.961]->[0.154, 0.008, 0.647, 0.978]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "qsTqtWVVSLM.mkv", "clip": [1114, 1172], "conversations": [{"from": "human", "timestamps": 1144.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.239, 0.035, 0.528, 0.989]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1172.0, "value": "Describe the actions of the person at bbox [0.248, 0.032, 0.553, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.248, 0.032, 0.553, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1171.0s, [0.186, 0.039, 0.497, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "qsTqtWVVSLM.mkv", "clip": [1156, 1245], "conversations": [{"from": "human", "timestamps": 1186.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.209, 0.031, 0.608, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1245.0, "value": "Describe the actions of the person at bbox [0.346, 0.026, 0.911, 0.985] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.346, 0.026, 0.911, 0.985]: sit, listen to (a person), watch (a person).", "From 1243.0s to 1244.0s, [0.497, 0.0, 1.0, 0.99]->[0.349, 0.027, 0.914, 0.976]: sit, listen to (a person), watch (a person).", "From 1239.0s to 1242.0s, [0.494, 0.0, 1.0, 0.981]->[0.346, 0.029, 0.925, 0.985]->[0.346, 0.023, 0.935, 0.988]->[0.501, 0.002, 1.0, 0.993]: sit, listen to (a person).", "At 1238.0s, [0.503, 0.0, 1.0, 0.979]: sit, listen to (a person), watch (a person).", "From 1236.0s to 1237.0s, [0.345, 0.033, 0.904, 0.988]->[0.339, 0.029, 0.889, 0.984]: sit, listen to (a person).", "From 1233.0s to 1235.0s, [0.522, 0.0, 1.0, 0.987]->[0.535, 0.0, 1.0, 0.98]->[0.523, 0.003, 1.0, 0.981]: sit, listen to (a person), watch (a person).", "At 1232.0s, [0.508, 0.025, 1.0, 0.993]: sit, listen to (a person).", "From 1229.0s to 1231.0s, [0.327, 0.019, 0.915, 0.993]->[0.317, 0.021, 0.907, 0.988]->[0.538, 
0.013, 1.0, 0.982]: sit, listen to (a person), watch (a person).", "At 1228.0s, [0.518, 0.012, 0.999, 0.99]: sit, watch (a person).", "From 1226.0s to 1227.0s, [0.486, 0.007, 0.997, 0.985]->[0.335, 0.034, 0.898, 0.986]: sit, listen to (a person), watch (a person).", "At 1225.0s, [0.544, 0.0, 1.0, 0.969]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1223.0s to 1224.0s, [0.331, 0.036, 0.897, 0.996]->[0.329, 0.041, 0.9, 0.992]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1220.0s to 1222.0s, [0.48, 0.0, 1.0, 0.984]->[0.566, 0.0, 1.0, 0.983]->[0.351, 0.031, 0.902, 0.993]: sit, listen to (a person), watch (a person).", "At 1219.0s, [0.475, 0.0, 0.982, 0.984]: sit, stand, listen to (a person), watch (a person).", "At 1218.0s, [0.533, 0.0, 1.0, 0.984]: sit, listen to (a person).", "At 1217.0s, [0.455, 0.0, 0.988, 0.997]: sit, stand, listen to (a person), watch (a person).", "From 1215.0s to 1216.0s, [0.539, 0.004, 0.999, 0.973]->[0.529, 0.002, 0.997, 0.992]: stand, listen to (a person), watch (a person).", "From 1212.0s to 1214.0s, [0.583, 0.008, 1.0, 0.971]->[0.564, 0.0, 0.999, 0.98]->[0.484, 0.001, 0.996, 0.998]: sit, listen to (a person), watch (a person).", "At 1211.0s, [0.644, 0.019, 1.0, 0.96]: stand, listen to (a person), watch (a person).", "At 1210.0s, [0.468, 0.009, 0.995, 1.0]: sit, listen to (a person), watch (a person).", "At 1209.0s, [0.506, 0.01, 0.993, 0.976]: stand, listen to (a person), watch (a person).", "From 1203.0s to 1208.0s, [0.582, 0.058, 1.0, 0.989]->[0.579, 0.05, 1.0, 0.992]->[0.572, 0.043, 1.0, 0.978]->[0.348, 0.015, 0.922, 1.0]->[0.349, 0.007, 0.918, 1.0]->[0.54, 0.018, 0.994, 0.978]: sit, listen to (a person), watch (a person).", "From 1199.0s to 1202.0s, [0.374, 0.044, 0.913, 0.998]->[0.368, 0.05, 0.924, 0.999]->[0.364, 0.057, 0.924, 0.998]->[0.361, 0.051, 0.933, 0.993]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1195.0s to 1198.0s, [0.665, 0.012, 1.0, 0.961]->[0.508, 0.0, 0.999, 0.983]->[0.497, 0.001, 0.999, 0.969]->[0.557, 0.008, 1.0, 0.987]: sit, listen to (a person), watch (a person).", "At 1194.0s, [0.579, 0.008, 1.0, 0.957]: sit, stand, listen to (a person), watch (a person).", "From 1192.0s to 1193.0s, [0.601, 0.013, 1.0, 0.974]->[0.564, 0.024, 1.0, 0.966]: sit, listen to (a person), watch (a person)."]}]}, {"video": "qsTqtWVVSLM.mkv", "clip": [1216, 1248], "conversations": [{"from": "human", "timestamps": 1246.0, "value": "What actions did the person at bbox [0.48, 0.0, 1.0, 0.99] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.48, 0.0, 1.0, 0.99]: sit, listen to (a person), watch (a person).", "From 1243.0s to 1245.0s, [0.497, 0.0, 1.0, 0.99]->[0.349, 0.027, 0.914, 0.976]->[0.346, 0.026, 0.911, 0.985]: sit, listen to (a person), watch (a person).", "From 1239.0s to 1242.0s, [0.494, 0.0, 1.0, 0.981]->[0.346, 0.029, 0.925, 0.985]->[0.346, 0.023, 0.935, 0.988]->[0.501, 0.002, 1.0, 0.993]: sit, listen to (a person).", "At 1238.0s, [0.503, 0.0, 1.0, 0.979]: sit, listen to (a person), watch (a person).", "From 1236.0s to 1237.0s, [0.345, 0.033, 0.904, 0.988]->[0.339, 0.029, 0.889, 0.984]: sit, listen to (a person).", "From 1233.0s to 1235.0s, [0.522, 0.0, 1.0, 0.987]->[0.535, 0.0, 1.0, 0.98]->[0.523, 0.003, 1.0, 0.981]: sit, listen to (a person), watch (a person).", "At 1232.0s, [0.508, 0.025, 1.0, 0.993]: sit, listen to (a person).", "From 1229.0s to 1231.0s, [0.327, 0.019, 0.915, 0.993]->[0.317, 0.021, 0.907, 0.988]->[0.538, 
0.013, 1.0, 0.982]: sit, listen to (a person), watch (a person).", "At 1228.0s, [0.518, 0.012, 0.999, 0.99]: sit, watch (a person).", "From 1226.0s to 1227.0s, [0.486, 0.007, 0.997, 0.985]->[0.335, 0.034, 0.898, 0.986]: sit, listen to (a person), watch (a person).", "At 1225.0s, [0.544, 0.0, 1.0, 0.969]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1223.0s to 1224.0s, [0.331, 0.036, 0.897, 0.996]->[0.329, 0.041, 0.9, 0.992]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1220.0s to 1222.0s, [0.48, 0.0, 1.0, 0.984]->[0.566, 0.0, 1.0, 0.983]->[0.351, 0.031, 0.902, 0.993]: sit, listen to (a person), watch (a person).", "At 1219.0s, [0.475, 0.0, 0.982, 0.984]: sit, stand, listen to (a person), watch (a person).", "At 1218.0s, [0.533, 0.0, 1.0, 0.984]: sit, listen to (a person).", "At 1217.0s, [0.455, 0.0, 0.988, 0.997]: sit, stand, listen to (a person), watch (a person).", "From 1215.0s to 1216.0s, [0.539, 0.004, 0.999, 0.973]->[0.529, 0.002, 0.997, 0.992]: stand, listen to (a person), watch (a person).", "From 1212.0s to 1214.0s, [0.583, 0.008, 1.0, 0.971]->[0.564, 0.0, 0.999, 0.98]->[0.484, 0.001, 0.996, 0.998]: sit, listen to (a person), watch (a person).", "At 1211.0s, [0.644, 0.019, 1.0, 0.96]: stand, listen to (a person), watch (a person).", "At 1210.0s, [0.468, 0.009, 0.995, 1.0]: sit, listen to (a person), watch (a person).", "At 1209.0s, [0.506, 0.01, 0.993, 0.976]: stand, listen to (a person), watch (a person).", "From 1203.0s to 1208.0s, [0.582, 0.058, 1.0, 0.989]->[0.579, 0.05, 1.0, 0.992]->[0.572, 0.043, 1.0, 0.978]->[0.348, 0.015, 0.922, 1.0]->[0.349, 0.007, 0.918, 1.0]->[0.54, 0.018, 0.994, 0.978]: sit, listen to (a person), watch (a person).", "From 1199.0s to 1202.0s, [0.374, 0.044, 0.913, 0.998]->[0.368, 0.05, 0.924, 0.999]->[0.364, 0.057, 0.924, 0.998]->[0.361, 0.051, 0.933, 0.993]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1195.0s to 1198.0s, [0.665, 0.012, 1.0, 0.961]->[0.508, 0.0, 0.999, 0.983]->[0.497, 0.001, 0.999, 0.969]->[0.557, 0.008, 1.0, 0.987]: sit, listen to (a person), watch (a person).", "At 1194.0s, [0.579, 0.008, 1.0, 0.957]: sit, stand, listen to (a person), watch (a person).", "From 1192.0s to 1193.0s, [0.601, 0.013, 1.0, 0.974]->[0.564, 0.024, 1.0, 0.966]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1248.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.013, 0.042, 0.629, 0.992]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.489, 0.0, 1.0, 0.989]: listen to (a person), sit, watch (a person)."}]}, {"video": "qsTqtWVVSLM.mkv", "clip": [1231, 1286], "conversations": [{"from": "human", "timestamps": 1261.0, "value": "For the person at bbox [0.226, 0.005, 0.482, 0.968], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.226, 0.005, 0.482, 0.968]: stand, drink.", "At 1260.0s, [0.221, 0.0, 0.463, 0.94]: stand, drink, watch (a person).", "At 1259.0s, [0.23, 0.0, 0.455, 0.94]: stand, drink."]}, {"from": "human", "timestamps": 1286.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.616, 0.08, 0.835, 0.968]: carry/hold (an object), stand, watch (a person), listen to (a person)."}]}, {"video": "qsTqtWVVSLM.mkv", "clip": [1230, 1367], "conversations": [{"from": "human", "timestamps": 1260.0, 
"value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.221, 0.0, 0.463, 0.94]: watch (a person), stand, drink; [0.466, 0.113, 0.981, 0.99]: sit, touch (an object)."}, {"from": "human", "timestamps": 1367.0, "value": "For the person at bbox [0.434, 0.078, 0.906, 0.966], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.434, 0.078, 0.906, 0.966]: stand, listen to (a person), watch (a person).", "From 1360.0s to 1366.0s, [0.439, 0.006, 0.8, 0.987]->[0.434, 0.009, 0.794, 0.99]->[0.429, 0.0, 0.793, 0.986]->[0.426, 0.002, 0.787, 0.986]->[0.423, 0.022, 0.787, 0.996]->[0.436, 0.048, 0.795, 0.993]->[0.42, 0.039, 0.788, 0.99]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "qsTqtWVVSLM.mkv", "clip": [1387, 1437], "conversations": [{"from": "human", "timestamps": 1417.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.232, 0.027, 0.684, 0.986]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1437.0, "value": "What actions is the person in bbox [0.412, 0.037, 0.624, 0.809] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.412, 0.037, 0.624, 0.809]: stand, touch (an object).", "From 1434.0s to 1436.0s, [0.288, 0.009, 0.469, 0.787]->[0.299, 0.05, 0.498, 0.825]->[0.271, 0.065, 0.494, 0.865]: stand, listen to (a person).", "At 1433.0s, [0.221, 0.095, 0.502, 0.775]: touch (an object), get up.", "At 1432.0s, [0.142, 0.236, 0.408, 0.774]: touch (an object).", "At 1431.0s, [0.297, 0.396, 0.475, 0.757]: touch (an object), listen to (a person)."]}]}, {"video": "qsTqtWVVSLM.mkv", "clip": [1446, 1486], "conversations": [{"from": "human", "timestamps": 1476.0, "value": "What actions is the person in bbox [0.397, 0.011, 0.938, 0.988] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.397, 0.011, 0.938, 0.988]: sit, carry/hold (an object), touch (an object), watch (a person).", "At 1475.0s, [0.387, 0.028, 0.914, 0.989]: sit, carry/hold (an object), touch (an object), play with kids, watch (a person)."]}, {"from": "human", "timestamps": 1486.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.1, 0.468, 0.393, 0.945]: carry/hold (an object), sit; [0.244, 0.017, 0.836, 0.997]: sit, touch (an object), watch (a person), listen to (a person), carry/hold (an object)."}]}, {"video": "qsTqtWVVSLM.mkv", "clip": [1567, 1613], "conversations": [{"from": "human", "timestamps": 1597.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.194, 0.052, 0.629, 0.982]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1613.0, "value": "What actions did the person at bbox [0.206, 0.024, 0.64, 0.984] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.206, 0.024, 0.64, 0.984]: sit, talk to (e.g., self, a person, a group).", "At 1612.0s, [0.214, 0.017, 0.651, 0.976]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "qsTqtWVVSLM.mkv", "clip": [1607, 1668], "conversations": [{"from": "human", "timestamps": 
1637.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.385, 0.013, 0.754, 0.981]: watch (a person), stand."}, {"from": "human", "timestamps": 1668.0, "value": "Describe the actions of the person at bbox [0.32, 0.044, 0.761, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.32, 0.044, 0.761, 1.0]: sit, talk to (e.g., self, a person, a group).", "From 1659.0s to 1667.0s, [0.375, 0.064, 0.733, 1.0]->[0.392, 0.057, 0.726, 1.0]->[0.274, 0.053, 0.947, 0.989]->[0.195, 0.041, 0.968, 0.999]->[0.327, 0.027, 0.747, 1.0]->[0.277, 0.034, 0.742, 1.0]->[0.326, 0.046, 0.751, 1.0]->[0.332, 0.03, 0.728, 0.997]->[0.335, 0.061, 0.834, 0.996]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "qsTqtWVVSLM.mkv", "clip": [1709, 1757], "conversations": [{"from": "human", "timestamps": 1739.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.169, 0.041, 0.471, 0.989]: listen to (a person), stand; [0.44, 0.087, 0.896, 0.992]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1757.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.101, 0.012, 0.519, 0.977]?"}, {"from": "gpt", "value": ["In the current frame, [0.101, 0.012, 0.519, 0.977]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1755.0s to 1756.0s, [0.051, 0.038, 0.571, 0.997]->[0.083, 0.053, 0.537, 0.986]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1753.0s to 1754.0s, [0.167, 0.058, 0.633, 0.992]->[0.133, 0.066, 0.625, 0.995]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "r2llOyS-BmE.mkv", "clip": [885, 957], "conversations": [{"from": "human", "timestamps": 915.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.224, 0.339, 0.554, 0.864]: carry/hold (an object), sit, talk to (e.g., self, a person, a group); [0.548, 0.315, 0.894, 0.97]: carry/hold (an object), sit, listen to (a person)."}, {"from": "human", "timestamps": 957.0, "value": "Describe the actions of the person at bbox [0.033, 0.181, 0.425, 0.916] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.033, 0.181, 0.425, 0.916]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 956.0s, [0.058, 0.212, 0.425, 0.924]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "r2llOyS-BmE.mkv", "clip": [903, 998], "conversations": [{"from": "human", "timestamps": 933.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.052, 0.174, 0.433, 0.902]: carry/hold (an object), sit, talk to (e.g., self, a person, a group); [0.568, 0.102, 0.997, 0.939]: carry/hold (an object), sit, listen to (a person)."}, {"from": "human", "timestamps": 998.0, "value": "For the person at bbox [0.571, 0.166, 0.988, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.571, 0.166, 0.988, 1.0]: stand, listen to (a person), watch (a person).", "At 997.0s, [0.565, 0.182, 0.986, 1.0]: stand, listen to (a person), watch (a person).", "At 996.0s, [0.604, 
0.154, 1.0, 0.986]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 995.0s, [0.491, 0.2, 0.905, 0.976]: stand, walk, talk to (e.g., self, a person, a group), watch (a person).", "At 994.0s, [0.216, 0.272, 0.574, 0.993]: walk, talk to (e.g., self, a person, a group).", "From 991.0s to 993.0s, [0.305, 0.268, 0.62, 0.979]->[0.351, 0.267, 0.62, 0.999]->[0.349, 0.269, 0.646, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 990.0s, [0.207, 0.277, 0.698, 0.978]: get up, talk to (e.g., self, a person, a group), watch (a person).", "From 988.0s to 989.0s, [0.086, 0.433, 0.429, 0.958]->[0.053, 0.59, 0.406, 0.997]: sit, listen to (a person), watch (a person)."]}]}, {"video": "r2llOyS-BmE.mkv", "clip": [936, 976], "conversations": [{"from": "human", "timestamps": 966.0, "value": "What actions did the person at bbox [0.52, 0.114, 0.988, 0.936] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.52, 0.114, 0.988, 0.936]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 965.0s, [0.515, 0.121, 0.992, 0.965]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 964.0s, [0.514, 0.124, 0.985, 0.943]: carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 963.0s, [0.506, 0.125, 0.992, 0.947]: sit, listen to (a person).", "From 958.0s to 962.0s, [0.53, 0.106, 0.987, 0.914]->[0.514, 0.107, 0.992, 0.94]->[0.534, 0.106, 0.986, 0.925]->[0.521, 0.112, 1.0, 0.954]->[0.51, 0.113, 0.991, 0.95]: sit, carry/hold (an object), listen to (a person).", "At 957.0s, [0.522, 0.102, 0.994, 0.946]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 956.0s, [0.524, 0.104, 0.988, 0.913]: sit, listen to (a person)."]}, {"from": "human", "timestamps": 976.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.088, 0.199, 0.547, 0.895]: sit, talk to (e.g., self, a person, a group); [0.512, 0.126, 0.99, 0.94]: carry/hold (an object), sit, listen to (a person)."}]}, {"video": "r2llOyS-BmE.mkv", "clip": [1081, 1114], "conversations": [{"from": "human", "timestamps": 1111.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.166, 0.3, 0.501, 0.764]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group); [0.513, 0.248, 0.906, 0.885]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1114.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.517, 0.255, 0.888, 0.848]?"}, {"from": "gpt", "value": ["In the current frame, [0.517, 0.255, 0.888, 0.848]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1113.0s, [0.5, 0.268, 0.91, 0.855]: sit, clink glass, talk to (e.g., self, a person, a group), watch (a person).", "At 1112.0s, [0.523, 0.257, 0.901, 0.883]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1111.0s, [0.513, 0.248, 0.906, 0.885]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1109.0s to 1110.0s, [0.516, 0.263, 0.899, 0.875]->[0.519, 0.252, 0.9, 0.875]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1108.0s, [0.514, 0.253, 0.902, 0.88]: 
sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1096.0s to 1107.0s, [0.518, 0.309, 0.94, 0.913]->[0.543, 0.307, 0.929, 0.942]->[0.531, 0.314, 0.939, 0.955]->[0.541, 0.307, 0.94, 0.943]->[0.53, 0.304, 0.938, 0.943]->[0.529, 0.289, 0.932, 0.896]->[0.54, 0.291, 0.932, 0.924]->[0.532, 0.289, 0.934, 0.909]->[0.543, 0.289, 0.916, 0.922]->[0.535, 0.283, 0.936, 0.896]->[0.535, 0.28, 0.944, 0.92]->[0.525, 0.259, 0.907, 0.895]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1094.0s to 1095.0s, [0.524, 0.298, 0.911, 0.898]->[0.509, 0.314, 0.936, 0.922]: sit, carry/hold (an object), listen to (a person), take (an object) from (a person), watch (a person).", "From 1091.0s to 1093.0s, [0.537, 0.272, 0.895, 0.859]->[0.527, 0.269, 0.91, 0.878]->[0.536, 0.264, 0.924, 0.881]: sit, listen to (a person), watch (a person).", "At 1090.0s, [0.525, 0.276, 0.917, 0.87]: sit, listen to (a person).", "From 1088.0s to 1089.0s, [0.517, 0.269, 0.9, 0.876]->[0.519, 0.272, 0.907, 0.878]: sit, listen to (a person), watch (a person).", "At 1087.0s, [0.525, 0.289, 0.912, 0.867]: sit, talk to (e.g., self, a person, a group).", "From 1085.0s to 1086.0s, [0.546, 0.27, 0.925, 0.886]->[0.531, 0.29, 0.909, 0.874]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1084.0s, [0.514, 0.282, 0.957, 0.881]: sit, talk to (e.g., self, a person, a group).", "At 1083.0s, [0.534, 0.271, 0.946, 0.861]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1082.0s, [0.477, 0.277, 0.929, 0.862]: sit, put down, listen to (a person), watch (a person).", "At 1081.0s, [0.483, 0.284, 0.933, 0.879]: sit, put down, listen to (a person).", "At 1080.0s, [0.515, 0.258, 0.92, 0.862]: sit, drink, listen to (a person).", "At 1079.0s, [0.507, 0.277, 0.944, 0.873]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1078.0s, [0.48, 0.286, 0.919, 0.892]: sit, carry/hold (an object), listen to (a person).", "From 1076.0s to 1077.0s, [0.481, 0.282, 0.922, 0.928]->[0.495, 0.275, 0.919, 0.894]: sit, listen to (a person), watch (a person).", "At 1075.0s, [0.516, 0.271, 0.909, 0.897]: sit, listen to (a person).", "At 1074.0s, [0.494, 0.285, 0.906, 0.895]: sit, listen to (a person), watch (a person).", "At 1073.0s, [0.493, 0.312, 0.907, 0.922]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1072.0s, [0.453, 0.437, 0.873, 0.98]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1071.0s, [0.402, 0.242, 0.853, 0.989]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person).", "At 1070.0s, [0.534, 0.106, 0.783, 0.934]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1069.0s, [0.655, 0.047, 0.91, 0.969]: stand, listen to (a person).", "At 1068.0s, [0.69, 0.074, 0.891, 0.925]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1067.0s, [0.774, 0.073, 0.986, 0.883]: walk, listen to (a person), watch (a person)."]}]}, {"video": "r2llOyS-BmE.mkv", "clip": [1146, 1248], "conversations": [{"from": "human", "timestamps": 1176.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.137, 0.174, 0.447, 1.0]: touch (an object), watch (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.426, 0.129, 0.879, 0.814]: carry/hold (an object), sit, watch (a person), listen to (a person)."}, 
{"from": "human", "timestamps": 1248.0, "value": "What actions is the person in bbox [0.446, 0.126, 0.709, 0.992] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.446, 0.126, 0.709, 0.992]: walk, sing to (e.g., self, a person, a group), talk to (e.g., self, a person, a group).", "At 1247.0s, [0.439, 0.188, 0.637, 0.955]: walk, grab (a person), listen to (a person), talk to (e.g., self, a person, a group).", "At 1246.0s, [0.403, 0.191, 0.603, 0.949]: walk, talk to (e.g., self, a person, a group)."]}]}, {"video": "r2llOyS-BmE.mkv", "clip": [1173, 1214], "conversations": [{"from": "human", "timestamps": 1203.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.142, 0.21, 0.6, 0.981]?"}, {"from": "gpt", "value": ["In the current frame, [0.142, 0.21, 0.6, 0.981]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1201.0s to 1202.0s, [0.129, 0.2, 0.48, 0.994]->[0.135, 0.204, 0.554, 0.978]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1200.0s, [0.118, 0.207, 0.474, 0.998]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1199.0s, [0.179, 0.213, 0.495, 0.978]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1214.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.357, 0.113, 0.634, 0.99]: carry/hold (an object), sit, watch (a person), listen to (a person); [0.579, 0.167, 0.957, 0.983]: talk to (e.g., self, a person, a group), walk."}]}, {"video": "r2llOyS-BmE.mkv", "clip": [1246, 1346], "conversations": [{"from": "human", "timestamps": 1276.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.117, 0.265, 0.193, 0.527]: carry/hold (an object), walk; [0.363, 0.168, 0.596, 0.899]: grab (a person), walk."}, {"from": "human", "timestamps": 1346.0, "value": "For the person at bbox [0.481, 0.235, 0.878, 0.99], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.481, 0.235, 0.878, 0.99]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1345.0s, [0.494, 0.158, 0.878, 0.987]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1344.0s, [0.426, 0.134, 0.873, 0.994]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1343.0s, [0.463, 0.184, 0.912, 0.979]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1342.0s, [0.418, 0.267, 0.908, 0.992]: stand, carry/hold (an object).", "From 1340.0s to 1341.0s, [0.436, 0.244, 0.822, 0.989]->[0.396, 0.251, 0.863, 0.98]: stand, talk to (e.g., self, a person, a group).", "At 1339.0s, [0.463, 0.22, 0.862, 0.98]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1338.0s, [0.586, 0.242, 0.974, 0.993]: stand, talk to (e.g., self, a person, a group).", "At 1337.0s, [0.432, 0.19, 0.897, 0.99]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1336.0s, [0.412, 0.195, 0.865, 0.98]: stand, listen to (a person), watch (a person).", "At 1335.0s, 
[0.446, 0.18, 0.846, 0.985]: stand, listen to (a person).", "From 1332.0s to 1334.0s, [0.452, 0.2, 0.865, 0.979]->[0.495, 0.193, 0.906, 0.982]->[0.589, 0.259, 0.937, 0.985]: stand, talk to (e.g., self, a person, a group).", "At 1331.0s, [0.613, 0.185, 0.973, 0.949]: stand, listen (e.g., to music), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "r2llOyS-BmE.mkv", "clip": [1370, 1432], "conversations": [{"from": "human", "timestamps": 1400.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.035, 0.106, 0.403, 0.967]: stand, talk to (e.g., self, a person, a group); [0.332, 0.134, 0.602, 0.669]: listen to (a person), stand, talk to (e.g., self, a person, a group); [0.517, 0.196, 0.849, 0.664]: listen to (a person), stand, grab (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1432.0, "value": "For the person at bbox [0.525, 0.13, 0.813, 0.829], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.525, 0.13, 0.813, 0.829]: stand, talk to (e.g., self, a person, a group).", "At 1431.0s, [0.518, 0.142, 0.864, 0.856]: stand, talk to (e.g., self, a person, a group).", "At 1430.0s, [0.514, 0.173, 0.852, 0.859]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1429.0s, [0.508, 0.107, 0.801, 0.859]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1428.0s, [0.513, 0.125, 0.846, 0.901]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1427.0s, [0.472, 0.158, 0.832, 0.868]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1426.0s, [0.544, 0.194, 0.828, 0.84]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1425.0s, [0.563, 0.158, 0.959, 0.956]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1424.0s, [0.586, 0.146, 0.892, 0.865]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1423.0s, [0.614, 0.141, 0.948, 0.922]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1417.0s to 1422.0s, [0.546, 0.142, 0.924, 0.884]->[0.607, 0.163, 0.936, 0.909]->[0.676, 0.212, 0.984, 0.983]->[0.55, 0.107, 0.869, 0.886]->[0.514, 0.136, 0.849, 0.822]->[0.545, 0.152, 0.893, 0.864]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "r2llOyS-BmE.mkv", "clip": [1358, 1441], "conversations": [{"from": "human", "timestamps": 1388.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.348, 0.186, 0.619, 0.807]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.615, 0.254, 0.953, 0.85]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1441.0, "value": "What actions did the person at bbox [0.0, 0.056, 0.271, 0.995] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.056, 0.271, 0.995]: stand, watch (a person).", "At 1440.0s, [0.0, 0.069, 0.282, 0.985]: stand, listen to (a person), watch (a person).", "At 1438.0s, [0.0, 0.061, 0.299, 1.0]: stand, listen to (a person), watch (a person)."]}]}, {"video": "r2llOyS-BmE.mkv", "clip": [1495, 1614], "conversations": [{"from": 
"human", "timestamps": 1525.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.011, 0.21, 0.386, 0.979]: listen to (a person), stand, grab (a person); [0.313, 0.311, 0.611, 0.974]: watch (a person), stand; [0.668, 0.14, 0.941, 0.708]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1614.0, "value": "For the person at bbox [0.207, 0.304, 0.673, 0.963], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.207, 0.304, 0.673, 0.963]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1611.0s to 1613.0s, [0.254, 0.31, 0.68, 0.921]->[0.219, 0.32, 0.672, 0.919]->[0.219, 0.314, 0.67, 0.93]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1610.0s, [0.078, 0.298, 0.55, 0.979]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1609.0s, [0.054, 0.271, 0.307, 0.692]: carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1608.0s, [0.078, 0.289, 0.321, 0.693]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1607.0s, [0.07, 0.278, 0.315, 0.7]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1606.0s, [0.103, 0.29, 0.377, 0.695]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1605.0s, [0.104, 0.292, 0.373, 0.692]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1604.0s, [0.068, 0.273, 0.353, 0.699]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1603.0s, [0.104, 0.296, 0.387, 0.7]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1602.0s, [0.098, 0.29, 0.329, 0.7]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1601.0s, [0.092, 0.283, 0.343, 0.704]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1600.0s, [0.104, 0.29, 0.362, 0.694]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1599.0s, [0.104, 0.285, 0.373, 0.692]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1598.0s, [0.115, 0.291, 0.371, 0.696]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1597.0s, [0.101, 0.28, 0.34, 0.706]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1596.0s, [0.107, 0.288, 0.372, 0.684]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1595.0s, [0.102, 0.295, 0.363, 0.691]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1594.0s, [0.097, 0.291, 0.349, 0.693]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1593.0s, [0.078, 0.292, 0.371, 0.693]: carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1592.0s, [0.096, 0.285, 0.376, 0.69]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1591.0s, [0.104, 0.273, 0.331, 0.694]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1590.0s, [0.052, 0.24, 0.312, 0.707]: stand, carry/hold 
(an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1589.0s, [0.149, 0.311, 0.372, 0.735]: carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1588.0s, [0.191, 0.282, 0.422, 0.753]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1587.0s, [0.183, 0.283, 0.404, 0.757]: sit, talk to (e.g., self, a person, a group).", "At 1586.0s, [0.183, 0.283, 0.406, 0.752]: stand, talk to (e.g., self, a person, a group).", "At 1585.0s, [0.191, 0.28, 0.4, 0.747]: stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "r2llOyS-BmE.mkv", "clip": [1529, 1576], "conversations": [{"from": "human", "timestamps": 1559.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.234, 0.198, 0.392, 0.75]: walk; [0.313, 0.127, 0.688, 0.968]: watch (a person), stand; [0.643, 0.26, 0.8, 0.864]: stand."}, {"from": "human", "timestamps": 1576.0, "value": "Describe the actions of the person at bbox [0.436, 0.283, 0.561, 0.775] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.436, 0.283, 0.561, 0.775]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1575.0s, [0.462, 0.281, 0.578, 0.805]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1574.0s, [0.432, 0.279, 0.569, 0.83]: walk, carry/hold (an object), watch (a person).", "At 1573.0s, [0.425, 0.271, 0.582, 0.842]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1569.0s to 1572.0s, [0.398, 0.265, 0.562, 0.862]->[0.396, 0.263, 0.569, 0.851]->[0.4, 0.261, 0.567, 0.852]->[0.404, 0.257, 0.572, 0.851]: stand, listen to (a person), watch (a person).", "At 1567.0s, [0.398, 0.278, 0.563, 0.858]: stand, listen to (a person), watch (a person).", "At 1566.0s, [0.402, 0.276, 0.563, 0.863]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1565.0s, [0.404, 0.279, 0.567, 0.875]: stand, listen to (a person), watch (a person).", "At 1564.0s, [0.421, 0.286, 0.586, 0.879]: walk, listen to (a person), watch (a person).", "At 1563.0s, [0.396, 0.27, 0.559, 0.902]: walk, watch (a person).", "At 1562.0s, [0.389, 0.258, 0.588, 0.957]: walk."]}]}, {"video": "r2llOyS-BmE.mkv", "clip": [1601, 1662], "conversations": [{"from": "human", "timestamps": 1631.0, "value": "Describe the actions of the person at bbox [0.211, 0.334, 0.664, 0.928] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.211, 0.334, 0.664, 0.928]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1630.0s, [0.211, 0.335, 0.655, 0.922]: sit, stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1625.0s to 1629.0s, [0.222, 0.326, 0.653, 0.937]->[0.222, 0.319, 0.663, 0.944]->[0.219, 0.325, 0.656, 0.945]->[0.209, 0.315, 0.654, 0.946]->[0.214, 0.325, 0.66, 0.927]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1623.0s to 1624.0s, [0.203, 0.318, 0.645, 0.933]->[0.209, 0.312, 0.658, 0.935]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1622.0s, [0.203, 0.317, 0.648, 0.93]: carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1619.0s to 1621.0s, [0.207, 0.321, 0.657, 0.934]->[0.24, 0.334, 0.68, 0.922]->[0.208, 0.311, 0.645, 0.935]: sit, carry/hold (an 
object), talk to (e.g., self, a person, a group).", "At 1618.0s, [0.175, 0.137, 0.845, 0.879]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1662.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.001, 0.074, 0.13, 1.0]: listen to (a person), stand, watch (a person); [0.201, 0.28, 0.405, 0.978]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.393, 0.298, 0.567, 0.508]: carry/hold (an object), sit, watch (a person), listen to (a person); [0.536, 0.276, 0.741, 0.53]: carry/hold (an object), sit, watch (a person), listen to (a person); [0.733, 0.288, 0.983, 0.877]: carry/hold (an object), sit, listen to (a person)."}]}, {"video": "r2llOyS-BmE.mkv", "clip": [1636, 1713], "conversations": [{"from": "human", "timestamps": 1666.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.176, 0.122, 0.904, 0.971]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1713.0, "value": "Describe the actions of the person at bbox [0.047, 0.312, 0.23, 0.711] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.047, 0.312, 0.23, 0.711]: stand, listen to (a person), watch (a person).", "At 1712.0s, [0.052, 0.31, 0.227, 0.706]: sit, listen to (a person), watch (a person).", "At 1711.0s, [0.053, 0.309, 0.229, 0.71]: stand, listen to (a person).", "At 1710.0s, [0.051, 0.318, 0.231, 0.705]: stand, listen to (a person), watch (a person).", "At 1709.0s, [0.058, 0.314, 0.235, 0.711]: sit, listen to (a person), watch (a person).", "At 1708.0s, [0.058, 0.314, 0.234, 0.719]: stand, listen to (a person), watch (a person).", "At 1707.0s, [0.041, 0.315, 0.238, 0.726]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1706.0s, [0.08, 0.32, 0.296, 0.737]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1705.0s, [0.003, 0.301, 0.244, 0.776]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1702.0s to 1704.0s, [0.171, 0.101, 0.855, 0.874]->[0.176, 0.101, 0.882, 0.874]->[0.154, 0.104, 0.875, 0.888]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "r2llOyS-BmE.mkv", "clip": [1679, 1728], "conversations": [{"from": "human", "timestamps": 1709.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.005, 0.193, 0.128, 0.632]?"}, {"from": "gpt", "value": ["At Now, [0.005, 0.193, 0.128, 0.632]: stand, listen to (a person), watch (a person).", "From 1707.0s to 1708.0s, [0.003, 0.185, 0.124, 0.646]->[0.003, 0.184, 0.128, 0.699]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1728.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.316, 0.241, 0.616, 0.734]: sit, play with pets, talk to (e.g., self, a person, a group); [0.632, 0.245, 0.943, 0.707]: carry/hold (an object), sit, listen to (a person)."}]}, {"video": "r2llOyS-BmE.mkv", "clip": [1739, 1772], "conversations": [{"from": "human", "timestamps": 1769.0, "value": "Describe the actions of the person at bbox [0.123, 0.201, 0.447, 0.978] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.123, 0.201, 0.447, 0.978]: stand, talk to (e.g., 
self, a person, a group), watch (a person).", "At 1768.0s, [0.106, 0.192, 0.442, 0.976]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1767.0s, [0.149, 0.152, 0.446, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1766.0s, [0.149, 0.151, 0.44, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1763.0s to 1765.0s, [0.17, 0.174, 0.448, 0.987]->[0.141, 0.162, 0.449, 0.995]->[0.186, 0.174, 0.449, 0.998]: stand, listen to (a person), watch (a person).", "At 1762.0s, [0.171, 0.184, 0.446, 0.99]: stand, watch (a person).", "At 1761.0s, [0.16, 0.155, 0.435, 0.989]: stand, listen to (a person), watch (a person).", "At 1760.0s, [0.165, 0.175, 0.457, 0.982]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1758.0s, [0.294, 0.198, 0.433, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1757.0s, [0.265, 0.206, 0.418, 0.993]: stand, listen to (a person), watch (a person).", "At 1756.0s, [0.229, 0.254, 0.397, 0.992]: stand, listen to (a person).", "From 1753.0s to 1755.0s, [0.251, 0.18, 0.388, 0.915]->[0.226, 0.208, 0.37, 0.907]->[0.216, 0.238, 0.383, 0.978]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1772.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.192, 0.224, 0.442, 0.988]: stand, talk to (e.g., self, a person, a group); [0.48, 0.084, 0.816, 1.0]: carry/hold (an object), stand, watch (a person), listen to (a person)."}]}, {"video": "rCb9-U4TArw.mp4", "clip": [905, 945], "conversations": [{"from": "human", "timestamps": 935.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.119, 0.01, 0.575, 0.995]: lift (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 945.0, "value": "What actions did the person at bbox [0.276, 0.057, 0.412, 0.649] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.276, 0.057, 0.412, 0.649]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 944.0s, [0.205, 0.271, 0.775, 0.996]: walk, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "rCb9-U4TArw.mp4", "clip": [970, 1006], "conversations": [{"from": "human", "timestamps": 1000.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.348, 0.027, 0.901, 1.0]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1006.0, "value": "What actions is the person in bbox [0.225, 0.52, 0.411, 0.995] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.225, 0.52, 0.411, 0.995]: crouch/kneel.", "At 1005.0s, [0.212, 0.396, 0.423, 0.99]: crouch/kneel, listen to (a person)."]}]}, {"video": "rCb9-U4TArw.mp4", "clip": [1014, 1054], "conversations": [{"from": "human", "timestamps": 1044.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.151, 0.217, 0.437, 1.0]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.373, 0.084, 0.681, 0.97]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1054.0, "value": "What actions did the person at
bbox [0.103, 0.168, 0.389, 0.986] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.103, 0.168, 0.389, 0.986]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1053.0s, [0.109, 0.109, 0.41, 0.993]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1052.0s, [0.102, 0.121, 0.399, 0.987]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1051.0s, [0.105, 0.174, 0.397, 0.979]: stand, talk to (e.g., self, a person, a group).", "At 1050.0s, [0.104, 0.226, 0.386, 1.0]: stand, carry/hold (an object), listen to (a person).", "At 1049.0s, [0.099, 0.176, 0.373, 0.986]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1047.0s to 1048.0s, [0.102, 0.172, 0.373, 0.998]->[0.107, 0.175, 0.371, 0.996]: stand, listen to (a person), watch (a person)."]}]}, {"video": "rCb9-U4TArw.mp4", "clip": [1123, 1199], "conversations": [{"from": "human", "timestamps": 1153.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.104, 0.079, 0.547, 0.994]: listen to (a person), stand, watch (a person); [0.462, 0.028, 0.891, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1199.0, "value": "What actions is the person in bbox [0.608, 0.177, 0.748, 0.957] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.608, 0.177, 0.748, 0.957]: stand, touch (an object), listen to (a person), watch (a person).", "From 1196.0s to 1198.0s, [0.607, 0.171, 0.756, 0.96]->[0.608, 0.143, 0.749, 0.974]->[0.606, 0.171, 0.748, 0.966]: stand, touch (an object), listen to (a person), watch (a person)."]}]}, {"video": "rCb9-U4TArw.mp4", "clip": [1131, 1259], "conversations": [{"from": "human", "timestamps": 1161.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.107, 0.078, 0.569, 1.0]: watch (a person), stand, touch (an object), talk to (e.g., self, a person, a group); [0.55, 0.043, 0.888, 1.0]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1259.0, "value": "Describe the actions of the person at bbox [0.119, 0.009, 0.886, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.119, 0.009, 0.886, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1258.0s, [0.132, 0.0, 0.835, 0.94]: stand, watch (a person).", "From 1256.0s to 1257.0s, [0.101, 0.002, 0.814, 0.962]->[0.126, 0.0, 0.809, 0.957]: stand.", "From 1254.0s to 1255.0s, [0.091, 0.0, 0.823, 0.964]->[0.071, 0.018, 0.862, 0.965]: stand, listen to (a person)."]}]}, {"video": "rCb9-U4TArw.mp4", "clip": [1216, 1247], "conversations": [{"from": "human", "timestamps": 1246.0, "value": "For the person at bbox [0.348, 0.142, 0.566, 0.973], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.348, 0.142, 0.566, 0.973]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1245.0s, [0.381, 0.127, 0.545, 0.993]: stand, carry/hold (an object), listen to (a person).", "At 1244.0s, [0.38, 0.126, 0.569, 0.989]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1242.0s to 1243.0s, [0.356, 0.118, 0.593, 0.972]->[0.348, 0.128, 0.529, 0.993]:
stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1241.0s, [0.374, 0.109, 0.542, 1.0]: stand, carry/hold (an object), listen to (a person).", "At 1240.0s, [0.31, 0.108, 0.525, 0.989]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1239.0s, [0.327, 0.115, 0.551, 0.995]: stand, carry/hold (an object), listen to (a person).", "At 1238.0s, [0.341, 0.102, 0.54, 0.983]: stand, carry/hold (an object).", "At 1237.0s, [0.34, 0.107, 0.505, 0.976]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1247.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.133, 0.117, 0.363, 0.995]: listen to (a person), stand; [0.315, 0.15, 0.519, 0.982]: carry/hold (an object), stand, watch (a person); [0.671, 0.007, 0.889, 0.938]: carry/hold (an object), stand, listen to (a person)."}]}, {"video": "rCb9-U4TArw.mp4", "clip": [1238, 1330], "conversations": [{"from": "human", "timestamps": 1268.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.379, 0.001, 0.885, 1.0]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1330.0, "value": "What actions did the person at bbox [0.142, 0.0, 0.891, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.142, 0.0, 0.891, 1.0]: sit, talk to (e.g., self, a person, a group).", "From 1327.0s to 1329.0s, [0.148, 0.007, 0.861, 0.994]->[0.146, 0.0, 0.844, 0.97]->[0.099, 0.0, 0.851, 1.0]: sit, talk to (e.g., self, a person, a group).", "At 1326.0s, [0.111, 0.0, 0.778, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1323.0s to 1325.0s, [0.261, 0.348, 0.514, 1.0]->[0.252, 0.398, 0.519, 0.977]->[0.25, 0.396, 0.516, 0.991]: sit.", "At 1322.0s, [0.254, 0.334, 0.512, 0.991]: sit, talk to (e.g., self, a person, a group).", "At 1321.0s, [0.252, 0.3, 0.511, 0.989]: sit, listen to (a person).", "At 1320.0s, [0.237, 0.289, 0.526, 0.986]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1319.0s, [0.226, 0.451, 0.463, 0.989]: sit, listen to (a person), sing to (e.g., self, a person, a group)."]}]}, {"video": "rCb9-U4TArw.mp4", "clip": [1323, 1365], "conversations": [{"from": "human", "timestamps": 1353.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.141, 0.0, 0.827, 0.976]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1365.0, "value": "Describe the actions of the person at bbox [0.097, 0.0, 0.702, 0.99] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.097, 0.0, 0.702, 0.99]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1360.0s to 1364.0s, [0.106, 0.0, 0.66, 0.986]->[0.094, 0.0, 0.692, 0.983]->[0.109, 0.0, 0.649, 0.988]->[0.102, 0.0, 0.665, 0.978]->[0.088, 0.0, 0.658, 0.99]: sit, talk to (e.g., self, a person, a group).", "At 1359.0s, [0.145, 0.0, 0.67, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "rCb9-U4TArw.mp4", "clip": [1379, 1432], "conversations": [{"from": "human", "timestamps": 1409.0, "value": "What actions did the person at bbox [0.773, 0.067, 0.89, 0.954] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, 
[0.773, 0.067, 0.89, 0.954]: stand, listen to (a person), watch (a person).", "From 1405.0s to 1408.0s, [0.673, 0.103, 0.895, 0.977]->[0.719, 0.178, 0.891, 0.983]->[0.691, 0.05, 0.887, 0.996]->[0.717, 0.0, 0.882, 0.995]: stand, listen to (a person), watch (a person).", "From 1403.0s to 1404.0s, [0.427, 0.03, 0.892, 0.976]->[0.682, 0.151, 0.891, 0.98]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1402.0s, [0.463, 0.007, 0.892, 0.982]: stand, listen to (a person), watch (a person).", "At 1401.0s, [0.493, 0.073, 0.893, 0.997]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1395.0s to 1400.0s, [0.64, 0.016, 0.89, 1.0]->[0.715, 0.032, 0.886, 0.952]->[0.687, 0.029, 0.894, 0.946]->[0.674, 0.0, 0.872, 0.998]->[0.724, 0.059, 0.882, 0.995]->[0.708, 0.084, 0.882, 0.998]: stand, listen to (a person), watch (a person).", "At 1394.0s, [0.607, 0.087, 0.882, 0.998]: walk, listen to (a person), watch (a person).", "At 1393.0s, [0.455, 0.161, 0.735, 1.0]: walk, listen to (a person)."]}, {"from": "human", "timestamps": 1432.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.106, 0.117, 0.306, 0.985]: stand, talk to (e.g., self, a person, a group); [0.679, 0.174, 0.892, 0.934]: listen to (a person), stand, watch (a person)."}]}, {"video": "rCb9-U4TArw.mp4", "clip": [1349, 1459], "conversations": [{"from": "human", "timestamps": 1379.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.485, 0.331, 0.587, 0.74]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1459.0, "value": "What actions is the person in bbox [0.124, 0.077, 0.682, 0.995] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.124, 0.077, 0.682, 0.995]: stand.", "From 1457.0s to 1458.0s, [0.143, 0.155, 0.699, 0.999]->[0.136, 0.172, 0.698, 0.977]: stand, watch (a person).", "At 1456.0s, [0.509, 0.329, 0.597, 0.73]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1455.0s, [0.511, 0.324, 0.598, 0.713]: stand, watch (a person).", "At 1454.0s, [0.512, 0.329, 0.605, 0.719]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1453.0s, [0.518, 0.307, 0.609, 0.719]: stand, watch (a person).", "From 1446.0s to 1452.0s, [0.175, 0.031, 0.817, 0.986]->[0.164, 0.001, 0.791, 0.999]->[0.105, 0.0, 0.69, 0.984]->[0.119, 0.0, 0.623, 0.991]->[0.103, 0.029, 0.661, 0.982]->[0.126, 0.0, 0.716, 1.0]->[0.519, 0.307, 0.608, 0.709]: stand.", "At 1444.0s, [0.092, 0.0, 0.832, 1.0]: walk.", "From 1442.0s to 1443.0s, [0.138, 0.0, 0.895, 1.0]->[0.086, 0.004, 0.698, 0.987]: stand."]}]}, {"video": "rCb9-U4TArw.mp4", "clip": [1420, 1465], "conversations": [{"from": "human", "timestamps": 1450.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.103, 0.029, 0.661, 0.982]?"}, {"from": "gpt", "value": ["In the current frame, [0.103, 0.029, 0.661, 0.982]: stand.", "From 1446.0s to 1449.0s, [0.175, 0.031, 0.817, 0.986]->[0.164, 0.001, 0.791, 0.999]->[0.105, 0.0, 0.69, 0.984]->[0.119, 0.0, 0.623, 0.991]: stand.", "At 1444.0s, [0.092, 0.0, 0.832, 1.0]: walk.", "From 1442.0s to 1443.0s, [0.138, 0.0, 0.895, 1.0]->[0.086, 0.004, 0.698, 0.987]: stand."]}, {"from": "human", "timestamps": 1465.0, "value": "Describe the
locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.185, 0.04, 0.891, 0.946]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "rCb9-U4TArw.mp4", "clip": [1478, 1529], "conversations": [{"from": "human", "timestamps": 1508.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.07, 0.0, 0.869, 0.966]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1529.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.149, 0.067, 0.458, 0.902]?"}, {"from": "gpt", "value": ["At Now, [0.149, 0.067, 0.458, 0.902]: walk.", "At 1528.0s, [0.08, 0.003, 0.802, 0.973]: stand.", "From 1526.0s to 1527.0s, [0.127, 0.0, 0.728, 1.0]->[0.092, 0.065, 0.575, 0.994]: walk, carry/hold (an object)."]}]}, {"video": "rCb9-U4TArw.mp4", "clip": [1473, 1559], "conversations": [{"from": "human", "timestamps": 1503.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.096, 0.0, 0.852, 0.988]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1559.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.302, 0.005, 0.757, 0.976]?"}, {"from": "gpt", "value": ["At Now, [0.302, 0.005, 0.757, 0.976]: stand.", "From 1556.0s to 1558.0s, [0.086, 0.005, 0.895, 0.994]->[0.143, 0.005, 0.775, 0.972]->[0.353, 0.012, 0.906, 0.983]: walk."]}]}, {"video": "rCb9-U4TArw.mp4", "clip": [1600, 1662], "conversations": [{"from": "human", "timestamps": 1630.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.353, 0.147, 0.578, 0.981]: carry/hold (an object), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1662.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.174, 0.08, 0.388, 1.0]?"}, {"from": "gpt", "value": ["In the current frame, [0.174, 0.08, 0.388, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1661.0s, [0.171, 0.096, 0.368, 1.0]: stand, listen to (a person), watch (a person).", "From 1659.0s to 1660.0s, [0.147, 0.094, 0.358, 1.0]->[0.161, 0.115, 0.358, 0.994]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1658.0s, [0.115, 0.065, 0.354, 0.994]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1656.0s to 1657.0s, [0.188, 0.106, 0.371, 0.998]->[0.184, 0.097, 0.408, 0.989]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "rCb9-U4TArw.mp4", "clip": [1594, 1712], "conversations": [{"from": "human", "timestamps": 1624.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.352, 0.3, 0.44, 0.695]: carry/hold (an object), listen to (a person), watch (a person), walk; [0.389, 0.257, 0.548, 0.947]: carry/hold (an object), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1712.0, "value": "What actions did the person at bbox [0.115, 0.02, 0.921, 0.998] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.115, 0.02, 0.921, 0.998]: stand.", "From 1707.0s to 1711.0s, [0.367, 0.097, 
0.916, 0.992]->[0.344, 0.028, 0.899, 0.992]->[0.243, 0.071, 0.873, 0.997]->[0.31, 0.02, 0.91, 0.983]->[0.19, 0.046, 0.912, 0.962]: stand."]}]}, {"video": "rCb9-U4TArw.mp4", "clip": [1669, 1707], "conversations": [{"from": "human", "timestamps": 1699.0, "value": "Describe the actions of the person at bbox [0.248, 0.038, 0.879, 0.987] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.248, 0.038, 0.879, 0.987]: sit.", "At 1698.0s, [0.263, 0.025, 0.869, 0.994]: sit."]}, {"from": "human", "timestamps": 1707.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.367, 0.097, 0.916, 0.992]: stand."}]}, {"video": "rCb9-U4TArw.mp4", "clip": [1711, 1744], "conversations": [{"from": "human", "timestamps": 1741.0, "value": "What actions did the person at bbox [0.278, 0.01, 0.878, 0.983] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.278, 0.01, 0.878, 0.983]: stand, talk to (e.g., self, a person, a group).", "At 1740.0s, [0.197, 0.005, 0.872, 0.986]: sit, stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1744.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.302, 0.001, 0.89, 0.984]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "rFgb2ECMcrY.mkv", "clip": [923, 1008], "conversations": [{"from": "human", "timestamps": 953.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.035, 0.32, 0.411, 0.998]: listen to (a person), sit; [0.325, 0.249, 0.526, 0.848]: listen to (a person), stand, watch (a person); [0.517, 0.403, 0.978, 1.0]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1008.0, "value": "Describe the actions of the person at bbox [0.364, 0.068, 0.583, 0.589] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.364, 0.068, 0.583, 0.589]: bend/bow (at the waist), listen to (a person), watch (a person).", "At 1007.0s, [0.367, 0.072, 0.578, 0.584]: bend/bow (at the waist), listen to (a person), watch (a person).", "At 1006.0s, [0.357, 0.073, 0.566, 0.563]: listen to (a person), watch (a person).", "At 1005.0s, [0.342, 0.08, 0.558, 0.531]: stand."]}]}, {"video": "rFgb2ECMcrY.mkv", "clip": [1033, 1079], "conversations": [{"from": "human", "timestamps": 1063.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.004, 0.38, 0.343, 0.985]: watch (a person), sit; [0.335, 0.081, 0.606, 0.994]: carry/hold (an object), stand, watch (a person); [0.6, 0.293, 0.741, 0.979]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1079.0, "value": "What actions is the person in bbox [0.451, 0.09, 0.946, 0.986] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.451, 0.09, 0.946, 0.986]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1078.0s, [0.324, 0.107, 1.0, 0.969]: sit, carry/hold (an object)."]}]}, {"video": "rFgb2ECMcrY.mkv", "clip": [1021, 1094], "conversations": [{"from": "human", "timestamps": 1051.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame,
[0.071, 0.544, 0.282, 0.881]: watch (a person), sit; [0.087, 0.182, 0.316, 0.745]: carry/hold (an object), stand; [0.279, 0.477, 0.416, 0.834]: walk; [0.68, 0.568, 0.914, 0.979]: carry/hold (an object), sit, watch (a person)."}, {"from": "human", "timestamps": 1094.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.509, 0.365, 0.691, 0.982]?"}, {"from": "gpt", "value": ["In the current frame, [0.509, 0.365, 0.691, 0.982]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1093.0s, [0.503, 0.394, 0.683, 0.961]: stand, watch (a person)."]}]}, {"video": "rFgb2ECMcrY.mkv", "clip": [1051, 1091], "conversations": [{"from": "human", "timestamps": 1081.0, "value": "Describe the actions of the person at bbox [0.256, 0.122, 0.985, 0.971] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.256, 0.122, 0.985, 0.971]: sit, put down, talk to (e.g., self, a person, a group).", "At 1080.0s, [0.145, 0.139, 0.997, 0.981]: sit, carry/hold (an object), put down, talk to (e.g., self, a person, a group).", "At 1079.0s, [0.451, 0.09, 0.946, 0.986]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1078.0s, [0.324, 0.107, 1.0, 0.969]: sit, carry/hold (an object)."]}, {"from": "human", "timestamps": 1091.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.004, 0.02, 0.635, 0.995]: stand."}]}, {"video": "rFgb2ECMcrY.mkv", "clip": [1124, 1190], "conversations": [{"from": "human", "timestamps": 1154.0, "value": "For the person at bbox [0.552, 0.239, 0.691, 0.988], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.552, 0.239, 0.691, 0.988]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1152.0s to 1153.0s, [0.624, 0.236, 0.756, 0.988]->[0.582, 0.225, 0.715, 0.986]: walk, carry/hold (an object), watch (a person)."]}, {"from": "human", "timestamps": 1190.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.046, 0.245, 0.364, 0.99]: listen to (a person), stand; [0.435, 0.033, 0.999, 1.0]: carry/hold (an object), watch (a person), talk to (e.g., self, a person, a group), walk."}]}, {"video": "rFgb2ECMcrY.mkv", "clip": [1123, 1205], "conversations": [{"from": "human", "timestamps": 1153.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.001, 0.329, 0.067, 0.782]: carry/hold (an object), stand, watch (a person); [0.166, 0.38, 0.236, 0.747]: watch (a person), stand; [0.265, 0.354, 0.339, 0.755]: watch (a person), stand; [0.355, 0.279, 0.496, 0.967]: watch (a person), walk; [0.582, 0.225, 0.715, 0.986]: carry/hold (an object), watch (a person), walk; [0.801, 0.185, 0.953, 0.975]: carry/hold (an object), stand, listen to (a person)."}, {"from": "human", "timestamps": 1205.0, "value": "For the person at bbox [0.117, 0.05, 0.568, 0.981], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.117, 0.05, 0.568, 0.981]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1203.0s to 1204.0s, [0.007, 0.069, 0.376, 0.991]->[0.031, 0.079, 0.458, 0.988]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "rFgb2ECMcrY.mkv", "clip": [1221, 1259], 
"conversations": [{"from": "human", "timestamps": 1251.0, "value": "Describe the actions of the person at bbox [0.317, 0.066, 0.98, 0.98] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.317, 0.066, 0.98, 0.98]: stand, listen to (a person), watch (a person).", "At 1250.0s, [0.311, 0.068, 0.973, 0.975]: stand, listen to (a person), watch (a person).", "From 1247.0s to 1249.0s, [0.684, 0.014, 1.0, 0.994]->[0.677, 0.012, 1.0, 0.988]->[0.701, 0.015, 1.0, 0.991]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1259.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.016, 0.039, 0.702, 0.978]: listen to (a person), stand, watch (a person); [0.468, 0.059, 0.882, 0.987]: carry/hold (an object), stand, listen to (a person)."}]}, {"video": "rFgb2ECMcrY.mkv", "clip": [1239, 1299], "conversations": [{"from": "human", "timestamps": 1269.0, "value": "What actions did the person at bbox [0.084, 0.055, 0.588, 0.993] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.084, 0.055, 0.588, 0.993]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1265.0s to 1268.0s, [0.032, 0.084, 0.563, 0.994]->[0.083, 0.073, 0.567, 1.0]->[0.074, 0.063, 0.623, 0.992]->[0.058, 0.038, 0.59, 0.999]: stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1299.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.181, 0.077, 0.629, 0.936]: carry/hold (an object), sit."}]}, {"video": "rFgb2ECMcrY.mkv", "clip": [1240, 1350], "conversations": [{"from": "human", "timestamps": 1270.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.019, 0.382, 0.23, 0.988]: watch (a person), stand; [0.057, 0.067, 0.573, 0.993]: carry/hold (an object), stand; [0.559, 0.175, 0.892, 0.98]: watch (a person), stand."}, {"from": "human", "timestamps": 1350.0, "value": "Describe the actions of the person at bbox [0.377, 0.502, 0.46, 0.874] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.377, 0.502, 0.46, 0.874]: touch (an object).", "From 1348.0s to 1349.0s, [0.306, 0.497, 0.401, 0.869]->[0.331, 0.495, 0.436, 0.871]: stand, carry/hold (an object).", "At 1347.0s, [0.31, 0.503, 0.386, 0.867]: stand, carry/hold (an object), watch (a person)."]}]}, {"video": "rFgb2ECMcrY.mkv", "clip": [1406, 1442], "conversations": [{"from": "human", "timestamps": 1436.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.207, 0.082, 0.876, 0.977]: carry/hold (an object), sit."}, {"from": "human", "timestamps": 1442.0, "value": "What actions is the person in bbox [0.016, 0.046, 0.866, 0.994] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.016, 0.046, 0.866, 0.994]: sit, carry/hold (an object).", "At 1441.0s, [0.032, 0.064, 0.925, 0.991]: sit, carry/hold (an object).", "At 1440.0s, [0.144, 0.088, 0.733, 1.0]: sit, carry/hold (an object), touch (an object)."]}]}, {"video": "rFgb2ECMcrY.mkv", "clip": [1417, 1471], "conversations": [{"from": "human", "timestamps": 1447.0, "value": "For the 
current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.287, 0.055, 0.904, 0.965]: carry/hold (an object), sit."}, {"from": "human", "timestamps": 1471.0, "value": "For the person at bbox [0.268, 0.03, 0.969, 0.984], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.268, 0.03, 0.969, 0.984]: stand, talk to (e.g., self, a person, a group).", "At 1470.0s, [0.314, 0.013, 0.864, 0.955]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "rFgb2ECMcrY.mkv", "clip": [1549, 1589], "conversations": [{"from": "human", "timestamps": 1579.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.465, 0.124, 0.709, 0.755]?"}, {"from": "gpt", "value": ["At Now, [0.465, 0.124, 0.709, 0.755]: stand, carry/hold (an object).", "At 1578.0s, [0.227, 0.081, 0.443, 0.891]: stand, carry/hold (an object), watch (a person)."]}, {"from": "human", "timestamps": 1589.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.124, 0.745, 0.375, 0.876]: lie/sleep."}]}, {"video": "rFgb2ECMcrY.mkv", "clip": [1663, 1713], "conversations": [{"from": "human", "timestamps": 1693.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.4, 0.415, 0.465, 0.772]: carry/hold (an object), walk; [0.477, 0.425, 0.533, 0.721]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1713.0, "value": "For the person at bbox [0.031, 0.117, 0.152, 0.522], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.031, 0.117, 0.152, 0.522]: stand, carry/hold (an object).", "From 1710.0s to 1712.0s, [0.032, 0.118, 0.15, 0.477]->[0.03, 0.113, 0.151, 0.499]->[0.031, 0.115, 0.152, 0.512]: stand, carry/hold (an object), watch (a person)."]}]}, {"video": "rFgb2ECMcrY.mkv", "clip": [1732, 1763], "conversations": [{"from": "human", "timestamps": 1762.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.261, 0.113, 0.551, 0.978]?"}, {"from": "gpt", "value": ["At Now, [0.261, 0.113, 0.551, 0.978]: stand, carry/hold (an object).", "From 1757.0s to 1761.0s, [0.399, 0.241, 0.56, 0.971]->[0.399, 0.23, 0.564, 0.984]->[0.358, 0.211, 0.535, 0.983]->[0.283, 0.185, 0.516, 1.0]->[0.273, 0.168, 0.558, 0.988]: stand, carry/hold (an object).", "From 1750.0s to 1756.0s, [0.335, 0.241, 0.481, 0.902]->[0.31, 0.208, 0.524, 0.927]->[0.234, 0.197, 0.452, 0.952]->[0.093, 0.193, 0.336, 0.94]->[0.135, 0.222, 0.322, 0.965]->[0.215, 0.256, 0.415, 0.964]->[0.324, 0.258, 0.51, 0.928]: walk, carry/hold (an object).", "From 1746.0s to 1749.0s, [0.343, 0.403, 0.448, 0.835]->[0.321, 0.417, 0.427, 0.851]->[0.313, 0.462, 0.438, 0.858]->[0.304, 0.41, 0.452, 0.856]: bend/bow (at the waist), carry/hold (an object).", "From 1744.0s to 1745.0s, [0.37, 0.542, 0.475, 0.873]->[0.359, 0.462, 0.483, 0.868]: walk, carry/hold (an object).", "At 1743.0s, [0.375, 0.642, 0.479, 0.857]: bend/bow (at the waist), carry/hold (an object).", "At 1742.0s, [0.302, 0.672, 0.405, 0.873]: walk, carry/hold (an object)."]}, {"from": "human", "timestamps": 1763.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.424, 0.0, 0.651, 0.994]: carry/hold (an object), walk."}]}, {"video": "rJKeqfTlAeY.mkv", "clip": [884, 989], "conversations": [{"from": 
"human", "timestamps": 914.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.133, 0.126, 0.704, 0.973]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.629, 0.059, 1.0, 0.929]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 989.0, "value": "Describe the actions of the person at bbox [0.133, 0.053, 0.892, 0.97] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.133, 0.053, 0.892, 0.97]: stand.", "From 987.0s to 988.0s, [0.146, 0.043, 0.928, 0.983]->[0.169, 0.05, 0.898, 0.981]: sit, talk to (e.g., self, a person, a group).", "At 986.0s, [0.189, 0.088, 0.891, 0.966]: stand, talk to (e.g., self, a person, a group).", "At 985.0s, [0.138, 0.045, 0.878, 0.972]: sit, listen to (a person).", "At 984.0s, [0.114, 0.068, 0.895, 0.969]: stand, listen to (a person), watch (a person)."]}]}, {"video": "rJKeqfTlAeY.mkv", "clip": [991, 1033], "conversations": [{"from": "human", "timestamps": 1021.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.112, 0.125, 0.37, 0.975]?"}, {"from": "gpt", "value": ["In the current frame, [0.112, 0.125, 0.37, 0.975]: walk, listen to (a person).", "At 1020.0s, [0.115, 0.151, 0.403, 0.968]: run/jog, talk to (e.g., self, a person, a group).", "At 1019.0s, [0.135, 0.113, 0.426, 0.948]: walk, listen to (a person).", "From 1017.0s to 1018.0s, [0.17, 0.202, 0.383, 0.961]->[0.204, 0.164, 0.42, 0.966]: walk, talk to (e.g., self, a person, a group).", "At 1016.0s, [0.195, 0.203, 0.368, 0.962]: run/jog, talk to (e.g., self, a person, a group).", "From 1014.0s to 1015.0s, [0.247, 0.228, 0.372, 0.894]->[0.172, 0.232, 0.322, 0.961]: walk.", "At 1013.0s, [0.294, 0.253, 0.419, 0.85]: walk, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1033.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.12, 0.167, 0.388, 0.956]: listen to (a person), stand; [0.381, 0.104, 0.647, 0.971]: watch (a person), talk to (e.g., self, a person, a group), walk."}]}, {"video": "rJKeqfTlAeY.mkv", "clip": [1028, 1100], "conversations": [{"from": "human", "timestamps": 1058.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.091, 0.153, 0.415, 0.981]: watch (a person), talk to (e.g., self, a person, a group), walk; [0.383, 0.094, 0.685, 0.972]: listen to (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1100.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.384, 0.125, 0.818, 0.956]?"}, {"from": "gpt", "value": ["In the current frame, [0.384, 0.125, 0.818, 0.956]: stand, listen to (a person).", "From 1096.0s to 1099.0s, [0.388, 0.12, 0.815, 0.953]->[0.376, 0.12, 0.869, 0.949]->[0.398, 0.125, 0.845, 0.961]->[0.392, 0.14, 0.83, 0.961]: stand, listen to (a person).", "At 1095.0s, [0.376, 0.121, 0.847, 0.962]: stand, listen to (a person), talk to (e.g., self, a person, a group)."]}]}, {"video": "rJKeqfTlAeY.mkv", "clip": [1057, 1102], "conversations": [{"from": "human", "timestamps": 1087.0, "value": "For the person at bbox [0.024, 0.17, 0.425, 0.973], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.024, 0.17, 0.425, 0.973]: stand, listen to (a person), watch (a 
person).", "From 1083.0s to 1086.0s, [0.066, 0.155, 0.421, 0.972]->[0.063, 0.157, 0.405, 0.978]->[0.016, 0.203, 0.379, 0.977]->[0.042, 0.211, 0.455, 0.977]: stand, listen to (a person), watch (a person).", "At 1082.0s, [0.085, 0.143, 0.418, 1.0]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1077.0s to 1081.0s, [0.077, 0.12, 0.409, 0.985]->[0.143, 0.147, 0.451, 0.976]->[0.125, 0.153, 0.424, 0.971]->[0.116, 0.146, 0.426, 0.969]->[0.153, 0.171, 0.518, 0.957]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1069.0s to 1076.0s, [0.077, 0.133, 0.407, 0.977]->[0.064, 0.153, 0.407, 0.973]->[0.065, 0.142, 0.398, 0.978]->[0.065, 0.144, 0.397, 0.985]->[0.067, 0.151, 0.411, 0.983]->[0.07, 0.145, 0.412, 0.981]->[0.063, 0.145, 0.379, 0.971]->[0.061, 0.14, 0.378, 0.979]: stand, listen to (a person), watch (a person).", "From 1067.0s to 1068.0s, [0.112, 0.151, 0.41, 0.987]->[0.104, 0.152, 0.429, 0.969]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1066.0s, [0.087, 0.186, 0.429, 0.968]: stand, talk to (e.g., self, a person, a group).", "From 1059.0s to 1065.0s, [0.1, 0.15, 0.414, 0.972]->[0.128, 0.166, 0.431, 0.981]->[0.125, 0.145, 0.435, 0.971]->[0.141, 0.169, 0.438, 0.966]->[0.118, 0.156, 0.42, 0.98]->[0.135, 0.174, 0.444, 0.965]->[0.126, 0.143, 0.437, 0.974]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1058.0s, [0.091, 0.153, 0.415, 0.981]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1057.0s, [0.098, 0.145, 0.432, 0.962]: walk, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1102.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.071, 0.178, 0.452, 0.963]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.399, 0.126, 0.814, 0.947]: listen to (a person), stand."}]}, {"video": "rJKeqfTlAeY.mkv", "clip": [1133, 1198], "conversations": [{"from": "human", "timestamps": 1163.0, "value": "What actions is the person in bbox [0.378, 0.285, 0.575, 0.832] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.378, 0.285, 0.575, 0.832]: walk.", "At 1162.0s, [0.398, 0.277, 0.545, 0.915]: walk, grab (a person)."]}, {"from": "human", "timestamps": 1198.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.069, 0.09, 0.793, 0.967]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "rJKeqfTlAeY.mkv", "clip": [1126, 1232], "conversations": [{"from": "human", "timestamps": 1156.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.076, 0.267, 0.868, 0.963]: grab (a person), walk."}, {"from": "human", "timestamps": 1232.0, "value": "What actions is the person in bbox [0.065, 0.111, 0.748, 0.954] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.065, 0.111, 0.748, 0.954]: stand, talk to (e.g., self, a person, a group).", "From 1230.0s to 1231.0s, [0.038, 0.111, 0.797, 0.94]->[0.06, 0.115, 0.821, 0.96]: sit, talk to (e.g., self, a person, a group).", "At 1229.0s, [0.046, 0.12, 0.799, 0.943]: sit, stand, talk to (e.g., self, a person, a group).", "At 1228.0s, [0.045, 0.116, 
0.776, 0.954]: sit, talk to (e.g., self, a person, a group).", "At 1227.0s, [0.054, 0.12, 0.726, 0.959]: sit.", "At 1226.0s, [0.053, 0.115, 0.786, 0.957]: stand, listen to (a person)."]}]}, {"video": "rJKeqfTlAeY.mkv", "clip": [1170, 1258], "conversations": [{"from": "human", "timestamps": 1200.0, "value": "What actions is the person in bbox [0.275, 0.091, 0.888, 0.974] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.275, 0.091, 0.888, 0.974]: stand, talk to (e.g., self, a person, a group).", "At 1199.0s, [0.276, 0.098, 0.908, 0.964]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1258.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.282, 0.073, 0.916, 0.961]: listen to (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "rJKeqfTlAeY.mkv", "clip": [1247, 1298], "conversations": [{"from": "human", "timestamps": 1277.0, "value": "Describe the actions of the person at bbox [0.074, 0.115, 0.825, 0.959] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.074, 0.115, 0.825, 0.959]: stand, talk to (e.g., self, a person, a group).", "From 1274.0s to 1276.0s, [0.068, 0.118, 0.833, 0.958]->[0.066, 0.117, 0.821, 0.961]->[0.06, 0.132, 0.835, 0.961]: sit, talk to (e.g., self, a person, a group).", "At 1273.0s, [0.062, 0.137, 0.825, 0.956]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1272.0s, [0.048, 0.122, 0.811, 0.951]: sit, stand, talk to (e.g., self, a person, a group).", "At 1271.0s, [0.041, 0.112, 0.794, 0.967]: sit.", "At 1269.0s, [0.071, 0.142, 0.835, 0.96]: stand, carry/hold (an object)."]}, {"from": "human", "timestamps": 1298.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.252, 0.163, 0.546, 0.953]: talk to (e.g., self, a person, a group), walk."}]}, {"video": "rJKeqfTlAeY.mkv", "clip": [1307, 1350], "conversations": [{"from": "human", "timestamps": 1337.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.02, 0.262, 0.085, 0.635]: hand wave, stand, watch (a person), talk to (e.g., self, a person, a group); [0.073, 0.259, 0.123, 0.59]: stand; [0.112, 0.273, 0.151, 0.561]: hand wave, stand, watch (a person), sing to (e.g., self, a person, a group); [0.144, 0.246, 0.236, 0.749]: hand wave, stand, watch (a person); [0.214, 0.276, 0.273, 0.713]: hand wave, stand; [0.567, 0.237, 0.632, 0.63]: listen to (a person), stand, watch (a person); [0.582, 0.096, 0.941, 0.961]: hand wave, stand, watch (a person), listen to (a person); [0.649, 0.225, 0.729, 0.495]: hand wave, stand, watch (a person)."}, {"from": "human", "timestamps": 1350.0, "value": "What actions is the person in bbox [0.338, 0.05, 0.946, 0.963] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.338, 0.05, 0.946, 0.963]: stand.", "From 1348.0s to 1349.0s, [0.378, 0.064, 0.917, 0.951]->[0.364, 0.043, 0.984, 0.951]: stand.", "At 1347.0s, [0.37, 0.057, 0.949, 0.952]: stand, talk to (e.g., self, a person, a group).", "From 1344.0s to 1346.0s, [0.364, 0.047, 0.968, 0.965]->[0.369, 0.051, 0.958, 0.995]->[0.398, 0.057, 0.944, 0.949]: stand."]}]}, {"video": "rJKeqfTlAeY.mkv", "clip": [1425, 1456], "conversations": [{"from": "human",
"timestamps": 1455.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.06, 0.105, 0.569, 0.963]: carry/hold (an object), sit."}, {"from": "human", "timestamps": 1456.0, "value": "Describe the actions of the person at bbox [0.058, 0.106, 0.582, 0.968] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.058, 0.106, 0.582, 0.968]: sit, carry/hold (an object), watch (e.g., TV).", "At 1455.0s, [0.06, 0.105, 0.569, 0.963]: sit, carry/hold (an object)."]}]}, {"video": "rJKeqfTlAeY.mkv", "clip": [1472, 1539], "conversations": [{"from": "human", "timestamps": 1502.0, "value": "What actions did the person at bbox [0.058, 0.104, 0.639, 0.965] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.058, 0.104, 0.639, 0.965]: sit, talk to (e.g., self, a person, a group).", "From 1500.0s to 1501.0s, [0.072, 0.104, 0.638, 0.969]->[0.064, 0.095, 0.635, 0.969]: sit, talk to (e.g., self, a person, a group).", "From 1497.0s to 1499.0s, [0.064, 0.116, 0.599, 0.979]->[0.072, 0.1, 0.611, 0.973]->[0.061, 0.104, 0.626, 0.957]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1495.0s to 1496.0s, [0.067, 0.12, 0.605, 0.975]->[0.06, 0.106, 0.592, 0.97]: sit, carry/hold (an object).", "At 1494.0s, [0.069, 0.092, 0.605, 0.969]: sit, stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1493.0s, [0.071, 0.09, 0.599, 0.971]: sit, carry/hold (an object), watch (e.g., TV), talk to (e.g., self, a person, a group).", "From 1484.0s to 1492.0s, [0.073, 0.112, 0.605, 0.971]->[0.079, 0.096, 0.61, 0.975]->[0.075, 0.091, 0.632, 0.972]->[0.073, 0.093, 0.61, 0.973]->[0.067, 0.111, 0.595, 0.957]->[0.058, 0.096, 0.606, 0.976]->[0.066, 0.103, 0.589, 0.968]->[0.074, 0.098, 0.62, 0.971]->[0.067, 0.098, 0.613, 0.966]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1483.0s, [0.067, 0.107, 0.632, 0.973]: sit, carry/hold (an object), watch (e.g., TV), talk to (e.g., self, a person, a group).", "From 1481.0s to 1482.0s, [0.074, 0.108, 0.62, 0.972]->[0.069, 0.105, 0.601, 0.96]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1479.0s to 1480.0s, [0.064, 0.108, 0.604, 0.971]->[0.073, 0.098, 0.596, 0.974]: sit, carry/hold (an object).", "From 1477.0s to 1478.0s, [0.071, 0.096, 0.629, 0.972]->[0.068, 0.102, 0.604, 0.964]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1476.0s, [0.065, 0.101, 0.588, 0.967]: sit, carry/hold (an object), watch (e.g., TV), talk to (e.g., self, a person, a group).", "From 1458.0s to 1475.0s, [0.066, 0.108, 0.61, 0.979]->[0.048, 0.101, 0.595, 0.967]->[0.064, 0.093, 0.591, 0.977]->[0.06, 0.092, 0.62, 0.98]->[0.058, 0.106, 0.602, 0.973]->[0.063, 0.099, 0.611, 0.97]->[0.065, 0.093, 0.603, 0.963]->[0.064, 0.116, 0.598, 0.972]->[0.069, 0.102, 0.613, 0.968]->[0.065, 0.116, 0.586, 0.973]->[0.06, 0.118, 0.587, 0.971]->[0.071, 0.116, 0.615, 0.973]->[0.071, 0.115, 0.601, 0.972]->[0.065, 0.115, 0.602, 0.967]->[0.06, 0.108, 0.571, 0.973]->[0.06, 0.107, 0.598, 0.968]->[0.065, 0.108, 0.587, 0.964]->[0.089, 0.107, 0.589, 0.964]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1539.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.167, 0.021, 0.95, 0.975]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": 
"rJKeqfTlAeY.mkv", "clip": [1505, 1611], "conversations": [{"from": "human", "timestamps": 1535.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.155, 0.069, 0.966, 0.996]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1611.0, "value": "What actions did the person at bbox [0.169, 0.08, 0.983, 0.958] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.169, 0.08, 0.983, 0.958]: stand, talk to (e.g., self, a person, a group).", "At 1610.0s, [0.175, 0.079, 0.973, 0.955]: stand, talk to (e.g., self, a person, a group).", "At 1609.0s, [0.188, 0.069, 0.986, 0.959]: sit, stand, talk to (e.g., self, a person, a group).", "From 1606.0s to 1608.0s, [0.497, 0.048, 0.999, 0.955]->[0.151, 0.058, 1.0, 0.972]->[0.187, 0.061, 0.973, 0.941]: stand, talk to (e.g., self, a person, a group).", "At 1605.0s, [0.511, 0.044, 0.999, 0.962]: stand, listen to (a person), watch (a person).", "From 1600.0s to 1604.0s, [0.525, 0.05, 0.995, 0.971]->[0.525, 0.035, 0.996, 0.975]->[0.514, 0.047, 0.991, 0.968]->[0.531, 0.041, 0.996, 0.979]->[0.54, 0.045, 0.994, 0.973]: stand, touch (an object), listen to (a person), watch (a person).", "At 1599.0s, [0.496, 0.044, 0.996, 0.967]: stand, listen to (a person), watch (a person).", "At 1598.0s, [0.511, 0.054, 0.993, 0.973]: stand, touch (an object), watch (a person).", "From 1596.0s to 1597.0s, [0.482, 0.033, 0.991, 0.958]->[0.523, 0.037, 0.991, 0.959]: stand, watch (a person).", "At 1595.0s, [0.468, 0.026, 0.993, 0.974]: stand, touch (an object), watch (a person).", "At 1594.0s, [0.475, 0.045, 0.992, 0.961]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1593.0s, [0.507, 0.05, 0.992, 0.963]: stand, listen to (a person), watch (a person).", "At 1592.0s, [0.189, 0.06, 0.963, 0.973]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1591.0s, [0.196, 0.07, 1.0, 0.973]: sit, stand, talk to (e.g., self, a person, a group).", "From 1587.0s to 1590.0s, [0.541, 0.052, 0.995, 0.973]->[0.539, 0.039, 0.984, 0.962]->[0.455, 0.059, 0.991, 0.962]->[0.144, 0.063, 0.987, 0.936]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1580.0s to 1586.0s, [0.715, 0.044, 0.999, 0.971]->[0.695, 0.055, 0.993, 0.94]->[0.681, 0.049, 1.0, 0.963]->[0.661, 0.049, 1.0, 0.961]->[0.657, 0.055, 1.0, 0.964]->[0.638, 0.048, 1.0, 0.971]->[0.638, 0.032, 1.0, 0.981]: stand, listen to (a person), watch (a person).", "At 1579.0s, [0.665, 0.048, 1.0, 0.957]: stand, watch (a person)."]}]}, {"video": "rJKeqfTlAeY.mkv", "clip": [1530, 1614], "conversations": [{"from": "human", "timestamps": 1560.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.217, 0.045, 0.946, 0.965]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1614.0, "value": "For the person at bbox [0.033, 0.062, 0.752, 0.975], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.033, 0.062, 0.752, 0.975]: stand, talk to (e.g., self, a person, a group).", "At 1613.0s, [0.03, 0.049, 0.741, 0.968]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "rJKeqfTlAeY.mkv", "clip": [1618, 1716], "conversations": [{"from": "human", "timestamps": 1648.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": 
"gpt", "value": "At now, [0.155, 0.079, 0.984, 0.959]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1716.0, "value": "What actions is the person in bbox [0.014, 0.292, 0.414, 0.971] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.014, 0.292, 0.414, 0.971]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1715.0s, [0.014, 0.283, 0.442, 0.965]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1713.0s to 1714.0s, [0.006, 0.306, 0.369, 0.992]->[0.021, 0.285, 0.431, 0.975]: sit, listen to (a person), watch (a person).", "At 1712.0s, [0.008, 0.384, 0.314, 0.961]: sit, listen to (a person).", "At 1711.0s, [0.011, 0.502, 0.137, 0.977]: sit."]}]}, {"video": "rJKeqfTlAeY.mkv", "clip": [1677, 1718], "conversations": [{"from": "human", "timestamps": 1707.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.31, 0.174, 0.508, 0.941]?"}, {"from": "gpt", "value": ["At Now, [0.31, 0.174, 0.508, 0.941]: walk, open (e.g., a window, a car door).", "From 1705.0s to 1706.0s, [0.388, 0.259, 0.635, 0.958]->[0.398, 0.283, 0.607, 0.97]: walk, open (e.g., a window, a car door)."]}, {"from": "human", "timestamps": 1718.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.07, 0.221, 0.503, 0.932]: carry/hold (an object), sit, talk to (e.g., self, a person, a group), listen to (a person); [0.44, 0.05, 0.989, 0.976]: watch (a person), listen to (a person), give/serve (an object) to (a person), carry/hold (an object), stand."}]}, {"video": "rJKeqfTlAeY.mkv", "clip": [1719, 1769], "conversations": [{"from": "human", "timestamps": 1749.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.038, 0.03, 0.94, 0.945]?"}, {"from": "gpt", "value": ["In the current frame, [0.038, 0.03, 0.94, 0.945]: sit, talk to (e.g., self, a person, a group).", "At 1748.0s, [0.0, 0.029, 0.949, 0.959]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1769.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.075, 0.064, 0.984, 0.952]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "rUYsoIIE37A.mp4", "clip": [889, 948], "conversations": [{"from": "human", "timestamps": 919.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.282, 0.262, 0.442, 0.807]?"}, {"from": "gpt", "value": ["In the current frame, [0.282, 0.262, 0.442, 0.807]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 917.0s to 918.0s, [0.3, 0.258, 0.454, 0.731]->[0.248, 0.269, 0.454, 0.758]: stand, listen to (a person), watch (a person).", "From 915.0s to 916.0s, [0.299, 0.263, 0.461, 0.737]->[0.296, 0.263, 0.467, 0.731]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 908.0s to 914.0s, [0.23, 0.258, 0.429, 0.749]->[0.243, 0.271, 0.445, 0.755]->[0.266, 0.269, 0.458, 0.738]->[0.238, 0.261, 0.443, 0.747]->[0.258, 0.27, 0.459, 0.744]->[0.233, 0.261, 0.459, 0.733]->[0.256, 0.257, 0.45, 0.77]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 907.0s, [0.222, 0.282, 0.435, 0.762]: stand, talk to (e.g., self, a person, a group).", "From 905.0s to 906.0s, [0.14, 
0.272, 0.3, 0.803]->[0.261, 0.254, 0.406, 0.776]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 948.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.013, 0.129, 0.52, 0.874]: listen to (a person), sit, watch (a person); [0.419, 0.19, 0.996, 0.862]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "rUYsoIIE37A.mp4", "clip": [1008, 1059], "conversations": [{"from": "human", "timestamps": 1038.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.207, 0.378, 0.375, 0.869]: carry/hold (an object), watch (a person), walk; [0.301, 0.463, 0.636, 0.881]: carry/hold (an object), sit, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1059.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.106, 0.208, 0.579, 0.888]?"}, {"from": "gpt", "value": ["At Now, [0.106, 0.208, 0.579, 0.888]: jump/leap.", "At 1058.0s, [0.081, 0.24, 0.526, 0.893]: stand, eat.", "At 1057.0s, [0.344, 0.211, 0.851, 0.867]: stand, eat, watch (a person)."]}]}, {"video": "rUYsoIIE37A.mp4", "clip": [1003, 1125], "conversations": [{"from": "human", "timestamps": 1033.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.074, 0.199, 0.334, 0.869]: carry/hold (an object), sit, talk to (e.g., self, a person, a group); [0.672, 0.215, 0.993, 0.874]: carry/hold (an object), sit, listen to (a person)."}, {"from": "human", "timestamps": 1125.0, "value": "Describe the actions of the person at bbox [0.418, 0.277, 0.572, 0.619] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.418, 0.277, 0.572, 0.619]: stand, grab (a person), listen to (a person), watch (a person).", "At 1124.0s, [0.433, 0.262, 0.595, 0.634]: stand, listen to (a person), watch (a person)."]}]}, {"video": "rUYsoIIE37A.mp4", "clip": [1077, 1113], "conversations": [{"from": "human", "timestamps": 1107.0, "value": "For the person at bbox [0.256, 0.342, 0.508, 0.872], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.256, 0.342, 0.508, 0.872]: stand, watch (a person).", "At 1106.0s, [0.181, 0.356, 0.399, 0.87]: stand.", "At 1105.0s, [0.199, 0.354, 0.379, 0.866]: walk."]}, {"from": "human", "timestamps": 1113.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.002, 0.138, 0.429, 0.893]: listen to (a person), stand, write; [0.489, 0.38, 0.947, 0.871]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.49, 0.258, 0.662, 0.712]: listen to (a person), stand, watch (a person); [0.662, 0.256, 0.815, 0.552]: listen to (a person), stand, watch (a person); [0.811, 0.17, 0.997, 0.852]: watch (a person), stand."}]}, {"video": "rUYsoIIE37A.mp4", "clip": [1121, 1180], "conversations": [{"from": "human", "timestamps": 1151.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.009, 0.136, 0.56, 0.874]: carry/hold (an object), sit, grab (a person); [0.441, 0.173, 0.98, 0.869]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1180.0, "value": 
"Can you tell me the current and past actions of the person located in bbox [0.057, 0.129, 0.708, 0.881]?"}, {"from": "gpt", "value": ["In the current frame, [0.057, 0.129, 0.708, 0.881]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1179.0s, [0.055, 0.133, 0.716, 0.887]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "rUYsoIIE37A.mp4", "clip": [1247, 1295], "conversations": [{"from": "human", "timestamps": 1277.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.208, 0.348, 0.362, 0.867]: listen to (a person), stand, watch (a person); [0.369, 0.387, 0.467, 0.776]: listen to (a person), stand, watch (a person); [0.501, 0.348, 0.657, 0.82]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.632, 0.434, 0.804, 0.864]: listen to (a person), watch (a person), bend/bow (at the waist); [0.755, 0.402, 0.849, 0.801]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.831, 0.387, 0.945, 0.848]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1295.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.043, 0.395, 0.228, 0.867]?"}, {"from": "gpt", "value": ["In the current frame, [0.043, 0.395, 0.228, 0.867]: stand, listen to (a person), watch (a person).", "At 1294.0s, [0.098, 0.436, 0.21, 0.875]: stand, listen to (a person), watch (a person)."]}]}, {"video": "rUYsoIIE37A.mp4", "clip": [1252, 1327], "conversations": [{"from": "human", "timestamps": 1282.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.209, 0.167, 0.552, 0.564]: stand, talk to (e.g., self, a person, a group); [0.576, 0.168, 0.773, 0.573]: carry/hold (an object), stand, watch (a person)."}, {"from": "human", "timestamps": 1327.0, "value": "Describe the actions of the person at bbox [0.234, 0.157, 0.917, 0.851] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.234, 0.157, 0.917, 0.851]: sit, talk to (e.g., self, a person, a group).", "From 1323.0s to 1326.0s, [0.225, 0.131, 0.908, 0.872]->[0.267, 0.132, 0.894, 0.861]->[0.173, 0.137, 0.905, 0.871]->[0.206, 0.133, 0.889, 0.853]: sit, talk to (e.g., self, a person, a group).", "From 1317.0s to 1322.0s, [0.676, 0.206, 0.835, 0.457]->[0.674, 0.182, 0.828, 0.452]->[0.694, 0.184, 0.834, 0.436]->[0.681, 0.181, 0.814, 0.42]->[0.674, 0.193, 0.808, 0.421]->[0.68, 0.216, 0.795, 0.441]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1316.0s, [0.682, 0.192, 0.866, 0.483]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1313.0s to 1315.0s, [0.729, 0.208, 0.924, 0.538]->[0.728, 0.2, 0.91, 0.514]->[0.719, 0.187, 0.889, 0.5]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1310.0s to 1312.0s, [0.805, 0.26, 0.996, 0.647]->[0.761, 0.227, 0.987, 0.613]->[0.738, 0.208, 0.94, 0.573]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "rUYsoIIE37A.mp4", "clip": [1311, 1349], "conversations": [{"from": "human", "timestamps": 1341.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.094, 0.381, 0.388, 0.878]: listen to (a person), sit, watch (a 
person); [0.42, 0.148, 0.843, 0.883]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.469, 0.431, 0.793, 0.878]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1349.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.364, 0.163, 0.726, 0.873]?"}, {"from": "gpt", "value": ["In the current frame, [0.364, 0.163, 0.726, 0.873]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1346.0s to 1348.0s, [0.397, 0.165, 0.677, 0.866]->[0.396, 0.17, 0.691, 0.871]->[0.416, 0.177, 0.794, 0.872]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1345.0s, [0.414, 0.159, 0.717, 0.858]: stand, listen to (a person), talk to (e.g., self, a person, a group)."]}]}, {"video": "rUYsoIIE37A.mp4", "clip": [1385, 1432], "conversations": [{"from": "human", "timestamps": 1415.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.411, 0.198, 0.874]: watch (a person), sit, drink; [0.176, 0.614, 0.439, 0.866]: watch (a person), sit; [0.262, 0.162, 0.445, 0.598]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.304, 0.356, 0.52, 0.752]: carry/hold (an object), sit, watch (a person); [0.481, 0.344, 0.659, 0.701]: carry/hold (an object), sit, listen to (a person), drink; [0.659, 0.338, 0.84, 0.631]: carry/hold (an object), sit, drink; [0.755, 0.448, 1.0, 0.853]: listen to (a person), sit, watch (a person), drink."}, {"from": "human", "timestamps": 1432.0, "value": "Describe the actions of the person at bbox [0.614, 0.165, 0.881, 0.873] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.614, 0.165, 0.881, 0.873]: stand, listen to (a person).", "At 1431.0s, [0.481, 0.161, 0.729, 0.873]: walk, listen to (a person).", "At 1430.0s, [0.538, 0.188, 0.723, 0.873]: walk, listen to (a person), watch (a person).", "At 1429.0s, [0.473, 0.193, 0.61, 0.869]: walk, listen to (a person)."]}]}, {"video": "rUYsoIIE37A.mp4", "clip": [1476, 1617], "conversations": [{"from": "human", "timestamps": 1506.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.092, 0.475, 0.368, 0.87]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.11, 0.174, 0.277, 0.61]: carry/hold (an object), stand, walk; [0.306, 0.233, 0.425, 0.574]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.41, 0.223, 0.532, 0.48]: stand, eat, talk to (e.g., self, a person, a group); [0.524, 0.362, 0.657, 0.559]: listen to (a person), sit, eat; [0.652, 0.321, 0.767, 0.565]: watch (a person), sit."}, {"from": "human", "timestamps": 1617.0, "value": "What actions did the person at bbox [0.756, 0.464, 0.792, 0.544] perform now and earlier?"}, {"from": "gpt", "value": ["At now, [0.756, 0.464, 0.792, 0.544]: sit, sail boat.", "At 1616.0s, [0.717, 0.462, 0.776, 0.549]: sit, row boat.", "At 1615.0s, [0.678, 0.47, 0.734, 0.565]: watch (a person).", "At 1614.0s, [0.669, 0.503, 0.728, 0.603]: stand."]}]}, {"video": "rUYsoIIE37A.mp4", "clip": [1534, 1570], "conversations": [{"from": "human", "timestamps": 1564.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.073, 0.136, 0.987, 0.865]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1570.0, "value": "Describe the actions of the person at bbox 
[0.082, 0.121, 0.979, 0.863] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.082, 0.121, 0.979, 0.863]: sit, talk to (e.g., self, a person, a group).", "At 1569.0s, [0.058, 0.128, 0.969, 0.866]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1558.0s to 1568.0s, [0.129, 0.159, 0.972, 0.871]->[0.056, 0.174, 0.924, 0.884]->[0.006, 0.178, 0.861, 0.881]->[0.03, 0.133, 0.885, 0.866]->[0.009, 0.131, 0.952, 0.886]->[0.071, 0.128, 0.974, 0.868]->[0.073, 0.136, 0.987, 0.865]->[0.06, 0.129, 0.987, 0.889]->[0.077, 0.13, 0.982, 0.878]->[0.088, 0.177, 1.0, 0.873]->[0.078, 0.156, 0.984, 0.87]: sit, talk to (e.g., self, a person, a group).", "From 1554.0s to 1557.0s, [0.281, 0.161, 0.984, 0.861]->[0.286, 0.169, 0.994, 0.852]->[0.233, 0.183, 0.978, 0.871]->[0.172, 0.174, 0.987, 0.865]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1553.0s, [0.334, 0.17, 0.979, 0.864]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1551.0s to 1552.0s, [0.436, 0.143, 0.977, 0.873]->[0.4, 0.144, 0.988, 0.883]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1548.0s to 1550.0s, [0.468, 0.17, 0.979, 0.876]->[0.443, 0.151, 0.984, 0.873]->[0.455, 0.143, 0.982, 0.874]: sit, listen to (a person), watch (a person).", "From 1535.0s to 1547.0s, [0.533, 0.175, 0.984, 0.881]->[0.537, 0.156, 0.982, 0.879]->[0.548, 0.173, 0.981, 0.884]->[0.535, 0.186, 0.977, 0.879]->[0.519, 0.178, 0.975, 0.874]->[0.505, 0.174, 0.977, 0.88]->[0.493, 0.19, 0.981, 0.876]->[0.504, 0.205, 0.987, 0.879]->[0.499, 0.212, 0.979, 0.877]->[0.493, 0.217, 0.981, 0.89]->[0.494, 0.225, 0.979, 0.874]->[0.491, 0.207, 0.979, 0.881]->[0.483, 0.192, 0.984, 0.87]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1533.0s to 1534.0s, [0.509, 0.166, 0.997, 0.876]->[0.517, 0.18, 0.995, 0.871]: sit, listen to (a person).", "From 1531.0s to 1532.0s, [0.527, 0.183, 0.986, 0.885]->[0.532, 0.184, 0.992, 0.876]: sit, put down, listen to (a person).", "At 1530.0s, [0.443, 0.146, 0.982, 0.882]: sit, carry/hold (an object), listen to (a person).", "At 1529.0s, [0.327, 0.155, 0.993, 0.9]: sit, drink, listen to (a person)."]}]}, {"video": "rUYsoIIE37A.mp4", "clip": [1709, 1787], "conversations": [{"from": "human", "timestamps": 1739.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.008, 0.117, 0.364, 0.865]: listen to (a person), stand, watch (a person); [0.188, 0.271, 0.572, 0.871]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.499, 0.223, 0.822, 0.882]: listen to (a person), stand, watch (a person); [0.693, 0.137, 0.996, 0.893]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1787.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.115, 0.254, 0.418, 0.87]?"}, {"from": "gpt", "value": ["At now, [0.115, 0.254, 0.418, 0.87]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1786.0s, [0.11, 0.249, 0.391, 0.868]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1785.0s, [0.081, 0.262, 0.366, 0.863]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "ri4P2enZT9o.mkv", "clip": [874, 955], "conversations": [{"from": "human", "timestamps": 904.0, 
"value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.151, 0.048, 0.775, 0.982]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 955.0, "value": "For the person at bbox [0.449, 0.164, 0.839, 0.991], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.449, 0.164, 0.839, 0.991]: sit, talk to (e.g., self, a person, a group).", "From 953.0s to 954.0s, [0.407, 0.169, 0.85, 0.972]->[0.38, 0.122, 0.843, 0.991]: sit, talk to (e.g., self, a person, a group).", "At 952.0s, [0.389, 0.168, 0.841, 0.981]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 951.0s, [0.403, 0.164, 0.84, 0.977]: sit."]}]}, {"video": "ri4P2enZT9o.mkv", "clip": [940, 995], "conversations": [{"from": "human", "timestamps": 970.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.123, 0.068, 0.873, 0.977]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 995.0, "value": "What actions is the person in bbox [0.153, 0.153, 0.436, 0.952] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.153, 0.153, 0.436, 0.952]: sit, listen to (a person), watch (a person).", "From 991.0s to 994.0s, [0.158, 0.149, 0.438, 0.954]->[0.173, 0.149, 0.438, 0.956]->[0.164, 0.139, 0.453, 0.959]->[0.167, 0.148, 0.425, 0.963]: sit, listen to (a person), watch (a person).", "At 990.0s, [0.164, 0.148, 0.439, 0.962]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 986.0s to 989.0s, [0.267, 0.063, 0.872, 0.993]->[0.104, 0.0, 0.843, 0.976]->[0.123, 0.03, 0.834, 0.983]->[0.134, 0.004, 0.798, 0.963]: sit, talk to (e.g., self, a person, a group).", "At 985.0s, [0.144, 0.0, 0.861, 0.996]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "ri4P2enZT9o.mkv", "clip": [1004, 1057], "conversations": [{"from": "human", "timestamps": 1034.0, "value": "What actions is the person in bbox [0.148, 0.01, 0.843, 0.984] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.148, 0.01, 0.843, 0.984]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1033.0s, [0.393, 0.182, 0.685, 0.975]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1032.0s, [0.411, 0.158, 0.669, 0.983]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1031.0s, [0.368, 0.176, 0.741, 0.983]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1030.0s, [0.382, 0.186, 0.67, 0.991]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "From 1028.0s to 1029.0s, [0.274, 0.222, 0.627, 0.985]->[0.354, 0.17, 0.623, 0.984]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1027.0s, [0.365, 0.18, 0.626, 0.991]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1026.0s, [0.413, 0.169, 0.62, 1.0]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1057.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.126, 0.021, 0.825, 0.985]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}]}, 
{"video": "ri4P2enZT9o.mkv", "clip": [1035, 1108], "conversations": [{"from": "human", "timestamps": 1065.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.167, 0.053, 0.805, 0.973]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1108.0, "value": "For the person at bbox [0.151, 0.017, 0.852, 0.987], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.151, 0.017, 0.852, 0.987]: sit, talk to (e.g., self, a person, a group).", "From 1104.0s to 1107.0s, [0.162, 0.069, 0.854, 0.989]->[0.154, 0.023, 0.851, 0.993]->[0.135, 0.013, 0.838, 0.98]->[0.157, 0.005, 0.84, 0.981]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "ri4P2enZT9o.mkv", "clip": [1073, 1122], "conversations": [{"from": "human", "timestamps": 1103.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.182, 0.0, 0.851, 0.99]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1122.0, "value": "Describe the actions of the person at bbox [0.488, 0.167, 0.749, 0.963] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.488, 0.167, 0.749, 0.963]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1121.0s, [0.485, 0.172, 0.748, 0.944]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1120.0s, [0.201, 0.006, 0.865, 0.983]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1119.0s, [0.172, 0.0, 0.85, 0.976]: sit, talk to (e.g., self, a person, a group).", "At 1118.0s, [0.171, 0.0, 0.844, 0.98]: stand, talk to (e.g., self, a person, a group).", "At 1117.0s, [0.163, 0.002, 0.869, 0.978]: talk to (e.g., self, a person, a group).", "From 1113.0s to 1116.0s, [0.236, 0.072, 0.87, 0.982]->[0.199, 0.019, 0.883, 0.963]->[0.197, 0.014, 0.862, 0.97]->[0.205, 0.024, 0.917, 0.979]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "ri4P2enZT9o.mkv", "clip": [1160, 1218], "conversations": [{"from": "human", "timestamps": 1190.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.219, 0.309, 0.454, 0.924]: sit, talk to (e.g., self, a person, a group); [0.587, 0.341, 0.806, 0.874]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1218.0, "value": "Describe the actions of the person at bbox [0.258, 0.016, 0.876, 0.987] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.258, 0.016, 0.876, 0.987]: sit.", "At 1217.0s, [0.301, 0.01, 0.871, 0.98]: sit.", "At 1216.0s, [0.242, 0.016, 0.875, 0.98]: sit, talk to (e.g., self, a person, a group).", "At 1215.0s, [0.256, 0.037, 0.876, 0.984]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "ri4P2enZT9o.mkv", "clip": [1183, 1221], "conversations": [{"from": "human", "timestamps": 1213.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.187, 0.075, 0.816, 0.971]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1221.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.253, 0.04, 0.867, 
0.988]?"}, {"from": "gpt", "value": ["In the current frame, [0.253, 0.04, 0.867, 0.988]: sit, talk to (e.g., self, a person, a group).", "From 1219.0s to 1220.0s, [0.239, 0.056, 0.876, 0.99]->[0.253, 0.059, 0.863, 0.978]: sit, talk to (e.g., self, a person, a group).", "From 1217.0s to 1218.0s, [0.301, 0.01, 0.871, 0.98]->[0.258, 0.016, 0.876, 0.987]: sit.", "At 1216.0s, [0.242, 0.016, 0.875, 0.98]: sit, talk to (e.g., self, a person, a group).", "At 1215.0s, [0.256, 0.037, 0.876, 0.984]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "ri4P2enZT9o.mkv", "clip": [1262, 1304], "conversations": [{"from": "human", "timestamps": 1292.0, "value": "What actions did the person at bbox [0.277, 0.0, 0.866, 0.987] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.277, 0.0, 0.866, 0.987]: sit, talk to (e.g., self, a person, a group).", "From 1290.0s to 1291.0s, [0.252, 0.023, 0.883, 0.971]->[0.172, 0.025, 0.894, 0.968]: sit, talk to (e.g., self, a person, a group).", "At 1289.0s, [0.281, 0.022, 0.875, 0.961]: sit, stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1304.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.194, 0.284, 0.426, 0.918]: listen to (a person), sit, watch (a person); [0.498, 0.306, 0.712, 0.925]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "ri4P2enZT9o.mkv", "clip": [1265, 1373], "conversations": [{"from": "human", "timestamps": 1295.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.114, 0.141, 0.812, 0.977]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1373.0, "value": "What actions is the person in bbox [0.54, 0.316, 0.737, 0.963] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.54, 0.316, 0.737, 0.963]: sit, listen to (a person).", "From 1371.0s to 1372.0s, [0.547, 0.314, 0.74, 0.949]->[0.54, 0.32, 0.735, 0.96]: sit, listen to (a person).", "From 1369.0s to 1370.0s, [0.543, 0.277, 0.745, 0.966]->[0.541, 0.347, 0.747, 0.953]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1368.0s, [0.537, 0.254, 0.768, 0.962]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1367.0s, [0.556, 0.265, 0.761, 0.916]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1366.0s, [0.561, 0.295, 0.765, 0.977]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "ri4P2enZT9o.mkv", "clip": [1337, 1370], "conversations": [{"from": "human", "timestamps": 1367.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.187, 0.314, 0.442, 0.949]: carry/hold (an object), sit, talk to (e.g., self, a person, a group), listen to (a person); [0.556, 0.265, 0.761, 0.916]: sit, watch (a person), listen to (a person), carry/hold (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1370.0, "value": "What actions did the person at bbox [0.197, 0.299, 0.424, 0.952] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.197, 0.299, 0.424, 0.952]: sit, listen to (a 
person), talk to (e.g., self, a person, a group), watch (a person).", "At 1369.0s, [0.193, 0.287, 0.434, 0.951]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1368.0s, [0.2, 0.302, 0.437, 0.939]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1367.0s, [0.187, 0.314, 0.442, 0.949]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1366.0s, [0.19, 0.272, 0.443, 0.936]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1364.0s, [0.186, 0.31, 0.432, 0.944]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1363.0s, [0.185, 0.278, 0.358, 0.96]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1362.0s, [0.171, 0.282, 0.451, 0.927]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1361.0s, [0.172, 0.276, 0.445, 0.93]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1360.0s, [0.158, 0.275, 0.44, 0.939]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1359.0s, [0.18, 0.289, 0.445, 0.931]: sit, listen to (a person), watch (a person).", "At 1358.0s, [0.179, 0.301, 0.448, 0.927]: sit, listen to (a person).", "From 1355.0s to 1357.0s, [0.18, 0.284, 0.436, 0.954]->[0.181, 0.283, 0.441, 0.943]->[0.178, 0.284, 0.447, 0.925]: sit, listen to (a person), watch (a person).", "At 1354.0s, [0.178, 0.289, 0.444, 0.937]: sit, touch (an object), listen to (a person), watch (a person).", "From 1351.0s to 1353.0s, [0.182, 0.29, 0.44, 0.951]->[0.18, 0.287, 0.444, 0.935]->[0.18, 0.286, 0.443, 0.945]: sit, listen to (a person), watch (a person).", "From 1347.0s to 1350.0s, [0.177, 0.283, 0.442, 0.943]->[0.184, 0.289, 0.438, 0.963]->[0.18, 0.285, 0.443, 0.944]->[0.18, 0.288, 0.44, 0.948]: sit, listen to (a person).", "At 1346.0s, [0.179, 0.276, 0.452, 0.948]: sit, touch (an object), listen to (a person), watch (a person).", "At 1345.0s, [0.176, 0.282, 0.361, 0.938]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1343.0s to 1344.0s, [0.178, 0.282, 0.43, 0.925]->[0.175, 0.273, 0.375, 0.949]: sit, listen to (a person), watch (a person).", "At 1342.0s, [0.175, 0.283, 0.364, 0.939]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1341.0s, [0.177, 0.279, 0.446, 0.928]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1339.0s to 1340.0s, [0.181, 0.281, 0.385, 0.96]->[0.175, 0.283, 0.419, 0.927]: sit, listen to (a person), watch (a person).", "At 1338.0s, [0.176, 0.314, 0.438, 0.933]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1337.0s, [0.176, 0.299, 0.363, 0.942]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1336.0s, [0.164, 0.281, 0.443, 0.923]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1335.0s, [0.163, 0.279, 0.445, 0.926]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1334.0s, [0.146, 0.295, 0.425, 0.96]: sit, carry/hold (an object), listen to (a person).", "At 1333.0s, [0.18, 0.304, 0.442, 0.928]: sit, listen to (a person).", "At 1332.0s, [0.185, 0.306, 0.434, 0.924]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 
1329.0s to 1331.0s, [0.181, 0.314, 0.439, 0.929]->[0.19, 0.297, 0.434, 0.939]->[0.186, 0.296, 0.44, 0.949]: sit, listen to (a person).", "At 1328.0s, [0.192, 0.313, 0.429, 0.928]: sit, carry/hold (an object), listen to (a person).", "At 1327.0s, [0.181, 0.299, 0.389, 0.942]: sit, listen to (a person).", "At 1326.0s, [0.188, 0.308, 0.437, 0.946]: sit, listen to (a person), watch (a person).", "From 1321.0s to 1325.0s, [0.191, 0.291, 0.414, 0.927]->[0.191, 0.3, 0.439, 0.942]->[0.193, 0.305, 0.425, 0.934]->[0.191, 0.308, 0.427, 0.947]->[0.192, 0.312, 0.439, 0.95]: sit, listen to (a person).", "From 1318.0s to 1320.0s, [0.188, 0.27, 0.448, 0.922]->[0.186, 0.263, 0.454, 0.934]->[0.187, 0.267, 0.441, 0.939]: sit, listen to (a person), watch (a person).", "At 1317.0s, [0.19, 0.296, 0.441, 0.944]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1314.0s to 1316.0s, [0.171, 0.273, 0.449, 0.914]->[0.189, 0.296, 0.417, 0.898]->[0.182, 0.279, 0.373, 0.942]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1313.0s, [0.171, 0.29, 0.449, 0.933]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1312.0s, [0.151, 0.293, 0.446, 0.931]: sit, listen to (a person).", "From 1310.0s to 1311.0s, [0.167, 0.325, 0.434, 0.947]->[0.164, 0.292, 0.429, 0.928]: sit, listen to (a person), watch (a person).", "At 1309.0s, [0.162, 0.274, 0.446, 0.926]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1304.0s to 1308.0s, [0.194, 0.284, 0.426, 0.918]->[0.187, 0.279, 0.44, 0.932]->[0.185, 0.279, 0.443, 0.933]->[0.186, 0.277, 0.438, 0.937]->[0.187, 0.275, 0.438, 0.926]: sit, listen to (a person), watch (a person).", "At 1303.0s, [0.187, 0.281, 0.437, 0.921]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1300.0s to 1302.0s, [0.188, 0.304, 0.443, 0.924]->[0.185, 0.303, 0.45, 0.911]->[0.197, 0.289, 0.44, 0.935]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1296.0s to 1299.0s, [0.171, 0.307, 0.441, 0.936]->[0.175, 0.247, 0.439, 0.922]->[0.189, 0.281, 0.441, 0.916]->[0.193, 0.311, 0.438, 0.939]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1294.0s to 1295.0s, [0.117, 0.065, 0.804, 0.974]->[0.114, 0.141, 0.812, 0.977]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "ri4P2enZT9o.mkv", "clip": [1354, 1396], "conversations": [{"from": "human", "timestamps": 1384.0, "value": "What actions did the person at bbox [0.194, 0.274, 0.442, 0.924] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.194, 0.274, 0.442, 0.924]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1383.0s, [0.167, 0.246, 0.45, 0.937]: sit, talk to (e.g., self, a person, a group).", "At 1381.0s, [0.175, 0.28, 0.446, 0.951]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1380.0s, [0.196, 0.279, 0.435, 0.942]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1378.0s to 1379.0s, [0.203, 0.294, 0.448, 0.942]->[0.203, 0.283, 0.429, 0.942]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1377.0s, [0.192, 0.282, 0.415, 0.951]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 
1375.0s to 1376.0s, [0.182, 0.268, 0.428, 0.953]->[0.206, 0.329, 0.386, 0.952]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1371.0s to 1374.0s, [0.199, 0.288, 0.427, 0.95]->[0.198, 0.3, 0.424, 0.94]->[0.2, 0.308, 0.423, 0.947]->[0.207, 0.322, 0.442, 0.941]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1370.0s, [0.197, 0.299, 0.424, 0.952]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1369.0s, [0.193, 0.287, 0.434, 0.951]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1368.0s, [0.2, 0.302, 0.437, 0.939]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1367.0s, [0.187, 0.314, 0.442, 0.949]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1366.0s, [0.19, 0.272, 0.443, 0.936]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1364.0s, [0.186, 0.31, 0.432, 0.944]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1363.0s, [0.185, 0.278, 0.358, 0.96]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1362.0s, [0.171, 0.282, 0.451, 0.927]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1361.0s, [0.172, 0.276, 0.445, 0.93]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1360.0s, [0.158, 0.275, 0.44, 0.939]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1359.0s, [0.18, 0.289, 0.445, 0.931]: sit, listen to (a person), watch (a person).", "At 1358.0s, [0.179, 0.301, 0.448, 0.927]: sit, listen to (a person).", "From 1355.0s to 1357.0s, [0.18, 0.284, 0.436, 0.954]->[0.181, 0.283, 0.441, 0.943]->[0.178, 0.284, 0.447, 0.925]: sit, listen to (a person), watch (a person).", "At 1354.0s, [0.178, 0.289, 0.444, 0.937]: sit, touch (an object), listen to (a person), watch (a person).", "From 1351.0s to 1353.0s, [0.182, 0.29, 0.44, 0.951]->[0.18, 0.287, 0.444, 0.935]->[0.18, 0.286, 0.443, 0.945]: sit, listen to (a person), watch (a person).", "From 1347.0s to 1350.0s, [0.177, 0.283, 0.442, 0.943]->[0.184, 0.289, 0.438, 0.963]->[0.18, 0.285, 0.443, 0.944]->[0.18, 0.288, 0.44, 0.948]: sit, listen to (a person).", "At 1346.0s, [0.179, 0.276, 0.452, 0.948]: sit, touch (an object), listen to (a person), watch (a person).", "At 1345.0s, [0.176, 0.282, 0.361, 0.938]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1343.0s to 1344.0s, [0.178, 0.282, 0.43, 0.925]->[0.175, 0.273, 0.375, 0.949]: sit, listen to (a person), watch (a person).", "At 1342.0s, [0.175, 0.283, 0.364, 0.939]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1341.0s, [0.177, 0.279, 0.446, 0.928]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1339.0s to 1340.0s, [0.181, 0.281, 0.385, 0.96]->[0.175, 0.283, 0.419, 0.927]: sit, listen to (a person), watch (a person).", "At 1338.0s, [0.176, 0.314, 0.438, 0.933]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1337.0s, [0.176, 0.299, 0.363, 0.942]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1336.0s, [0.164, 0.281, 0.443, 0.923]: sit, talk to (e.g., self, a person, a group), 
watch (a person).", "At 1335.0s, [0.163, 0.279, 0.445, 0.926]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1334.0s, [0.146, 0.295, 0.425, 0.96]: sit, carry/hold (an object), listen to (a person).", "At 1333.0s, [0.18, 0.304, 0.442, 0.928]: sit, listen to (a person).", "At 1332.0s, [0.185, 0.306, 0.434, 0.924]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1329.0s to 1331.0s, [0.181, 0.314, 0.439, 0.929]->[0.19, 0.297, 0.434, 0.939]->[0.186, 0.296, 0.44, 0.949]: sit, listen to (a person).", "At 1328.0s, [0.192, 0.313, 0.429, 0.928]: sit, carry/hold (an object), listen to (a person).", "At 1327.0s, [0.181, 0.299, 0.389, 0.942]: sit, listen to (a person).", "At 1326.0s, [0.188, 0.308, 0.437, 0.946]: sit, listen to (a person), watch (a person).", "From 1321.0s to 1325.0s, [0.191, 0.291, 0.414, 0.927]->[0.191, 0.3, 0.439, 0.942]->[0.193, 0.305, 0.425, 0.934]->[0.191, 0.308, 0.427, 0.947]->[0.192, 0.312, 0.439, 0.95]: sit, listen to (a person).", "From 1318.0s to 1320.0s, [0.188, 0.27, 0.448, 0.922]->[0.186, 0.263, 0.454, 0.934]->[0.187, 0.267, 0.441, 0.939]: sit, listen to (a person), watch (a person).", "At 1317.0s, [0.19, 0.296, 0.441, 0.944]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1314.0s to 1316.0s, [0.171, 0.273, 0.449, 0.914]->[0.189, 0.296, 0.417, 0.898]->[0.182, 0.279, 0.373, 0.942]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1313.0s, [0.171, 0.29, 0.449, 0.933]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1312.0s, [0.151, 0.293, 0.446, 0.931]: sit, listen to (a person).", "From 1310.0s to 1311.0s, [0.167, 0.325, 0.434, 0.947]->[0.164, 0.292, 0.429, 0.928]: sit, listen to (a person), watch (a person).", "At 1309.0s, [0.162, 0.274, 0.446, 0.926]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1304.0s to 1308.0s, [0.194, 0.284, 0.426, 0.918]->[0.187, 0.279, 0.44, 0.932]->[0.185, 0.279, 0.443, 0.933]->[0.186, 0.277, 0.438, 0.937]->[0.187, 0.275, 0.438, 0.926]: sit, listen to (a person), watch (a person).", "At 1303.0s, [0.187, 0.281, 0.437, 0.921]: sit, carry/hold (an object), listen to (a person), watch (a person).", "From 1300.0s to 1302.0s, [0.188, 0.304, 0.443, 0.924]->[0.185, 0.303, 0.45, 0.911]->[0.197, 0.289, 0.44, 0.935]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1296.0s to 1299.0s, [0.171, 0.307, 0.441, 0.936]->[0.175, 0.247, 0.439, 0.922]->[0.189, 0.281, 0.441, 0.916]->[0.193, 0.311, 0.438, 0.939]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1294.0s to 1295.0s, [0.117, 0.065, 0.804, 0.974]->[0.114, 0.141, 0.812, 0.977]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1396.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.158, 0.274, 0.334, 0.927]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group); [0.551, 0.354, 0.728, 0.891]: listen to (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "ri4P2enZT9o.mkv", "clip": [1381, 1488], "conversations": [{"from": "human", "timestamps": 1411.0, "value": "Describe the locations and actions of each person in the current 
frame."}, {"from": "gpt", "value": "At now, [0.429, 0.301, 0.696, 0.956]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1488.0, "value": "For the person at bbox [0.117, 0.033, 0.808, 0.998], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.117, 0.033, 0.808, 0.998]: sit, talk to (e.g., self, a person, a group).", "From 1482.0s to 1487.0s, [0.18, 0.015, 0.809, 0.988]->[0.142, 0.004, 0.795, 0.995]->[0.189, 0.0, 0.798, 0.979]->[0.225, 0.022, 0.82, 0.984]->[0.183, 0.027, 0.841, 1.0]->[0.158, 0.012, 0.816, 1.0]: sit, talk to (e.g., self, a person, a group).", "From 1479.0s to 1480.0s, [0.25, 0.027, 0.884, 1.0]->[0.211, 0.006, 0.877, 0.984]: sit, talk to (e.g., self, a person, a group).", "At 1478.0s, [0.279, 0.01, 0.876, 0.978]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1473.0s to 1477.0s, [0.286, 0.051, 0.874, 0.996]->[0.215, 0.004, 0.84, 1.0]->[0.264, 0.022, 0.863, 0.999]->[0.301, 0.0, 0.872, 0.977]->[0.286, 0.01, 0.883, 0.994]: sit, talk to (e.g., self, a person, a group).", "At 1472.0s, [0.296, 0.042, 0.88, 0.994]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1471.0s, [0.26, 0.038, 0.887, 0.998]: sit, talk to (e.g., self, a person, a group).", "At 1470.0s, [0.332, 0.051, 0.869, 0.989]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1468.0s to 1469.0s, [0.299, 0.032, 0.858, 1.0]->[0.334, 0.035, 0.865, 0.992]: sit, listen to (a person).", "From 1466.0s to 1467.0s, [0.388, 0.044, 0.879, 0.993]->[0.33, 0.043, 0.877, 0.983]: sit, listen to (a person), watch (a person).", "At 1465.0s, [0.325, 0.045, 0.869, 0.993]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1464.0s, [0.343, 0.049, 0.863, 0.998]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1462.0s to 1463.0s, [0.321, 0.037, 0.873, 1.0]->[0.271, 0.064, 0.857, 0.996]: sit, talk to (e.g., self, a person, a group).", "From 1458.0s to 1461.0s, [0.313, 0.03, 0.866, 1.0]->[0.352, 0.017, 0.877, 1.0]->[0.354, 0.042, 0.853, 1.0]->[0.383, 0.047, 0.876, 1.0]: sit, listen to (a person)."]}]}, {"video": "ri4P2enZT9o.mkv", "clip": [1422, 1455], "conversations": [{"from": "human", "timestamps": 1452.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.1, 0.049, 0.854, 0.96]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1455.0, "value": "For the person at bbox [0.114, 0.053, 0.839, 0.965], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.114, 0.053, 0.839, 0.965]: sit, talk to (e.g., self, a person, a group).", "From 1448.0s to 1454.0s, [0.1, 0.063, 0.867, 0.969]->[0.116, 0.077, 0.852, 0.967]->[0.16, 0.0, 0.865, 0.978]->[0.128, 0.057, 0.867, 0.964]->[0.1, 0.049, 0.854, 0.96]->[0.13, 0.039, 0.883, 0.975]->[0.129, 0.032, 0.883, 0.993]: sit, talk to (e.g., self, a person, a group).", "From 1445.0s to 1447.0s, [0.148, 0.225, 0.484, 0.984]->[0.099, 0.064, 0.844, 0.977]->[0.116, 0.054, 0.825, 0.973]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1444.0s, [0.151, 0.257, 0.484, 0.989]: sit, talk to (e.g., self, a person, a group).", "From 1439.0s to 1443.0s, [0.251, 0.193, 0.532, 0.995]->[0.214, 0.247, 0.573, 0.974]->[0.214, 0.218, 0.556, 0.991]->[0.226, 0.268, 0.461, 0.986]->[0.228, 0.227, 0.494, 0.981]: sit, talk to 
(e.g., self, a person, a group), watch (a person).", "At 1438.0s, [0.261, 0.169, 0.568, 0.956]: sit, talk to (e.g., self, a person, a group).", "From 1436.0s to 1437.0s, [0.296, 0.239, 0.55, 0.949]->[0.263, 0.233, 0.579, 0.98]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "ri4P2enZT9o.mkv", "clip": [1482, 1528], "conversations": [{"from": "human", "timestamps": 1512.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.258, 0.274, 0.541, 1.0]: listen to (a person), sit; [0.42, 0.252, 0.724, 0.971]: sit, touch (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1528.0, "value": "Describe the actions of the person at bbox [0.262, 0.004, 0.513, 0.983] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.262, 0.004, 0.513, 0.983]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1527.0s, [0.389, 0.276, 0.728, 0.959]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1526.0s, [0.457, 0.256, 0.733, 0.928]: sit, listen to (a person).", "From 1523.0s to 1524.0s, [0.502, 0.251, 0.725, 0.833]->[0.374, 0.239, 0.736, 0.932]: sit, talk to (e.g., self, a person, a group).", "From 1521.0s to 1522.0s, [0.393, 0.244, 0.69, 1.0]->[0.395, 0.246, 0.735, 0.86]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "From 1518.0s to 1520.0s, [0.48, 0.222, 0.689, 0.893]->[0.386, 0.25, 0.711, 1.0]->[0.485, 0.243, 0.681, 0.836]: sit, talk to (e.g., self, a person, a group).", "At 1517.0s, [0.353, 0.233, 0.712, 0.993]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1516.0s, [0.417, 0.258, 0.681, 0.964]: sit, talk to (e.g., self, a person, a group).", "From 1513.0s to 1514.0s, [0.491, 0.225, 0.713, 0.853]->[0.431, 0.237, 0.735, 0.918]: sit, talk to (e.g., self, a person, a group).", "At 1512.0s, [0.42, 0.252, 0.724, 0.971]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1511.0s, [0.434, 0.271, 0.734, 0.931]: sit, talk to (e.g., self, a person, a group).", "From 1507.0s to 1510.0s, [0.445, 0.267, 0.733, 0.952]->[0.454, 0.257, 0.732, 0.894]->[0.427, 0.262, 0.734, 0.966]->[0.444, 0.262, 0.73, 0.906]: sit, listen to (a person).", "From 1504.0s to 1505.0s, [0.424, 0.262, 0.738, 0.962]->[0.444, 0.264, 0.731, 0.904]: sit, listen to (a person).", "At 1503.0s, [0.435, 0.26, 0.754, 0.937]: sit, listen to (a person), watch (a person).", "At 1502.0s, [0.317, 0.24, 0.697, 0.983]: sit, listen to (a person)."]}]}, {"video": "ri4P2enZT9o.mkv", "clip": [1485, 1561], "conversations": [{"from": "human", "timestamps": 1515.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.257, 0.243, 0.517, 0.987]: listen to (a person), sit."}, {"from": "human", "timestamps": 1561.0, "value": "What actions did the person at bbox [0.394, 0.181, 0.72, 0.975] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.394, 0.181, 0.72, 0.975]: sit, listen to (a person), watch (a person).", "At 1560.0s, [0.422, 0.197, 0.776, 0.976]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1559.0s, [0.523, 0.225, 0.859, 0.964]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "From 1555.0s to 1558.0s, [0.474, 0.215, 0.857, 0.986]->[0.481, 0.209, 0.86, 0.977]->[0.501, 0.22, 0.858, 
0.986]->[0.549, 0.233, 0.859, 1.0]: sit, listen to (a person), watch (a person).", "From 1551.0s to 1554.0s, [0.504, 0.224, 0.866, 0.976]->[0.482, 0.214, 0.854, 0.988]->[0.453, 0.215, 0.851, 0.99]->[0.485, 0.209, 0.862, 0.981]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1549.0s to 1550.0s, [0.459, 0.217, 0.8, 0.904]->[0.509, 0.229, 0.878, 0.976]: sit, listen to (a person), watch (a person)."]}]}, {"video": "ri4P2enZT9o.mkv", "clip": [1599, 1708], "conversations": [{"from": "human", "timestamps": 1629.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.224, 0.169, 0.453, 0.981]: listen to (a person), stand, talk to (e.g., self, a person, a group); [0.477, 0.081, 0.755, 0.981]: listen to (a person), stand, touch (an object), watch (a person)."}, {"from": "human", "timestamps": 1708.0, "value": "Describe the actions of the person at bbox [0.263, 0.094, 0.878, 0.981] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At now, [0.263, 0.094, 0.878, 0.981]: sit, listen to (a person), watch (a person).", "At 1707.0s, [0.099, 0.084, 0.877, 0.99]: sit, listen to (a person), watch (a person)."]}]}, {"video": "rk8Xm0EAOWs.mkv", "clip": [898, 929], "conversations": [{"from": "human", "timestamps": 928.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.139, 0.135, 0.527, 0.878]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.508, 0.182, 0.894, 0.78]: ride (e.g., a bike, a car, a horse), sit, listen to (a person), watch (a person), carry/hold (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 929.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.447, 0.194, 0.841, 0.797]?"}, {"from": "gpt", "value": ["At now, [0.447, 0.194, 0.841, 0.797]: sit, carry/hold (an object), ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group), watch (a person).", "At 928.0s, [0.508, 0.182, 0.894, 0.78]: sit, carry/hold (an object), ride (e.g., a bike, a car, a horse), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 927.0s, [0.542, 0.166, 0.907, 0.785]: sit, carry/hold (an object), ride (e.g., a bike, a car, a horse), listen to (a person).", "At 926.0s, [0.488, 0.183, 0.847, 0.786]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 925.0s, [0.524, 0.128, 0.876, 0.786]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 922.0s to 924.0s, [0.447, 0.191, 0.842, 0.788]->[0.514, 0.126, 0.895, 0.797]->[0.453, 0.167, 0.853, 0.787]: sit, carry/hold (an object), ride (e.g., a bike, a car, a horse), talk to (e.g., self, a person, a group), watch (a person).", "From 919.0s to 921.0s, [0.528, 0.143, 0.893, 0.792]->[0.479, 0.151, 0.842, 0.791]->[0.484, 0.131, 0.901, 0.807]: sit, carry/hold (an object), ride (e.g., a bike, a car, a horse), listen to (a person)."]}]}, {"video": "rk8Xm0EAOWs.mkv", "clip": [1001, 1113], "conversations": [{"from": "human", "timestamps": 1031.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.201, 0.211, 0.548, 0.893]: ride (e.g., a bike, a car, a horse), sit, talk to (e.g., self, a person, a group); [0.504, 0.137, 0.862, 0.793]: carry/hold (an object), sit, ride 
(e.g., a bike, a car, a horse), listen to (a person)."}, {"from": "human", "timestamps": 1113.0, "value": "Describe the actions of the person at bbox [0.181, 0.276, 0.513, 0.902] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.181, 0.276, 0.513, 0.902]: stand, walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1112.0s, [0.203, 0.423, 0.508, 0.895]: walk, carry/hold (an object)."]}]}, {"video": "rk8Xm0EAOWs.mkv", "clip": [1148, 1196], "conversations": [{"from": "human", "timestamps": 1178.0, "value": "Describe the actions of the person at bbox [0.012, 0.323, 0.478, 0.905] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At now, [0.012, 0.323, 0.478, 0.905]: stand, listen to (a person).", "At 1177.0s, [0.009, 0.314, 0.42, 0.913]: stand, listen to (a person), watch (a person).", "From 1169.0s to 1176.0s, [0.486, 0.282, 0.754, 0.833]->[0.409, 0.273, 0.754, 0.817]->[0.49, 0.271, 0.773, 0.823]->[0.457, 0.272, 0.798, 0.834]->[0.435, 0.265, 0.786, 0.848]->[0.43, 0.272, 0.745, 0.83]->[0.462, 0.254, 0.753, 0.868]->[0.008, 0.331, 0.508, 0.903]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1196.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.028, 0.214, 0.498, 0.858]: carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.471, 0.295, 0.738, 0.857]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.667, 0.136, 0.99, 0.908]: listen to (a person), stand."}]}, {"video": "rk8Xm0EAOWs.mkv", "clip": [1154, 1233], "conversations": [{"from": "human", "timestamps": 1184.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.165, 0.125, 0.755, 0.881]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1233.0, "value": "For the person at bbox [0.009, 0.298, 0.166, 0.889], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At now, [0.009, 0.298, 0.166, 0.889]: walk.", "At 1232.0s, [0.331, 0.343, 0.509, 0.905]: walk, carry/hold (an object)."]}]}, {"video": "rk8Xm0EAOWs.mkv", "clip": [1238, 1308], "conversations": [{"from": "human", "timestamps": 1268.0, "value": "For the person at bbox [0.195, 0.127, 0.871, 0.889], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At now, [0.195, 0.127, 0.871, 0.889]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1266.0s to 1267.0s, [0.215, 0.133, 0.878, 0.892]->[0.216, 0.109, 0.876, 0.919]: stand, talk to (e.g., self, a person, a group).", "From 1264.0s to 1265.0s, [0.208, 0.129, 0.884, 0.893]->[0.203, 0.122, 0.89, 0.884]: stand."]}, {"from": "human", "timestamps": 1308.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.115, 0.278, 0.567, 0.776]: watch (a person), sit, smoke, talk to (e.g., self, a person, a group); [0.48, 0.223, 1.0, 0.809]: listen to (a person), sit."}]}, {"video": "rk8Xm0EAOWs.mkv", "clip": [1272, 1341], "conversations": [{"from": "human", "timestamps": 1302.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.084, 0.425, 0.189, 0.832]: carry/hold (an object), stand, 
smoke, watch (a person); [0.287, 0.468, 0.427, 0.851]: listen to (a person), sit, watch (a person); [0.749, 0.352, 0.931, 0.895]: watch (a person), stand."}, {"from": "human", "timestamps": 1341.0, "value": "For the person at bbox [0.228, 0.125, 0.982, 0.893], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.228, 0.125, 0.982, 0.893]: sit, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "At 1340.0s, [0.234, 0.119, 0.984, 0.901]: sit, carry/hold (an object), read.", "From 1338.0s to 1339.0s, [0.243, 0.099, 0.985, 0.891]->[0.245, 0.105, 0.981, 0.89]: sit, read.", "At 1337.0s, [0.242, 0.127, 1.0, 0.876]: sit, carry/hold (an object), read.", "At 1336.0s, [0.574, 0.216, 0.991, 0.907]: sit, catch (an object), listen to (a person)."]}]}, {"video": "rk8Xm0EAOWs.mkv", "clip": [1293, 1370], "conversations": [{"from": "human", "timestamps": 1323.0, "value": "What actions is the person in bbox [0.592, 0.123, 0.995, 0.903] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At now, [0.592, 0.123, 0.995, 0.903]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1321.0s to 1322.0s, [0.62, 0.239, 1.0, 0.903]->[0.498, 0.123, 0.994, 0.885]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1318.0s to 1320.0s, [0.487, 0.107, 0.98, 0.905]->[0.515, 0.108, 0.982, 0.881]->[0.573, 0.168, 1.0, 0.894]: sit, talk to (e.g., self, a person, a group).", "At 1317.0s, [0.627, 0.127, 1.0, 0.892]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1315.0s to 1316.0s, [0.424, 0.165, 0.986, 0.806]->[0.481, 0.262, 1.0, 0.818]: sit, listen to (a person).", "At 1314.0s, [0.31, 0.117, 0.974, 0.785]: sit, touch (an object), listen to (a person).", "At 1313.0s, [0.342, 0.117, 0.977, 0.795]: sit, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1311.0s to 1312.0s, [0.38, 0.138, 0.981, 0.8]->[0.399, 0.125, 0.983, 0.802]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1308.0s to 1310.0s, [0.48, 0.223, 1.0, 0.809]->[0.469, 0.181, 0.979, 0.804]->[0.456, 0.156, 0.98, 0.816]: sit, listen to (a person).", "At 1307.0s, [0.497, 0.275, 0.992, 0.8]: sit, touch (an object), listen to (a person).", "At 1306.0s, [0.573, 0.248, 0.995, 0.807]: sit, grab (a person), listen to (a person).", "From 1304.0s to 1305.0s, [0.482, 0.261, 1.0, 0.805]->[0.568, 0.25, 0.996, 0.804]: sit, listen to (a person)."]}, {"from": "human", "timestamps": 1370.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.349, 0.133, 0.503, 0.844]: carry/hold (an object), bend/bow (at the waist)."}]}, {"video": "rk8Xm0EAOWs.mkv", "clip": [1354, 1398], "conversations": [{"from": "human", "timestamps": 1384.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.371, 0.128, 0.884, 0.911]: stand."}, {"from": "human", "timestamps": 1398.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.405, 0.155, 0.835, 0.903]?"}, {"from": "gpt", "value": ["At now, [0.405, 0.155, 0.835, 0.903]: stand.", "From 1393.0s to 1397.0s, [0.371, 0.137, 0.864, 0.901]->[0.369, 0.155, 0.846, 0.907]->[0.371, 0.144, 0.849, 0.918]->[0.378, 0.15, 0.846, 0.912]->[0.408, 0.163, 0.858, 0.892]: stand.", "At 1392.0s, [0.399, 
0.145, 0.909, 0.916]: stand, talk to (e.g., self, a person, a group).", "From 1381.0s to 1391.0s, [0.329, 0.133, 0.855, 0.917]->[0.37, 0.132, 0.905, 0.923]->[0.366, 0.141, 0.907, 0.91]->[0.371, 0.128, 0.884, 0.911]->[0.382, 0.133, 0.91, 0.905]->[0.373, 0.14, 0.904, 0.912]->[0.398, 0.156, 0.922, 0.921]->[0.388, 0.148, 0.903, 0.903]->[0.384, 0.156, 0.897, 0.926]->[0.369, 0.163, 0.869, 0.913]->[0.366, 0.144, 0.864, 0.916]: stand.", "From 1378.0s to 1380.0s, [0.317, 0.172, 0.892, 0.911]->[0.292, 0.141, 0.933, 0.879]->[0.362, 0.102, 1.0, 0.891]: stand, carry/hold (an object).", "At 1377.0s, [0.368, 0.139, 0.888, 0.896]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1376.0s, [0.365, 0.124, 0.897, 0.907]: stand, carry/hold (an object).", "At 1375.0s, [0.355, 0.136, 0.75, 0.891]: stand, carry/hold (an object), listen to (a person).", "At 1374.0s, [0.284, 0.121, 0.762, 0.902]: stand, carry/hold (an object).", "From 1371.0s to 1373.0s, [0.286, 0.106, 0.66, 0.897]->[0.404, 0.16, 0.716, 0.898]->[0.417, 0.185, 0.822, 0.901]: stand, carry/hold (an object), close (e.g., a door, a box).", "At 1370.0s, [0.349, 0.133, 0.503, 0.844]: bend/bow (at the waist), carry/hold (an object).", "At 1369.0s, [0.343, 0.143, 0.492, 0.603]: stand."]}]}, {"video": "rk8Xm0EAOWs.mkv", "clip": [1491, 1528], "conversations": [{"from": "human", "timestamps": 1521.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.406, 0.292, 0.53, 0.759]: open (e.g., a window, a car door), run/jog."}, {"from": "human", "timestamps": 1528.0, "value": "For the person at bbox [0.372, 0.131, 0.541, 0.9], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At now, [0.372, 0.131, 0.541, 0.9]: stand, carry/hold (an object), open (e.g., a window, a car door).", "At 1527.0s, [0.36, 0.152, 0.498, 0.628]: stand, open (e.g., a window, a car door)."]}]}, {"video": "rk8Xm0EAOWs.mkv", "clip": [1494, 1569], "conversations": [{"from": "human", "timestamps": 1524.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.123, 0.178, 0.371, 0.737]: carry/hold (an object), smoke, watch (a person), walk; [0.521, 0.251, 0.792, 0.906]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1569.0, "value": "For the person at bbox [0.512, 0.088, 1.0, 0.932], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At now, [0.512, 0.088, 1.0, 0.932]: stand, listen to (a person), watch (a person).", "From 1566.0s to 1568.0s, [0.517, 0.09, 1.0, 0.901]->[0.525, 0.081, 0.998, 0.901]->[0.538, 0.079, 1.0, 0.914]: stand, listen to (a person), watch (a person).", "From 1560.0s to 1565.0s, [0.683, 0.14, 1.0, 0.906]->[0.626, 0.136, 0.972, 0.916]->[0.559, 0.159, 0.984, 0.912]->[0.517, 0.136, 0.996, 0.895]->[0.562, 0.124, 1.0, 0.9]->[0.519, 0.09, 1.0, 0.906]: stand, listen to (a person).", "At 1559.0s, [0.672, 0.147, 1.0, 0.893]: stand, listen to (a person), watch (a person).", "At 1558.0s, [0.688, 0.141, 1.0, 0.878]: stand, listen to (a person).", "At 1557.0s, [0.683, 0.147, 1.0, 0.869]: stand, listen to (a person), watch (a person).", "At 1556.0s, [0.685, 0.146, 1.0, 0.89]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1555.0s, [0.663, 0.121, 0.992, 0.864]: stand, talk to (e.g., self, a person, a group).", "At 1554.0s, [0.701, 0.149, 0.998, 0.886]: stand, listen to (a person).", "From 
1547.0s to 1553.0s, [0.69, 0.158, 1.0, 0.888]->[0.691, 0.158, 1.0, 0.884]->[0.691, 0.16, 1.0, 0.89]->[0.684, 0.138, 1.0, 0.878]->[0.685, 0.15, 1.0, 0.9]->[0.689, 0.15, 1.0, 0.874]->[0.694, 0.177, 1.0, 0.876]: stand, listen to (a person), watch (a person).", "At 1546.0s, [0.69, 0.161, 1.0, 0.891]: stand, watch (a person).", "From 1543.0s to 1545.0s, [0.688, 0.141, 1.0, 0.879]->[0.684, 0.137, 1.0, 0.9]->[0.689, 0.147, 1.0, 0.904]: stand, listen to (a person), watch (a person).", "At 1542.0s, [0.684, 0.13, 1.0, 0.885]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1541.0s, [0.693, 0.168, 1.0, 0.893]: stand, listen to (a person), watch (a person).", "At 1540.0s, [0.657, 0.175, 1.0, 0.873]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1539.0s, [0.641, 0.137, 1.0, 0.871]: stand, carry/hold (an object), give/serve (an object) to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1538.0s, [0.67, 0.145, 0.993, 0.898]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1537.0s, [0.654, 0.127, 0.992, 0.879]: stand, carry/hold (an object), listen to (a person).", "From 1535.0s to 1536.0s, [0.661, 0.149, 1.0, 0.913]->[0.659, 0.143, 0.991, 0.883]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1533.0s to 1534.0s, [0.662, 0.164, 0.993, 0.857]->[0.628, 0.127, 1.0, 0.905]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1531.0s, [0.454, 0.11, 0.904, 0.901]: carry/hold (an object), open (e.g., a window, a car door).", "From 1529.0s to 1530.0s, [0.379, 0.174, 0.674, 0.897]->[0.423, 0.114, 0.706, 0.925]: walk, carry/hold (an object), open (e.g., a window, a car door).", "At 1528.0s, [0.372, 0.131, 0.541, 0.9]: stand, carry/hold (an object), open (e.g., a window, a car door).", "At 1527.0s, [0.36, 0.152, 0.498, 0.628]: stand, open (e.g., a window, a car door)."]}]}, {"video": "rk8Xm0EAOWs.mkv", "clip": [1597, 1660], "conversations": [{"from": "human", "timestamps": 1627.0, "value": "What actions is the person in bbox [0.561, 0.165, 0.947, 0.898] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At now, [0.561, 0.165, 0.947, 0.898]: stand, listen to (a person).", "From 1625.0s to 1626.0s, [0.581, 0.119, 0.926, 0.917]->[0.552, 0.151, 0.89, 0.906]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1660.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.116, 0.128, 0.464, 0.907]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.543, 0.306, 0.926, 0.905]: listen to (a person), sit, watch (a person)."}]}, {"video": "rk8Xm0EAOWs.mkv", "clip": [1600, 1734], "conversations": [{"from": "human", "timestamps": 1630.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.141, 0.216, 0.51, 0.904]: listen to (a person), stand, watch (a person); [0.423, 0.12, 0.97, 0.91]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1734.0, "value": "For the person at bbox [0.324, 0.123, 0.953, 0.9], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At now, [0.324, 0.123, 0.953, 0.9]: stand, talk to (e.g., self, a person, a group).", "From 1732.0s to 1733.0s, [0.341, 0.121, 0.962, 0.898]->[0.332, 0.129, 
0.949, 0.897]: stand, talk to (e.g., self, a person, a group).", "At 1731.0s, [0.342, 0.131, 0.948, 0.899]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1730.0s, [0.339, 0.127, 0.942, 0.899]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "rk8Xm0EAOWs.mkv", "clip": [1659, 1697], "conversations": [{"from": "human", "timestamps": 1689.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.123, 0.289, 0.181, 0.534]: watch (a person), stand; [0.132, 0.419, 0.182, 0.676]: watch (a person), stand, walk; [0.389, 0.629, 0.471, 0.902]: carry/hold (an object), stand; [0.917, 0.445, 0.976, 0.694]: watch (a person), stand."}, {"from": "human", "timestamps": 1697.0, "value": "What actions is the person in bbox [0.398, 0.137, 0.74, 0.911] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.398, 0.137, 0.74, 0.911]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1696.0s, [0.382, 0.153, 0.72, 0.913]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1695.0s, [0.385, 0.185, 0.731, 0.905]: walk, talk to (e.g., self, a person, a group).", "At 1694.0s, [0.105, 0.157, 0.819, 0.902]: walk, listen to (a person).", "From 1692.0s to 1693.0s, [0.286, 0.15, 0.514, 0.886]->[0.211, 0.13, 0.505, 0.9]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "rk8Xm0EAOWs.mkv", "clip": [1713, 1757], "conversations": [{"from": "human", "timestamps": 1743.0, "value": "What actions did the person at bbox [0.451, 0.226, 0.849, 0.908] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.451, 0.226, 0.849, 0.908]: bend/bow (at the waist), open (e.g., a window, a car door), pull (an object).", "At 1742.0s, [0.416, 0.219, 0.846, 0.92]: bend/bow (at the waist), carry/hold (an object), touch (an object).", "At 1741.0s, [0.379, 0.22, 0.835, 0.905]: bend/bow (at the waist).", "At 1740.0s, [0.339, 0.153, 0.707, 0.899]: talk to (e.g., self, a person, a group).", "From 1737.0s to 1739.0s, [0.276, 0.185, 0.486, 0.901]->[0.271, 0.194, 0.468, 0.85]->[0.423, 0.143, 0.757, 0.88]: walk, talk to (e.g., self, a person, a group).", "From 1735.0s to 1736.0s, [0.173, 0.119, 0.835, 0.904]->[0.178, 0.127, 0.835, 0.892]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1757.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.178, 0.208, 0.465, 0.909]: carry/hold (an object), stand, watch (a person); [0.518, 0.174, 0.955, 0.894]: talk to (e.g., self, a person, a group), bend/bow (at the waist)."}]}, {"video": "sADELCyj10I.mkv", "clip": [886, 954], "conversations": [{"from": "human", "timestamps": 916.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.337, 0.151, 0.536, 0.783]: listen to (a person), walk; [0.49, 0.192, 0.722, 0.859]: watch (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 954.0, "value": "For the person at bbox [0.005, 0.133, 0.809, 0.87], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.005, 0.133, 0.809, 0.87]: stand, talk to (e.g., self, a person, a group).", "From 950.0s to 953.0s, [0.013, 0.116, 0.818, 0.877]->[0.009, 0.12, 0.809, 0.883]->[0.0, 
0.122, 0.784, 0.888]->[0.0, 0.126, 0.773, 0.874]: stand, talk to (e.g., self, a person, a group).", "At 949.0s, [0.01, 0.133, 0.833, 0.874]: stand, watch (a person).", "At 948.0s, [0.012, 0.123, 0.811, 0.882]: stand, listen to (a person)."]}]}, {"video": "sADELCyj10I.mkv", "clip": [890, 987], "conversations": [{"from": "human", "timestamps": 920.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.25, 0.142, 0.387, 0.783]: walk; [0.306, 0.196, 0.552, 0.812]: talk to (e.g., self, a person, a group), walk; [0.564, 0.24, 0.884, 0.866]: listen to (a person), walk."}, {"from": "human", "timestamps": 987.0, "value": "Describe the actions of the person at bbox [0.739, 0.204, 0.998, 0.855] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.739, 0.204, 0.998, 0.855]: stand, listen to (a person), watch (a person).", "At 986.0s, [0.732, 0.207, 1.0, 0.856]: stand, touch (an object), listen to (a person).", "From 983.0s to 985.0s, [0.74, 0.21, 1.0, 0.852]->[0.744, 0.21, 0.994, 0.834]->[0.74, 0.207, 0.996, 0.846]: stand, listen to (a person).", "At 982.0s, [0.749, 0.213, 0.993, 0.832]: stand.", "From 977.0s to 981.0s, [0.769, 0.199, 1.0, 0.85]->[0.742, 0.21, 1.0, 0.832]->[0.746, 0.181, 1.0, 0.851]->[0.737, 0.205, 0.999, 0.843]->[0.738, 0.207, 0.996, 0.824]: stand, listen to (a person)."]}]}, {"video": "sADELCyj10I.mkv", "clip": [955, 996], "conversations": [{"from": "human", "timestamps": 985.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.263, 0.166, 0.469, 0.876]: listen to (a person), stand; [0.453, 0.215, 0.743, 0.847]: listen to (a person), stand, touch (an object), watch (a person); [0.74, 0.207, 0.996, 0.846]: listen to (a person), stand."}, {"from": "human", "timestamps": 996.0, "value": "For the person at bbox [0.235, 0.169, 0.47, 0.869], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.235, 0.169, 0.47, 0.869]: stand, listen to (a person), watch (a person).", "At 995.0s, [0.256, 0.172, 0.471, 0.865]: bend/bow (at the waist), listen to (a person), watch (a person).", "At 994.0s, [0.231, 0.165, 0.468, 0.862]: stand, watch (a person).", "From 992.0s to 993.0s, [0.108, 0.169, 0.494, 0.855]->[0.206, 0.176, 0.463, 0.863]: stand, listen to (a person), watch (a person)."]}]}, {"video": "sADELCyj10I.mkv", "clip": [1078, 1131], "conversations": [{"from": "human", "timestamps": 1108.0, "value": "What actions did the person at bbox [0.447, 0.337, 0.665, 0.889] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.447, 0.337, 0.665, 0.889]: listen to (a person).", "At 1107.0s, [0.202, 0.173, 0.427, 0.875]: walk, talk to (e.g., self, a person, a group).", "From 1105.0s to 1106.0s, [0.126, 0.179, 0.266, 0.578]->[0.079, 0.154, 0.256, 0.81]: walk."]}, {"from": "human", "timestamps": 1131.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.007, 0.139, 0.324, 0.897]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.294, 0.278, 0.435, 0.694]: listen to (a person), stand, watch (a person); [0.435, 0.26, 0.565, 0.655]: listen to (a person), stand, watch (a person); [0.454, 0.384, 0.702, 0.884]: listen to (a person), sit, watch (a person); [0.692, 0.377, 0.956, 0.867]: watch (a person), sit, 
talk to (e.g., self, a person, a group)."}]}, {"video": "sADELCyj10I.mkv", "clip": [1154, 1216], "conversations": [{"from": "human", "timestamps": 1184.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.062, 0.187, 0.39, 0.863]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.348, 0.201, 0.631, 0.891]: listen to (a person), sit, watch (a person); [0.608, 0.162, 0.899, 0.88]: listen to (a person), sit."}, {"from": "human", "timestamps": 1216.0, "value": "For the person at bbox [0.187, 0.131, 0.907, 0.841], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.187, 0.131, 0.907, 0.841]: stand, talk to (e.g., self, a person, a group).", "At 1215.0s, [0.059, 0.176, 0.822, 0.88]: stand, talk to (e.g., self, a person, a group).", "At 1214.0s, [0.22, 0.121, 0.817, 0.841]: sit, talk to (e.g., self, a person, a group).", "At 1213.0s, [0.212, 0.132, 0.827, 0.857]: stand, talk to (e.g., self, a person, a group).", "At 1212.0s, [0.199, 0.187, 0.861, 0.846]: stand."]}]}, {"video": "sADELCyj10I.mkv", "clip": [1234, 1274], "conversations": [{"from": "human", "timestamps": 1264.0, "value": "For the person at bbox [0.535, 0.129, 0.883, 0.851], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.535, 0.129, 0.883, 0.851]: sit, watch (a person).", "At 1263.0s, [0.531, 0.146, 0.868, 0.864]: sit, watch (a person).", "From 1260.0s to 1262.0s, [0.631, 0.382, 0.901, 0.877]->[0.558, 0.248, 0.835, 0.851]->[0.544, 0.178, 0.869, 0.866]: sit.", "From 1252.0s to 1259.0s, [0.662, 0.409, 0.929, 0.876]->[0.66, 0.422, 0.939, 0.877]->[0.67, 0.44, 0.933, 0.879]->[0.685, 0.459, 0.938, 0.88]->[0.682, 0.452, 0.938, 0.884]->[0.681, 0.451, 0.948, 0.878]->[0.685, 0.459, 0.95, 0.876]->[0.685, 0.461, 0.951, 0.876]: sit, listen to (a person).", "At 1251.0s, [0.649, 0.376, 0.919, 0.873]: sit, listen to (a person), watch (a person).", "From 1249.0s to 1250.0s, [0.577, 0.167, 0.932, 0.876]->[0.594, 0.316, 0.879, 0.879]: sit, listen to (a person).", "From 1246.0s to 1248.0s, [0.573, 0.161, 0.922, 0.874]->[0.576, 0.157, 0.944, 0.882]->[0.577, 0.153, 0.933, 0.882]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1274.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.2, 0.263, 0.64]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.363, 0.198, 1.0, 0.882]: sit, watch (a person), listen to (a person), carry/hold (an object), talk to (e.g., self, a person, a group)."}]}, {"video": "sADELCyj10I.mkv", "clip": [1372, 1405], "conversations": [{"from": "human", "timestamps": 1402.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.169, 0.154, 0.482, 0.867]: listen to (a person), sit, watch (a person); [0.583, 0.204, 0.955, 0.869]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1405.0, "value": "What actions did the person at bbox [0.579, 0.217, 0.939, 0.877] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.579, 0.217, 0.939, 0.877]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1402.0s to 1404.0s, [0.583, 0.204, 0.955, 0.869]->[0.579, 0.219, 0.939, 
0.867]->[0.579, 0.217, 0.939, 0.873]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "sADELCyj10I.mkv", "clip": [1419, 1467], "conversations": [{"from": "human", "timestamps": 1449.0, "value": "For the person at bbox [0.385, 0.209, 0.979, 0.851], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.385, 0.209, 0.979, 0.851]: bend/bow (at the waist), kiss (a person), watch (a person).", "At 1448.0s, [0.301, 0.18, 0.983, 0.865]: bend/bow (at the waist), watch (a person).", "At 1447.0s, [0.456, 0.121, 1.0, 0.852]: stand, watch (a person).", "At 1446.0s, [0.425, 0.123, 1.0, 0.885]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1445.0s, [0.271, 0.143, 0.893, 0.859]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1467.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.103, 0.218, 0.301, 0.872]: listen to (a person), stand, watch (a person); [0.532, 0.306, 0.716, 0.759]: listen to (a person), sit, watch (a person); [0.7, 0.186, 0.887, 0.877]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "sADELCyj10I.mkv", "clip": [1488, 1519], "conversations": [{"from": "human", "timestamps": 1518.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.108, 0.163, 0.651, 0.867]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.6, 0.127, 1.0, 0.886]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1519.0, "value": "What actions did the person at bbox [0.108, 0.152, 0.627, 0.877] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.108, 0.152, 0.627, 0.877]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1516.0s to 1518.0s, [0.134, 0.149, 0.584, 0.874]->[0.099, 0.17, 0.667, 0.875]->[0.108, 0.163, 0.651, 0.867]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1515.0s, [0.065, 0.174, 0.696, 0.874]: stand, talk to (e.g., self, a person, a group).", "From 1513.0s to 1514.0s, [0.532, 0.165, 0.979, 0.882]->[0.18, 0.142, 0.66, 0.882]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1512.0s, [0.554, 0.182, 1.0, 0.873]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1510.0s to 1511.0s, [0.534, 0.174, 0.977, 0.868]->[0.536, 0.183, 0.981, 0.88]: stand, listen to (a person), watch (a person).", "At 1509.0s, [0.508, 0.163, 0.958, 0.889]: stand, watch (a person).", "From 1502.0s to 1508.0s, [0.492, 0.166, 0.986, 0.893]->[0.571, 0.179, 1.0, 0.873]->[0.203, 0.135, 0.689, 0.864]->[0.206, 0.133, 0.661, 0.86]->[0.205, 0.133, 0.671, 0.878]->[0.194, 0.145, 0.704, 0.876]->[0.526, 0.175, 0.971, 0.863]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1501.0s, [0.545, 0.182, 0.981, 0.87]: stand, listen to (a person), watch (a person).", "From 1495.0s to 1500.0s, [0.215, 0.146, 0.713, 0.855]->[0.173, 0.165, 0.698, 0.869]->[0.16, 0.16, 0.694, 0.867]->[0.139, 0.149, 0.688, 0.875]->[0.175, 0.147, 0.705, 0.878]->[0.185, 0.148, 0.716, 0.879]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1494.0s, [0.19, 0.147, 0.712, 0.871]: stand, talk to (e.g., self, a person, a group).", "From 1487.0s to 1493.0s, [0.001, 0.138, 
0.508, 0.87]->[0.0, 0.169, 0.486, 0.875]->[0.05, 0.167, 0.578, 0.88]->[0.053, 0.149, 0.567, 0.868]->[0.0, 0.156, 0.523, 0.878]->[0.004, 0.136, 0.463, 0.882]->[0.0, 0.165, 0.417, 0.877]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1482.0s to 1486.0s, [0.653, 0.18, 1.0, 0.876]->[0.661, 0.176, 0.998, 0.877]->[0.669, 0.193, 0.997, 0.866]->[0.653, 0.189, 0.996, 0.869]->[0.646, 0.184, 0.999, 0.869]: stand, listen to (a person), watch (a person)."]}]}, {"video": "sADELCyj10I.mkv", "clip": [1494, 1602], "conversations": [{"from": "human", "timestamps": 1524.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.111, 0.176, 0.677, 0.872]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.604, 0.114, 0.995, 0.892]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1602.0, "value": "What actions is the person in bbox [0.509, 0.178, 0.826, 0.878] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.509, 0.178, 0.826, 0.878]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1600.0s to 1601.0s, [0.511, 0.167, 0.814, 0.873]->[0.508, 0.186, 0.832, 0.873]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1599.0s, [0.533, 0.204, 0.784, 0.881]: stand, listen to (a person).", "From 1595.0s to 1598.0s, [0.462, 0.189, 0.764, 0.874]->[0.485, 0.182, 0.768, 0.878]->[0.482, 0.182, 0.831, 0.87]->[0.495, 0.179, 0.806, 0.878]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1594.0s, [0.478, 0.191, 0.772, 0.873]: stand, talk to (e.g., self, a person, a group).", "From 1592.0s to 1593.0s, [0.418, 0.176, 0.766, 0.867]->[0.463, 0.178, 0.777, 0.873]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1591.0s, [0.402, 0.19, 0.748, 0.873]: stand, kiss (a person), watch (a person).", "At 1590.0s, [0.371, 0.193, 0.737, 0.866]: stand, kiss (a person).", "At 1589.0s, [0.365, 0.19, 0.735, 0.867]: stand, grab (a person), kiss (a person), watch (a person).", "At 1588.0s, [0.337, 0.184, 0.746, 0.866]: stand, kiss (a person), watch (a person).", "From 1586.0s to 1587.0s, [0.488, 0.202, 0.778, 0.88]->[0.373, 0.198, 0.752, 0.887]: stand, listen to (a person), watch (a person).", "From 1580.0s to 1585.0s, [0.488, 0.188, 0.789, 0.87]->[0.473, 0.175, 0.777, 0.878]->[0.498, 0.186, 0.794, 0.884]->[0.478, 0.185, 0.794, 0.881]->[0.499, 0.188, 0.803, 0.875]->[0.498, 0.188, 0.802, 0.878]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1579.0s, [0.468, 0.188, 0.778, 0.87]: stand, talk to (e.g., self, a person, a group).", "From 1577.0s to 1578.0s, [0.477, 0.218, 0.784, 0.878]->[0.467, 0.216, 0.784, 0.888]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1575.0s to 1576.0s, [0.568, 0.218, 0.843, 0.876]->[0.523, 0.218, 0.832, 0.879]: stand, talk to (e.g., self, a person, a group).", "At 1574.0s, [0.512, 0.208, 0.829, 0.875]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1573.0s, [0.51, 0.214, 0.766, 0.867]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1572.0s, [0.459, 0.24, 0.881, 0.867]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1571.0s, [0.351, 0.237, 0.831, 0.878]: walk, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": 
"sADELCyj10I.mkv", "clip": [1559, 1611], "conversations": [{"from": "human", "timestamps": 1589.0, "value": "For the person at bbox [0.365, 0.19, 0.735, 0.867], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.365, 0.19, 0.735, 0.867]: stand, grab (a person), kiss (a person), watch (a person).", "At 1588.0s, [0.337, 0.184, 0.746, 0.866]: stand, kiss (a person), watch (a person).", "From 1586.0s to 1587.0s, [0.488, 0.202, 0.778, 0.88]->[0.373, 0.198, 0.752, 0.887]: stand, listen to (a person), watch (a person).", "From 1580.0s to 1585.0s, [0.488, 0.188, 0.789, 0.87]->[0.473, 0.175, 0.777, 0.878]->[0.498, 0.186, 0.794, 0.884]->[0.478, 0.185, 0.794, 0.881]->[0.499, 0.188, 0.803, 0.875]->[0.498, 0.188, 0.802, 0.878]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1579.0s, [0.468, 0.188, 0.778, 0.87]: stand, talk to (e.g., self, a person, a group).", "From 1577.0s to 1578.0s, [0.477, 0.218, 0.784, 0.878]->[0.467, 0.216, 0.784, 0.888]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1575.0s to 1576.0s, [0.568, 0.218, 0.843, 0.876]->[0.523, 0.218, 0.832, 0.879]: stand, talk to (e.g., self, a person, a group).", "At 1574.0s, [0.512, 0.208, 0.829, 0.875]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1573.0s, [0.51, 0.214, 0.766, 0.867]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1572.0s, [0.459, 0.24, 0.881, 0.867]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1571.0s, [0.351, 0.237, 0.831, 0.878]: walk, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1611.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.299, 0.235, 0.48, 0.869]: walk; [0.435, 0.203, 0.655, 0.877]: walk."}]}, {"video": "sADELCyj10I.mkv", "clip": [1591, 1635], "conversations": [{"from": "human", "timestamps": 1621.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.171, 0.502, 0.281, 0.733]: watch (a person), walk; [0.295, 0.535, 0.375, 0.8]: walk."}, {"from": "human", "timestamps": 1635.0, "value": "For the person at bbox [0.081, 0.107, 0.372, 0.911], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.081, 0.107, 0.372, 0.911]: stand, listen to (a person).", "At 1634.0s, [0.069, 0.142, 0.405, 0.878]: stand, listen to (a person).", "From 1632.0s to 1633.0s, [0.029, 0.124, 0.352, 0.895]->[0.058, 0.122, 0.367, 0.907]: stand, talk to (e.g., self, a person, a group).", "From 1629.0s to 1631.0s, [0.075, 0.13, 0.377, 0.883]->[0.066, 0.121, 0.373, 0.884]->[0.069, 0.129, 0.362, 0.91]: stand, listen to (a person)."]}]}, {"video": "sADELCyj10I.mkv", "clip": [1615, 1689], "conversations": [{"from": "human", "timestamps": 1645.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.073, 0.155, 0.91, 0.889]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1689.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.193, 0.32, 0.957, 0.847]?"}, {"from": "gpt", "value": ["In the current frame, [0.193, 0.32, 0.957, 0.847]: lie/sleep.", "At 1688.0s, [0.194, 0.392, 0.697, 0.766]: lie/sleep."]}]}, {"video": 
"sADELCyj10I.mkv", "clip": [1658, 1703], "conversations": [{"from": "human", "timestamps": 1688.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.194, 0.392, 0.697, 0.766]: lie/sleep."}, {"from": "human", "timestamps": 1703.0, "value": "Describe the actions of the person at bbox [0.172, 0.361, 0.65, 0.689] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.172, 0.361, 0.65, 0.689]: talk to (e.g., self, a person, a group), lie/sleep.", "At 1702.0s, [0.176, 0.36, 0.615, 0.682]: talk to (e.g., self, a person, a group), lie/sleep.", "From 1698.0s to 1700.0s, [0.143, 0.214, 0.921, 0.84]->[0.164, 0.334, 0.56, 0.722]->[0.217, 0.398, 0.806, 0.805]: talk to (e.g., self, a person, a group), lie/sleep."]}]}, {"video": "sADELCyj10I.mkv", "clip": [1741, 1781], "conversations": [{"from": "human", "timestamps": 1771.0, "value": "Describe the actions of the person at bbox [0.673, 0.268, 0.841, 0.829] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.673, 0.268, 0.841, 0.829]: stand, watch (a person).", "At 1770.0s, [0.706, 0.269, 0.85, 0.894]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1781.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.39, 0.115, 0.889, 0.879]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "sUVhd0YTKgw.mkv", "clip": [884, 934], "conversations": [{"from": "human", "timestamps": 914.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.061, 0.026, 0.951, 1.0]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 934.0, "value": "What actions did the person at bbox [0.729, 0.416, 0.896, 0.862] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.729, 0.416, 0.896, 0.862]: stand, watch (a person).", "At 933.0s, [0.621, 0.422, 0.795, 0.834]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 932.0s, [0.551, 0.405, 0.717, 0.764]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "sUVhd0YTKgw.mkv", "clip": [924, 1000], "conversations": [{"from": "human", "timestamps": 954.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.244, 0.421, 0.33, 0.737]: carry/hold (an object), walk; [0.577, 0.512, 0.71, 0.894]: walk; [0.789, 0.449, 0.902, 0.608]: ride (e.g., a bike, a car, a horse), sit."}, {"from": "human", "timestamps": 1000.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.151, 0.091, 0.975, 0.998]?"}, {"from": "gpt", "value": ["In the current frame, [0.151, 0.091, 0.975, 0.998]: stand, listen to (a person), watch (a person).", "From 998.0s to 999.0s, [0.161, 0.097, 0.99, 0.982]->[0.153, 0.089, 0.974, 0.984]: stand, watch (a person).", "At 997.0s, [0.239, 0.154, 0.974, 0.995]: sit, watch (a person).", "From 995.0s to 996.0s, [0.433, 0.499, 0.62, 0.741]->[0.451, 0.506, 0.609, 0.746]: sit, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "sUVhd0YTKgw.mkv", "clip": [977, 1016], "conversations": [{"from": "human", "timestamps": 
1007.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.096, 0.197, 0.398, 0.984]: stand; [0.298, 0.148, 0.822, 0.987]: watch (a person), stand."}, {"from": "human", "timestamps": 1016.0, "value": "What actions is the person in bbox [0.385, 0.343, 0.739, 0.989] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.385, 0.343, 0.739, 0.989]: walk.", "From 1014.0s to 1015.0s, [0.34, 0.25, 0.712, 0.994]->[0.378, 0.323, 0.704, 0.989]: walk."]}]}, {"video": "sUVhd0YTKgw.mkv", "clip": [994, 1110], "conversations": [{"from": "human", "timestamps": 1024.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.331, 0.166, 0.97]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.338, 0.346, 0.454, 0.972]: watch (a person), stand; [0.386, 0.211, 0.747, 0.999]: listen to (a person), stand; [0.663, 0.26, 0.832, 0.97]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1110.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.174, 0.613, 0.25, 0.792]?"}, {"from": "gpt", "value": ["In the current frame, [0.174, 0.613, 0.25, 0.792]: sit, listen to (a person), watch (a person).", "At 1109.0s, [0.174, 0.615, 0.25, 0.797]: sit, watch (a person)."]}]}, {"video": "sUVhd0YTKgw.mkv", "clip": [1053, 1099], "conversations": [{"from": "human", "timestamps": 1083.0, "value": "For the person at bbox [0.08, 0.467, 0.157, 0.818], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.08, 0.467, 0.157, 0.818]: stand, listen to (a person), watch (a person).", "At 1082.0s, [0.061, 0.469, 0.133, 0.811]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1080.0s to 1081.0s, [0.054, 0.463, 0.124, 0.814]->[0.053, 0.465, 0.123, 0.809]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1099.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.006, 0.0, 0.925, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "sUVhd0YTKgw.mkv", "clip": [1117, 1195], "conversations": [{"from": "human", "timestamps": 1147.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.047, 0.455, 0.305, 0.956]: listen to (a person), sit, watch (a person); [0.23, 0.408, 0.3, 0.736]: listen to (a person), stand, watch (a person); [0.301, 0.433, 0.383, 0.732]: listen to (a person), stand, watch (a person); [0.369, 0.449, 0.459, 0.743]: watch (a person), talk to (e.g., self, a person, a group), bend/bow (at the waist); [0.601, 0.202, 0.957, 0.994]: listen to (a person), bend/bow (at the waist)."}, {"from": "human", "timestamps": 1195.0, "value": "Describe the actions of the person at bbox [0.063, 0.376, 0.15, 0.635] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.063, 0.376, 0.15, 0.635]: stand, watch (a person).", "At 1194.0s, [0.066, 0.376, 0.148, 0.954]: stand, watch (a person)."]}]}, {"video": "sUVhd0YTKgw.mkv", "clip": [1141, 1229], "conversations": [{"from": "human", "timestamps": 1171.0, "value": "What actions is each person performing, and where are they positioned in the current 
frame?"}, {"from": "gpt", "value": "In the current frame, [0.409, 0.188, 0.79, 0.736]: carry/hold (an object), stand, hit (an object)."}, {"from": "human", "timestamps": 1229.0, "value": "What actions did the person at bbox [0.0, 0.076, 0.544, 0.988] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.076, 0.544, 0.988]: stand.", "At 1228.0s, [0.0, 0.08, 0.548, 0.986]: stand, watch (a person)."]}]}, {"video": "sUVhd0YTKgw.mkv", "clip": [1268, 1316], "conversations": [{"from": "human", "timestamps": 1298.0, "value": "For the person at bbox [0.429, 0.05, 1.0, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.429, 0.05, 1.0, 1.0]: stand, carry/hold (an object).", "At 1297.0s, [0.453, 0.016, 0.962, 1.0]: stand.", "At 1296.0s, [0.025, 0.106, 0.415, 0.991]: stand, grab (a person), watch (a person).", "At 1295.0s, [0.004, 0.072, 0.779, 0.968]: stand, watch (a person).", "At 1294.0s, [0.0, 0.023, 0.641, 1.0]: stand, kiss (a person).", "At 1293.0s, [0.0, 0.063, 0.549, 0.99]: stand, watch (a person).", "At 1292.0s, [0.002, 0.092, 0.527, 0.986]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1291.0s, [0.0, 0.07, 0.483, 0.981]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1290.0s, [0.0, 0.081, 0.473, 0.981]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1289.0s, [0.0, 0.082, 0.472, 0.974]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1285.0s to 1288.0s, [0.006, 0.094, 0.351, 0.986]->[0.001, 0.101, 0.381, 0.996]->[0.0, 0.087, 0.449, 0.985]->[0.0, 0.083, 0.441, 0.997]: stand, listen to (a person), watch (a person).", "From 1282.0s to 1284.0s, [0.074, 0.043, 0.917, 1.0]->[0.277, 0.044, 0.981, 1.0]->[0.009, 0.079, 0.828, 0.985]: stand, open (e.g., a window, a car door).", "At 1281.0s, [0.516, 0.036, 0.938, 0.989]: walk.", "From 1279.0s to 1280.0s, [0.075, 0.037, 0.904, 0.999]->[0.315, 0.06, 0.975, 0.996]: walk, talk to (e.g., self, a person, a group).", "From 1277.0s to 1278.0s, [0.017, 0.103, 0.621, 0.98]->[0.004, 0.091, 0.548, 0.991]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1276.0s, [0.023, 0.092, 0.573, 0.976]: stand, watch (a person).", "From 1270.0s to 1275.0s, [0.001, 0.059, 0.517, 0.973]->[0.014, 0.113, 0.573, 0.974]->[0.036, 0.103, 0.605, 0.979]->[0.017, 0.112, 0.573, 0.972]->[0.018, 0.126, 0.573, 0.981]->[0.012, 0.127, 0.58, 0.986]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1269.0s, [0.018, 0.122, 0.564, 0.976]: walk, talk to (e.g., self, a person, a group).", "At 1268.0s, [0.064, 0.169, 0.563, 1.0]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1267.0s, [0.114, 0.201, 0.546, 0.99]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1266.0s, [0.181, 0.203, 0.55, 0.981]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1265.0s, [0.218, 0.235, 0.526, 0.991]: walk, talk to (e.g., self, a person, a group).", "From 1263.0s to 1264.0s, [0.288, 0.251, 0.566, 1.0]->[0.195, 0.241, 0.496, 0.989]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 1261.0s to 1262.0s, [0.394, 0.258, 0.639, 1.0]->[0.391, 0.269, 0.674, 0.993]: walk, talk to (e.g., self, a person, a group).", "From 1258.0s to 1260.0s, [0.312, 0.313, 0.536, 0.999]->[0.384, 0.279, 0.598, 0.985]->[0.445, 0.269, 0.638, 0.986]: walk, talk 
to (e.g., self, a person, a group), watch (a person).", "From 1256.0s to 1257.0s, [0.325, 0.193, 0.535, 0.979]->[0.339, 0.284, 0.537, 0.983]: walk, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1316.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.19, 0.117, 0.518, 0.989]: walk; [0.638, 0.443, 0.804, 0.987]: watch (a person), stand."}]}, {"video": "sUVhd0YTKgw.mkv", "clip": [1273, 1343], "conversations": [{"from": "human", "timestamps": 1303.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.294, 0.146, 0.895, 0.998]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1343.0, "value": "What actions is the person in bbox [0.121, 0.199, 0.679, 0.983] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.121, 0.199, 0.679, 0.983]: stand, watch (a person).", "At 1342.0s, [0.155, 0.19, 0.667, 0.993]: stand, watch (a person).", "From 1340.0s to 1341.0s, [0.123, 0.196, 0.66, 0.987]->[0.054, 0.199, 0.657, 0.994]: stand."]}]}, {"video": "sUVhd0YTKgw.mkv", "clip": [1370, 1422], "conversations": [{"from": "human", "timestamps": 1400.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.331, 0.251, 0.589, 0.97]: carry/hold (an object), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1422.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.029, 0.29, 0.673, 0.981]?"}, {"from": "gpt", "value": ["In the current frame, [0.029, 0.29, 0.673, 0.981]: stand, carry/hold (an object), close (e.g., a door, a box), listen to (a person), watch (a person).", "At 1421.0s, [0.117, 0.246, 0.836, 0.976]: walk, touch (an object), watch (a person).", "From 1419.0s to 1420.0s, [0.296, 0.289, 0.686, 1.0]->[0.192, 0.122, 0.6, 0.982]: walk.", "At 1418.0s, [0.347, 0.262, 0.901, 1.0]: walk, talk to (e.g., self, a person, a group).", "At 1417.0s, [0.44, 0.36, 0.899, 0.998]: stand, talk to (e.g., self, a person, a group).", "At 1416.0s, [0.447, 0.359, 0.95, 0.998]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1415.0s, [0.427, 0.371, 0.955, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1412.0s to 1414.0s, [0.427, 0.355, 0.971, 0.996]->[0.445, 0.389, 0.951, 0.996]->[0.437, 0.372, 0.955, 0.994]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1411.0s, [0.443, 0.372, 0.968, 0.999]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1410.0s, [0.428, 0.363, 0.964, 0.992]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1409.0s, [0.458, 0.363, 0.959, 0.995]: stand, touch (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1408.0s, [0.503, 0.327, 0.988, 0.993]: stand, carry/hold (an object), touch (an object), listen to (a person), talk to (e.g., self, a person, a group)."]}]}, {"video": "sUVhd0YTKgw.mkv", "clip": [1410, 1491], "conversations": [{"from": "human", "timestamps": 1440.0, "value": "What actions is the person in bbox [0.23, 0.149, 0.62, 0.996] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.23, 
0.149, 0.62, 0.996]: walk.", "From 1438.0s to 1439.0s, [0.26, 0.205, 0.645, 0.993]->[0.193, 0.164, 0.612, 1.0]: walk.", "At 1437.0s, [0.376, 0.294, 0.686, 0.983]: bend/bow (at the waist)."]}, {"from": "human", "timestamps": 1491.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.049, 0.881, 0.981]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "sUVhd0YTKgw.mkv", "clip": [1486, 1533], "conversations": [{"from": "human", "timestamps": 1516.0, "value": "What actions did the person at bbox [0.016, 0.016, 0.754, 0.988] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.016, 0.016, 0.754, 0.988]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1511.0s to 1515.0s, [0.005, 0.128, 0.753, 0.965]->[0.023, 0.098, 0.736, 0.969]->[0.016, 0.093, 0.737, 0.996]->[0.011, 0.051, 0.789, 0.978]->[0.031, 0.035, 0.765, 0.993]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1510.0s, [0.019, 0.079, 0.757, 0.972]: stand.", "At 1509.0s, [0.013, 0.115, 0.86, 0.981]: stand, touch (an object).", "At 1508.0s, [0.026, 0.095, 0.831, 0.979]: stand.", "From 1506.0s to 1507.0s, [0.0, 0.148, 0.79, 1.0]->[0.0, 0.113, 0.611, 0.996]: walk.", "At 1505.0s, [0.044, 0.035, 0.972, 0.991]: walk, talk to (e.g., self, a person, a group).", "From 1497.0s to 1504.0s, [0.171, 0.018, 1.0, 0.998]->[0.167, 0.043, 0.993, 0.983]->[0.151, 0.031, 1.0, 0.995]->[0.189, 0.024, 1.0, 0.99]->[0.158, 0.025, 1.0, 0.996]->[0.179, 0.034, 1.0, 0.999]->[0.146, 0.0, 1.0, 0.994]->[0.161, 0.0, 1.0, 0.99]: stand, talk to (e.g., self, a person, a group).", "From 1495.0s to 1496.0s, [0.178, 0.042, 1.0, 0.992]->[0.166, 0.034, 1.0, 0.993]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1533.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.236, 0.433, 0.999]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.351, 0.154, 1.0, 0.995]: listen to (a person), stand."}]}, {"video": "sUVhd0YTKgw.mkv", "clip": [1551, 1585], "conversations": [{"from": "human", "timestamps": 1581.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.045, 0.253, 0.787]: carry/hold (an object), stand, watch (a person); [0.102, 0.214, 1.0, 0.966]: walk."}, {"from": "human", "timestamps": 1585.0, "value": "Describe the actions of the person at bbox [0.035, 0.06, 0.843, 0.974] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.035, 0.06, 0.843, 0.974]: talk to (e.g., self, a person, a group), lie/sleep.", "At 1584.0s, [0.078, 0.013, 0.989, 0.991]: talk to (e.g., self, a person, a group), lie/sleep."]}]}, {"video": "sUVhd0YTKgw.mkv", "clip": [1608, 1697], "conversations": [{"from": "human", "timestamps": 1638.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.324, 0.38, 0.577, 0.97]: listen to (a person), sit, watch (a person); [0.325, 0.224, 0.533, 0.588]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.511, 0.187, 0.716, 0.634]: listen to (a person), stand, hand clap, watch (a person); [0.548, 0.153, 1.0, 1.0]: listen (e.g., to music), dance."}, {"from": "human", "timestamps": 1697.0, "value": "Can 
you tell me the current and past actions of the person located in bbox [0.058, 0.191, 0.348, 0.968]?"}, {"from": "gpt", "value": ["At Now, [0.058, 0.191, 0.348, 0.968]: stand, hand clap.", "At 1696.0s, [0.039, 0.213, 0.385, 0.976]: stand, hand clap, watch (a person).", "At 1695.0s, [0.042, 0.21, 0.381, 0.988]: stand, listen (e.g., to music), hand clap, sing to (e.g., self, a person, a group).", "At 1694.0s, [0.088, 0.227, 0.42, 0.986]: stand, hand clap, watch (a person)."]}]}, {"video": "skiZueh4lfY.mkv", "clip": [925, 960], "conversations": [{"from": "human", "timestamps": 955.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.028, 0.03, 0.476, 1.0]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 960.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.0, 0.83, 0.994]?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.0, 0.83, 0.994]: stand, talk to (e.g., self, a person, a group).", "At 959.0s, [0.002, 0.013, 0.19, 0.73]: stand, listen to (a person), watch (a person)."]}]}, {"video": "skiZueh4lfY.mkv", "clip": [899, 968], "conversations": [{"from": "human", "timestamps": 929.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.039, 0.0, 0.781, 0.931]: sit, touch (an object)."}, {"from": "human", "timestamps": 968.0, "value": "Describe the actions of the person at bbox [0.106, 0.0, 0.964, 0.993] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.106, 0.0, 0.964, 0.993]: sit, listen to (a person), watch (a person).", "At 967.0s, [0.541, 0.202, 0.972, 0.944]: sit, listen to (a person), watch (a person).", "At 966.0s, [0.517, 0.189, 0.958, 0.953]: sit, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "skiZueh4lfY.mkv", "clip": [943, 1011], "conversations": [{"from": "human", "timestamps": 973.0, "value": "What actions did the person at bbox [0.507, 0.344, 0.701, 0.689] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.507, 0.344, 0.701, 0.689]: sit, touch (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 972.0s, [0.417, 0.325, 0.704, 0.857]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1011.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.009, 0.314, 0.392, 0.969]: carry/hold (an object), sit, touch (an object), play board game; [0.328, 0.244, 0.438, 0.563]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.438, 0.201, 0.611, 0.699]: carry/hold (an object), stand, listen to (a person); [0.636, 0.305, 0.737, 0.678]: carry/hold (an object), walk; [0.675, 0.205, 0.982, 0.937]: touch (an object), play board game, bend/bow (at the waist)."}]}, {"video": "skiZueh4lfY.mkv", "clip": [1083, 1115], "conversations": [{"from": "human", "timestamps": 1113.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.053, 0.063, 0.422, 0.985]: stand."}, {"from": "human", "timestamps": 1115.0, "value": "What actions is the person in bbox [0.126, 0.088, 0.475, 0.989] doing in the current frame, and what were the person previous 
actions?"}, {"from": "gpt", "value": ["At Now, [0.126, 0.088, 0.475, 0.989]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1114.0s, [0.084, 0.055, 0.481, 0.987]: stand, touch (an object), listen to (a person).", "At 1113.0s, [0.053, 0.063, 0.422, 0.985]: stand."]}]}, {"video": "skiZueh4lfY.mkv", "clip": [1160, 1194], "conversations": [{"from": "human", "timestamps": 1190.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.146, 0.097, 0.707, 0.982]?"}, {"from": "gpt", "value": ["At Now, [0.146, 0.097, 0.707, 0.982]: talk to (e.g., self, a person, a group), lie/sleep, watch (a person).", "From 1188.0s to 1189.0s, [0.025, 0.043, 0.685, 0.976]->[0.063, 0.047, 0.738, 0.995]: talk to (e.g., self, a person, a group), lie/sleep, watch (a person)."]}, {"from": "human", "timestamps": 1194.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.133, 0.044, 0.641, 0.976]: listen to (a person), lie/sleep, watch (a person)."}]}, {"video": "skiZueh4lfY.mkv", "clip": [1125, 1244], "conversations": [{"from": "human", "timestamps": 1155.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.195, 0.12, 0.747, 0.995]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1244.0, "value": "What actions did the person at bbox [0.031, 0.17, 0.909, 0.921] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.031, 0.17, 0.909, 0.921]: lie/sleep.", "At 1243.0s, [0.111, 0.201, 0.934, 0.934]: lie/sleep."]}]}, {"video": "skiZueh4lfY.mkv", "clip": [1260, 1333], "conversations": [{"from": "human", "timestamps": 1290.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.655, 0.148, 1.0, 1.0]: listen to (a person), lie/sleep."}, {"from": "human", "timestamps": 1333.0, "value": "What actions did the person at bbox [0.076, 0.096, 0.401, 0.974] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.076, 0.096, 0.401, 0.974]: stand, listen to (a person), watch (a person).", "At 1332.0s, [0.08, 0.098, 0.404, 0.987]: stand, listen to (a person), watch (a person).", "At 1331.0s, [0.093, 0.103, 0.415, 0.991]: stand, watch (a person).", "From 1327.0s to 1330.0s, [0.173, 0.045, 0.651, 0.992]->[0.188, 0.04, 0.672, 0.999]->[0.177, 0.033, 0.647, 0.991]->[0.171, 0.022, 0.637, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1326.0s, [0.117, 0.028, 0.633, 0.987]: stand, watch (a person)."]}]}, {"video": "skiZueh4lfY.mkv", "clip": [1366, 1426], "conversations": [{"from": "human", "timestamps": 1396.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.176, 0.372, 0.256, 0.877]: carry/hold (an object), stand, watch (a person); [0.279, 0.409, 0.332, 0.733]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1426.0, "value": "What actions did the person at bbox [0.113, 0.242, 0.375, 0.984] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.113, 0.242, 0.375, 0.984]: stand, carry/hold (an object), play with pets.", "From 1424.0s to 1425.0s, [0.203, 0.212, 0.404, 0.971]->[0.18, 0.172, 0.389, 0.997]: stand, carry/hold (an object), play with pets.", "From 1421.0s to 1423.0s, [0.221, 0.233, 0.501, 0.999]->[0.217, 0.224, 
0.511, 0.994]->[0.182, 0.207, 0.48, 0.994]: stand, play with pets.", "From 1419.0s to 1420.0s, [0.221, 0.211, 0.511, 0.984]->[0.211, 0.204, 0.507, 0.992]: stand, play with pets, talk to (e.g., self, a person, a group).", "From 1416.0s to 1418.0s, [0.185, 0.227, 0.484, 0.984]->[0.168, 0.246, 0.488, 0.985]->[0.168, 0.227, 0.489, 1.0]: stand, carry/hold (an object), play with pets.", "At 1415.0s, [0.183, 0.215, 0.501, 0.998]: stand, carry/hold (an object), play with pets, talk to (e.g., self, a person, a group).", "From 1411.0s to 1414.0s, [0.235, 0.268, 0.527, 1.0]->[0.122, 0.254, 0.529, 1.0]->[0.18, 0.216, 0.536, 0.994]->[0.206, 0.267, 0.455, 0.992]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1409.0s to 1410.0s, [0.266, 0.251, 0.549, 0.991]->[0.256, 0.259, 0.535, 0.993]: stand, carry/hold (an object), play with pets, talk to (e.g., self, a person, a group)."]}]}, {"video": "skiZueh4lfY.mkv", "clip": [1393, 1488], "conversations": [{"from": "human", "timestamps": 1423.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.182, 0.207, 0.48, 0.994]: stand, play with pets."}, {"from": "human", "timestamps": 1488.0, "value": "For the person at bbox [0.008, 0.008, 0.749, 0.977], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.008, 0.008, 0.749, 0.977]: stand, talk to (e.g., self, a person, a group).", "At 1487.0s, [0.102, 0.0, 0.729, 0.969]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "skiZueh4lfY.mkv", "clip": [1501, 1541], "conversations": [{"from": "human", "timestamps": 1531.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.327, 0.045, 0.967, 0.971]: carry/hold (an object), stand, answer phone."}, {"from": "human", "timestamps": 1541.0, "value": "Describe the actions of the person at bbox [0.628, 0.07, 0.949, 0.977] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.628, 0.07, 0.949, 0.977]: walk, answer phone.", "At 1540.0s, [0.607, 0.089, 0.829, 0.777]: walk, answer phone.", "At 1539.0s, [0.364, 0.167, 0.561, 0.985]: walk, answer phone, carry/hold (an object).", "At 1538.0s, [0.229, 0.134, 0.496, 1.0]: walk, answer phone.", "At 1537.0s, [0.077, 0.093, 0.36, 0.973]: stand, answer phone.", "From 1535.0s to 1536.0s, [0.03, 0.028, 0.336, 0.965]->[0.073, 0.091, 0.333, 0.971]: stand, answer phone, touch (an object).", "At 1534.0s, [0.029, 0.0, 0.701, 0.988]: stand, answer phone."]}]}, {"video": "skiZueh4lfY.mkv", "clip": [1471, 1597], "conversations": [{"from": "human", "timestamps": 1501.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.014, 0.001, 0.725, 0.986]: stand."}, {"from": "human", "timestamps": 1597.0, "value": "Describe the actions of the person at bbox [0.033, 0.112, 0.9, 0.984] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.033, 0.112, 0.9, 0.984]: stand, watch (a person).", "At 1596.0s, [0.163, 0.237, 0.32, 0.607]: walk, dress/put on clothing, listen to (a person), watch (a person).", "At 1595.0s, [0.349, 0.29, 0.402, 0.517]: walk, dress/put on clothing, watch (a person)."]}]}, {"video": "skiZueh4lfY.mkv", "clip": [1554, 1617], "conversations": [{"from": "human", "timestamps": 1584.0, "value": "For the 
current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.13, 0.393, 0.334, 0.724]: sit; [0.688, 0.425, 0.739, 0.668]: carry/hold (an object), sit, dress/put on clothing; [0.784, 0.209, 0.933, 1.0]: watch (a person), open (e.g., a window, a car door), carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1617.0, "value": "For the person at bbox [0.0, 0.0, 0.594, 0.989], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.0, 0.594, 0.989]: stand.", "At 1616.0s, [0.0, 0.0, 0.606, 1.0]: stand."]}]}, {"video": "skiZueh4lfY.mkv", "clip": [1599, 1661], "conversations": [{"from": "human", "timestamps": 1629.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.01, 0.308, 0.16, 0.954]: sit; [0.532, 0.0, 1.0, 0.976]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1661.0, "value": "For the person at bbox [0.443, 0.0, 1.0, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.443, 0.0, 1.0, 1.0]: stand, watch (a person).", "At 1660.0s, [0.541, 0.0, 1.0, 0.988]: stand, carry/hold (an object).", "At 1659.0s, [0.036, 0.0, 0.357, 0.991]: walk.", "At 1658.0s, [0.007, 0.0, 0.484, 0.971]: walk, carry/hold (an object)."]}]}, {"video": "skiZueh4lfY.mkv", "clip": [1648, 1704], "conversations": [{"from": "human", "timestamps": 1678.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.17, 0.015, 0.991, 0.962]: sit, hug (a person), talk to (e.g., self, a person, a group), stand."}, {"from": "human", "timestamps": 1704.0, "value": "What actions did the person at bbox [0.004, 0.017, 0.804, 0.993] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.004, 0.017, 0.804, 0.993]: sit.", "At 1703.0s, [0.1, 0.025, 0.852, 0.991]: sit.", "At 1702.0s, [0.129, 0.014, 0.918, 0.997]: lie/sleep.", "At 1701.0s, [0.229, 0.018, 0.997, 0.999]: stand.", "At 1700.0s, [0.386, 0.013, 1.0, 0.978]: sit."]}]}, {"video": "t0V4drbYDnc.mkv", "clip": [888, 986], "conversations": [{"from": "human", "timestamps": 918.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.092, 0.441, 0.152, 0.612]: watch (a person), walk; [0.341, 0.429, 0.43, 0.662]: run/jog; [0.414, 0.427, 0.487, 0.694]: run/jog; [0.446, 0.421, 0.666, 0.767]: run/jog; [0.819, 0.408, 0.946, 0.725]: watch (a person), walk."}, {"from": "human", "timestamps": 986.0, "value": "What actions did the person at bbox [0.291, 0.119, 0.822, 0.927] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.291, 0.119, 0.822, 0.927]: sit.", "At 985.0s, [0.256, 0.121, 0.869, 0.929]: sit, open (e.g., a window, a car door).", "At 984.0s, [0.164, 0.127, 0.858, 0.917]: sit, carry/hold (an object), open (e.g., a window, a car door).", "At 983.0s, [0.115, 0.153, 0.866, 0.914]: sit, carry/hold (an object).", "From 980.0s to 982.0s, [0.293, 0.305, 0.644, 0.916]->[0.447, 0.225, 0.843, 0.933]->[0.476, 0.158, 0.919, 0.925]: sit, touch (an object).", "From 978.0s to 979.0s, [0.292, 0.366, 0.614, 0.932]->[0.212, 0.355, 0.569, 0.923]: sit.", "At 977.0s, [0.477, 0.383, 0.807, 0.917]: sit, watch (a person).", "At 976.0s, [0.562, 0.392, 0.938, 0.926]: sit, listen to (a 
person), watch (a person).", "At 975.0s, [0.644, 0.378, 0.923, 0.935]: sit, touch (an object), listen to (a person), watch (a person).", "At 974.0s, [0.644, 0.392, 0.951, 0.941]: sit, listen to (a person), watch (a person)."]}]}, {"video": "t0V4drbYDnc.mkv", "clip": [933, 1003], "conversations": [{"from": "human", "timestamps": 963.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.026, 0.135, 0.427, 0.959]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.187, 0.495, 0.269, 0.743]: walk; [0.589, 0.313, 0.989, 0.936]: listen to (a person), walk."}, {"from": "human", "timestamps": 1003.0, "value": "What actions is the person in bbox [0.196, 0.131, 0.371, 0.919] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.196, 0.131, 0.371, 0.919]: stand, listen to (a person), watch (a person).", "From 1000.0s to 1002.0s, [0.245, 0.189, 0.411, 0.837]->[0.187, 0.135, 0.361, 0.886]->[0.192, 0.122, 0.369, 0.914]: stand, watch (a person)."]}]}, {"video": "t0V4drbYDnc.mkv", "clip": [1027, 1075], "conversations": [{"from": "human", "timestamps": 1057.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.472, 0.206, 0.667, 0.601]?"}, {"from": "gpt", "value": ["In the current frame, [0.472, 0.206, 0.667, 0.601]: touch (an object), listen to (a person), watch (a person).", "At 1056.0s, [0.469, 0.209, 0.677, 0.676]: sit, touch (an object), watch (a person).", "At 1055.0s, [0.452, 0.209, 0.681, 0.614]: carry/hold (an object), crouch/kneel, listen to (a person), watch (a person).", "At 1054.0s, [0.454, 0.197, 0.674, 0.62]: crouch/kneel, listen to (a person), watch (a person).", "At 1053.0s, [0.446, 0.223, 0.691, 0.669]: crouch/kneel, touch (an object), listen to (a person), watch (a person).", "At 1052.0s, [0.402, 0.252, 0.675, 0.829]: sit, carry/hold (an object), touch (an object), watch (a person)."]}, {"from": "human", "timestamps": 1075.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.142, 0.376, 0.278, 0.811]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.322, 0.31, 0.487, 0.771]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.629, 0.244, 0.863, 0.851]: stand."}]}, {"video": "t0V4drbYDnc.mkv", "clip": [996, 1121], "conversations": [{"from": "human", "timestamps": 1026.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.154, 0.193, 0.629, 0.931]: walk."}, {"from": "human", "timestamps": 1121.0, "value": "What actions is the person in bbox [0.702, 0.13, 0.977, 0.938] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.702, 0.13, 0.977, 0.938]: stand, listen to (a person), watch (a person).", "At 1120.0s, [0.657, 0.132, 0.98, 0.95]: stand, listen to (a person), watch (a person)."]}]}, {"video": "t0V4drbYDnc.mkv", "clip": [1082, 1116], "conversations": [{"from": "human", "timestamps": 1112.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.048, 0.176, 0.49, 0.932]: stand, answer phone; [0.46, 0.159, 0.895, 0.932]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1116.0, "value": "Describe the 
actions of the person at bbox [0.065, 0.125, 0.673, 0.958] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.065, 0.125, 0.673, 0.958]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1114.0s to 1115.0s, [0.042, 0.149, 0.348, 0.93]->[0.032, 0.235, 0.331, 0.944]: stand, listen to (a person), watch (a person).", "At 1113.0s, [0.08, 0.169, 0.487, 0.938]: stand, put down, listen to (a person).", "From 1108.0s to 1112.0s, [0.025, 0.171, 0.516, 0.937]->[0.054, 0.162, 0.509, 0.923]->[0.066, 0.192, 0.517, 0.929]->[0.061, 0.188, 0.506, 0.939]->[0.048, 0.176, 0.49, 0.932]: stand, answer phone.", "At 1107.0s, [0.039, 0.2, 0.458, 0.949]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1106.0s, [0.051, 0.231, 0.5, 0.938]: stand, answer phone, listen to (a person).", "At 1105.0s, [0.082, 0.142, 0.508, 0.925]: stand, answer phone."]}]}, {"video": "t0V4drbYDnc.mkv", "clip": [1115, 1162], "conversations": [{"from": "human", "timestamps": 1145.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.091, 0.122, 0.942, 0.934]?"}, {"from": "gpt", "value": ["In the current frame, [0.091, 0.122, 0.942, 0.934]: stand, carry/hold (an object).", "From 1140.0s to 1144.0s, [0.283, 0.132, 0.938, 0.91]->[0.165, 0.095, 0.982, 0.929]->[0.177, 0.101, 0.963, 0.935]->[0.101, 0.098, 0.942, 0.941]->[0.093, 0.113, 0.929, 0.931]: stand, carry/hold (an object)."]}, {"from": "human", "timestamps": 1162.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.055, 0.182, 0.759, 0.961]: watch (a person); [0.599, 0.155, 0.786, 0.88]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "t0V4drbYDnc.mkv", "clip": [1123, 1247], "conversations": [{"from": "human", "timestamps": 1153.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.078, 0.196, 0.663, 0.971]: listen to (a person), sit, watch (a person); [0.601, 0.177, 0.786, 0.862]: carry/hold (an object), stand, talk to (e.g., self, a person, a group), listen to (a person)."}, {"from": "human", "timestamps": 1247.0, "value": "What actions did the person at bbox [0.648, 0.228, 0.869, 0.528] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.648, 0.228, 0.869, 0.528]: stand, carry/hold (an object), listen to (a person).", "At 1246.0s, [0.604, 0.185, 0.981, 0.944]: stand, listen to (a person).", "At 1245.0s, [0.617, 0.195, 0.943, 0.947]: walk, carry/hold (an object), close (e.g., a door, a box), talk to (e.g., self, a person, a group), watch (a person).", "At 1244.0s, [0.681, 0.129, 0.985, 0.958]: stand, answer phone, carry/hold (an object), listen to (a person), watch (a person).", "From 1241.0s to 1243.0s, [0.56, 0.237, 0.836, 0.893]->[0.574, 0.217, 0.82, 0.913]->[0.507, 0.247, 0.824, 0.651]: stand, answer phone.", "At 1240.0s, [0.186, 0.161, 0.809, 0.962]: stand, open (e.g., a window, a car door), watch (a person).", "At 1239.0s, [0.537, 0.361, 0.835, 0.985]: answer phone, open (e.g., a window, a car door), get up, watch (a person).", "At 1238.0s, [0.677, 0.381, 0.832, 0.635]: sit, answer phone, open (e.g., a window, a car door), listen to (a person), watch (a person).", "At 1237.0s, [0.69, 0.377, 0.849, 0.618]: sit, open (e.g., a window, a car door), listen to (a person), talk to (e.g., self, a person, a group), watch 
(a person).", "At 1236.0s, [0.705, 0.388, 0.852, 0.619]: sit, answer phone, talk to (e.g., self, a person, a group).", "From 1231.0s to 1235.0s, [0.77, 0.431, 0.892, 0.595]->[0.758, 0.394, 0.948, 0.648]->[0.673, 0.385, 0.932, 0.729]->[0.675, 0.373, 0.922, 0.673]->[0.724, 0.379, 0.863, 0.611]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "t0V4drbYDnc.mkv", "clip": [1193, 1233], "conversations": [{"from": "human", "timestamps": 1223.0, "value": "Describe the actions of the person at bbox [0.008, 0.178, 0.729, 0.954] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.008, 0.178, 0.729, 0.954]: bend/bow (at the waist), carry/hold (an object).", "At 1222.0s, [0.009, 0.336, 0.769, 0.987]: bend/bow (at the waist), sit, carry/hold (an object), drive (e.g., a car, a truck).", "At 1221.0s, [0.016, 0.435, 0.691, 0.983]: bend/bow (at the waist), drive (e.g., a car, a truck)."]}, {"from": "human", "timestamps": 1233.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.673, 0.385, 0.932, 0.729]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "t0V4drbYDnc.mkv", "clip": [1248, 1279], "conversations": [{"from": "human", "timestamps": 1278.0, "value": "For the person at bbox [0.124, 0.2, 0.594, 0.704], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.124, 0.2, 0.594, 0.704]: stand, carry/hold (an object), give/serve (an object) to (a person).", "From 1276.0s to 1277.0s, [0.083, 0.184, 0.611, 0.897]->[0.199, 0.174, 0.657, 0.917]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1275.0s, [0.068, 0.189, 0.576, 0.848]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1274.0s, [0.099, 0.197, 0.633, 0.943]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1272.0s to 1273.0s, [0.087, 0.187, 0.626, 0.912]->[0.122, 0.23, 0.593, 0.853]: stand, listen to (a person)."]}, {"from": "human", "timestamps": 1279.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.12, 0.175, 0.639, 0.911]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.604, 0.135, 0.988, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "t0V4drbYDnc.mkv", "clip": [1278, 1340], "conversations": [{"from": "human", "timestamps": 1308.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.146, 0.181, 0.697, 0.914]: stand, write; [0.57, 0.11, 0.988, 0.983]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1340.0, "value": "Describe the actions of the person at bbox [0.354, 0.146, 0.505, 0.9] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.354, 0.146, 0.505, 0.9]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1338.0s to 1339.0s, [0.305, 0.185, 0.481, 0.953]->[0.322, 0.149, 0.477, 0.931]: walk, carry/hold (an object), listen to (a person).", "At 1337.0s, [0.343, 0.234, 0.535, 0.684]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group)."]}]}, {"video": "t0V4drbYDnc.mkv", "clip": [1318, 1363], "conversations": 
[{"from": "human", "timestamps": 1348.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.356, 0.151, 0.519, 0.899]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.458, 0.134, 0.678, 0.895]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1363.0, "value": "Describe the actions of the person at bbox [0.399, 0.11, 0.972, 0.938] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.399, 0.11, 0.972, 0.938]: stand, talk to (e.g., self, a person, a group).", "From 1361.0s to 1362.0s, [0.457, 0.098, 0.983, 0.921]->[0.433, 0.097, 0.968, 0.938]: stand, listen to (a person)."]}]}, {"video": "t0V4drbYDnc.mkv", "clip": [1358, 1478], "conversations": [{"from": "human", "timestamps": 1388.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.204, 0.097, 0.942, 0.907]: sit, talk to (e.g., self, a person, a group); [0.231, 0.284, 0.428, 0.628]: watch (a person), sit, drink."}, {"from": "human", "timestamps": 1478.0, "value": "What actions did the person at bbox [0.453, 0.258, 0.679, 0.616] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.453, 0.258, 0.679, 0.616]: sit.", "From 1473.0s to 1477.0s, [0.444, 0.258, 0.676, 0.626]->[0.458, 0.252, 0.699, 0.599]->[0.454, 0.24, 0.694, 0.607]->[0.457, 0.257, 0.704, 0.603]->[0.448, 0.247, 0.671, 0.619]: sit."]}]}, {"video": "t0V4drbYDnc.mkv", "clip": [1419, 1476], "conversations": [{"from": "human", "timestamps": 1449.0, "value": "For the person at bbox [0.638, 0.106, 0.978, 0.93], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.638, 0.106, 0.978, 0.93]: sit, eat, watch (a person).", "At 1448.0s, [0.642, 0.108, 0.979, 0.936]: sit, watch (a person).", "At 1447.0s, [0.674, 0.121, 0.978, 0.92]: sit, listen to (a person), watch (a person).", "At 1446.0s, [0.641, 0.124, 0.978, 0.918]: sit, watch (a person)."]}, {"from": "human", "timestamps": 1476.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.146, 0.132, 0.26, 0.422]: walk; [0.149, 0.195, 0.435, 0.66]: listen to (a person), sit, watch (a person); [0.343, 0.242, 0.452, 0.497]: sit; [0.457, 0.257, 0.704, 0.603]: sit; [0.488, 0.13, 0.975, 0.954]: sit, watch (a person), listen to (a person), carry/hold (an object), talk to (e.g., self, a person, a group)."}]}, {"video": "t0V4drbYDnc.mkv", "clip": [1505, 1586], "conversations": [{"from": "human", "timestamps": 1535.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.079, 0.264, 0.268, 0.682]: sit, talk to (e.g., self, a person, a group); [0.324, 0.134, 0.978, 0.945]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1586.0, "value": "For the person at bbox [0.108, 0.105, 0.834, 0.927], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.108, 0.105, 0.834, 0.927]: sit, talk to (e.g., self, a person, a group).", "At 1585.0s, [0.134, 0.104, 0.8, 0.933]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "t0V4drbYDnc.mkv", "clip": [1555, 1607], "conversations": [{"from": "human", "timestamps": 1585.0, "value": 
"For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.134, 0.104, 0.8, 0.933]: sit, talk to (e.g., self, a person, a group); [0.717, 0.107, 0.979, 0.911]: sit."}, {"from": "human", "timestamps": 1607.0, "value": "For the person at bbox [0.352, 0.499, 0.503, 0.767], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.352, 0.499, 0.503, 0.767]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1606.0s, [0.361, 0.513, 0.495, 0.763]: sit, carry/hold (an object), read.", "At 1605.0s, [0.354, 0.478, 0.518, 0.771]: sit, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "At 1604.0s, [0.362, 0.489, 0.517, 0.77]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "t0V4drbYDnc.mkv", "clip": [1606, 1713], "conversations": [{"from": "human", "timestamps": 1636.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.243, 0.157, 0.88, 0.918]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1713.0, "value": "Describe the actions of the person at bbox [0.047, 0.168, 0.918, 0.929] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.047, 0.168, 0.918, 0.929]: sit.", "At 1712.0s, [0.043, 0.094, 0.858, 0.942]: sit."]}]}, {"video": "t0V4drbYDnc.mkv", "clip": [1679, 1725], "conversations": [{"from": "human", "timestamps": 1709.0, "value": "What actions did the person at bbox [0.053, 0.132, 0.863, 0.923] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.053, 0.132, 0.863, 0.923]: sit, watch (a person).", "At 1708.0s, [0.058, 0.148, 0.87, 0.914]: sit."]}, {"from": "human", "timestamps": 1725.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.029, 0.085, 0.853, 0.915]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "t0V4drbYDnc.mkv", "clip": [1723, 1762], "conversations": [{"from": "human", "timestamps": 1753.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.328, 0.328, 0.529, 0.544]: sit, touch (an object)."}, {"from": "human", "timestamps": 1762.0, "value": "For the person at bbox [0.157, 0.131, 0.945, 0.921], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.157, 0.131, 0.945, 0.921]: walk, talk to (e.g., self, a person, a group).", "At 1761.0s, [0.44, 0.128, 0.835, 0.931]: walk, talk to (e.g., self, a person, a group).", "From 1759.0s to 1760.0s, [0.466, 0.16, 0.672, 0.82]->[0.499, 0.112, 0.804, 0.917]: walk, dress/put on clothing.", "At 1758.0s, [0.439, 0.178, 0.626, 0.629]: walk.", "At 1757.0s, [0.404, 0.124, 0.63, 0.61]: get up, talk to (e.g., self, a person, a group).", "From 1755.0s to 1756.0s, [0.33, 0.29, 0.614, 0.553]->[0.355, 0.315, 0.587, 0.556]: sit, talk to (e.g., self, a person, a group).", "At 1754.0s, [0.326, 0.344, 0.526, 0.545]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1752.0s to 1753.0s, [0.332, 0.365, 0.507, 0.547]->[0.328, 0.328, 0.529, 0.544]: sit, touch (an object)."]}]}, {"video": "t1LXrJOvPDg.mkv", "clip": [883, 943], "conversations": [{"from": "human", 
"timestamps": 913.0, "value": "For the person at bbox [0.162, 0.016, 0.958, 0.975], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.162, 0.016, 0.958, 0.975]: stand, talk to (e.g., self, a person, a group).", "From 910.0s to 912.0s, [0.16, 0.02, 0.968, 0.986]->[0.173, 0.006, 0.971, 0.97]->[0.176, 0.011, 0.961, 0.971]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 943.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.025, 0.033, 0.509, 0.966]: listen to (a person), stand, watch (a person); [0.381, 0.058, 0.847, 0.971]: listen to (a person), stand, watch (a person); [0.744, 0.012, 0.979, 0.956]: watch (a person)."}]}, {"video": "t1LXrJOvPDg.mkv", "clip": [915, 1019], "conversations": [{"from": "human", "timestamps": 945.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.022, 0.022, 0.503, 0.978]: stand, talk to (e.g., self, a person, a group); [0.395, 0.063, 0.875, 0.971]: listen to (a person), stand, watch (a person); [0.736, 0.014, 0.976, 0.955]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1019.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.154, 0.067, 0.984, 0.975]?"}, {"from": "gpt", "value": ["In the current frame, [0.154, 0.067, 0.984, 0.975]: stand, watch (a person).", "At 1018.0s, [0.693, 0.17, 0.911, 0.969]: stand, carry/hold (an object), touch (an object), watch (a person).", "At 1017.0s, [0.775, 0.186, 0.902, 0.968]: stand.", "At 1016.0s, [0.747, 0.198, 0.897, 0.984]: walk, talk to (e.g., self, a person, a group).", "At 1015.0s, [0.794, 0.191, 0.972, 0.961]: walk, watch (a person).", "From 1006.0s to 1014.0s, [0.33, 0.094, 0.534, 0.974]->[0.287, 0.072, 0.483, 0.973]->[0.215, 0.014, 0.663, 0.984]->[0.175, 0.093, 0.609, 0.979]->[0.472, 0.059, 0.741, 0.97]->[0.438, 0.092, 0.835, 0.995]->[0.585, 0.108, 0.882, 0.964]->[0.683, 0.125, 0.973, 0.979]->[0.768, 0.192, 0.968, 0.964]: walk."]}]}, {"video": "t1LXrJOvPDg.mkv", "clip": [1202, 1239], "conversations": [{"from": "human", "timestamps": 1232.0, "value": "What actions is the person in bbox [0.077, 0.048, 0.421, 0.976] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.077, 0.048, 0.421, 0.976]: sit, listen to (a person), watch (a person).", "From 1228.0s to 1231.0s, [0.08, 0.067, 0.417, 0.969]->[0.084, 0.063, 0.424, 0.967]->[0.082, 0.055, 0.422, 0.969]->[0.078, 0.044, 0.421, 0.985]: sit, listen to (a person), watch (a person).", "At 1227.0s, [0.082, 0.067, 0.423, 0.977]: sit, grab (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1224.0s to 1226.0s, [0.078, 0.061, 0.42, 0.98]->[0.078, 0.058, 0.421, 0.981]->[0.081, 0.052, 0.426, 0.973]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1223.0s, [0.078, 0.053, 0.423, 0.978]: sit, grab (a person), listen to (a person), watch (a person).", "At 1222.0s, [0.085, 0.058, 0.428, 0.961]: sit, listen to (a person), watch (a person).", "At 1221.0s, [0.086, 0.06, 0.432, 0.962]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1220.0s, [0.088, 0.069, 0.433, 0.958]: sit, talk to (e.g., self, a person, a group).", "From 1217.0s to 1219.0s, [0.086, 0.061, 0.428, 0.971]->[0.078, 0.069, 0.425, 0.966]->[0.08, 0.065, 0.436, 0.978]: sit, talk to (e.g., 
self, a person, a group), watch (a person).", "At 1216.0s, [0.083, 0.096, 0.432, 0.957]: sit, grab (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1215.0s, [0.071, 0.096, 0.437, 0.961]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1214.0s, [0.08, 0.092, 0.429, 0.959]: sit, grab (a person), kiss (a person), watch (a person).", "At 1213.0s, [0.083, 0.096, 0.428, 0.969]: sit, grab (a person), watch (a person).", "From 1210.0s to 1212.0s, [0.084, 0.132, 0.409, 0.955]->[0.076, 0.077, 0.406, 0.981]->[0.079, 0.064, 0.443, 0.976]: sit, listen to (a person), watch (a person).", "At 1209.0s, [0.083, 0.142, 0.431, 0.955]: sit.", "At 1208.0s, [0.058, 0.311, 0.222, 0.889]: sit, watch (a person).", "At 1207.0s, [0.059, 0.308, 0.229, 0.908]: sit, listen to (a person).", "From 1203.0s to 1206.0s, [0.058, 0.31, 0.227, 0.901]->[0.061, 0.31, 0.229, 0.92]->[0.06, 0.303, 0.23, 0.905]->[0.058, 0.308, 0.233, 0.901]: sit, listen to (a person), watch (a person).", "From 1196.0s to 1202.0s, [0.065, 0.313, 0.228, 0.913]->[0.065, 0.313, 0.229, 0.896]->[0.066, 0.315, 0.224, 0.912]->[0.066, 0.313, 0.222, 0.909]->[0.056, 0.31, 0.229, 0.91]->[0.057, 0.322, 0.228, 0.902]->[0.058, 0.329, 0.224, 0.898]: sit, listen to (a person)."]}, {"from": "human", "timestamps": 1239.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.213, 0.033, 0.968, 0.976]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "t1LXrJOvPDg.mkv", "clip": [1266, 1366], "conversations": [{"from": "human", "timestamps": 1296.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.292, 0.0, 0.955, 0.989]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1366.0, "value": "For the person at bbox [0.593, 0.163, 0.969, 0.973], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.593, 0.163, 0.969, 0.973]: sit, talk to (e.g., self, a person, a group).", "At 1365.0s, [0.558, 0.173, 0.972, 0.969]: sit."]}]}, {"video": "t1LXrJOvPDg.mkv", "clip": [1291, 1346], "conversations": [{"from": "human", "timestamps": 1321.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.425, 0.195, 0.945, 0.959]?"}, {"from": "gpt", "value": ["At Now, [0.425, 0.195, 0.945, 0.959]: lie/sleep.", "From 1317.0s to 1320.0s, [0.603, 0.742, 0.906, 0.988]->[0.519, 0.583, 0.911, 0.972]->[0.314, 0.209, 0.94, 0.91]->[0.266, 0.263, 0.915, 0.929]: lie/sleep."]}, {"from": "human", "timestamps": 1346.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.443, 0.029, 0.947, 0.964]: watch (a person), sit, grab (a person)."}]}, {"video": "t1LXrJOvPDg.mkv", "clip": [1369, 1418], "conversations": [{"from": "human", "timestamps": 1399.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.178, 0.014, 0.942, 0.979]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1418.0, "value": "For the person at bbox [0.365, 0.072, 0.83, 0.978], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.365, 0.072, 0.83, 0.978]: stand.", "At 1417.0s, [0.365, 0.081, 0.82, 0.975]: stand.", "At 1416.0s, [0.388, 0.068, 0.81, 0.978]: 
stand, watch (a person)."]}]}, {"video": "t1LXrJOvPDg.mkv", "clip": [1364, 1449], "conversations": [{"from": "human", "timestamps": 1394.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.112, 0.51, 0.642, 0.968]: lie/sleep."}, {"from": "human", "timestamps": 1449.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.5, 0.03, 0.979, 0.983]?"}, {"from": "gpt", "value": ["In the current frame, [0.5, 0.03, 0.979, 0.983]: stand, listen to (a person), watch (a person).", "From 1447.0s to 1448.0s, [0.574, 0.02, 0.958, 0.963]->[0.563, 0.0, 0.941, 0.932]: stand, listen to (a person), watch (a person)."]}]}, {"video": "t1LXrJOvPDg.mkv", "clip": [1499, 1542], "conversations": [{"from": "human", "timestamps": 1529.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.027, 0.0, 0.41, 0.971]: listen to (a person), stand, watch (a person); [0.304, 0.0, 0.974, 0.969]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1542.0, "value": "What actions did the person at bbox [0.714, 0.007, 0.979, 0.987] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.714, 0.007, 0.979, 0.987]: stand, listen to (a person), watch (a person).", "At 1540.0s, [0.703, 0.02, 0.982, 0.977]: stand, listen to (a person), watch (a person)."]}]}, {"video": "t1LXrJOvPDg.mkv", "clip": [1626, 1674], "conversations": [{"from": "human", "timestamps": 1656.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.017, 0.278, 0.27, 0.951]: sit; [0.402, 0.143, 0.496, 0.469]: watch (a person), sit, touch (an object); [0.478, 0.148, 0.651, 0.63]: sit; [0.575, 0.356, 0.706, 0.917]: sit; [0.653, 0.126, 0.969, 0.977]: watch (a person), sit."}, {"from": "human", "timestamps": 1674.0, "value": "What actions is the person in bbox [0.455, 0.1, 0.961, 0.957] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.455, 0.1, 0.961, 0.957]: sit, touch (an object).", "From 1669.0s to 1673.0s, [0.049, 0.251, 0.467, 0.971]->[0.135, 0.227, 0.599, 0.985]->[0.242, 0.208, 0.801, 0.985]->[0.316, 0.199, 0.907, 0.989]->[0.414, 0.122, 0.979, 0.961]: sit.", "At 1668.0s, [0.015, 0.25, 0.262, 0.976]: sit, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "t1LXrJOvPDg.mkv", "clip": [1672, 1713], "conversations": [{"from": "human", "timestamps": 1702.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.113, 0.017, 0.783, 0.972]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1713.0, "value": "For the person at bbox [0.102, 0.055, 0.769, 0.962], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.102, 0.055, 0.769, 0.962]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1712.0s, [0.088, 0.051, 0.742, 0.96]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "t1LXrJOvPDg.mkv", "clip": [1740, 1793], "conversations": [{"from": "human", "timestamps": 1770.0, "value": "What actions is each person performing, and where are they positioned in the current 
frame?"}, {"from": "gpt", "value": "In the current frame, [0.123, 0.039, 0.855, 0.982]: listen to (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1793.0, "value": "What actions is the person in bbox [0.345, 0.421, 0.594, 0.963] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.345, 0.421, 0.594, 0.963]: lie/sleep.", "From 1791.0s to 1792.0s, [0.348, 0.417, 0.596, 0.962]->[0.354, 0.416, 0.594, 0.962]: lie/sleep."]}]}, {"video": "tEoJW9ycmSY.mkv", "clip": [898, 952], "conversations": [{"from": "human", "timestamps": 928.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.329, 0.01, 1.0, 0.987]?"}, {"from": "gpt", "value": ["In the current frame, [0.329, 0.01, 1.0, 0.987]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 927.0s, [0.366, 0.0, 1.0, 0.983]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 952.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.209, 0.012, 0.998, 0.985]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "tEoJW9ycmSY.mkv", "clip": [888, 1017], "conversations": [{"from": "human", "timestamps": 918.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.007, 0.093, 0.704, 0.986]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.764, 0.0, 1.0, 0.979]: listen to (a person), stand."}, {"from": "human", "timestamps": 1017.0, "value": "For the person at bbox [0.0, 0.012, 0.651, 0.988], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.012, 0.651, 0.988]: stand, talk to (e.g., self, a person, a group).", "From 1011.0s to 1016.0s, [0.002, 0.013, 0.759, 0.978]->[0.0, 0.015, 0.75, 0.969]->[0.003, 0.052, 0.688, 0.987]->[0.011, 0.045, 0.643, 0.993]->[0.0, 0.068, 0.667, 0.991]->[0.0, 0.009, 0.682, 0.989]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "tEoJW9ycmSY.mkv", "clip": [965, 1006], "conversations": [{"from": "human", "timestamps": 995.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.008, 0.009, 0.66, 0.995]?"}, {"from": "gpt", "value": ["In the current frame, [0.008, 0.009, 0.66, 0.995]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 994.0s, [0.005, 0.023, 0.61, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1006.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.011, 0.019, 0.643, 1.0]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "tEoJW9ycmSY.mkv", "clip": [998, 1066], "conversations": [{"from": "human", "timestamps": 1028.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.313, 0.041, 1.0, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1066.0, "value": "For the person at bbox [0.0, 0.017, 0.511, 0.99], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.017, 0.511, 0.99]: stand, talk to (e.g., 
self, a person, a group), watch (a person).", "At 1065.0s, [0.004, 0.0, 0.599, 0.994]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1062.0s to 1064.0s, [0.0, 0.087, 0.32, 0.98]->[0.0, 0.08, 0.294, 0.992]->[0.028, 0.008, 0.646, 1.0]: stand, listen to (a person), watch (a person).", "At 1061.0s, [0.014, 0.129, 0.53, 0.997]: stand, hand shake, talk to (e.g., self, a person, a group), watch (a person).", "At 1060.0s, [0.0, 0.123, 0.429, 0.989]: stand, talk to (e.g., self, a person, a group).", "At 1059.0s, [0.004, 0.065, 0.331, 0.993]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1058.0s, [0.0, 0.076, 0.397, 0.967]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1057.0s, [0.179, 0.211, 0.44, 0.98]: walk, carry/hold (an object), listen to (a person), watch (a person).", "From 1054.0s to 1056.0s, [0.104, 0.008, 0.915, 0.985]->[0.336, 0.244, 0.535, 0.942]->[0.301, 0.277, 0.546, 0.987]: walk, carry/hold (an object).", "At 1053.0s, [0.106, 0.074, 0.882, 0.975]: walk.", "At 1052.0s, [0.486, 0.275, 0.713, 0.807]: stand, touch (an object)."]}]}, {"video": "tEoJW9ycmSY.mkv", "clip": [1040, 1119], "conversations": [{"from": "human", "timestamps": 1070.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.389, 0.013, 1.0, 0.973]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1119.0, "value": "Describe the actions of the person at bbox [0.349, 0.227, 0.573, 0.893] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.349, 0.227, 0.573, 0.893]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1118.0s, [0.486, 0.369, 0.652, 0.94]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1117.0s, [0.511, 0.361, 0.679, 0.911]: walk, talk to (e.g., self, a person, a group)."]}]}, {"video": "tEoJW9ycmSY.mkv", "clip": [1085, 1122], "conversations": [{"from": "human", "timestamps": 1115.0, "value": "Describe the actions of the person at bbox [0.051, 0.005, 0.994, 0.998] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.051, 0.005, 0.994, 0.998]: stand, talk to (e.g., self, a person, a group).", "From 1113.0s to 1114.0s, [0.034, 0.008, 1.0, 0.993]->[0.059, 0.002, 0.994, 0.998]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1122.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.008, 0.125, 0.29, 0.626]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.268, 0.272, 0.363, 0.602]: walk; [0.372, 0.283, 0.436, 0.502]: walk; [0.54, 0.269, 0.598, 0.388]: walk; [0.605, 0.166, 0.935, 0.982]: carry/hold (an object), talk to (e.g., self, a person, a group), walk."}]}, {"video": "tEoJW9ycmSY.mkv", "clip": [1216, 1252], "conversations": [{"from": "human", "timestamps": 1246.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.182, 0.0, 0.991, 1.0]?"}, {"from": "gpt", "value": ["At Now, [0.182, 0.0, 0.991, 1.0]: stand, carry/hold (an object).", "At 1245.0s, [0.382, 0.045, 1.0, 0.997]: stand.", "At 1244.0s, [0.263, 0.075, 1.0, 1.0]: stand, listen to (a person)."]}, {"from": "human", "timestamps": 1252.0, "value": "What actions is each person performing, and 
where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.023, 1.0, 0.953]: watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "tEoJW9ycmSY.mkv", "clip": [1281, 1345], "conversations": [{"from": "human", "timestamps": 1311.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.365, 0.024, 1.0, 0.997]: sit."}, {"from": "human", "timestamps": 1345.0, "value": "For the person at bbox [0.132, 0.0, 0.978, 0.998], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.132, 0.0, 0.978, 0.998]: sit, listen to (a person).", "At 1344.0s, [0.061, 0.026, 0.936, 1.0]: sit, listen to (a person).", "At 1343.0s, [0.028, 0.0, 0.962, 0.998]: sit."]}]}, {"video": "tEoJW9ycmSY.mkv", "clip": [1395, 1428], "conversations": [{"from": "human", "timestamps": 1425.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.594, 0.013, 1.0, 0.99]: watch (a person), stand."}, {"from": "human", "timestamps": 1428.0, "value": "What actions did the person at bbox [0.722, 0.095, 1.0, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.722, 0.095, 1.0, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1427.0s, [0.713, 0.044, 1.0, 0.977]: stand, carry/hold (an object), take (an object) from (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1425.0s to 1426.0s, [0.594, 0.013, 1.0, 0.99]->[0.617, 0.0, 0.997, 0.994]: stand, watch (a person)."]}]}, {"video": "tEoJW9ycmSY.mkv", "clip": [1351, 1462], "conversations": [{"from": "human", "timestamps": 1381.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.488, 0.0, 0.967, 0.99]: touch (an object), watch (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1462.0, "value": "What actions did the person at bbox [0.051, 0.039, 0.527, 0.995] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.051, 0.039, 0.527, 0.995]: stand, open (e.g., a window, a car door).", "At 1461.0s, [0.22, 0.081, 0.627, 0.979]: stand, open (e.g., a window, a car door)."]}]}, {"video": "tEoJW9ycmSY.mkv", "clip": [1556, 1587], "conversations": [{"from": "human", "timestamps": 1586.0, "value": "Describe the actions of the person at bbox [0.02, 0.161, 0.21, 0.98] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.02, 0.161, 0.21, 0.98]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1585.0s, [0.005, 0.146, 0.185, 0.979]: stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1587.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.22, 0.199, 0.45, 0.983]: carry/hold (an object), watch (a person), talk to (e.g., self, a person, a group), walk; [0.503, 0.022, 0.745, 1.0]: listen to (a person), stand, watch (a person); [0.717, 0.234, 0.925, 0.895]: listen to (a person), stand, watch (a person)."}]}, {"video": "tEoJW9ycmSY.mkv", "clip": [1595, 1668], "conversations": [{"from": "human", "timestamps": 1625.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.521, 0.062, 
1.0, 1.0]?"}, {"from": "gpt", "value": ["In the current frame, [0.521, 0.062, 1.0, 1.0]: stand, answer phone.", "From 1622.0s to 1624.0s, [0.49, 0.067, 0.97, 0.981]->[0.501, 0.091, 0.967, 0.989]->[0.5, 0.098, 0.998, 0.996]: stand, answer phone."]}, {"from": "human", "timestamps": 1668.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.053, 0.04, 0.971, 0.998]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "tNpZtigMc4g.mkv", "clip": [882, 947], "conversations": [{"from": "human", "timestamps": 912.0, "value": "What actions did the person at bbox [0.062, 0.013, 0.744, 0.994] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.062, 0.013, 0.744, 0.994]: stand, talk to (e.g., self, a person, a group).", "At 911.0s, [0.073, 0.013, 0.64, 0.974]: stand, talk to (e.g., self, a person, a group).", "At 910.0s, [0.055, 0.02, 0.725, 0.983]: stand, hand wave, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 947.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.049, 0.039, 0.626, 0.99]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "tNpZtigMc4g.mkv", "clip": [908, 1001], "conversations": [{"from": "human", "timestamps": 938.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.424, 0.006, 0.921, 0.967]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1001.0, "value": "Describe the actions of the person at bbox [0.581, 0.058, 0.987, 0.968] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.581, 0.058, 0.987, 0.968]: sit, listen to (a person), watch (a person).", "From 998.0s to 1000.0s, [0.527, 0.046, 0.971, 0.976]->[0.473, 0.025, 0.977, 0.988]->[0.642, 0.063, 1.0, 0.967]: sit, listen to (a person), watch (a person)."]}]}, {"video": "tNpZtigMc4g.mkv", "clip": [1019, 1066], "conversations": [{"from": "human", "timestamps": 1049.0, "value": "For the person at bbox [0.009, 0.019, 0.373, 0.914], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.009, 0.019, 0.373, 0.914]: stand, hug (a person).", "From 1047.0s to 1048.0s, [0.082, 0.001, 0.459, 0.973]->[0.052, 0.0, 0.442, 0.966]: stand, carry/hold (an object)."]}, {"from": "human", "timestamps": 1066.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.144, 0.048, 0.511, 0.985]: listen to (a person), sit, watch (a person); [0.377, 0.176, 0.767, 1.0]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.7, 0.067, 1.0, 0.994]: listen to (a person), sit, watch (a person)."}]}, {"video": "tNpZtigMc4g.mkv", "clip": [1034, 1099], "conversations": [{"from": "human", "timestamps": 1064.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.003, 0.191, 1.0]: listen to (a person), sit, watch (a person); [0.459, 0.022, 0.998, 0.973]: carry/hold (an object), sit, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1099.0, "value": "What actions did the person at bbox [0.153, 0.035, 0.666, 0.987] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.153, 0.035, 
0.666, 0.987]: stand, talk to (e.g., self, a person, a group).", "At 1098.0s, [0.143, 0.035, 0.662, 0.994]: stand, talk to (e.g., self, a person, a group).", "At 1097.0s, [0.127, 0.03, 0.655, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "tNpZtigMc4g.mkv", "clip": [1060, 1128], "conversations": [{"from": "human", "timestamps": 1090.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.373, 0.061, 0.789, 0.979]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1128.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.393, 0.033, 0.845, 0.968]?"}, {"from": "gpt", "value": ["In the current frame, [0.393, 0.033, 0.845, 0.968]: sit, text on/look at a cellphone, talk to (e.g., self, a person, a group).", "At 1127.0s, [0.381, 0.03, 0.857, 0.987]: sit, text on/look at a cellphone, touch (an object), talk to (e.g., self, a person, a group).", "At 1126.0s, [0.428, 0.041, 0.861, 0.978]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1125.0s, [0.493, 0.033, 1.0, 0.986]: sit, text on/look at a cellphone, watch (e.g., TV), talk to (e.g., self, a person, a group).", "From 1119.0s to 1124.0s, [0.429, 0.025, 0.873, 0.98]->[0.412, 0.013, 0.874, 0.984]->[0.434, 0.024, 0.882, 0.986]->[0.439, 0.028, 0.872, 0.97]->[0.377, 0.021, 0.885, 0.978]->[0.636, 0.038, 0.998, 0.99]: sit, talk to (e.g., self, a person, a group).", "From 1112.0s to 1117.0s, [0.48, 0.078, 0.891, 0.986]->[0.557, 0.008, 1.0, 0.972]->[0.538, 0.007, 1.0, 0.983]->[0.472, 0.045, 0.86, 0.99]->[0.467, 0.047, 0.897, 0.995]->[0.442, 0.03, 0.865, 0.981]: sit, talk to (e.g., self, a person, a group).", "At 1111.0s, [0.544, 0.0, 1.0, 1.0]: sit, put down, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1110.0s, [0.604, 0.038, 0.992, 0.958]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1108.0s to 1109.0s, [0.583, 0.005, 0.991, 0.993]->[0.663, 0.038, 0.999, 0.974]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1107.0s, [0.602, 0.058, 0.996, 0.954]: sit, touch (an object), listen to (a person), watch (a person).", "At 1106.0s, [0.514, 0.0, 0.99, 0.993]: sit, carry/hold (an object), listen to (a person).", "At 1105.0s, [0.511, 0.007, 0.98, 0.966]: sit, put down, listen to (a person).", "At 1104.0s, [0.54, 0.0, 0.99, 0.985]: sit, drink, listen to (a person).", "At 1103.0s, [0.549, 0.0, 0.959, 0.969]: sit, carry/hold (an object), listen to (a person)."]}]}, {"video": "tNpZtigMc4g.mkv", "clip": [1154, 1191], "conversations": [{"from": "human", "timestamps": 1184.0, "value": "For the person at bbox [0.543, 0.167, 0.854, 0.975], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.543, 0.167, 0.854, 0.975]: sit, carry/hold (an object), text on/look at a cellphone, talk to (e.g., self, a person, a group).", "From 1179.0s to 1183.0s, [0.542, 0.162, 0.85, 0.985]->[0.508, 0.12, 0.847, 0.978]->[0.526, 0.045, 0.862, 0.994]->[0.52, 0.108, 0.848, 0.979]->[0.538, 0.158, 0.853, 1.0]: sit, carry/hold (an object), text on/look at a cellphone, talk to (e.g., self, a person, a group).", "From 1177.0s to 1178.0s, [0.542, 0.172, 0.859, 0.981]->[0.542, 0.151, 0.859, 0.978]: sit, text on/look at a cellphone, talk to (e.g., 
self, a person, a group).", "At 1176.0s, [0.559, 0.166, 0.853, 0.969]: sit, carry/hold (an object), text on/look at a cellphone, talk to (e.g., self, a person, a group).", "At 1175.0s, [0.529, 0.157, 0.853, 0.977]: sit, carry/hold (an object), text on/look at a cellphone, talk to (e.g., self, a person, a group), watch (a person).", "From 1173.0s to 1174.0s, [0.541, 0.143, 0.857, 0.975]->[0.554, 0.143, 0.852, 0.988]: sit, text on/look at a cellphone, talk to (e.g., self, a person, a group).", "At 1172.0s, [0.5, 0.144, 0.838, 0.98]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1171.0s, [0.518, 0.138, 0.845, 0.964]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1170.0s, [0.562, 0.14, 0.843, 1.0]: sit, carry/hold (an object), text on/look at a cellphone, talk to (e.g., self, a person, a group).", "At 1169.0s, [0.542, 0.139, 0.859, 0.975]: sit, text on/look at a cellphone, talk to (e.g., self, a person, a group).", "From 1167.0s to 1168.0s, [0.546, 0.174, 0.838, 0.993]->[0.483, 0.159, 0.847, 0.978]: sit, text on/look at a cellphone, listen to (a person).", "At 1166.0s, [0.535, 0.163, 0.857, 0.987]: sit, carry/hold (an object), text on/look at a cellphone, listen to (a person).", "At 1165.0s, [0.445, 0.09, 0.8, 0.945]: sit, carry/hold (an object), text on/look at a cellphone, listen to (a person), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1191.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.073, 0.053, 0.625, 0.965]: listen to (a person), sit; [0.578, 0.157, 0.859, 0.926]: sit, text on/look at a cellphone, talk to (e.g., self, a person, a group)."}]}, {"video": "tNpZtigMc4g.mkv", "clip": [1188, 1238], "conversations": [{"from": "human", "timestamps": 1218.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.372, 0.017, 0.908, 0.978]?"}, {"from": "gpt", "value": ["At Now, [0.372, 0.017, 0.908, 0.978]: sit, talk to (e.g., self, a person, a group).", "At 1217.0s, [0.358, 0.019, 0.901, 0.975]: sit, talk to (e.g., self, a person, a group).", "At 1216.0s, [0.355, 0.026, 0.896, 0.961]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1213.0s to 1215.0s, [0.406, 0.03, 0.876, 0.974]->[0.439, 0.005, 0.887, 0.988]->[0.4, 0.023, 0.892, 0.974]: sit, talk to (e.g., self, a person, a group).", "At 1212.0s, [0.373, 0.021, 0.893, 0.975]: sit, point to (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1211.0s, [0.38, 0.037, 0.898, 0.967]: sit, point to (an object), talk to (e.g., self, a person, a group).", "At 1210.0s, [0.216, 0.045, 0.733, 0.968]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "At 1209.0s, [0.382, 0.033, 0.911, 0.996]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1238.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.538, 0.135, 0.805, 0.947]: sit, text on/look at a cellphone, talk to (e.g., self, a person, a group)."}]}, {"video": "tNpZtigMc4g.mkv", "clip": [1254, 1314], "conversations": [{"from": "human", "timestamps": 1284.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.533, 0.084, 0.824, 0.919]?"}, {"from": "gpt", "value": ["At Now, [0.533, 0.084, 0.824, 0.919]: sit, 
carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1282.0s to 1283.0s, [0.524, 0.089, 0.812, 0.975]->[0.529, 0.102, 0.811, 0.91]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1314.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.008, 0.0, 0.534, 0.98]: listen to (a person), stand, watch (a person); [0.45, 0.055, 0.812, 0.982]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "tNpZtigMc4g.mkv", "clip": [1246, 1360], "conversations": [{"from": "human", "timestamps": 1276.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.519, 0.105, 0.86, 0.96]: carry/hold (an object), sit, listen to (a person)."}, {"from": "human", "timestamps": 1360.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.71, 0.346, 1.0, 0.988]?"}, {"from": "gpt", "value": ["At Now, [0.71, 0.346, 1.0, 0.988]: stand, watch (a person).", "At 1359.0s, [0.68, 0.377, 0.996, 0.977]: stand, listen to (a person), watch (a person).", "From 1357.0s to 1358.0s, [0.628, 0.287, 0.928, 0.945]->[0.721, 0.384, 1.0, 0.981]: stand, listen to (a person)."]}]}, {"video": "tNpZtigMc4g.mkv", "clip": [1291, 1334], "conversations": [{"from": "human", "timestamps": 1321.0, "value": "What actions did the person at bbox [0.486, 0.086, 0.969, 0.956] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.486, 0.086, 0.969, 0.956]: stand, listen to (a person).", "From 1318.0s to 1320.0s, [0.458, 0.059, 0.972, 0.952]->[0.485, 0.052, 0.961, 0.97]->[0.4, 0.072, 0.988, 0.988]: stand, listen to (a person), watch (a person).", "From 1315.0s to 1317.0s, [0.73, 0.313, 0.847, 0.938]->[0.744, 0.308, 0.875, 0.949]->[0.749, 0.318, 0.872, 0.943]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1334.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.065, 0.038, 0.705, 0.981]: stand, talk to (e.g., self, a person, a group); [0.547, 0.148, 1.0, 0.967]: listen to (a person), stand, watch (a person)."}]}, {"video": "tNpZtigMc4g.mkv", "clip": [1386, 1458], "conversations": [{"from": "human", "timestamps": 1416.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.123, 0.008, 0.587, 0.999]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.615, 0.069, 0.997, 0.989]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1458.0, "value": "For the person at bbox [0.128, 0.002, 0.645, 0.995], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.128, 0.002, 0.645, 0.995]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1457.0s, [0.099, 0.005, 0.753, 0.991]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1455.0s to 1456.0s, [0.002, 0.222, 0.266, 1.0]->[0.0, 0.223, 0.266, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1454.0s, [0.001, 0.231, 0.249, 1.0]: stand, carry/hold (an object), listen to (a person).", "At 1453.0s, [0.0, 0.221, 0.251, 1.0]: stand, carry/hold (an object), 
listen to (a person), watch (a person).", "At 1451.0s, [0.0, 0.208, 0.256, 1.0]: stand, eat, listen to (a person), watch (a person).", "At 1450.0s, [0.001, 0.219, 0.213, 1.0]: stand, listen to (a person).", "From 1445.0s to 1449.0s, [0.0, 0.209, 0.209, 1.0]->[0.002, 0.205, 0.206, 1.0]->[0.001, 0.217, 0.231, 1.0]->[0.001, 0.221, 0.19, 1.0]->[0.0, 0.212, 0.218, 1.0]: stand, listen to (a person), watch (a person).", "From 1440.0s to 1444.0s, [0.112, 0.019, 0.663, 0.991]->[0.0, 0.118, 0.205, 0.974]->[0.0, 0.166, 0.203, 0.974]->[0.0, 0.135, 0.196, 0.982]->[0.1, 0.016, 0.641, 0.993]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1435.0s to 1439.0s, [0.074, 0.015, 0.572, 0.993]->[0.087, 0.019, 0.616, 0.984]->[0.074, 0.014, 0.54, 0.999]->[0.08, 0.01, 0.678, 0.98]->[0.11, 0.006, 0.555, 0.977]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1430.0s to 1433.0s, [0.125, 0.014, 0.578, 0.985]->[0.122, 0.009, 0.565, 0.98]->[0.114, 0.003, 0.526, 0.982]->[0.114, 0.01, 0.522, 0.997]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1429.0s, [0.12, 0.012, 0.587, 0.995]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1424.0s to 1428.0s, [0.135, 0.009, 0.623, 0.979]->[0.139, 0.0, 0.647, 0.995]->[0.14, 0.002, 0.654, 0.993]->[0.121, 0.003, 0.644, 0.99]->[0.113, 0.026, 0.623, 0.994]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1423.0s, [0.137, 0.015, 0.604, 0.992]: stand, talk to (e.g., self, a person, a group).", "At 1422.0s, [0.141, 0.007, 0.591, 0.987]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1421.0s, [0.162, 0.009, 0.597, 0.984]: stand, talk to (e.g., self, a person, a group).", "At 1420.0s, [0.0, 0.142, 0.268, 0.983]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1419.0s, [0.001, 0.187, 0.23, 0.977]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1418.0s, [0.001, 0.145, 0.253, 0.967]: stand, listen to (a person), watch (a person).", "At 1417.0s, [0.138, 0.018, 0.563, 0.981]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1416.0s, [0.123, 0.008, 0.587, 0.999]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1414.0s to 1415.0s, [0.013, 0.142, 0.395, 0.986]->[0.0, 0.136, 0.242, 0.973]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1413.0s, [0.001, 0.135, 0.194, 0.994]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "tNpZtigMc4g.mkv", "clip": [1418, 1476], "conversations": [{"from": "human", "timestamps": 1448.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.001, 0.221, 0.19, 1.0]: listen to (a person), stand, watch (a person); [0.245, 0.016, 0.892, 0.987]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1476.0, "value": "What actions did the person at bbox [0.293, 0.055, 0.779, 0.977] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.293, 0.055, 0.779, 0.977]: stand, talk to (e.g., self, a person, a group).", "At 1475.0s, [0.222, 0.062, 0.773, 0.994]: sit, stand, talk to (e.g., self, a person, a group).", "From 1473.0s to 1474.0s, [0.199, 0.052, 0.783, 0.983]->[0.287, 0.073, 0.788, 0.987]: stand, talk 
to (e.g., self, a person, a group)."]}]}, {"video": "tNpZtigMc4g.mkv", "clip": [1469, 1515], "conversations": [{"from": "human", "timestamps": 1499.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.489, 0.074, 0.835, 0.96]: watch (a person), stand."}, {"from": "human", "timestamps": 1515.0, "value": "What actions did the person at bbox [0.5, 0.054, 0.895, 0.96] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.5, 0.054, 0.895, 0.96]: stand, listen to (a person), watch (a person).", "From 1506.0s to 1513.0s, [0.768, 0.075, 1.0, 0.903]->[0.769, 0.059, 1.0, 0.873]->[0.784, 0.048, 1.0, 0.875]->[0.774, 0.057, 1.0, 0.891]->[0.773, 0.071, 1.0, 0.925]->[0.782, 0.07, 1.0, 0.91]->[0.772, 0.074, 1.0, 0.871]->[0.797, 0.055, 0.999, 0.94]: stand, listen to (a person), watch (a person).", "From 1504.0s to 1505.0s, [0.479, 0.062, 0.856, 0.969]->[0.494, 0.068, 0.857, 0.967]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1503.0s, [0.502, 0.081, 0.853, 0.968]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "tNpZtigMc4g.mkv", "clip": [1492, 1605], "conversations": [{"from": "human", "timestamps": 1522.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.004, 0.007, 0.725, 1.0]: listen to (a person), stand, watch (a person); [0.774, 0.037, 1.0, 0.811]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1605.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.575, 0.086, 0.799, 0.976]?"}, {"from": "gpt", "value": ["In the current frame, [0.575, 0.086, 0.799, 0.976]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1604.0s, [0.552, 0.081, 0.777, 0.982]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1603.0s, [0.571, 0.07, 0.81, 0.983]: walk, talk to (e.g., self, a person, a group).", "At 1602.0s, [0.474, 0.058, 0.689, 0.982]: walk, listen to (a person), talk to (e.g., self, a person, a group).", "From 1599.0s to 1601.0s, [0.19, 0.08, 0.359, 0.8]->[0.293, 0.066, 0.439, 0.911]->[0.364, 0.031, 0.536, 0.974]: walk, listen to (a person).", "At 1598.0s, [0.139, 0.067, 0.301, 0.732]: walk, touch (an object), listen to (a person)."]}]}, {"video": "tNpZtigMc4g.mkv", "clip": [1561, 1606], "conversations": [{"from": "human", "timestamps": 1591.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.239, 0.072, 0.328, 0.478]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.374, 0.107, 0.466, 0.459]: carry/hold (an object), stand, talk to (e.g., self, a person, a group), listen to (a person)."}, {"from": "human", "timestamps": 1606.0, "value": "What actions is the person in bbox [0.24, 0.057, 0.596, 0.977] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.24, 0.057, 0.596, 0.977]: stand, walk, listen to (a person), watch (a person).", "At 1605.0s, [0.0, 0.062, 0.315, 0.997]: walk, listen to (a person).", "From 1603.0s to 1604.0s, [0.252, 0.175, 0.414, 0.974]->[0.096, 0.156, 0.332, 0.981]: walk, listen to (a person), watch (a person).", "At 1602.0s, [0.376, 0.131, 0.503, 0.939]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a 
person).", "From 1598.0s to 1601.0s, [0.594, 0.11, 0.725, 0.542]->[0.602, 0.094, 0.717, 0.571]->[0.583, 0.113, 0.7, 0.582]->[0.508, 0.1, 0.668, 0.527]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1597.0s, [0.707, 0.125, 0.791, 0.559]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "tNpZtigMc4g.mkv", "clip": [1613, 1708], "conversations": [{"from": "human", "timestamps": 1643.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.207, 0.009, 0.691, 0.989]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1708.0, "value": "What actions did the person at bbox [0.301, 0.044, 0.902, 0.983] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.301, 0.044, 0.902, 0.983]: stand, carry/hold (an object), listen to (a person).", "At 1707.0s, [0.28, 0.04, 0.888, 0.984]: stand, carry/hold (an object), listen to (a person)."]}]}, {"video": "tNpZtigMc4g.mkv", "clip": [1687, 1735], "conversations": [{"from": "human", "timestamps": 1717.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.031, 0.063, 0.461, 0.992]: drink, listen to (a person), watch (a person), carry/hold (an object), stand; [0.507, 0.048, 0.804, 0.993]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1735.0, "value": "Describe the actions of the person at bbox [0.504, 0.022, 0.883, 0.98] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.504, 0.022, 0.883, 0.98]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1733.0s to 1734.0s, [0.622, 0.024, 0.991, 0.972]->[0.555, 0.0, 0.98, 1.0]: stand, listen to (a person), watch (a person)."]}]}, {"video": "tjqCzVjojCo.mkv", "clip": [936, 1012], "conversations": [{"from": "human", "timestamps": 966.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.118, 0.352, 0.236, 0.931]: walk; [0.242, 0.314, 0.349, 0.913]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1012.0, "value": "What actions is the person in bbox [0.008, 0.0, 0.742, 0.962] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.008, 0.0, 0.742, 0.962]: stand, hug (a person).", "At 1011.0s, [0.004, 0.011, 0.755, 0.947]: stand, hug (a person)."]}]}, {"video": "tjqCzVjojCo.mkv", "clip": [1027, 1114], "conversations": [{"from": "human", "timestamps": 1057.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.507, 0.231, 0.675, 0.986]: carry/hold (an object), sit, listen to (a person); [0.695, 0.205, 0.853, 0.984]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1114.0, "value": "Describe the actions of the person at bbox [0.457, 0.077, 0.881, 0.989] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.457, 0.077, 0.881, 0.989]: stand, carry/hold (an object), listen to (a person).", "From 1110.0s to 1112.0s, [0.447, 0.087, 0.899, 0.987]->[0.461, 0.09, 0.902, 0.972]->[0.461, 0.09, 0.91, 0.999]: stand, carry/hold (an object), watch (a 
person).", "At 1109.0s, [0.445, 0.091, 0.886, 0.958]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1108.0s, [0.386, 0.07, 0.981, 0.984]: stand, carry/hold (an object), watch (a person).", "At 1107.0s, [0.543, 0.07, 0.966, 0.991]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "tjqCzVjojCo.mkv", "clip": [1211, 1259], "conversations": [{"from": "human", "timestamps": 1241.0, "value": "What actions is the person in bbox [0.149, 0.0, 0.829, 0.876] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.149, 0.0, 0.829, 0.876]: sit.", "From 1239.0s to 1240.0s, [0.101, 0.028, 0.836, 0.868]->[0.138, 0.0, 0.807, 0.874]: sit.", "At 1238.0s, [0.169, 0.048, 0.828, 0.852]: sit, watch (a person)."]}, {"from": "human", "timestamps": 1259.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.11, 0.0, 0.989, 0.854]: stand."}]}, {"video": "tjqCzVjojCo.mkv", "clip": [1319, 1365], "conversations": [{"from": "human", "timestamps": 1349.0, "value": "What actions is the person in bbox [0.185, 0.0, 0.957, 0.988] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.185, 0.0, 0.957, 0.988]: stand, talk to (e.g., self, a person, a group).", "From 1347.0s to 1348.0s, [0.208, 0.0, 0.926, 0.98]->[0.158, 0.0, 0.961, 0.98]: sit, talk to (e.g., self, a person, a group).", "At 1346.0s, [0.569, 0.227, 0.902, 0.879]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1344.0s to 1345.0s, [0.577, 0.235, 0.886, 0.822]->[0.56, 0.23, 0.891, 0.844]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1343.0s, [0.591, 0.229, 0.883, 0.836]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 1341.0s to 1342.0s, [0.615, 0.227, 0.86, 0.792]->[0.593, 0.232, 0.874, 0.799]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1340.0s, [0.573, 0.24, 0.851, 0.778]: sit, carry/hold (an object), eat, watch (a person).", "At 1339.0s, [0.604, 0.231, 0.846, 0.752]: sit, eat.", "At 1338.0s, [0.564, 0.231, 0.836, 0.747]: sit, carry/hold (an object), eat.", "At 1337.0s, [0.594, 0.213, 0.834, 0.747]: sit, eat, watch (a person).", "At 1336.0s, [0.594, 0.223, 0.826, 0.741]: sit, carry/hold (an object), eat, watch (a person)."]}, {"from": "human", "timestamps": 1365.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.078, 0.092, 0.925, 0.976]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "tjqCzVjojCo.mkv", "clip": [1387, 1456], "conversations": [{"from": "human", "timestamps": 1417.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.058, 0.0, 0.928, 0.955]: watch (a person), sit."}, {"from": "human", "timestamps": 1456.0, "value": "What actions did the person at bbox [0.017, 0.007, 0.758, 0.985] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.017, 0.007, 0.758, 0.985]: sit, eat.", "At 1455.0s, [0.045, 0.0, 0.717, 1.0]: sit, eat.", "At 1454.0s, [0.0, 0.0, 0.696, 0.976]: sit, stand, eat, listen to (a person), watch (a person)."]}]}, {"video": "tjqCzVjojCo.mkv", "clip": [1414, 1474], "conversations": [{"from": 
"human", "timestamps": 1444.0, "value": "What actions did the person at bbox [0.287, 0.008, 1.0, 0.99] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.287, 0.008, 1.0, 0.99]: sit, stand.", "At 1443.0s, [0.343, 0.0, 1.0, 0.983]: sit, stand."]}, {"from": "human", "timestamps": 1474.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.069, 0.033, 0.993, 0.982]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "tjqCzVjojCo.mkv", "clip": [1515, 1612], "conversations": [{"from": "human", "timestamps": 1545.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.04, 0.021, 0.697, 0.976]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1612.0, "value": "What actions did the person at bbox [0.387, 0.218, 0.956, 0.994] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.387, 0.218, 0.956, 0.994]: sit.", "At 1611.0s, [0.38, 0.206, 0.917, 0.979]: sit, talk to (e.g., self, a person, a group).", "At 1610.0s, [0.394, 0.197, 0.888, 0.978]: sit, listen to (a person).", "At 1609.0s, [0.388, 0.256, 0.805, 0.974]: sit, listen to (a person), watch (a person).", "From 1605.0s to 1608.0s, [0.282, 0.103, 0.89, 0.965]->[0.404, 0.27, 0.812, 0.995]->[0.352, 0.156, 0.925, 0.933]->[0.317, 0.154, 0.924, 0.945]: sit, listen to (a person)."]}]}, {"video": "tjqCzVjojCo.mkv", "clip": [1596, 1675], "conversations": [{"from": "human", "timestamps": 1626.0, "value": "What actions did the person at bbox [0.369, 0.059, 0.878, 0.959] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.369, 0.059, 0.878, 0.959]: sit.", "From 1624.0s to 1625.0s, [0.362, 0.073, 0.816, 0.964]->[0.397, 0.073, 0.841, 0.955]: sit.", "From 1622.0s to 1623.0s, [0.275, 0.088, 0.843, 0.969]->[0.443, 0.069, 0.882, 0.97]: sit, talk to (e.g., self, a person, a group).", "From 1618.0s to 1621.0s, [0.406, 0.095, 0.813, 0.958]->[0.403, 0.1, 0.843, 0.977]->[0.389, 0.09, 0.833, 0.962]->[0.413, 0.093, 0.836, 0.971]: sit.", "At 1616.0s, [0.31, 0.112, 0.745, 0.979]: sit, talk to (e.g., self, a person, a group).", "From 1613.0s to 1614.0s, [0.404, 0.241, 0.757, 0.987]->[0.285, 0.158, 0.747, 0.974]: sit, talk to (e.g., self, a person, a group).", "At 1612.0s, [0.387, 0.218, 0.956, 0.994]: sit.", "At 1611.0s, [0.38, 0.206, 0.917, 0.979]: sit, talk to (e.g., self, a person, a group).", "At 1610.0s, [0.394, 0.197, 0.888, 0.978]: sit, listen to (a person).", "At 1609.0s, [0.388, 0.256, 0.805, 0.974]: sit, listen to (a person), watch (a person).", "From 1605.0s to 1608.0s, [0.282, 0.103, 0.89, 0.965]->[0.404, 0.27, 0.812, 0.995]->[0.352, 0.156, 0.925, 0.933]->[0.317, 0.154, 0.924, 0.945]: sit, listen to (a person)."]}, {"from": "human", "timestamps": 1675.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.508, 0.102, 0.739, 0.89]: sit, answer phone."}]}, {"video": "tjqCzVjojCo.mkv", "clip": [1651, 1691], "conversations": [{"from": "human", "timestamps": 1681.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.506, 0.089, 0.754, 0.884]: sit, answer phone."}, {"from": "human", "timestamps": 1691.0, "value": "What actions is the person in bbox [0.503, 0.091, 0.757, 0.91] doing in the current 
frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.503, 0.091, 0.757, 0.91]: sit, answer phone.", "From 1680.0s to 1690.0s, [0.505, 0.118, 0.75, 0.881]->[0.506, 0.089, 0.754, 0.884]->[0.504, 0.103, 0.747, 0.879]->[0.504, 0.104, 0.747, 0.877]->[0.507, 0.097, 0.747, 0.877]->[0.502, 0.113, 0.754, 0.901]->[0.503, 0.101, 0.749, 0.879]->[0.503, 0.093, 0.747, 0.882]->[0.505, 0.099, 0.735, 0.883]->[0.504, 0.088, 0.758, 0.894]->[0.501, 0.101, 0.746, 0.878]: sit, answer phone.", "From 1671.0s to 1678.0s, [0.507, 0.089, 0.754, 0.896]->[0.505, 0.085, 0.756, 0.894]->[0.506, 0.095, 0.737, 0.891]->[0.504, 0.102, 0.739, 0.889]->[0.508, 0.102, 0.739, 0.89]->[0.503, 0.093, 0.742, 0.888]->[0.506, 0.106, 0.745, 0.874]->[0.506, 0.108, 0.738, 0.878]: sit, answer phone.", "At 1670.0s, [0.503, 0.095, 0.749, 0.883]: sit, carry/hold (an object).", "At 1669.0s, [0.505, 0.099, 0.741, 0.892]: sit, carry/hold (an object), text on/look at a cellphone.", "At 1668.0s, [0.507, 0.086, 0.755, 0.9]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1667.0s, [0.502, 0.1, 0.752, 0.894]: sit, talk to (e.g., self, a person, a group).", "From 1665.0s to 1666.0s, [0.497, 0.085, 0.746, 0.879]->[0.5, 0.111, 0.752, 0.896]: sit.", "At 1663.0s, [0.58, 0.0, 0.985, 0.895]: stand."]}]}, {"video": "tjqCzVjojCo.mkv", "clip": [1722, 1753], "conversations": [{"from": "human", "timestamps": 1752.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.033, 0.386, 0.946]: sit, talk to (e.g., self, a person, a group); [0.643, 0.183, 0.976, 0.771]: listen to (a person), sit, drive (e.g., a car, a truck)."}, {"from": "human", "timestamps": 1753.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.656, 0.199, 0.975, 0.75]?"}, {"from": "gpt", "value": ["At Now, [0.656, 0.199, 0.975, 0.75]: sit, drive (e.g., a car, a truck), listen to (a person).", "At 1752.0s, [0.643, 0.183, 0.976, 0.771]: sit, drive (e.g., a car, a truck), listen to (a person).", "From 1750.0s to 1751.0s, [0.651, 0.199, 0.985, 0.746]->[0.601, 0.191, 0.997, 0.869]: sit, listen to (a person).", "From 1748.0s to 1749.0s, [0.622, 0.188, 0.974, 0.779]->[0.608, 0.201, 0.984, 0.765]: sit.", "At 1747.0s, [0.616, 0.184, 0.965, 0.767]: sit, listen to (a person).", "From 1744.0s to 1746.0s, [0.61, 0.208, 0.968, 0.774]->[0.623, 0.198, 0.971, 0.779]->[0.617, 0.2, 0.975, 0.778]: sit, drive (e.g., a car, a truck), listen to (a person).", "At 1743.0s, [0.598, 0.191, 0.974, 0.79]: sit, listen to (a person)."]}]}, {"video": "tt0t_a1EDCE.mkv", "clip": [912, 956], "conversations": [{"from": "human", "timestamps": 942.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.09, 0.311, 0.642, 0.937]: run/jog; [0.666, 0.074, 0.891, 0.924]: bend/bow (at the waist)."}, {"from": "human", "timestamps": 956.0, "value": "What actions is the person in bbox [0.3, 0.276, 0.599, 0.745] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.3, 0.276, 0.599, 0.745]: sit, put down, watch (a person).", "At 955.0s, [0.31, 0.281, 0.594, 0.746]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 953.0s to 954.0s, [0.379, 0.278, 0.575, 0.745]->[0.31, 0.286, 0.582, 0.742]: sit, turn (e.g., a screwdriver), talk to (e.g., self, a person, a group), 
watch (a person).", "From 950.0s to 952.0s, [0.297, 0.269, 0.612, 0.753]->[0.298, 0.273, 0.602, 0.755]->[0.314, 0.27, 0.614, 0.753]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 949.0s, [0.386, 0.267, 0.593, 0.755]: sit, carry/hold (an object), give/serve (an object) to (a person).", "From 946.0s to 948.0s, [0.296, 0.283, 0.54, 0.731]->[0.315, 0.273, 0.586, 0.752]->[0.368, 0.269, 0.593, 0.753]: sit, carry/hold (an object).", "At 945.0s, [0.298, 0.262, 0.549, 0.724]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "tt0t_a1EDCE.mkv", "clip": [1202, 1234], "conversations": [{"from": "human", "timestamps": 1232.0, "value": "What actions did the person at bbox [0.231, 0.562, 0.447, 0.934] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.231, 0.562, 0.447, 0.934]: stand, listen to (a person), watch (a person).", "From 1230.0s to 1231.0s, [0.637, 0.671, 0.794, 0.922]->[0.36, 0.594, 0.535, 0.93]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1234.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.116, 0.589, 0.336, 0.933]: listen to (a person), stand, watch (a person); [0.516, 0.135, 0.976, 0.924]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "tt0t_a1EDCE.mkv", "clip": [1250, 1294], "conversations": [{"from": "human", "timestamps": 1280.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.52, 0.351, 0.727, 0.919]?"}, {"from": "gpt", "value": ["In the current frame, [0.52, 0.351, 0.727, 0.919]: stand, listen to (a person), watch (a person).", "At 1279.0s, [0.494, 0.344, 0.727, 0.929]: stand, listen to (a person)."]}, {"from": "human", "timestamps": 1294.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.811, 0.094, 0.864, 0.372]: walk."}]}, {"video": "tt0t_a1EDCE.mkv", "clip": [1273, 1340], "conversations": [{"from": "human", "timestamps": 1303.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.232, 0.443, 0.385, 0.936]: listen to (a person), stand, watch (a person); [0.322, 0.305, 0.459, 0.915]: listen to (a person), stand, watch (a person); [0.754, 0.128, 0.903, 0.925]: watch (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1340.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.003, 0.129, 0.55, 0.935]?"}, {"from": "gpt", "value": ["In the current frame, [0.003, 0.129, 0.55, 0.935]: stand, talk to (e.g., self, a person, a group).", "At 1339.0s, [0.0, 0.133, 0.531, 0.952]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1337.0s to 1338.0s, [0.0, 0.137, 0.517, 0.938]->[0.0, 0.141, 0.507, 0.957]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "tt0t_a1EDCE.mkv", "clip": [1372, 1457], "conversations": [{"from": "human", "timestamps": 1402.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.174, 0.363, 0.954]: walk; [0.266, 0.243, 0.368, 0.871]: grab (a person), talk to (e.g., self, a person, a group), walk; [0.344, 0.19, 0.486, 0.927]: listen to (a person), walk; [0.68, 0.332, 0.776, 0.736]: carry/hold (an object), walk; [0.739, 0.363, 0.861, 0.762]: grab (a person), walk."}, 
{"from": "human", "timestamps": 1457.0, "value": "What actions did the person at bbox [0.071, 0.132, 0.698, 0.943] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.071, 0.132, 0.698, 0.943]: lie/sleep.", "At 1456.0s, [0.111, 0.07, 0.968, 0.85]: lie/sleep."]}]}, {"video": "tt0t_a1EDCE.mkv", "clip": [1445, 1483], "conversations": [{"from": "human", "timestamps": 1475.0, "value": "What actions is the person in bbox [0.195, 0.042, 0.801, 0.935] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.195, 0.042, 0.801, 0.935]: stand, talk to (e.g., self, a person, a group).", "At 1474.0s, [0.152, 0.055, 0.773, 0.909]: stand.", "At 1473.0s, [0.007, 0.067, 0.636, 0.933]: stand, talk to (e.g., self, a person, a group).", "From 1471.0s to 1472.0s, [0.0, 0.173, 0.293, 0.925]->[0.027, 0.086, 0.444, 0.919]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1483.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.062, 0.657, 0.932]: stand; [0.421, 0.1, 0.989, 0.918]: stand."}]}, {"video": "tt0t_a1EDCE.mkv", "clip": [1489, 1532], "conversations": [{"from": "human", "timestamps": 1519.0, "value": "What actions did the person at bbox [0.45, 0.326, 0.6, 0.915] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.45, 0.326, 0.6, 0.915]: stand, carry/hold (an object), listen to (a person).", "At 1518.0s, [0.455, 0.318, 0.611, 0.916]: stand, carry/hold (an object), listen to (a person).", "At 1517.0s, [0.432, 0.318, 0.607, 0.926]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1532.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.316, 0.369, 0.467, 0.931]: listen to (a person), stand, watch (a person); [0.435, 0.329, 0.562, 0.912]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.57, 0.502, 0.663, 0.723]: sit."}]}, {"video": "tt0t_a1EDCE.mkv", "clip": [1680, 1735], "conversations": [{"from": "human", "timestamps": 1710.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.532, 0.416, 0.711, 0.933]?"}, {"from": "gpt", "value": ["At Now, [0.532, 0.416, 0.711, 0.933]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1709.0s, [0.564, 0.424, 0.724, 0.925]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1708.0s, [0.201, 0.364, 0.755, 0.939]: stand, push (another person), talk to (e.g., self, a person, a group), watch (a person).", "At 1707.0s, [0.068, 0.294, 0.371, 0.95]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1706.0s, [0.207, 0.307, 0.62, 0.941]: stand, point to (an object), talk to (e.g., self, a person, a group).", "At 1705.0s, [0.232, 0.317, 0.635, 0.918]: stand, point to (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1704.0s, [0.293, 0.316, 0.685, 0.91]: stand, talk to (e.g., self, a person, a group).", "From 1702.0s to 1703.0s, [0.108, 0.332, 0.462, 0.924]->[0.164, 0.313, 0.611, 0.923]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1701.0s, [0.136, 0.217, 0.72, 0.954]: stand, hand wave, talk to (e.g., self, a person, a group), watch (a person).", "From 1699.0s to 1700.0s, [0.2, 0.201, 0.551, 0.892]->[0.019, 0.205, 0.561, 0.906]: 
walk, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1735.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.275, 0.37, 0.514, 0.925]: walk."}]}, {"video": "tt0t_a1EDCE.mkv", "clip": [1738, 1769], "conversations": [{"from": "human", "timestamps": 1768.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.328, 0.245, 0.598, 0.924]: watch (a person), stand, grab (a person), talk to (e.g., self, a person, a group); [0.569, 0.1, 0.99, 0.924]: listen to (a person), stand."}, {"from": "human", "timestamps": 1769.0, "value": "For the person at bbox [0.587, 0.106, 0.996, 0.926], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.587, 0.106, 0.996, 0.926]: stand, listen to (a person).", "At 1768.0s, [0.569, 0.1, 0.99, 0.924]: stand, listen to (a person).", "At 1767.0s, [0.373, 0.075, 0.943, 0.933]: stand, listen to (a person), watch (a person)."]}]}, {"video": "uq_HBsvP548.mkv", "clip": [882, 952], "conversations": [{"from": "human", "timestamps": 912.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.049, 0.008, 0.783, 0.99]?"}, {"from": "gpt", "value": ["At Now, [0.049, 0.008, 0.783, 0.99]: stand, talk to (e.g., self, a person, a group).", "At 911.0s, [0.054, 0.02, 0.795, 0.991]: stand."]}, {"from": "human", "timestamps": 952.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.153, 0.175, 0.249, 0.576]: listen to (a person), stand, watch (a person); [0.221, 0.144, 0.33, 0.529]: listen to (a person), stand, watch (a person); [0.228, 0.435, 0.381, 0.809]: listen to (a person), sit, watch (a person); [0.343, 0.431, 0.53, 0.828]: listen to (a person), sit, watch (a person); [0.397, 0.234, 0.476, 0.455]: listen to (a person), stand, watch (a person); [0.456, 0.271, 0.587, 0.805]: listen to (a person), stand, watch (a person); [0.534, 0.225, 0.622, 0.462]: listen to (a person), stand, watch (a person); [0.553, 0.03, 0.855, 0.943]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.846, 0.075, 0.992, 0.395]: listen to (a person), stand, watch (a person)."}]}, {"video": "uq_HBsvP548.mkv", "clip": [1001, 1041], "conversations": [{"from": "human", "timestamps": 1031.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.183, 0.051, 0.925, 0.993]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1041.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.678, 0.156, 0.776, 0.506]?"}, {"from": "gpt", "value": ["In the current frame, [0.678, 0.156, 0.776, 0.506]: stand, write, listen to (a person), watch (a person).", "At 1040.0s, [0.68, 0.159, 0.776, 0.505]: stand, write, listen to (a person)."]}]}, {"video": "uq_HBsvP548.mkv", "clip": [1089, 1122], "conversations": [{"from": "human", "timestamps": 1119.0, "value": "Describe the actions of the person at bbox [0.299, 0.17, 0.467, 0.651] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.299, 0.17, 0.467, 0.651]: stand, listen to (a person), watch (a person).", "From 1117.0s to 1118.0s, [0.298, 0.158, 0.462, 0.648]->[0.296, 0.163, 0.463, 0.649]: stand, listen to (a person), 
watch (a person)."]}, {"from": "human", "timestamps": 1122.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.19, 0.099, 0.526]: watch (a person), sit; [0.188, 0.066, 0.306, 0.344]: watch (a person), stand; [0.294, 0.15, 0.463, 0.653]: listen to (a person), stand, watch (a person); [0.428, 0.331, 0.5, 0.604]: listen to (a person), sit; [0.486, 0.24, 0.687, 0.658]: watch (a person), sit, touch (an object); [0.57, 0.692, 0.902, 1.0]: watch (a person), sit; [0.607, 0.181, 0.74, 0.507]: listen to (a person), stand, watch (a person); [0.766, 0.083, 0.874, 0.4]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "uq_HBsvP548.mkv", "clip": [1136, 1190], "conversations": [{"from": "human", "timestamps": 1166.0, "value": "Describe the actions of the person at bbox [0.0, 0.086, 0.255, 0.939] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.0, 0.086, 0.255, 0.939]: sit, listen to (a person), watch (a person).", "At 1165.0s, [0.0, 0.09, 0.246, 0.922]: stand, listen to (a person), watch (a person).", "At 1164.0s, [0.753, 0.198, 0.983, 0.767]: sit, stand, listen to (a person), watch (a person).", "At 1163.0s, [0.756, 0.216, 0.98, 0.839]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1190.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.168, 0.006, 0.762, 0.989]: listen to (a person), stand, watch (a person)."}]}, {"video": "uq_HBsvP548.mkv", "clip": [1159, 1246], "conversations": [{"from": "human", "timestamps": 1189.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.273, 0.067, 0.985, 0.974]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1246.0, "value": "For the person at bbox [0.314, 0.231, 0.626, 0.808], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.314, 0.231, 0.626, 0.808]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1245.0s, [0.392, 0.241, 0.669, 0.857]: stand, touch (an object).", "At 1244.0s, [0.431, 0.249, 0.644, 0.909]: stand, listen to (a person), watch (a person)."]}]}, {"video": "uq_HBsvP548.mkv", "clip": [1184, 1217], "conversations": [{"from": "human", "timestamps": 1214.0, "value": "What actions did the person at bbox [0.466, 0.49, 0.561, 0.98] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.466, 0.49, 0.561, 0.98]: run/jog.", "From 1212.0s to 1213.0s, [0.632, 0.509, 0.727, 0.91]->[0.5, 0.499, 0.591, 0.935]: run/jog."]}, {"from": "human", "timestamps": 1217.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.23, 0.303, 0.523, 0.995]: carry/hold (an object), run/jog, watch (a person); [0.581, 0.137, 0.769, 0.991]: carry/hold (an object), walk."}]}, {"video": "uq_HBsvP548.mkv", "clip": [1246, 1319], "conversations": [{"from": "human", "timestamps": 1276.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.346, 0.27, 0.665, 0.837]?"}, {"from": "gpt", "value": ["At Now, [0.346, 0.27, 0.665, 0.837]: sit, stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1275.0s, 
[0.374, 0.273, 0.661, 0.842]: stand, talk to (e.g., self, a person, a group).", "From 1273.0s to 1274.0s, [0.2, 0.032, 1.0, 0.984]->[0.262, 0.026, 1.0, 0.994]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1319.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.179, 0.001, 0.956, 0.975]: watch (a person), sit, touch (an object), talk to (e.g., self, a person, a group)."}]}, {"video": "uq_HBsvP548.mkv", "clip": [1309, 1373], "conversations": [{"from": "human", "timestamps": 1339.0, "value": "For the person at bbox [0.174, 0.008, 0.991, 0.982], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.174, 0.008, 0.991, 0.982]: sit, watch (a person).", "At 1338.0s, [0.202, 0.022, 0.954, 0.99]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1373.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.0, 0.205, 0.142, 0.878]: listen to (a person), stand, watch (a person); [0.307, 0.207, 0.431, 0.593]: listen to (a person), sit, watch (a person); [0.44, 0.236, 0.569, 0.534]: sit, talk to (e.g., self, a person, a group); [0.678, 0.191, 0.829, 0.876]: listen to (a person), sit, touch (an object), watch (a person)."}]}, {"video": "uq_HBsvP548.mkv", "clip": [1358, 1402], "conversations": [{"from": "human", "timestamps": 1388.0, "value": "What actions is the person in bbox [0.0, 0.009, 0.771, 0.986] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.009, 0.771, 0.986]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1387.0s, [0.0, 0.0, 0.752, 0.98]: sit, stand, talk to (e.g., self, a person, a group).", "At 1386.0s, [0.0, 0.0, 0.796, 0.983]: sit, listen to (a person)."]}, {"from": "human", "timestamps": 1402.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.104, 0.026, 0.977, 0.989]: listen to (a person), sit, watch (a person)."}]}, {"video": "uq_HBsvP548.mkv", "clip": [1368, 1460], "conversations": [{"from": "human", "timestamps": 1398.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.01, 0.0, 0.774, 0.975]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1460.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.031, 0.083, 0.462, 0.986]?"}, {"from": "gpt", "value": ["At Now, [0.031, 0.083, 0.462, 0.986]: sit, listen to (a person).", "From 1453.0s to 1459.0s, [0.518, 0.221, 0.665, 0.567]->[0.525, 0.223, 0.671, 0.578]->[0.522, 0.221, 0.663, 0.556]->[0.526, 0.22, 0.665, 0.56]->[0.527, 0.222, 0.662, 0.563]->[0.025, 0.081, 0.456, 0.998]->[0.025, 0.081, 0.446, 0.974]: sit, listen to (a person), watch (a person).", "At 1452.0s, [0.524, 0.222, 0.667, 0.565]: sit, listen to (a person).", "From 1447.0s to 1451.0s, [0.512, 0.217, 0.67, 0.637]->[0.505, 0.213, 0.668, 0.58]->[0.522, 0.217, 0.663, 0.558]->[0.524, 0.218, 0.666, 0.585]->[0.522, 0.223, 0.666, 0.571]: sit, listen to (a person), watch (a person)."]}]}, {"video": "uq_HBsvP548.mkv", "clip": [1516, 1552], "conversations": [{"from": "human", "timestamps": 1546.0, "value": "What actions did the person at bbox [0.409, 
0.117, 0.533, 0.562] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.409, 0.117, 0.533, 0.562]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1545.0s, [0.406, 0.126, 0.523, 0.561]: stand, listen to (a person), watch (a person).", "At 1544.0s, [0.473, 0.172, 0.598, 0.566]: get up, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1552.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.104, 0.017, 0.634, 0.99]: watch (a person), stand."}]}, {"video": "uq_HBsvP548.mkv", "clip": [1489, 1587], "conversations": [{"from": "human", "timestamps": 1519.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.045, 0.67, 1.0]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1587.0, "value": "What actions is the person in bbox [0.027, 0.017, 0.254, 0.94] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.027, 0.017, 0.254, 0.94]: walk, carry/hold (an object).", "At 1586.0s, [0.17, 0.144, 0.371, 0.943]: walk, carry/hold (an object)."]}]}, {"video": "uq_HBsvP548.mkv", "clip": [1648, 1726], "conversations": [{"from": "human", "timestamps": 1678.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.037, 0.673, 0.984]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1726.0, "value": "What actions did the person at bbox [0.201, 0.044, 0.965, 0.995] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.201, 0.044, 0.965, 0.995]: sit, watch (a person).", "From 1724.0s to 1725.0s, [0.115, 0.096, 0.929, 0.982]->[0.18, 0.053, 0.945, 0.988]: sit.", "At 1723.0s, [0.141, 0.036, 0.964, 0.974]: sit, watch (a person)."]}]}, {"video": "uq_HBsvP548.mkv", "clip": [1711, 1763], "conversations": [{"from": "human", "timestamps": 1741.0, "value": "For the person at bbox [0.362, 0.016, 0.949, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.362, 0.016, 0.949, 1.0]: stand.", "At 1740.0s, [0.37, 0.021, 0.95, 1.0]: stand.", "At 1739.0s, [0.348, 0.016, 0.95, 1.0]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1763.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.484, 0.031, 0.95, 1.0]: watch (a person), sit."}]}, {"video": "uwW0ejeosmk.mkv", "clip": [875, 908], "conversations": [{"from": "human", "timestamps": 905.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.164, 0.013, 0.66, 0.984]: stand, touch (an object)."}, {"from": "human", "timestamps": 908.0, "value": "Describe the actions of the person at bbox [0.162, 0.057, 0.615, 0.994] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.162, 0.057, 0.615, 0.994]: stand, touch (an object).", "From 905.0s to 907.0s, [0.164, 0.013, 0.66, 0.984]->[0.169, 0.003, 0.482, 0.993]->[0.157, 0.004, 0.601, 1.0]: stand, touch (an object).", "From 903.0s to 904.0s, [0.094, 0.021, 0.459, 0.984]->[0.153, 0.026, 0.632, 0.992]: stand.", "At 902.0s, [0.02, 0.018, 
0.275, 0.965]: walk."]}]}, {"video": "uwW0ejeosmk.mkv", "clip": [948, 995], "conversations": [{"from": "human", "timestamps": 978.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.023, 0.076, 0.595, 0.987]: stand, talk to (e.g., self, a person, a group); [0.463, 0.266, 0.825, 0.987]: listen to (a person), stand; [0.733, 0.433, 0.936, 0.977]: listen to (a person), stand."}, {"from": "human", "timestamps": 995.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.294, 0.128, 0.83, 0.986]?"}, {"from": "gpt", "value": ["At Now, [0.294, 0.128, 0.83, 0.986]: sit.", "From 993.0s to 994.0s, [0.293, 0.119, 0.83, 0.98]->[0.294, 0.126, 0.833, 0.98]: sit."]}]}, {"video": "uwW0ejeosmk.mkv", "clip": [1014, 1049], "conversations": [{"from": "human", "timestamps": 1044.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.024, 0.0, 0.739, 0.982]: watch (a person), stand."}, {"from": "human", "timestamps": 1049.0, "value": "For the person at bbox [0.381, 0.265, 0.596, 0.678], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.381, 0.265, 0.596, 0.678]: lie/sleep.", "From 1045.0s to 1048.0s, [0.03, 0.003, 0.777, 0.981]->[0.028, 0.02, 0.738, 0.976]->[0.0, 0.013, 0.648, 0.974]->[0.01, 0.01, 0.785, 0.973]: stand.", "From 1039.0s to 1044.0s, [0.018, 0.037, 0.401, 0.941]->[0.021, 0.14, 0.319, 0.964]->[0.017, 0.175, 0.408, 0.972]->[0.012, 0.211, 0.415, 0.957]->[0.036, 0.014, 0.737, 0.994]->[0.024, 0.0, 0.739, 0.982]: stand, watch (a person).", "From 1026.0s to 1038.0s, [0.143, 0.088, 0.719, 0.998]->[0.138, 0.07, 0.734, 0.99]->[0.137, 0.092, 0.743, 0.991]->[0.138, 0.086, 0.736, 0.993]->[0.135, 0.094, 0.731, 1.0]->[0.138, 0.092, 0.723, 1.0]->[0.141, 0.093, 0.726, 0.986]->[0.136, 0.099, 0.728, 0.992]->[0.137, 0.08, 0.734, 0.996]->[0.128, 0.084, 0.732, 0.98]->[0.058, 0.082, 0.698, 0.982]->[0.024, 0.024, 0.59, 1.0]->[0.025, 0.046, 0.558, 1.0]: stand.", "At 1025.0s, [0.533, 0.115, 0.979, 0.967]: stand, watch (a person).", "From 1018.0s to 1024.0s, [0.165, 0.083, 0.793, 1.0]->[0.194, 0.084, 0.816, 0.993]->[0.184, 0.098, 0.807, 0.998]->[0.178, 0.092, 0.781, 0.992]->[0.537, 0.168, 0.97, 0.974]->[0.533, 0.188, 0.963, 0.974]->[0.522, 0.186, 0.964, 0.976]: stand.", "From 1013.0s to 1017.0s, [0.214, 0.091, 0.737, 0.98]->[0.249, 0.089, 0.759, 0.971]->[0.225, 0.089, 0.778, 0.989]->[0.319, 0.037, 0.824, 0.992]->[0.288, 0.054, 0.869, 0.998]: walk."]}]}, {"video": "uwW0ejeosmk.mkv", "clip": [1050, 1099], "conversations": [{"from": "human", "timestamps": 1080.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.053, 0.0, 0.832, 0.973]: stand."}, {"from": "human", "timestamps": 1099.0, "value": "For the person at bbox [0.445, 0.372, 0.541, 0.717], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.445, 0.372, 0.541, 0.717]: walk, carry/hold (an object).", "At 1098.0s, [0.456, 0.401, 0.54, 0.699]: walk, carry/hold (an object)."]}]}, {"video": "uwW0ejeosmk.mkv", "clip": [1190, 1249], "conversations": [{"from": "human", "timestamps": 1220.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.086, 0.095, 0.966, 0.986]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", 
"timestamps": 1249.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.046, 0.052, 0.644, 0.988]?"}, {"from": "gpt", "value": ["At Now, [0.046, 0.052, 0.644, 0.988]: stand, talk to (e.g., self, a person, a group).", "At 1248.0s, [0.037, 0.068, 0.635, 1.0]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "uwW0ejeosmk.mkv", "clip": [1327, 1375], "conversations": [{"from": "human", "timestamps": 1357.0, "value": "What actions is the person in bbox [0.649, 0.348, 0.825, 0.943] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.649, 0.348, 0.825, 0.943]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1356.0s, [0.719, 0.33, 0.906, 0.968]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1375.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.171, 0.146, 0.625, 0.981]: talk to (e.g., self, a person, a group), walk; [0.758, 0.101, 0.983, 0.95]: watch (a person), listen to (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "uwW0ejeosmk.mkv", "clip": [1368, 1415], "conversations": [{"from": "human", "timestamps": 1398.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.003, 0.002, 0.457, 0.988]: listen to (a person), stand, watch (a person); [0.236, 0.203, 0.474, 0.794]: listen to (a person), stand, watch (a person); [0.293, 0.079, 0.928, 0.969]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.748, 0.277, 0.949, 0.821]: listen to (a person), stand."}, {"from": "human", "timestamps": 1415.0, "value": "For the person at bbox [0.303, 0.078, 0.957, 0.978], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.303, 0.078, 0.957, 0.978]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1414.0s, [0.284, 0.083, 0.89, 0.956]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "uwW0ejeosmk.mkv", "clip": [1361, 1474], "conversations": [{"from": "human", "timestamps": 1391.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.013, 0.098, 0.301, 0.825]: listen to (a person), stand, watch (a person); [0.214, 0.203, 0.484, 0.908]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1474.0, "value": "For the person at bbox [0.1, 0.015, 0.951, 0.957], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.1, 0.015, 0.951, 0.957]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1473.0s, [0.311, 0.0, 0.951, 0.965]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "uwW0ejeosmk.mkv", "clip": [1414, 1467], "conversations": [{"from": "human", "timestamps": 1444.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.04, 0.181, 0.469, 0.99]: carry/hold (an object), stand, listen to (a person); [0.333, 0.057, 0.972, 0.966]: carry/hold (an 
object), stand, watch (a person), listen to (a person); [0.394, 0.183, 0.535, 0.624]: listen to (a person), stand, watch (a person); [0.754, 0.146, 0.978, 0.919]: listen to (a person), stand."}, {"from": "human", "timestamps": 1467.0, "value": "What actions did the person at bbox [0.395, 0.031, 0.975, 0.978] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.395, 0.031, 0.975, 0.978]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1466.0s, [0.34, 0.009, 0.983, 0.974]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "uwW0ejeosmk.mkv", "clip": [1505, 1593], "conversations": [{"from": "human", "timestamps": 1535.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.005, 0.339, 0.958]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.236, 0.023, 0.979, 0.966]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1593.0, "value": "What actions is the person in bbox [0.398, 0.362, 0.45, 0.761] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.398, 0.362, 0.45, 0.761]: stand, watch (a person).", "At 1592.0s, [0.413, 0.369, 0.5, 0.772]: stand, listen to (a person), watch (a person)."]}]}, {"video": "uwW0ejeosmk.mkv", "clip": [1604, 1653], "conversations": [{"from": "human", "timestamps": 1634.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.06, 0.014, 0.804, 0.964]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1653.0, "value": "What actions did the person at bbox [0.472, 0.103, 0.962, 0.986] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.472, 0.103, 0.962, 0.986]: stand, watch (a person).", "At 1652.0s, [0.466, 0.09, 0.965, 0.975]: stand."]}]}, {"video": "uwW0ejeosmk.mkv", "clip": [1621, 1730], "conversations": [{"from": "human", "timestamps": 1651.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.074, 0.0, 0.771, 0.975]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1730.0, "value": "What actions did the person at bbox [0.101, 0.276, 0.251, 0.981] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.101, 0.276, 0.251, 0.981]: stand, watch (a person).", "At 1729.0s, [0.034, 0.284, 0.25, 0.985]: stand, watch (a person)."]}]}, {"video": "uwW0ejeosmk.mkv", "clip": [1709, 1746], "conversations": [{"from": "human", "timestamps": 1739.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.793, 0.582, 0.835, 0.76]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1746.0, "value": "What actions did the person at bbox [0.72, 0.521, 0.78, 0.834] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.72, 0.521, 0.78, 0.834]: walk, carry/hold (an object), watch (a person).", "At 1745.0s, [0.8, 0.511, 0.869, 0.829]: walk, carry/hold (an object)."]}]}, {"video": "uzPI7FcF79U.mkv", "clip": [935, 997], "conversations": [{"from": "human", "timestamps": 965.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": 
"gpt", "value": "At now, [0.176, 0.45, 0.466, 0.822]: lie/sleep; [0.641, 0.255, 0.714, 0.533]: stand."}, {"from": "human", "timestamps": 997.0, "value": "What actions did the person at bbox [0.182, 0.465, 0.466, 0.822] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.182, 0.465, 0.466, 0.822]: sit.", "At 996.0s, [0.188, 0.477, 0.462, 0.821]: sit, touch (an object).", "At 995.0s, [0.197, 0.474, 0.463, 0.822]: sit.", "From 993.0s to 994.0s, [0.18, 0.469, 0.462, 0.82]->[0.182, 0.469, 0.456, 0.822]: lie/sleep.", "At 992.0s, [0.185, 0.465, 0.462, 0.825]: sit.", "At 991.0s, [0.208, 0.463, 0.461, 0.82]: sit, touch (an object).", "At 990.0s, [0.194, 0.466, 0.46, 0.823]: sit.", "At 989.0s, [0.193, 0.458, 0.454, 0.822]: lie/sleep.", "At 988.0s, [0.187, 0.469, 0.458, 0.817]: sit.", "From 986.0s to 987.0s, [0.192, 0.472, 0.462, 0.801]->[0.191, 0.463, 0.461, 0.819]: lie/sleep.", "At 985.0s, [0.204, 0.454, 0.472, 0.815]: sit."]}]}, {"video": "uzPI7FcF79U.mkv", "clip": [996, 1056], "conversations": [{"from": "human", "timestamps": 1026.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.376, 0.024, 0.987, 1.0]: bend/bow (at the waist)."}, {"from": "human", "timestamps": 1056.0, "value": "What actions is the person in bbox [0.013, 0.159, 0.971, 0.993] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.013, 0.159, 0.971, 0.993]: sit.", "At 1055.0s, [0.013, 0.181, 0.929, 0.972]: sit."]}]}, {"video": "uzPI7FcF79U.mkv", "clip": [1040, 1088], "conversations": [{"from": "human", "timestamps": 1070.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.085, 0.369, 0.482, 0.981]: listen to (a person), sit; [0.611, 0.167, 0.829, 0.51]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1088.0, "value": "For the person at bbox [0.033, 0.092, 0.881, 0.975], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.033, 0.092, 0.881, 0.975]: sit.", "At 1087.0s, [0.053, 0.071, 0.963, 0.938]: sit.", "At 1086.0s, [0.014, 0.091, 0.858, 0.948]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "uzPI7FcF79U.mkv", "clip": [1142, 1191], "conversations": [{"from": "human", "timestamps": 1172.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.671, 0.149, 0.828, 0.501]?"}, {"from": "gpt", "value": ["In the current frame, [0.671, 0.149, 0.828, 0.501]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1171.0s, [0.675, 0.156, 0.828, 0.493]: sit, watch (a person).", "From 1168.0s to 1170.0s, [0.626, 0.157, 0.827, 0.51]->[0.666, 0.151, 0.83, 0.496]->[0.666, 0.154, 0.828, 0.497]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1167.0s, [0.658, 0.158, 0.827, 0.502]: sit, watch (a person).", "From 1163.0s to 1166.0s, [0.266, 0.032, 0.987, 0.978]->[0.265, 0.0, 0.978, 0.989]->[0.678, 0.169, 0.829, 0.503]->[0.675, 0.15, 0.83, 0.504]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1162.0s, [0.252, 0.033, 0.987, 0.972]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1191.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.163, 0.178, 
0.987, 0.997]: listen to (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "uzPI7FcF79U.mkv", "clip": [1164, 1254], "conversations": [{"from": "human", "timestamps": 1194.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.163, 0.01, 0.958, 0.992]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1254.0, "value": "For the person at bbox [0.025, 0.0, 0.915, 0.99], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.025, 0.0, 0.915, 0.99]: sit.", "At 1253.0s, [0.023, 0.0, 0.895, 0.974]: sit, stand."]}]}, {"video": "uzPI7FcF79U.mkv", "clip": [1175, 1236], "conversations": [{"from": "human", "timestamps": 1205.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.403, 0.324, 0.529, 0.685]: sit; [0.524, 0.352, 0.768, 0.709]: sit."}, {"from": "human", "timestamps": 1236.0, "value": "Describe the actions of the person at bbox [0.013, 0.0, 0.913, 0.979] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.013, 0.0, 0.913, 0.979]: sit.", "At 1235.0s, [0.013, 0.0, 0.942, 0.997]: sit, listen to (a person).", "At 1234.0s, [0.013, 0.002, 0.897, 0.982]: sit, listen to (a person), watch (a person)."]}]}, {"video": "uzPI7FcF79U.mkv", "clip": [1259, 1319], "conversations": [{"from": "human", "timestamps": 1289.0, "value": "What actions did the person at bbox [0.161, 0.012, 0.98, 0.991] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.161, 0.012, 0.98, 0.991]: sit, talk to (e.g., self, a person, a group).", "At 1288.0s, [0.179, 0.012, 0.973, 0.998]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1319.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.013, 0.001, 0.883, 0.963]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "uzPI7FcF79U.mkv", "clip": [1269, 1361], "conversations": [{"from": "human", "timestamps": 1299.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.161, 0.009, 0.987, 1.0]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1361.0, "value": "What actions did the person at bbox [0.021, 0.0, 0.935, 0.971] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.021, 0.0, 0.935, 0.971]: sit, talk to (e.g., self, a person, a group).", "At 1360.0s, [0.025, 0.0, 0.876, 0.971]: sit, talk to (e.g., self, a person, a group).", "At 1359.0s, [0.013, 0.0, 0.876, 0.967]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "uzPI7FcF79U.mkv", "clip": [1302, 1368], "conversations": [{"from": "human", "timestamps": 1332.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.179, 0.054, 0.94, 0.999]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1368.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.013, 0.0, 0.662, 0.978]?"}, {"from": "gpt", "value": ["In the current frame, [0.013, 0.0, 0.662, 0.978]: sit, talk to (e.g., self, a person, a group).", "From 1365.0s to 1367.0s, [0.027, 0.0, 0.974, 0.96]->[0.013, 0.0, 0.977, 0.994]->[0.02, 0.002, 0.94, 0.981]: 
sit, talk to (e.g., self, a person, a group).", "At 1364.0s, [0.037, 0.0, 0.982, 0.962]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "uzPI7FcF79U.mkv", "clip": [1353, 1428], "conversations": [{"from": "human", "timestamps": 1383.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.013, 0.006, 0.987, 1.0]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1428.0, "value": "Describe the actions of the person at bbox [0.013, 0.013, 0.943, 0.986] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.013, 0.013, 0.943, 0.986]: stand, talk to (e.g., self, a person, a group).", "From 1425.0s to 1427.0s, [0.032, 0.0, 0.979, 0.981]->[0.022, 0.0, 0.931, 0.974]->[0.013, 0.0, 0.929, 0.977]: stand, talk to (e.g., self, a person, a group).", "At 1424.0s, [0.025, 0.0, 0.908, 0.969]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "uzPI7FcF79U.mkv", "clip": [1538, 1606], "conversations": [{"from": "human", "timestamps": 1568.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.477, 0.159, 0.625, 0.481]?"}, {"from": "gpt", "value": ["In the current frame, [0.477, 0.159, 0.625, 0.481]: walk, carry/hold (an object), push (an object).", "At 1567.0s, [0.455, 0.187, 0.618, 0.495]: walk, push (an object).", "At 1566.0s, [0.342, 0.334, 0.439, 0.597]: stand, watch (a person).", "At 1565.0s, [0.386, 0.332, 0.452, 0.615]: bend/bow (at the waist), carry/hold (an object), watch (a person)."]}, {"from": "human", "timestamps": 1606.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.28, 0.485, 0.395, 0.776]: sit; [0.286, 0.396, 0.376, 0.592]: carry/hold (an object), stand."}]}, {"video": "uzPI7FcF79U.mkv", "clip": [1609, 1659], "conversations": [{"from": "human", "timestamps": 1639.0, "value": "What actions is the person in bbox [0.755, 0.338, 0.848, 0.618] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.755, 0.338, 0.848, 0.618]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1638.0s, [0.756, 0.338, 0.849, 0.613]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1637.0s, [0.782, 0.343, 0.836, 0.621]: stand, talk to (e.g., self, a person, a group).", "At 1636.0s, [0.772, 0.34, 0.843, 0.627]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1635.0s, [0.778, 0.339, 0.855, 0.609]: walk."]}, {"from": "human", "timestamps": 1659.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.257, 0.02, 0.918, 0.982]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "uzPI7FcF79U.mkv", "clip": [1729, 1767], "conversations": [{"from": "human", "timestamps": 1759.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.218, 0.434, 0.258, 0.663]: stand."}, {"from": "human", "timestamps": 1767.0, "value": "Describe the actions of the person at bbox [0.216, 0.432, 0.257, 0.664] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.216, 0.432, 0.257, 0.664]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 
1766.0s, [0.216, 0.428, 0.261, 0.66]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "v0L-WkMO3s4.mp4", "clip": [876, 922], "conversations": [{"from": "human", "timestamps": 906.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.026, 0.049, 0.737, 0.988]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.654, 0.086, 1.0, 0.991]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 922.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.587, 0.08, 0.814, 0.972]?"}, {"from": "gpt", "value": ["At Now, [0.587, 0.08, 0.814, 0.972]: stand, listen to (a person), watch (a person).", "From 918.0s to 921.0s, [0.755, 0.0, 1.0, 1.0]->[0.576, 0.087, 0.823, 0.988]->[0.589, 0.079, 0.823, 1.0]->[0.585, 0.073, 0.816, 1.0]: stand, listen to (a person), watch (a person).", "From 916.0s to 917.0s, [0.321, 0.025, 0.856, 0.974]->[0.3, 0.036, 0.86, 0.981]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "v0L-WkMO3s4.mp4", "clip": [945, 1020], "conversations": [{"from": "human", "timestamps": 975.0, "value": "What actions is the person in bbox [0.564, 0.21, 0.995, 0.984] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.564, 0.21, 0.995, 0.984]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 973.0s to 974.0s, [0.555, 0.202, 1.0, 1.0]->[0.607, 0.212, 0.994, 0.979]: stand, listen to (a person), watch (a person).", "At 972.0s, [0.595, 0.221, 0.943, 0.971]: stand, listen to (a person).", "From 969.0s to 971.0s, [0.632, 0.22, 0.941, 0.975]->[0.62, 0.217, 0.941, 0.995]->[0.608, 0.226, 0.949, 0.977]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1020.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.0, 0.562, 0.966]: listen to (a person), stand, watch (a person); [0.387, 0.021, 0.828, 0.967]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "v0L-WkMO3s4.mp4", "clip": [1004, 1063], "conversations": [{"from": "human", "timestamps": 1034.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.017, 0.0, 0.682, 0.971]?"}, {"from": "gpt", "value": ["In the current frame, [0.017, 0.0, 0.682, 0.971]: stand, listen to (a person), watch (a person).", "From 1032.0s to 1033.0s, [0.0, 0.007, 0.607, 0.969]->[0.0, 0.003, 0.635, 0.99]: stand, listen to (a person), watch (a person).", "From 1028.0s to 1031.0s, [0.096, 0.0, 0.643, 0.967]->[0.093, 0.002, 0.64, 0.971]->[0.097, 0.0, 0.629, 0.971]->[0.074, 0.0, 0.629, 0.971]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1015.0s to 1027.0s, [0.0, 0.006, 0.483, 0.996]->[0.0, 0.0, 0.549, 0.987]->[0.0, 0.0, 0.578, 0.98]->[0.0, 0.0, 0.55, 0.982]->[0.075, 0.0, 0.588, 0.98]->[0.0, 0.0, 0.562, 0.966]->[0.0, 0.0, 0.602, 0.962]->[0.0, 0.0, 0.592, 0.957]->[0.0, 0.0, 0.619, 0.962]->[0.0, 0.0, 0.612, 0.977]->[0.005, 0.007, 0.615, 0.961]->[0.087, 0.0, 0.665, 0.979]->[0.089, 0.0, 0.65, 0.969]: stand, listen to (a person), watch (a person).", "From 1013.0s to 1014.0s, [0.009, 0.008, 0.706, 0.975]->[0.064, 0.004, 0.763, 0.969]: stand, talk 
to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1063.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.054, 0.15, 0.396, 0.991]: listen to (a person), stand, watch (a person); [0.296, 0.059, 0.885, 0.999]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.809, 0.414, 1.0, 0.983]: dance."}]}, {"video": "v0L-WkMO3s4.mp4", "clip": [1003, 1100], "conversations": [{"from": "human", "timestamps": 1033.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.003, 0.635, 0.99]: listen to (a person), stand, watch (a person); [0.437, 0.072, 0.87, 0.983]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1100.0, "value": "For the person at bbox [0.055, 0.036, 0.619, 0.991], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.055, 0.036, 0.619, 0.991]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1099.0s, [0.055, 0.045, 0.621, 0.99]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "v0L-WkMO3s4.mp4", "clip": [1124, 1157], "conversations": [{"from": "human", "timestamps": 1154.0, "value": "For the person at bbox [0.0, 0.0, 0.492, 0.988], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.0, 0.492, 0.988]: stand, listen to (a person), watch (a person).", "From 1151.0s to 1153.0s, [0.0, 0.0, 0.499, 0.966]->[0.0, 0.0, 0.493, 0.969]->[0.0, 0.0, 0.49, 0.975]: stand, listen to (a person), watch (a person).", "At 1150.0s, [0.043, 0.058, 0.721, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1149.0s, [0.043, 0.047, 0.721, 1.0]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1148.0s, [0.054, 0.039, 0.708, 0.986]: stand, listen to (a person), watch (a person).", "At 1147.0s, [0.087, 0.035, 0.735, 0.989]: stand, listen to (a person).", "At 1146.0s, [0.0, 0.008, 0.425, 0.974]: stand, listen to (a person), watch (a person).", "At 1145.0s, [0.0, 0.003, 0.465, 0.98]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1143.0s to 1144.0s, [0.295, 0.04, 0.93, 0.981]->[0.176, 0.03, 0.808, 0.977]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1142.0s, [0.212, 0.001, 0.807, 0.974]: stand, talk to (e.g., self, a person, a group).", "At 1141.0s, [0.182, 0.003, 0.784, 0.983]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1140.0s, [0.203, 0.0, 0.794, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1137.0s to 1139.0s, [0.0, 0.0, 0.356, 0.995]->[0.0, 0.015, 0.501, 1.0]->[0.005, 0.0, 0.46, 1.0]: stand, listen to (a person), watch (a person).", "At 1136.0s, [0.0, 0.073, 0.208, 0.986]: walk, listen to (a person)."]}, {"from": "human", "timestamps": 1157.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.003, 0.792, 0.981]: listen to (a person), stand, watch (a person); [0.801, 0.1, 1.0, 0.981]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "v0L-WkMO3s4.mp4", "clip": [1169, 1258], "conversations": [{"from": "human", 
"timestamps": 1199.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.018, 0.583, 0.962]: listen to (a person), stand; [0.336, 0.057, 0.89, 0.97]: stand, answer phone."}, {"from": "human", "timestamps": 1258.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.059, 0.01, 0.744, 0.985]?"}, {"from": "gpt", "value": ["In the current frame, [0.059, 0.01, 0.744, 0.985]: stand, answer phone.", "From 1255.0s to 1257.0s, [0.058, 0.015, 0.729, 0.978]->[0.049, 0.0, 0.747, 0.984]->[0.045, 0.0, 0.739, 0.99]: stand, answer phone."]}]}, {"video": "v0L-WkMO3s4.mp4", "clip": [1183, 1249], "conversations": [{"from": "human", "timestamps": 1213.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.02, 0.045, 0.73, 0.977]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1249.0, "value": "What actions did the person at bbox [0.063, 0.017, 0.743, 0.982] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.063, 0.017, 0.743, 0.982]: stand, answer phone.", "From 1240.0s to 1248.0s, [0.063, 0.033, 0.771, 0.983]->[0.073, 0.022, 0.739, 0.982]->[0.074, 0.043, 0.715, 0.972]->[0.048, 0.009, 0.714, 0.985]->[0.052, 0.008, 0.797, 0.987]->[0.109, 0.033, 0.733, 0.983]->[0.112, 0.029, 0.769, 0.974]->[0.087, 0.015, 0.758, 0.982]->[0.086, 0.021, 0.763, 0.984]: stand, answer phone."]}]}, {"video": "v0L-WkMO3s4.mp4", "clip": [1231, 1282], "conversations": [{"from": "human", "timestamps": 1261.0, "value": "For the person at bbox [0.074, 0.003, 0.708, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.074, 0.003, 0.708, 1.0]: stand, answer phone.", "From 1255.0s to 1260.0s, [0.058, 0.015, 0.729, 0.978]->[0.049, 0.0, 0.747, 0.984]->[0.045, 0.0, 0.739, 0.99]->[0.059, 0.01, 0.744, 0.985]->[0.12, 0.024, 0.752, 0.976]->[0.103, 0.019, 0.752, 0.978]: stand, answer phone."]}, {"from": "human", "timestamps": 1282.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.028, 0.0, 0.472, 0.975]: stand, talk to (e.g., self, a person, a group); [0.206, 0.049, 0.921, 0.995]: carry/hold (an object), stand, watch (a person), listen to (a person)."}]}, {"video": "v0L-WkMO3s4.mp4", "clip": [1238, 1376], "conversations": [{"from": "human", "timestamps": 1268.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.289, 0.03, 0.706, 0.934]: listen to (a person), stand, talk to (e.g., self, a person, a group), answer phone."}, {"from": "human", "timestamps": 1376.0, "value": "What actions did the person at bbox [0.176, 0.0, 0.715, 0.986] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.176, 0.0, 0.715, 0.986]: stand, watch (a person).", "At 1375.0s, [0.105, 0.013, 0.626, 0.967]: stand, watch (a person).", "At 1374.0s, [0.33, 0.043, 0.646, 0.998]: stand, touch (an object).", "At 1373.0s, [0.322, 0.04, 0.637, 0.993]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "v0L-WkMO3s4.mp4", "clip": [1318, 1377], "conversations": [{"from": "human", "timestamps": 1348.0, "value": "For the person at bbox [0.3, 0.0, 0.921, 0.995], what are their actions in the current frame and in prior 
frames?"}, {"from": "gpt", "value": ["In the current frame, [0.3, 0.0, 0.921, 0.995]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1345.0s to 1347.0s, [0.28, 0.0, 0.928, 0.99]->[0.26, 0.002, 0.915, 0.988]->[0.282, 0.001, 0.909, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1377.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.386, 0.0, 0.979, 1.0]: stand; [0.646, 0.287, 0.974, 0.995]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "v0L-WkMO3s4.mp4", "clip": [1356, 1432], "conversations": [{"from": "human", "timestamps": 1386.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.09, 0.042, 0.564, 0.988]: listen to (a person), stand, watch (a person); [0.717, 0.061, 1.0, 0.998]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1432.0, "value": "What actions is the person in bbox [0.043, 0.005, 0.604, 0.985] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.043, 0.005, 0.604, 0.985]: stand, listen to (a person), watch (a person).", "At 1431.0s, [0.066, 0.0, 0.57, 1.0]: stand, listen to (a person), watch (a person).", "At 1430.0s, [0.0, 0.0, 0.372, 0.994]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1429.0s, [0.201, 0.033, 0.63, 0.986]: stand, talk to (e.g., self, a person, a group).", "At 1428.0s, [0.008, 0.014, 0.347, 0.965]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1426.0s to 1427.0s, [0.146, 0.011, 0.617, 1.0]->[0.002, 0.023, 0.393, 1.0]: stand, talk to (e.g., self, a person, a group).", "At 1425.0s, [0.137, 0.01, 0.674, 0.995]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "v0L-WkMO3s4.mp4", "clip": [1373, 1440], "conversations": [{"from": "human", "timestamps": 1403.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.069, 0.013, 0.593, 0.992]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.679, 0.031, 1.0, 0.994]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1440.0, "value": "For the person at bbox [0.0, 0.017, 0.463, 0.989], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.017, 0.463, 0.989]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1439.0s, [0.004, 0.009, 0.464, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "v0L-WkMO3s4.mp4", "clip": [1437, 1494], "conversations": [{"from": "human", "timestamps": 1467.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.001, 0.171, 0.26, 0.998]: listen to (a person), stand; [0.324, 0.026, 0.872, 0.98]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1494.0, "value": "What actions is the person in bbox [0.217, 0.271, 0.372, 0.969] doing in the current frame, and what were the person previous actions?"}, {"from": 
"gpt", "value": ["At Now, [0.217, 0.271, 0.372, 0.969]: stand, listen to (a person), watch (a person).", "From 1492.0s to 1493.0s, [0.21, 0.273, 0.37, 0.981]->[0.219, 0.277, 0.368, 0.964]: stand, listen to (a person), watch (a person).", "At 1491.0s, [0.231, 0.272, 0.376, 0.981]: stand, listen to (a person).", "At 1490.0s, [0.213, 0.271, 0.364, 0.992]: stand, listen to (a person), watch (a person).", "At 1489.0s, [0.222, 0.279, 0.374, 0.964]: stand, listen to (a person).", "From 1484.0s to 1485.0s, [0.0, 0.314, 0.214, 0.982]->[0.004, 0.33, 0.196, 0.978]: stand, listen to (a person), watch (a person).", "From 1481.0s to 1483.0s, [0.001, 0.323, 0.226, 0.971]->[0.005, 0.324, 0.21, 0.977]->[0.002, 0.306, 0.201, 0.964]: stand, listen to (a person)."]}]}, {"video": "v0L-WkMO3s4.mp4", "clip": [1516, 1553], "conversations": [{"from": "human", "timestamps": 1546.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.385, 0.089, 0.863, 0.972]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1553.0, "value": "Describe the actions of the person at bbox [0.408, 0.014, 0.749, 0.935] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.408, 0.014, 0.749, 0.935]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1551.0s to 1552.0s, [0.423, 0.017, 0.744, 0.945]->[0.418, 0.022, 0.756, 0.933]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "v0L-WkMO3s4.mp4", "clip": [1482, 1583], "conversations": [{"from": "human", "timestamps": 1512.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.243, 0.055, 0.406, 0.993]: listen to (a person), stand, watch (a person); [0.539, 0.223, 0.724, 0.994]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.648, 0.119, 0.866, 0.991]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1583.0, "value": "What actions is the person in bbox [0.066, 0.176, 0.5, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.066, 0.176, 0.5, 1.0]: stand, listen to (a person), watch (a person).", "At 1582.0s, [0.077, 0.161, 0.501, 0.994]: stand, listen to (a person), watch (a person)."]}]}, {"video": "v0L-WkMO3s4.mp4", "clip": [1532, 1587], "conversations": [{"from": "human", "timestamps": 1562.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.151, 0.137, 0.526, 0.985]: listen to (a person), stand, watch (a person); [0.412, 0.01, 0.761, 0.995]: watch (a person), listen to (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1587.0, "value": "Describe the actions of the person at bbox [0.362, 0.067, 0.858, 0.979] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.362, 0.067, 0.858, 0.979]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1585.0s to 1586.0s, [0.321, 0.049, 0.846, 0.972]->[0.325, 0.041, 0.842, 0.967]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "v0L-WkMO3s4.mp4", "clip": [1613, 1646], "conversations": [{"from": "human", "timestamps": 1643.0, "value": "What 
actions is the person in bbox [0.07, 0.023, 0.581, 0.995] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.07, 0.023, 0.581, 0.995]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1642.0s, [0.047, 0.006, 0.584, 0.992]: stand, talk to (e.g., self, a person, a group).", "At 1641.0s, [0.056, 0.003, 0.599, 0.992]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1640.0s, [0.062, 0.021, 0.594, 0.991]: stand, talk to (e.g., self, a person, a group).", "From 1637.0s to 1639.0s, [0.017, 0.004, 0.642, 0.999]->[0.025, 0.02, 0.601, 1.0]->[0.057, 0.013, 0.57, 0.999]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1636.0s, [0.042, 0.032, 0.576, 0.981]: sit, stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1634.0s to 1635.0s, [0.0, 0.005, 0.444, 1.0]->[0.0, 0.016, 0.449, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1633.0s, [0.053, 0.009, 0.681, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1632.0s, [0.072, 0.011, 0.681, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1631.0s, [0.05, 0.03, 0.609, 0.994]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1630.0s, [0.049, 0.044, 0.63, 0.989]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1624.0s to 1629.0s, [0.0, 0.0, 0.448, 1.0]->[0.0, 0.0, 0.435, 1.0]->[0.0, 0.012, 0.428, 1.0]->[0.0, 0.02, 0.431, 1.0]->[0.0, 0.0, 0.358, 1.0]->[0.0, 0.002, 0.38, 1.0]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1646.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.07, 0.058, 0.606, 0.983]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.545, 0.004, 0.975, 0.988]: listen to (a person), stand, watch (a person)."}]}, {"video": "v0L-WkMO3s4.mp4", "clip": [1600, 1691], "conversations": [{"from": "human", "timestamps": 1630.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.049, 0.044, 0.63, 0.989]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.653, 0.027, 1.0, 1.0]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1691.0, "value": "For the person at bbox [0.52, 0.11, 0.835, 0.8], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.52, 0.11, 0.835, 0.8]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1689.0s to 1690.0s, [0.53, 0.131, 0.813, 0.797]->[0.534, 0.121, 0.805, 0.789]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1688.0s, [0.536, 0.133, 0.803, 0.794]: stand, touch (an object)."]}]}, {"video": "v0L-WkMO3s4.mp4", "clip": [1672, 1727], "conversations": [{"from": "human", "timestamps": 1702.0, "value": "Describe the actions of the person at bbox [0.068, 0.0, 0.583, 0.973] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.068, 0.0, 0.583, 0.973]: stand, touch (an object), listen to (a person), watch (a person).", "From 1698.0s to 1701.0s, [0.062, 0.0, 0.564, 0.971]->[0.062, 0.0, 0.596, 0.976]->[0.069, 0.0, 0.584, 0.972]->[0.074, 0.0, 0.582, 0.979]: stand, touch (an object), listen to (a 
person), watch (a person).", "At 1697.0s, [0.098, 0.0, 0.675, 0.984]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1689.0s to 1696.0s, [0.017, 0.0, 0.534, 0.97]->[0.038, 0.0, 0.536, 0.969]->[0.028, 0.0, 0.555, 0.96]->[0.03, 0.0, 0.546, 0.963]->[0.037, 0.0, 0.55, 0.977]->[0.034, 0.0, 0.562, 0.977]->[0.04, 0.0, 0.581, 0.978]->[0.047, 0.0, 0.567, 0.972]: stand, touch (an object), listen to (a person), watch (a person).", "At 1688.0s, [0.0, 0.0, 0.433, 0.972]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1727.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.321, 0.048, 0.55, 0.978]: watch (a person), stand."}]}, {"video": "v0L-WkMO3s4.mp4", "clip": [1745, 1790], "conversations": [{"from": "human", "timestamps": 1775.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.083, 0.124, 0.524, 0.988]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.429, 0.005, 0.836, 0.979]: carry/hold (an object), stand, talk to (e.g., self, a person, a group), listen to (a person)."}, {"from": "human", "timestamps": 1790.0, "value": "What actions is the person in bbox [0.417, 0.02, 0.858, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.417, 0.02, 0.858, 1.0]: stand, carry/hold (an object).", "At 1789.0s, [0.402, 0.011, 0.817, 0.984]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "vBbjA4tWCPg.mp4", "clip": [1184, 1252], "conversations": [{"from": "human", "timestamps": 1214.0, "value": "What actions is the person in bbox [0.094, 0.011, 0.86, 0.985] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.094, 0.011, 0.86, 0.985]: sit, smoke.", "At 1213.0s, [0.075, 0.008, 0.918, 1.0]: sit, smoke.", "From 1211.0s to 1212.0s, [0.1, 0.031, 0.893, 1.0]->[0.09, 0.02, 0.886, 1.0]: sit, smoke, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1252.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.616, 0.215, 0.926, 0.896]: watch (a person), sit, touch (an object), talk to (e.g., self, a person, a group)."}]}, {"video": "vBbjA4tWCPg.mp4", "clip": [1277, 1315], "conversations": [{"from": "human", "timestamps": 1307.0, "value": "What actions is the person in bbox [0.182, 0.061, 0.938, 0.988] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.182, 0.061, 0.938, 0.988]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1306.0s, [0.204, 0.025, 0.936, 0.978]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1304.0s to 1305.0s, [0.189, 0.018, 0.933, 0.967]->[0.162, 0.0, 0.924, 0.996]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1315.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.068, 0.089, 0.26, 0.983]: walk."}]}, {"video": "vBbjA4tWCPg.mp4", "clip": [1272, 1338], "conversations": [{"from": "human", "timestamps": 1302.0, "value": "What actions is each person 
performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.312, 0.03, 0.965, 0.983]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1338.0, "value": "What actions is the person in bbox [0.072, 0.004, 0.497, 0.654] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.072, 0.004, 0.497, 0.654]: bend/bow (at the waist), carry/hold (an object), throw.", "At 1337.0s, [0.094, 0.077, 0.479, 0.947]: bend/bow (at the waist), carry/hold (an object)."]}]}, {"video": "vBbjA4tWCPg.mp4", "clip": [1359, 1465], "conversations": [{"from": "human", "timestamps": 1389.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.075, 0.019, 0.578, 0.998]: listen to (a person), sit, watch (a person); [0.342, 0.078, 0.917, 0.986]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1465.0, "value": "What actions is the person in bbox [0.669, 0.167, 0.897, 0.769] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.669, 0.167, 0.897, 0.769]: sit, touch (an object), listen to (a person), watch (a person).", "From 1463.0s to 1464.0s, [0.721, 0.217, 0.911, 0.891]->[0.696, 0.168, 0.913, 0.732]: sit, touch (an object), listen to (a person), watch (a person)."]}]}, {"video": "vBbjA4tWCPg.mp4", "clip": [1420, 1498], "conversations": [{"from": "human", "timestamps": 1450.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.054, 0.057, 0.367, 0.98]: listen to (a person), sit, touch (an object), watch (a person); [0.276, 0.253, 0.652, 0.95]: sit, touch (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1498.0, "value": "For the person at bbox [0.179, 0.006, 0.994, 0.99], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.179, 0.006, 0.994, 0.99]: stand, talk to (e.g., self, a person, a group).", "At 1497.0s, [0.003, 0.026, 0.933, 0.98]: stand, talk to (e.g., self, a person, a group).", "At 1496.0s, [0.2, 0.006, 0.921, 0.994]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1495.0s, [0.448, 0.144, 0.881, 0.836]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1494.0s, [0.457, 0.148, 0.768, 0.742]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "vBbjA4tWCPg.mp4", "clip": [1478, 1532], "conversations": [{"from": "human", "timestamps": 1508.0, "value": "What actions did the person at bbox [0.075, 0.548, 0.408, 0.973] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.075, 0.548, 0.408, 0.973]: lie/sleep.", "From 1505.0s to 1507.0s, [0.076, 0.542, 0.399, 0.952]->[0.077, 0.541, 0.46, 0.959]->[0.078, 0.558, 0.399, 0.962]: lie/sleep."]}, {"from": "human", "timestamps": 1532.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.055, 0.24, 0.359, 0.972]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.667, 0.0, 0.917, 0.998]: listen to (a person), watch (a person), bend/bow (at the waist)."}]}, {"video": "vBbjA4tWCPg.mp4", "clip": [1539, 1600], "conversations": 
[{"from": "human", "timestamps": 1569.0, "value": "What actions is the person in bbox [0.051, 0.012, 0.613, 0.987] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.051, 0.012, 0.613, 0.987]: stand, talk to (e.g., self, a person, a group).", "At 1568.0s, [0.066, 0.002, 0.685, 0.98]: stand, talk to (e.g., self, a person, a group).", "At 1567.0s, [0.057, 0.031, 0.701, 0.963]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1566.0s, [0.069, 0.11, 0.633, 0.966]: stand, listen to (a person)."]}, {"from": "human", "timestamps": 1600.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.405, 0.311, 0.5, 0.468]: sit, drive (e.g., a car, a truck)."}]}, {"video": "vBbjA4tWCPg.mp4", "clip": [1611, 1674], "conversations": [{"from": "human", "timestamps": 1641.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.078, 0.008, 0.224, 0.996]: listen to (a person), stand, watch (a person); [0.421, 0.208, 0.932, 0.995]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1674.0, "value": "Describe the actions of the person at bbox [0.106, 0.08, 0.765, 0.981] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.106, 0.08, 0.765, 0.981]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1673.0s, [0.121, 0.06, 0.853, 0.972]: stand, watch (a person).", "From 1671.0s to 1672.0s, [0.091, 0.065, 0.804, 0.961]->[0.101, 0.058, 0.761, 0.953]: stand, listen to (a person), watch (a person)."]}]}, {"video": "vBbjA4tWCPg.mp4", "clip": [1648, 1687], "conversations": [{"from": "human", "timestamps": 1678.0, "value": "For the person at bbox [0.044, 0.071, 0.642, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.044, 0.071, 0.642, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1677.0s, [0.058, 0.055, 0.549, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1687.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.398, 0.003, 0.945, 0.988]: watch (a person), talk to (e.g., self, a person, a group), touch (an object), bend/bow (at the waist)."}]}, {"video": "vBbjA4tWCPg.mp4", "clip": [1720, 1768], "conversations": [{"from": "human", "timestamps": 1750.0, "value": "What actions is the person in bbox [0.676, 0.104, 0.917, 0.94] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.676, 0.104, 0.917, 0.94]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1749.0s, [0.681, 0.119, 0.923, 0.944]: stand, drink, talk to (e.g., self, a person, a group).", "At 1748.0s, [0.678, 0.123, 0.922, 0.964]: stand, carry/hold (an object), drink, listen to (a person).", "From 1746.0s to 1747.0s, [0.67, 0.118, 0.922, 0.953]->[0.678, 0.12, 0.919, 0.975]: stand, drink, listen to (a person), watch (a person).", "At 1745.0s, [0.66, 0.108, 0.924, 0.918]: stand, drink, talk to (e.g., self, a person, a group), watch (a person).", "At 1744.0s, [0.666, 0.118, 0.927, 0.927]: stand, carry/hold (an object), talk to (e.g., 
self, a person, a group), watch (a person).", "At 1743.0s, [0.684, 0.127, 0.921, 0.96]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1742.0s, [0.68, 0.138, 0.925, 0.919]: stand, drink, talk to (e.g., self, a person, a group).", "At 1741.0s, [0.686, 0.14, 0.926, 0.878]: stand, talk to (e.g., self, a person, a group).", "At 1740.0s, [0.68, 0.128, 0.928, 0.964]: stand, drink, talk to (e.g., self, a person, a group).", "At 1739.0s, [0.651, 0.124, 0.92, 0.917]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1738.0s, [0.653, 0.111, 0.923, 0.809]: stand, drink, talk to (e.g., self, a person, a group), watch (a person).", "At 1737.0s, [0.613, 0.163, 0.914, 0.982]: stand, drink, listen to (a person), talk to (e.g., self, a person, a group).", "From 1733.0s to 1736.0s, [0.618, 0.104, 0.917, 1.0]->[0.573, 0.103, 0.932, 0.966]->[0.575, 0.104, 0.93, 0.964]->[0.656, 0.16, 0.897, 1.0]: stand, drink, listen to (a person).", "At 1732.0s, [0.612, 0.178, 0.905, 0.979]: stand.", "At 1731.0s, [0.556, 0.14, 0.919, 0.915]: stand, carry/hold (an object), take (an object) from (a person), talk to (e.g., self, a person, a group).", "At 1730.0s, [0.633, 0.071, 0.919, 0.977]: stand, take (an object) from (a person), watch (a person)."]}, {"from": "human", "timestamps": 1768.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.06, 0.001, 0.622, 0.954]: read, sit, touch (an object)."}]}, {"video": "vfjywN5CN0Y.mkv", "clip": [898, 1015], "conversations": [{"from": "human", "timestamps": 928.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.347, 0.413, 0.78, 0.983]: climb (e.g., a mountain), walk."}, {"from": "human", "timestamps": 1015.0, "value": "Describe the actions of the person at bbox [0.314, 0.202, 0.94, 0.986] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.314, 0.202, 0.94, 0.986]: stand, carry/hold (an object), close (e.g., a door, a box).", "From 1012.0s to 1014.0s, [0.353, 0.179, 0.785, 0.975]->[0.34, 0.179, 0.809, 0.987]->[0.33, 0.171, 0.808, 0.997]: stand, carry/hold (an object).", "From 1009.0s to 1011.0s, [0.25, 0.306, 0.757, 0.991]->[0.261, 0.276, 0.737, 0.991]->[0.318, 0.219, 0.769, 0.995]: stand, carry/hold (an object), lift/pick up.", "At 1008.0s, [0.23, 0.38, 0.769, 1.0]: bend/bow (at the waist), carry/hold (an object), touch (an object).", "From 1006.0s to 1007.0s, [0.245, 0.372, 0.744, 1.0]->[0.25, 0.413, 0.811, 0.999]: bend/bow (at the waist), open (e.g., a window, a car door).", "From 1004.0s to 1005.0s, [0.5, 0.239, 0.935, 0.993]->[0.333, 0.294, 0.729, 0.985]: stand, carry/hold (an object), open (e.g., a window, a car door).", "At 1003.0s, [0.627, 0.248, 0.995, 1.0]: walk, carry/hold (an object), listen to (a person)."]}]}, {"video": "vfjywN5CN0Y.mkv", "clip": [965, 1018], "conversations": [{"from": "human", "timestamps": 995.0, "value": "What actions is the person in bbox [0.004, 0.073, 0.554, 0.983] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.004, 0.073, 0.554, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 992.0s to 994.0s, [0.009, 0.076, 0.566, 0.989]->[0.006, 0.07, 0.569, 0.983]->[0.003, 0.071, 0.575, 0.981]: stand, talk to (e.g., self, a person, a group).", 
"At 991.0s, [0.002, 0.294, 0.28, 0.977]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1018.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.276, 0.265, 0.722, 0.988]: stand, close (e.g., a door, a box), touch (an object)."}]}, {"video": "vfjywN5CN0Y.mkv", "clip": [999, 1049], "conversations": [{"from": "human", "timestamps": 1029.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.447, 0.169, 0.859, 0.987]?"}, {"from": "gpt", "value": ["At Now, [0.447, 0.169, 0.859, 0.987]: carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1026.0s to 1028.0s, [0.472, 0.218, 0.903, 0.989]->[0.435, 0.215, 0.878, 0.986]->[0.396, 0.178, 0.843, 0.987]: stand, carry/hold (an object)."]}, {"from": "human", "timestamps": 1049.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.261, 0.122, 0.871, 0.983]: carry/hold (an object), talk to (e.g., self, a person, a group), walk."}]}, {"video": "vfjywN5CN0Y.mkv", "clip": [1045, 1119], "conversations": [{"from": "human", "timestamps": 1075.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.283, 0.142, 0.981]: watch (a person), sit."}, {"from": "human", "timestamps": 1119.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.214, 0.018, 1.0, 0.994]?"}, {"from": "gpt", "value": ["At Now, [0.214, 0.018, 1.0, 0.994]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1117.0s to 1118.0s, [0.222, 0.029, 1.0, 0.978]->[0.227, 0.015, 1.0, 0.991]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "vfjywN5CN0Y.mkv", "clip": [1110, 1159], "conversations": [{"from": "human", "timestamps": 1140.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.191, 0.088, 0.966, 0.989]: stand."}, {"from": "human", "timestamps": 1159.0, "value": "What actions did the person at bbox [0.23, 0.016, 1.0, 0.994] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.23, 0.016, 1.0, 0.994]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1158.0s, [0.265, 0.019, 1.0, 0.99]: sit, watch (a person)."]}]}, {"video": "vfjywN5CN0Y.mkv", "clip": [1186, 1257], "conversations": [{"from": "human", "timestamps": 1216.0, "value": "What actions did the person at bbox [0.552, 0.053, 0.996, 0.953] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.552, 0.053, 0.996, 0.953]: sit, watch (a person).", "From 1211.0s to 1215.0s, [0.553, 0.052, 0.994, 0.952]->[0.531, 0.067, 0.999, 0.959]->[0.532, 0.056, 1.0, 0.97]->[0.537, 0.058, 0.996, 0.957]->[0.511, 0.042, 0.988, 0.968]: sit, watch (a person).", "From 1209.0s to 1210.0s, [0.544, 0.057, 0.995, 0.935]->[0.549, 0.062, 0.997, 0.957]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1257.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.149, 0.0, 0.981, 0.991]: watch (a person), sit, talk to (e.g., self, a person, a group), stand."}]}, {"video": "vfjywN5CN0Y.mkv", "clip": [1238, 1280], "conversations": [{"from": "human", "timestamps": 1268.0, "value": "Describe the locations and actions of each person in 
the current frame."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.034, 0.519, 0.983]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1280.0, "value": "Describe the actions of the person at bbox [0.001, 0.047, 0.364, 0.996] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.001, 0.047, 0.364, 0.996]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1279.0s, [0.0, 0.027, 0.379, 0.975]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1278.0s, [0.007, 0.022, 0.334, 0.977]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1277.0s, [0.0, 0.002, 0.345, 0.973]: sit, talk to (e.g., self, a person, a group).", "From 1273.0s to 1276.0s, [0.0, 0.033, 0.552, 0.998]->[0.0, 0.032, 0.506, 0.992]->[0.0, 0.015, 0.414, 0.982]->[0.0, 0.016, 0.392, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1270.0s to 1272.0s, [0.004, 0.008, 0.542, 0.984]->[0.0, 0.045, 0.536, 0.984]->[0.0, 0.045, 0.546, 0.98]: sit, talk to (e.g., self, a person, a group).", "At 1269.0s, [0.006, 0.046, 0.53, 0.98]: sit, stand, talk to (e.g., self, a person, a group).", "From 1265.0s to 1268.0s, [0.0, 0.002, 0.505, 1.0]->[0.005, 0.071, 0.558, 0.973]->[0.006, 0.043, 0.541, 0.978]->[0.0, 0.034, 0.519, 0.983]: sit, talk to (e.g., self, a person, a group).", "From 1262.0s to 1264.0s, [0.001, 0.085, 0.232, 0.962]->[0.0, 0.06, 0.235, 0.971]->[0.0, 0.036, 0.272, 0.986]: sit, listen to (a person)."]}]}, {"video": "vfjywN5CN0Y.mkv", "clip": [1313, 1359], "conversations": [{"from": "human", "timestamps": 1343.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.019, 0.206, 0.193, 0.584]: carry/hold (an object); [0.024, 0.198, 0.433, 0.739]: carry/hold (an object), bend/bow (at the waist); [0.735, 0.042, 1.0, 0.952]: carry/hold (an object), stand, watch (a person)."}, {"from": "human", "timestamps": 1359.0, "value": "Describe the actions of the person at bbox [0.549, 0.385, 0.614, 0.618] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.549, 0.385, 0.614, 0.618]: walk, carry/hold (an object).", "From 1354.0s to 1358.0s, [0.483, 0.429, 0.527, 0.587]->[0.466, 0.43, 0.512, 0.591]->[0.49, 0.407, 0.541, 0.595]->[0.513, 0.37, 0.565, 0.566]->[0.53, 0.37, 0.586, 0.579]: walk."]}]}, {"video": "vfjywN5CN0Y.mkv", "clip": [1391, 1424], "conversations": [{"from": "human", "timestamps": 1421.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.254, 0.006, 0.985, 0.975]: stand, write."}, {"from": "human", "timestamps": 1424.0, "value": "What actions did the person at bbox [0.26, 0.014, 0.992, 0.985] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.26, 0.014, 0.992, 0.985]: stand, write.", "From 1421.0s to 1423.0s, [0.254, 0.006, 0.985, 0.975]->[0.189, 0.058, 0.994, 1.0]->[0.18, 0.064, 1.0, 0.995]: stand, write.", "From 1419.0s to 1420.0s, [0.2, 0.012, 0.994, 0.986]->[0.286, 0.005, 0.995, 0.981]: stand, carry/hold (an object), write.", "From 1417.0s to 1418.0s, [0.152, 0.059, 0.987, 1.0]->[0.214, 0.021, 0.999, 0.974]: stand, write.", "At 1416.0s, [0.314, 0.072, 0.98, 0.976]: stand.", "From 1414.0s to 1415.0s, [0.336, 0.022, 1.0, 0.984]->[0.326, 0.044, 0.996, 0.99]: sit, talk to (e.g., self, a person, a group).", "At 1413.0s, [0.333, 0.009, 1.0, 0.995]: sit, stand.", "From 1409.0s to 1412.0s, [0.115, 
0.017, 0.967, 0.983]->[0.316, 0.012, 1.0, 0.993]->[0.35, 0.021, 1.0, 1.0]->[0.382, 0.008, 1.0, 0.971]: sit."]}]}, {"video": "vfjywN5CN0Y.mkv", "clip": [1522, 1553], "conversations": [{"from": "human", "timestamps": 1552.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.361, 0.371, 0.442, 0.61]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1553.0, "value": "What actions is the person in bbox [0.43, 0.376, 0.507, 0.632] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.43, 0.376, 0.507, 0.632]: walk, carry/hold (an object).", "From 1551.0s to 1552.0s, [0.409, 0.365, 0.485, 0.599]->[0.361, 0.371, 0.442, 0.61]: walk, carry/hold (an object).", "From 1549.0s to 1550.0s, [0.501, 0.354, 0.555, 0.582]->[0.452, 0.37, 0.52, 0.588]: walk.", "At 1548.0s, [0.543, 0.357, 0.591, 0.565]: walk, carry/hold (an object)."]}]}, {"video": "vfjywN5CN0Y.mkv", "clip": [1537, 1573], "conversations": [{"from": "human", "timestamps": 1567.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.126, 0.308, 0.987]: carry/hold (an object), listen to (a person), walk; [0.311, 0.144, 0.573, 0.984]: carry/hold (an object), talk to (e.g., self, a person, a group), walk; [0.644, 0.088, 0.897, 0.995]: listen to (a person), walk; [0.826, 0.142, 1.0, 0.984]: listen to (a person), watch (a person), walk, carry/hold (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1573.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.269, 0.108, 0.633, 1.0]?"}, {"from": "gpt", "value": ["At Now, [0.269, 0.108, 0.633, 1.0]: walk, carry/hold (an object), listen to (a person).", "From 1568.0s to 1572.0s, [0.25, 0.161, 0.58, 0.992]->[0.321, 0.126, 0.602, 0.999]->[0.357, 0.13, 0.659, 0.985]->[0.353, 0.205, 0.671, 0.984]->[0.323, 0.183, 0.618, 0.984]: walk, carry/hold (an object), listen to (a person).", "From 1566.0s to 1567.0s, [0.308, 0.118, 0.554, 0.975]->[0.311, 0.144, 0.573, 0.984]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1564.0s to 1565.0s, [0.268, 0.208, 0.46, 0.843]->[0.283, 0.128, 0.498, 0.908]: walk, carry/hold (an object), listen to (a person)."]}]}, {"video": "vfjywN5CN0Y.mkv", "clip": [1662, 1720], "conversations": [{"from": "human", "timestamps": 1692.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.22, 0.213, 0.968]: sit, touch (an object), watch (a person), stand, talk to (e.g., self, a person, a group); [0.051, 0.225, 0.638, 0.959]: carry/hold (an object), bend/bow (at the waist), listen to (a person); [0.487, 0.046, 0.975, 0.816]: sit, watch (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group), smoke."}, {"from": "human", "timestamps": 1720.0, "value": "For the person at bbox [0.453, 0.026, 1.0, 0.975], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.453, 0.026, 1.0, 0.975]: sit, listen to (a person).", "At 1719.0s, [0.371, 0.017, 1.0, 0.984]: sit, stand, listen to (a person).", "At 1718.0s, [0.33, 0.015, 1.0, 0.987]: stand, listen to (a person).", "At 1717.0s, [0.382, 0.02, 1.0, 0.98]: sit, listen to (a person).", "From 1715.0s to 1716.0s, [0.363, 0.076, 0.885, 
0.961]->[0.208, 0.091, 0.893, 0.968]: sit, carry/hold (an object), listen to (a person).", "At 1714.0s, [0.294, 0.085, 0.864, 0.985]: sit, listen to (a person).", "At 1713.0s, [0.204, 0.102, 0.861, 0.952]: sit, touch (an object), listen to (a person).", "At 1712.0s, [0.329, 0.099, 0.889, 0.959]: bend/bow (at the waist), listen to (a person).", "At 1711.0s, [0.181, 0.09, 0.887, 0.962]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1710.0s, [0.195, 0.079, 0.896, 0.962]: sit, listen to (a person).", "At 1709.0s, [0.41, 0.072, 0.9, 0.961]: sit, stand, carry/hold (an object), listen to (a person).", "At 1708.0s, [0.362, 0.072, 0.888, 0.959]: sit, carry/hold (an object), listen to (a person).", "At 1707.0s, [0.151, 0.081, 0.893, 0.975]: sit, listen to (a person).", "From 1705.0s to 1706.0s, [0.132, 0.057, 0.883, 0.965]->[0.108, 0.065, 0.9, 0.971]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1704.0s, [0.158, 0.063, 0.846, 0.962]: sit, carry/hold (an object), listen to (a person).", "At 1703.0s, [0.122, 0.049, 0.846, 0.969]: stand, drink, listen to (a person).", "At 1702.0s, [0.162, 0.081, 0.885, 0.972]: sit, drink, listen to (a person).", "From 1700.0s to 1701.0s, [0.39, 0.075, 0.867, 0.959]->[0.164, 0.1, 0.828, 0.969]: sit, carry/hold (an object), drink, listen to (a person).", "From 1698.0s to 1699.0s, [0.11, 0.065, 0.871, 0.961]->[0.125, 0.006, 0.783, 0.97]: sit, drink, listen to (a person).", "At 1697.0s, [0.084, 0.029, 0.853, 0.977]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1694.0s to 1696.0s, [0.254, 0.112, 0.742, 0.975]->[0.075, 0.086, 0.743, 0.97]->[0.053, 0.027, 0.837, 0.979]: sit, carry/hold (an object), listen to (a person).", "At 1693.0s, [0.033, 0.027, 0.887, 0.973]: sit, listen to (a person), watch (a person).", "At 1692.0s, [0.051, 0.225, 0.638, 0.959]: bend/bow (at the waist), carry/hold (an object), listen to (a person)."]}]}, {"video": "wEAeql4z1O0.mp4", "clip": [926, 983], "conversations": [{"from": "human", "timestamps": 956.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.201, 0.212, 0.475, 0.853]: listen to (a person), stand, cut, watch (a person)."}, {"from": "human", "timestamps": 983.0, "value": "What actions did the person at bbox [0.875, 0.475, 1.0, 0.795] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.875, 0.475, 1.0, 0.795]: sit, drink, listen (e.g., to music), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 982.0s, [0.841, 0.47, 0.992, 0.858]: sit, carry/hold (an object), sing to (e.g., self, a person, a group)."]}]}, {"video": "wEAeql4z1O0.mp4", "clip": [943, 1001], "conversations": [{"from": "human", "timestamps": 973.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.123, 0.407, 0.242, 0.745]: carry/hold (an object), bend/bow (at the waist), listen to (a person); [0.44, 0.126, 0.756, 0.855]: listen to (a person), watch (a person), sing to (e.g., self, a person, a group), stand, talk to (e.g., self, a person, a group); [0.638, 0.228, 0.997, 0.879]: listen to (a person), sit, watch (a person), play musical instrument."}, {"from": "human", "timestamps": 1001.0, "value": "What actions is the person in bbox [0.531, 0.116, 0.9, 0.857] doing in the current frame, and what were the person previous 
actions?"}, {"from": "gpt", "value": ["In the current frame, [0.531, 0.116, 0.9, 0.857]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1000.0s, [0.528, 0.126, 0.912, 0.878]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 999.0s, [0.499, 0.18, 0.919, 0.87]: walk, drink, talk to (e.g., self, a person, a group), watch (a person).", "At 998.0s, [0.48, 0.132, 0.938, 0.883]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 997.0s, [0.458, 0.161, 0.884, 0.878]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 995.0s to 996.0s, [0.363, 0.212, 0.814, 0.888]->[0.361, 0.195, 0.744, 0.879]: stand, talk to (e.g., self, a person, a group).", "At 994.0s, [0.326, 0.235, 0.891, 0.888]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "wEAeql4z1O0.mp4", "clip": [1043, 1076], "conversations": [{"from": "human", "timestamps": 1073.0, "value": "For the person at bbox [0.075, 0.179, 0.291, 0.883], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.075, 0.179, 0.291, 0.883]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1072.0s, [0.069, 0.166, 0.313, 0.882]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1070.0s to 1071.0s, [0.088, 0.182, 0.322, 0.875]->[0.09, 0.151, 0.251, 0.884]: walk, carry/hold (an object), watch (a person).", "At 1069.0s, [0.238, 0.158, 0.444, 0.879]: walk, carry/hold (an object).", "At 1068.0s, [0.311, 0.159, 0.496, 0.877]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1067.0s, [0.328, 0.175, 0.487, 0.874]: walk, carry/hold (an object), watch (a person)."]}, {"from": "human", "timestamps": 1076.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.365, 0.141, 0.88, 0.904]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "wEAeql4z1O0.mp4", "clip": [1027, 1106], "conversations": [{"from": "human", "timestamps": 1057.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.214, 0.223, 0.693, 0.89]: sit, listen to (a person), carry/hold (an object), talk to (e.g., self, a person, a group), take (an object) from (a person)."}, {"from": "human", "timestamps": 1106.0, "value": "What actions is the person in bbox [0.53, 0.218, 0.701, 0.69] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.53, 0.218, 0.701, 0.69]: stand, touch (an object), listen to (a person), watch (a person).", "From 1104.0s to 1105.0s, [0.524, 0.229, 0.706, 0.714]->[0.529, 0.223, 0.703, 0.691]: stand, touch (an object), listen to (a person), watch (a person).", "At 1103.0s, [0.525, 0.232, 0.672, 0.67]: stand, put down, listen to (a person), watch (a person).", "From 1100.0s to 1102.0s, [0.53, 0.232, 0.659, 0.637]->[0.528, 0.229, 0.665, 0.651]->[0.533, 0.231, 0.669, 0.656]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "wEAeql4z1O0.mp4", "clip": [1064, 1109], "conversations": [{"from": "human", "timestamps": 1094.0, "value": "Describe the actions of the person at bbox [0.155, 0.146, 0.455, 0.87] in the current 
frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.155, 0.146, 0.455, 0.87]: sit, listen to (a person), watch (a person).", "From 1090.0s to 1093.0s, [0.153, 0.137, 0.459, 0.889]->[0.439, 0.168, 0.868, 0.88]->[0.159, 0.14, 0.46, 0.897]->[0.159, 0.128, 0.458, 0.89]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1109.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.408, 0.155, 0.755, 0.888]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "wEAeql4z1O0.mp4", "clip": [1126, 1234], "conversations": [{"from": "human", "timestamps": 1156.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.104, 0.153, 0.349, 0.875]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.35, 0.162, 0.742, 0.877]: stand, talk to (e.g., self, a person, a group); [0.73, 0.299, 0.872, 0.883]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1234.0, "value": "Describe the actions of the person at bbox [0.738, 0.142, 0.998, 0.874] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.738, 0.142, 0.998, 0.874]: sit, listen to (a person), watch (a person).", "From 1232.0s to 1233.0s, [0.683, 0.132, 1.0, 0.879]->[0.685, 0.155, 0.999, 0.879]: sit, listen to (a person), watch (a person).", "At 1231.0s, [0.668, 0.145, 0.993, 0.882]: sit, ride (e.g., a bike, a car, a horse), listen to (a person), watch (a person)."]}]}, {"video": "wEAeql4z1O0.mp4", "clip": [1247, 1307], "conversations": [{"from": "human", "timestamps": 1277.0, "value": "For the person at bbox [0.136, 0.22, 0.368, 0.889], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.136, 0.22, 0.368, 0.889]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1276.0s, [0.136, 0.224, 0.361, 0.886]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1275.0s, [0.134, 0.22, 0.364, 0.888]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1307.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.084, 0.232, 0.39, 0.884]: watch (a person), stand, shoot, talk to (e.g., self, a person, a group)."}]}, {"video": "wEAeql4z1O0.mp4", "clip": [1370, 1436], "conversations": [{"from": "human", "timestamps": 1400.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.458, 0.212, 0.698, 0.879]: watch (a person), walk."}, {"from": "human", "timestamps": 1436.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.193, 0.232, 0.499, 0.855]?"}, {"from": "gpt", "value": ["At Now, [0.193, 0.232, 0.499, 0.855]: walk.", "At 1435.0s, [0.255, 0.216, 0.518, 0.879]: walk.", "From 1432.0s to 1434.0s, [0.608, 0.184, 0.809, 0.881]->[0.608, 0.174, 0.807, 0.88]->[0.609, 0.171, 0.806, 0.885]: stand, watch (a person).", "At 1430.0s, [0.589, 0.196, 0.82, 0.89]: stand.", "From 1427.0s to 1429.0s, [0.514, 0.228, 0.721, 0.872]->[0.541, 0.227, 0.797, 0.886]->[0.564, 0.215, 0.819, 0.883]: walk, carry/hold (an object)."]}]}, {"video": "wEAeql4z1O0.mp4", "clip": [1430, 1490], 
"conversations": [{"from": "human", "timestamps": 1460.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.0, 0.15, 0.371, 0.888]: listen to (a person), stand, watch (a person); [0.364, 0.128, 0.996, 0.889]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1490.0, "value": "What actions did the person at bbox [0.385, 0.339, 0.504, 0.814] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.385, 0.339, 0.504, 0.814]: sit, ride (e.g., a bike, a car, a horse), watch (a person).", "From 1488.0s to 1489.0s, [0.321, 0.469, 0.464, 0.859]->[0.278, 0.374, 0.38, 0.828]: sit, ride (e.g., a bike, a car, a horse)."]}]}, {"video": "wEAeql4z1O0.mp4", "clip": [1500, 1554], "conversations": [{"from": "human", "timestamps": 1530.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.667, 0.153, 0.953, 0.881]?"}, {"from": "gpt", "value": ["In the current frame, [0.667, 0.153, 0.953, 0.881]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1529.0s, [0.536, 0.148, 0.835, 0.874]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1526.0s to 1528.0s, [0.427, 0.139, 0.696, 0.878]->[0.468, 0.153, 0.719, 0.626]->[0.546, 0.147, 0.76, 0.681]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1525.0s, [0.424, 0.274, 0.666, 0.879]: stand, carry/hold (an object)."]}, {"from": "human", "timestamps": 1554.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.124, 0.17, 0.535, 0.878]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.666, 0.381, 0.945, 0.877]: listen to (a person), stand, watch (a person)."}]}, {"video": "wEAeql4z1O0.mp4", "clip": [1518, 1594], "conversations": [{"from": "human", "timestamps": 1548.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.187, 0.27, 0.383, 0.88]: listen to (a person), stand, watch (a person); [0.453, 0.443, 0.648, 0.867]: listen to (a person), stand, watch (a person); [0.463, 0.157, 0.915, 0.885]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1594.0, "value": "What actions is the person in bbox [0.703, 0.3, 1.0, 0.874] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.703, 0.3, 1.0, 0.874]: carry/hold (an object), lie/sleep.", "At 1592.0s, [0.19, 0.524, 0.317, 0.767]: walk, watch (a person)."]}]}, {"video": "wEAeql4z1O0.mp4", "clip": [1594, 1660], "conversations": [{"from": "human", "timestamps": 1624.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.034, 0.146, 0.301, 0.886]: watch (a person), stand; [0.271, 0.214, 0.516, 0.876]: watch (a person), stand; [0.471, 0.297, 0.777, 0.879]: stand; [0.737, 0.397, 0.978, 0.885]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1660.0, "value": "What actions did the person at bbox [0.456, 0.233, 1.0, 0.882] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.456, 0.233, 1.0, 0.882]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1659.0s, [0.496, 0.24, 0.997, 0.891]: stand, talk to (e.g., self, a person, a group), 
watch (a person).", "From 1654.0s to 1658.0s, [0.541, 0.231, 0.993, 0.887]->[0.555, 0.221, 1.0, 0.886]->[0.543, 0.224, 1.0, 0.887]->[0.547, 0.22, 1.0, 0.887]->[0.551, 0.227, 1.0, 0.884]: stand, listen to (a person), watch (a person).", "From 1650.0s to 1653.0s, [0.527, 0.215, 0.996, 0.885]->[0.535, 0.206, 0.996, 0.883]->[0.562, 0.205, 1.0, 0.893]->[0.537, 0.209, 0.998, 0.886]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "wEAeql4z1O0.mp4", "clip": [1642, 1699], "conversations": [{"from": "human", "timestamps": 1672.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.034, 0.288, 0.188, 0.783]: touch (an object), listen to (a person), watch (a person), stand, smoke; [0.136, 0.149, 0.456, 0.893]: watch (a person), stand, touch (an object); [0.408, 0.231, 0.651, 0.889]: watch (a person), stand; [0.648, 0.208, 0.974, 0.888]: watch (a person), stand."}, {"from": "human", "timestamps": 1699.0, "value": "What actions did the person at bbox [0.306, 0.267, 0.427, 0.713] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.306, 0.267, 0.427, 0.713]: sit, ride (e.g., a bike, a car, a horse).", "At 1698.0s, [0.299, 0.308, 0.409, 0.684]: sit, ride (e.g., a bike, a car, a horse).", "At 1697.0s, [0.317, 0.332, 0.398, 0.627]: sit, carry/hold (an object), ride (e.g., a bike, a car, a horse)."]}]}, {"video": "wEAeql4z1O0.mp4", "clip": [1681, 1726], "conversations": [{"from": "human", "timestamps": 1711.0, "value": "What actions did the person at bbox [0.36, 0.158, 0.689, 0.887] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.36, 0.158, 0.689, 0.887]: sit, ride (e.g., a bike, a car, a horse), watch (a person).", "From 1704.0s to 1710.0s, [0.271, 0.208, 0.516, 0.872]->[0.147, 0.199, 0.455, 0.877]->[0.249, 0.181, 0.537, 0.871]->[0.196, 0.143, 0.487, 0.737]->[0.205, 0.191, 0.505, 0.865]->[0.18, 0.139, 0.49, 0.883]->[0.183, 0.13, 0.544, 0.886]: sit, ride (e.g., a bike, a car, a horse).", "From 1698.0s to 1702.0s, [0.299, 0.308, 0.409, 0.684]->[0.306, 0.267, 0.427, 0.713]->[0.304, 0.239, 0.454, 0.764]->[0.32, 0.223, 0.46, 0.805]->[0.312, 0.209, 0.491, 0.87]: sit, ride (e.g., a bike, a car, a horse).", "At 1697.0s, [0.317, 0.332, 0.398, 0.627]: sit, carry/hold (an object), ride (e.g., a bike, a car, a horse)."]}, {"from": "human", "timestamps": 1726.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.008, 0.156, 0.524, 0.88]: carry/hold (an object), sit, talk to (e.g., self, a person, a group); [0.662, 0.29, 0.938, 0.888]: listen to (a person), stand."}]}, {"video": "wEAeql4z1O0.mp4", "clip": [1712, 1754], "conversations": [{"from": "human", "timestamps": 1742.0, "value": "What actions did the person at bbox [0.638, 0.214, 0.849, 0.884] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.638, 0.214, 0.849, 0.884]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1741.0s, [0.867, 0.148, 1.0, 0.884]: sit, carry/hold (an object), listen to (a person)."]}, {"from": "human", "timestamps": 1754.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.219, 0.184, 0.47, 0.823]: listen to (a person), sit, watch (a person); [0.437, 0.242, 0.615, 0.879]: carry/hold (an object), sit, listen to (a person); [0.727, 0.266, 0.908, 0.873]: carry/hold (an object), 
stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "wfEOx36N4jA.mp4", "clip": [895, 936], "conversations": [{"from": "human", "timestamps": 925.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.281, 0.476, 0.399, 0.739]: listen to (a person), sit, watch (a person), talk to (e.g., self, a person, a group); [0.508, 0.351, 0.598, 0.703]: watch (a person), stand; [0.624, 0.351, 0.724, 0.549]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 936.0, "value": "Describe the actions of the person at bbox [0.002, 0.182, 0.424, 0.916] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.002, 0.182, 0.424, 0.916]: bend/bow (at the waist), carry/hold (an object), smoke, talk to (e.g., self, a person, a group).", "From 934.0s to 935.0s, [0.111, 0.135, 0.659, 0.912]->[0.077, 0.198, 0.625, 0.896]: stand, carry/hold (an object), smoke.", "At 933.0s, [0.089, 0.086, 0.604, 0.91]: stand, carry/hold (an object).", "At 932.0s, [0.089, 0.086, 0.545, 0.903]: stand, carry/hold (an object), pull (an object), smoke."]}]}, {"video": "wfEOx36N4jA.mp4", "clip": [1001, 1044], "conversations": [{"from": "human", "timestamps": 1031.0, "value": "For the person at bbox [0.55, 0.156, 0.896, 0.919], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.55, 0.156, 0.896, 0.919]: stand, watch (a person).", "From 1029.0s to 1030.0s, [0.569, 0.151, 0.996, 0.922]->[0.608, 0.151, 0.986, 0.931]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1044.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.296, 0.126, 0.987, 0.92]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "wfEOx36N4jA.mp4", "clip": [1149, 1213], "conversations": [{"from": "human", "timestamps": 1179.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.511, 0.119, 0.991, 0.93]: watch (a person), stand."}, {"from": "human", "timestamps": 1213.0, "value": "For the person at bbox [0.574, 0.216, 0.696, 0.677], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.574, 0.216, 0.696, 0.677]: walk, watch (a person).", "At 1212.0s, [0.503, 0.21, 0.628, 0.667]: walk, watch (a person).", "At 1211.0s, [0.326, 0.183, 0.811, 0.933]: run/jog, watch (a person).", "At 1210.0s, [0.319, 0.346, 0.881, 0.924]: run/jog.", "From 1208.0s to 1209.0s, [0.206, 0.16, 0.821, 0.915]->[0.282, 0.159, 0.727, 0.916]: walk.", "At 1207.0s, [0.157, 0.121, 0.835, 0.911]: walk, talk to (e.g., self, a person, a group)."]}]}, {"video": "wfEOx36N4jA.mp4", "clip": [1274, 1307], "conversations": [{"from": "human", "timestamps": 1304.0, "value": "For the person at bbox [0.003, 0.079, 0.565, 0.936], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.003, 0.079, 0.565, 0.936]: stand.", "From 1302.0s to 1303.0s, [0.0, 0.097, 0.535, 0.943]->[0.001, 0.114, 0.536, 0.939]: stand."]}, {"from": "human", "timestamps": 1307.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.019, 0.255, 0.45, 0.892]: stand."}]}, 
{"video": "wfEOx36N4jA.mp4", "clip": [1360, 1432], "conversations": [{"from": "human", "timestamps": 1390.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.008, 0.157, 0.604, 0.927]?"}, {"from": "gpt", "value": ["In the current frame, [0.008, 0.157, 0.604, 0.927]: stand.", "At 1389.0s, [0.005, 0.166, 0.617, 0.927]: stand, talk to (e.g., self, a person, a group).", "From 1387.0s to 1388.0s, [0.18, 0.176, 0.488, 0.884]->[0.002, 0.159, 0.588, 0.924]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1385.0s to 1386.0s, [0.172, 0.181, 0.48, 0.885]->[0.18, 0.181, 0.488, 0.875]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1384.0s, [0.002, 0.156, 0.617, 0.931]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1382.0s to 1383.0s, [0.0, 0.151, 0.615, 0.934]->[0.005, 0.159, 0.605, 0.928]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1432.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.227, 0.114, 0.531, 0.892]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.587, 0.314, 0.933, 0.734]: carry/hold (an object), sit, touch (an object), watch (a person)."}]}, {"video": "wfEOx36N4jA.mp4", "clip": [1425, 1462], "conversations": [{"from": "human", "timestamps": 1455.0, "value": "What actions is the person in bbox [0.225, 0.119, 0.94, 0.916] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.225, 0.119, 0.94, 0.916]: stand, carry/hold (an object), throw.", "At 1454.0s, [0.245, 0.125, 0.964, 0.937]: sit, carry/hold (an object), touch (an object)."]}, {"from": "human", "timestamps": 1462.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.058, 0.747, 0.92]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "wfEOx36N4jA.mp4", "clip": [1500, 1582], "conversations": [{"from": "human", "timestamps": 1530.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.548, 0.145, 0.815, 0.912]: watch (a person), stand, touch (an object)."}, {"from": "human", "timestamps": 1582.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.117, 0.185, 0.543, 0.929]?"}, {"from": "gpt", "value": ["In the current frame, [0.117, 0.185, 0.543, 0.929]: stand, carry/hold (an object), watch (a person).", "At 1581.0s, [0.106, 0.172, 0.533, 0.92]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "wfEOx36N4jA.mp4", "clip": [1556, 1613], "conversations": [{"from": "human", "timestamps": 1586.0, "value": "What actions is the person in bbox [0.3, 0.115, 0.92, 0.824] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.3, 0.115, 0.92, 0.824]: sit, carry/hold (an object), touch (an object).", "At 1585.0s, [0.371, 0.13, 0.835, 0.73]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1584.0s, [0.33, 0.129, 0.932, 0.855]: sit, carry/hold (an object), watch (a person).", "At 1583.0s, [0.423, 0.309, 0.886, 0.767]: bend/bow (at the waist), carry/hold (an object)."]}, {"from": "human", "timestamps": 1613.0, "value": "What 
actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.134, 0.167, 0.711, 0.936]: stand, sing to (e.g., self, a person, a group), play musical instrument."}]}, {"video": "wogRuPNBUi8.mp4", "clip": [881, 945], "conversations": [{"from": "human", "timestamps": 911.0, "value": "For the person at bbox [0.193, 0.288, 0.404, 0.568], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.193, 0.288, 0.404, 0.568]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 910.0s, [0.193, 0.288, 0.414, 0.564]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 945.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.278, 0.073, 0.986, 0.996]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "wogRuPNBUi8.mp4", "clip": [885, 989], "conversations": [{"from": "human", "timestamps": 915.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.136, 0.089, 0.978, 0.998]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 989.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.183, 0.295, 0.372, 0.582]?"}, {"from": "gpt", "value": ["At Now, [0.183, 0.295, 0.372, 0.582]: sit, listen to (a person), watch (a person).", "From 987.0s to 988.0s, [0.068, 0.082, 0.974, 0.983]->[0.098, 0.076, 0.945, 0.988]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "wogRuPNBUi8.mp4", "clip": [1008, 1069], "conversations": [{"from": "human", "timestamps": 1038.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.074, 0.058, 0.96, 0.98]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1069.0, "value": "What actions did the person at bbox [0.394, 0.111, 0.877, 0.978] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.394, 0.111, 0.877, 0.978]: stand, close (e.g., a door, a box).", "From 1067.0s to 1068.0s, [0.343, 0.221, 0.716, 0.986]->[0.0, 0.246, 0.782, 0.983]: stand, close (e.g., a door, a box).", "At 1066.0s, [0.348, 0.186, 0.78, 0.999]: walk, close (e.g., a door, a box).", "At 1065.0s, [0.512, 0.09, 0.912, 0.991]: stand, close (e.g., a door, a box), watch (a person).", "At 1064.0s, [0.518, 0.096, 0.941, 1.0]: stand, carry/hold (an object), watch (a person).", "At 1063.0s, [0.57, 0.101, 0.931, 0.978]: stand, touch (an object), watch (a person)."]}]}, {"video": "wogRuPNBUi8.mp4", "clip": [1011, 1081], "conversations": [{"from": "human", "timestamps": 1041.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.125, 0.316, 0.295, 0.608]: listen to (a person), sit, watch (a person); [0.192, 0.299, 0.309, 0.466]: listen to (a person), sit, watch (a person); [0.364, 0.274, 0.488, 0.554]: listen to (a person), sit, watch (a person), talk to (e.g., self, a person, a group); [0.547, 0.264, 0.651, 0.565]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1081.0, "value": "What actions is the person in bbox [0.215, 0.069, 0.988, 0.968] doing in 
the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.215, 0.069, 0.988, 0.968]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1080.0s, [0.233, 0.063, 1.0, 1.0]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "wogRuPNBUi8.mp4", "clip": [1208, 1240], "conversations": [{"from": "human", "timestamps": 1238.0, "value": "For the person at bbox [0.644, 0.312, 0.749, 0.596], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.644, 0.312, 0.749, 0.596]: walk, talk to (e.g., self, a person, a group), watch (a person).", "From 1235.0s to 1237.0s, [0.586, 0.273, 0.745, 0.684]->[0.653, 0.308, 0.758, 0.666]->[0.67, 0.286, 0.767, 0.642]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1234.0s, [0.601, 0.257, 0.77, 0.57]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1233.0s, [0.555, 0.237, 0.9, 0.719]: walk, touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1232.0s, [0.582, 0.229, 0.844, 0.738]: bend/bow (at the waist), touch (an object), listen to (a person), watch (a person).", "At 1231.0s, [0.564, 0.232, 0.834, 0.738]: bend/bow (at the waist), touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1227.0s to 1230.0s, [0.553, 0.226, 0.815, 0.752]->[0.528, 0.245, 0.818, 0.745]->[0.552, 0.235, 0.822, 0.74]->[0.548, 0.241, 0.828, 0.738]: bend/bow (at the waist), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1226.0s, [0.482, 0.206, 0.842, 0.752]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1223.0s to 1225.0s, [0.515, 0.054, 0.861, 0.986]->[0.455, 0.05, 0.77, 0.979]->[0.579, 0.046, 0.82, 0.976]: stand, listen to (a person), watch (a person).", "At 1222.0s, [0.499, 0.049, 0.831, 0.957]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1221.0s, [0.463, 0.046, 0.85, 0.935]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1220.0s, [0.474, 0.079, 0.829, 0.97]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1217.0s to 1219.0s, [0.502, 0.069, 0.828, 1.0]->[0.493, 0.074, 0.812, 0.991]->[0.515, 0.066, 0.815, 0.989]: stand, listen to (a person), watch (a person).", "From 1215.0s to 1216.0s, [0.502, 0.085, 0.78, 0.983]->[0.511, 0.077, 0.783, 0.982]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1210.0s to 1214.0s, [0.466, 0.103, 0.744, 0.979]->[0.448, 0.153, 0.718, 0.99]->[0.449, 0.136, 0.734, 0.978]->[0.452, 0.136, 0.781, 0.96]->[0.483, 0.108, 0.789, 0.98]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1208.0s to 1209.0s, [0.525, 0.066, 0.839, 0.969]->[0.518, 0.064, 0.81, 0.993]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1202.0s to 1207.0s, [0.536, 0.051, 0.833, 0.946]->[0.53, 0.036, 0.809, 0.987]->[0.51, 0.035, 0.792, 0.973]->[0.502, 0.026, 0.789, 0.978]->[0.503, 0.021, 0.801, 0.986]->[0.408, 0.04, 0.736, 0.959]: stand, listen to (a person), watch (a person).", "At 1201.0s, [0.514, 0.039, 0.795, 0.985]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a 
person).", "From 1196.0s to 1200.0s, [0.492, 0.044, 0.817, 0.983]->[0.379, 0.081, 0.749, 0.978]->[0.413, 0.062, 0.729, 0.995]->[0.42, 0.066, 0.735, 1.0]->[0.423, 0.067, 0.716, 0.993]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1193.0s to 1195.0s, [0.5, 0.02, 0.998, 1.0]->[0.424, 0.033, 0.819, 0.974]->[0.418, 0.044, 0.81, 0.976]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1191.0s to 1192.0s, [0.5, 0.056, 0.992, 0.978]->[0.466, 0.056, 0.997, 1.0]: stand, touch (an object), listen to (a person), watch (a person).", "At 1190.0s, [0.446, 0.055, 0.996, 1.0]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1188.0s to 1189.0s, [0.578, 0.073, 0.986, 0.982]->[0.574, 0.085, 1.0, 0.99]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person).", "From 1186.0s to 1187.0s, [0.567, 0.156, 0.992, 0.977]->[0.57, 0.112, 0.998, 0.987]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1185.0s, [0.571, 0.196, 0.993, 0.988]: bend/bow (at the waist), carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1184.0s, [0.458, 0.158, 0.931, 0.987]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person).", "At 1183.0s, [0.5, 0.081, 0.985, 0.97]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1182.0s, [0.789, 0.215, 0.991, 0.984]: walk, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1240.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.76, 0.281, 0.821, 0.541]: watch (a person), stand, touch (an object), talk to (e.g., self, a person, a group)."}]}, {"video": "wogRuPNBUi8.mp4", "clip": [1244, 1290], "conversations": [{"from": "human", "timestamps": 1274.0, "value": "What actions is the person in bbox [0.244, 0.008, 0.988, 0.974] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.244, 0.008, 0.988, 0.974]: stand.", "At 1273.0s, [0.077, 0.036, 0.804, 0.998]: stand."]}, {"from": "human", "timestamps": 1290.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.062, 0.773, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "wogRuPNBUi8.mp4", "clip": [1272, 1348], "conversations": [{"from": "human", "timestamps": 1302.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.066, 0.091, 0.938, 0.977]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1348.0, "value": "Describe the actions of the person at bbox [0.012, 0.01, 0.228, 0.968] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.012, 0.01, 0.228, 0.968]: walk, carry/hold (an object).", "At 1347.0s, [0.018, 0.001, 0.421, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "wogRuPNBUi8.mp4", "clip": [1473, 1555], "conversations": [{"from": "human", "timestamps": 1503.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.098, 0.162, 0.428, 0.499]: listen to (a person), sit, 
touch (an object), watch (a person); [0.626, 0.105, 0.969, 0.689]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1555.0, "value": "What actions is the person in bbox [0.031, 0.068, 0.927, 0.984] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.031, 0.068, 0.927, 0.984]: sit.", "From 1548.0s to 1554.0s, [0.134, 0.13, 0.627, 0.913]->[0.179, 0.101, 0.717, 0.967]->[0.114, 0.052, 0.815, 0.989]->[0.074, 0.109, 0.987, 1.0]->[0.164, 0.031, 0.993, 0.998]->[0.122, 0.082, 0.989, 0.99]->[0.09, 0.068, 1.0, 0.975]: sit.", "At 1547.0s, [0.091, 0.108, 0.565, 0.889]: sit, touch (an object).", "From 1544.0s to 1546.0s, [0.154, 0.228, 0.452, 0.697]->[0.147, 0.186, 0.467, 0.712]->[0.137, 0.138, 0.5, 0.764]: sit.", "At 1543.0s, [0.164, 0.264, 0.42, 0.688]: sit, touch (an object).", "From 1541.0s to 1542.0s, [0.163, 0.287, 0.443, 0.68]->[0.184, 0.278, 0.435, 0.737]: sit.", "At 1540.0s, [0.184, 0.366, 0.458, 0.718]: bend/bow (at the waist).", "At 1539.0s, [0.142, 0.202, 0.45, 0.75]: stand.", "At 1538.0s, [0.207, 0.16, 0.456, 0.814]: stand, touch (an object), watch (a person)."]}]}, {"video": "wogRuPNBUi8.mp4", "clip": [1517, 1617], "conversations": [{"from": "human", "timestamps": 1547.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.091, 0.108, 0.565, 0.889]: sit, touch (an object)."}, {"from": "human", "timestamps": 1617.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.713, 0.201, 0.898, 0.795]?"}, {"from": "gpt", "value": ["At Now, [0.713, 0.201, 0.898, 0.795]: stand, watch (a person).", "From 1615.0s to 1616.0s, [0.716, 0.201, 0.891, 0.786]->[0.714, 0.204, 0.894, 0.762]: stand, watch (a person)."]}]}, {"video": "wogRuPNBUi8.mp4", "clip": [1550, 1605], "conversations": [{"from": "human", "timestamps": 1580.0, "value": "For the person at bbox [0.046, 0.388, 0.488, 0.771], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.046, 0.388, 0.488, 0.771]: carry/hold (an object), talk to (e.g., self, a person, a group), lie/sleep.", "From 1578.0s to 1579.0s, [0.026, 0.378, 0.474, 0.764]->[0.046, 0.381, 0.499, 0.758]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1576.0s, [0.101, 0.357, 0.468, 0.75]: sit, carry/hold (an object), read.", "From 1570.0s to 1575.0s, [0.048, 0.353, 0.45, 0.766]->[0.091, 0.334, 0.482, 0.754]->[0.066, 0.342, 0.48, 0.746]->[0.113, 0.304, 0.484, 0.762]->[0.121, 0.302, 0.488, 0.756]->[0.098, 0.32, 0.463, 0.761]: sit, read.", "At 1569.0s, [0.063, 0.352, 0.454, 0.763]: read, lie/sleep.", "From 1567.0s to 1568.0s, [0.052, 0.349, 0.447, 0.748]->[0.071, 0.367, 0.445, 0.768]: sit, read.", "At 1565.0s, [0.082, 0.392, 0.434, 0.776]: read, lie/sleep.", "From 1563.0s to 1564.0s, [0.059, 0.404, 0.533, 0.848]->[0.088, 0.415, 0.497, 0.806]: sit, read."]}, {"from": "human", "timestamps": 1605.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.101, 0.078, 0.291, 0.986]: listen to (a person), stand."}]}, {"video": "wogRuPNBUi8.mp4", "clip": [1605, 1674], "conversations": [{"from": "human", "timestamps": 1635.0, "value": "What actions did the person at bbox [0.428, 0.103, 0.534, 0.396] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.428, 
0.103, 0.534, 0.396]: stand, listen to (a person), watch (a person).", "At 1634.0s, [0.447, 0.106, 0.535, 0.407]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1674.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.012, 0.044, 0.303, 0.945]: walk; [0.548, 0.216, 0.734, 0.821]: listen to (a person), stand, watch (a person)."}]}, {"video": "wogRuPNBUi8.mp4", "clip": [1701, 1735], "conversations": [{"from": "human", "timestamps": 1731.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.01, 0.121, 0.073, 0.419]: listen to (a person), stand, watch (a person); [0.161, 0.093, 0.32, 0.872]: listen to (a person), stand, watch (a person); [0.669, 0.104, 0.838, 0.771]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.853, 0.103, 0.992, 0.978]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1735.0, "value": "What actions is the person in bbox [0.009, 0.121, 0.072, 0.407] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.009, 0.121, 0.072, 0.407]: stand, listen to (a person), watch (a person).", "From 1728.0s to 1734.0s, [0.013, 0.123, 0.065, 0.41]->[0.011, 0.119, 0.073, 0.412]->[0.011, 0.12, 0.073, 0.415]->[0.01, 0.121, 0.073, 0.419]->[0.01, 0.119, 0.074, 0.417]->[0.01, 0.119, 0.072, 0.415]->[0.008, 0.12, 0.071, 0.412]: stand, listen to (a person), watch (a person)."]}]}, {"video": "wogRuPNBUi8.mp4", "clip": [1715, 1754], "conversations": [{"from": "human", "timestamps": 1745.0, "value": "What actions did the person at bbox [0.278, 0.135, 0.382, 0.377] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.278, 0.135, 0.382, 0.377]: stand, listen to (a person), watch (a person).", "At 1744.0s, [0.298, 0.13, 0.406, 0.36]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1743.0s, [0.389, 0.138, 0.469, 0.388]: walk, carry/hold (an object), listen to (a person).", "At 1742.0s, [0.49, 0.134, 0.562, 0.485]: walk, watch (a person).", "At 1741.0s, [0.432, 0.16, 0.501, 0.457]: walk."]}, {"from": "human", "timestamps": 1754.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.248, 0.169, 0.289, 0.388]: listen to (a person), stand, watch (a person); [0.258, 0.14, 0.344, 0.444]: listen to (a person), stand, watch (a person); [0.433, 0.147, 0.499, 0.446]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "x-6CtPWVi6E.mkv", "clip": [910, 955], "conversations": [{"from": "human", "timestamps": 940.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.203, 0.261, 0.964]: stand, fight/hit (a person); [0.889, 0.651, 0.998, 0.997]: carry/hold (an object), fight/hit (a person), bend/bow (at the waist)."}, {"from": "human", "timestamps": 955.0, "value": "For the person at bbox [0.27, 0.114, 0.694, 0.988], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.27, 0.114, 0.694, 0.988]: stand, watch (a person).", "At 954.0s, [0.364, 0.075, 0.648, 0.977]: stand, watch (a person)."]}]}, {"video": "x-6CtPWVi6E.mkv", "clip": [1046, 1080], 
"conversations": [{"from": "human", "timestamps": 1076.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.049, 0.0, 0.658, 1.0]: stand."}, {"from": "human", "timestamps": 1080.0, "value": "What actions did the person at bbox [0.205, 0.0, 0.968, 0.997] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.205, 0.0, 0.968, 0.997]: stand.", "From 1078.0s to 1079.0s, [0.409, 0.0, 0.995, 0.975]->[0.446, 0.0, 1.0, 0.976]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "x-6CtPWVi6E.mkv", "clip": [1237, 1368], "conversations": [{"from": "human", "timestamps": 1267.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.15, 0.005, 0.783, 0.947]: stand."}, {"from": "human", "timestamps": 1368.0, "value": "Describe the actions of the person at bbox [0.315, 0.002, 0.732, 0.993] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.315, 0.002, 0.732, 0.993]: walk, listen to (a person), watch (a person).", "At 1367.0s, [0.0, 0.002, 0.332, 0.995]: walk, listen to (a person), watch (a person)."]}]}, {"video": "x-6CtPWVi6E.mkv", "clip": [1386, 1422], "conversations": [{"from": "human", "timestamps": 1416.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.139, 0.0, 0.512, 1.0]: watch (a person), stand; [0.508, 0.052, 0.911, 0.997]: carry/hold (an object), walk."}, {"from": "human", "timestamps": 1422.0, "value": "What actions is the person in bbox [0.296, 0.511, 0.511, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.296, 0.511, 0.511, 1.0]: bend/bow (at the waist), throw.", "At 1421.0s, [0.274, 0.548, 0.475, 0.991]: bend/bow (at the waist)."]}]}, {"video": "x-6CtPWVi6E.mkv", "clip": [1394, 1496], "conversations": [{"from": "human", "timestamps": 1424.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.374, 0.497, 0.586, 0.999]: bend/bow (at the waist)."}, {"from": "human", "timestamps": 1496.0, "value": "Describe the actions of the person at bbox [0.018, 0.166, 0.387, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.018, 0.166, 0.387, 1.0]: sit.", "From 1494.0s to 1495.0s, [0.007, 0.177, 0.387, 1.0]->[0.022, 0.156, 0.375, 1.0]: sit, listen to (a person), watch (a person).", "At 1493.0s, [0.012, 0.199, 0.39, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1489.0s to 1492.0s, [0.0, 0.125, 0.406, 1.0]->[0.011, 0.173, 0.378, 1.0]->[0.017, 0.175, 0.38, 1.0]->[0.015, 0.176, 0.386, 1.0]: sit, listen to (a person), watch (a person)."]}]}, {"video": "x-6CtPWVi6E.mkv", "clip": [1435, 1477], "conversations": [{"from": "human", "timestamps": 1465.0, "value": "Describe the actions of the person at bbox [0.303, 0.016, 0.973, 0.977] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.303, 0.016, 0.973, 0.977]: sit, smoke.", "At 1464.0s, [0.296, 0.028, 0.893, 0.994]: sit, smoke.", "At 1463.0s, [0.286, 0.032, 0.882, 0.976]: sit, carry/hold (an object).", "At 1462.0s, [0.302, 0.01, 0.947, 1.0]: sit, put down.", "At 1461.0s, [0.364, 0.022, 0.939, 0.984]: sit, carry/hold (an object), write, listen to (a person), 
watch (a person)."]}, {"from": "human", "timestamps": 1477.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.095, 0.529, 0.182, 0.817]: walk; [0.722, 0.125, 1.0, 0.799]: watch (a person), stand."}]}, {"video": "x-6CtPWVi6E.mkv", "clip": [1514, 1606], "conversations": [{"from": "human", "timestamps": 1544.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.389, 0.019, 0.546, 0.877]: carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.753, 0.04, 0.999, 0.953]: carry/hold (an object), sit, listen to (a person)."}, {"from": "human", "timestamps": 1606.0, "value": "For the person at bbox [0.762, 0.024, 0.966, 0.422], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.762, 0.024, 0.966, 0.422]: stand, carry/hold (an object).", "From 1604.0s to 1605.0s, [0.502, 0.093, 0.851, 0.982]->[0.764, 0.04, 0.986, 0.398]: stand, carry/hold (an object).", "At 1603.0s, [0.341, 0.089, 0.999, 0.982]: stand, lift/pick up.", "From 1601.0s to 1602.0s, [0.622, 0.062, 0.988, 0.981]->[0.567, 0.096, 1.0, 0.994]: walk.", "At 1600.0s, [0.775, 0.004, 0.996, 0.656]: stand.", "From 1598.0s to 1599.0s, [0.784, 0.017, 1.0, 0.975]->[0.822, 0.013, 1.0, 0.995]: stand, watch (e.g., TV).", "At 1597.0s, [0.751, 0.105, 0.985, 0.962]: stand.", "At 1596.0s, [0.705, 0.239, 0.952, 0.947]: bend/bow (at the waist).", "At 1595.0s, [0.686, 0.409, 0.869, 0.917]: bend/bow (at the waist), talk to (e.g., self, a person, a group).", "From 1593.0s to 1594.0s, [0.773, 0.023, 1.0, 0.994]->[0.742, 0.025, 1.0, 0.96]: stand, watch (e.g., TV).", "At 1592.0s, [0.769, 0.002, 1.0, 0.993]: stand.", "At 1591.0s, [0.752, 0.0, 1.0, 0.998]: stand, work on a computer.", "At 1590.0s, [0.758, 0.011, 1.0, 0.999]: stand, watch (e.g., TV).", "At 1589.0s, [0.759, 0.006, 1.0, 0.986]: stand.", "At 1588.0s, [0.436, 0.003, 0.994, 0.973]: stand, carry/hold (an object), put down."]}]}, {"video": "x-6CtPWVi6E.mkv", "clip": [1629, 1668], "conversations": [{"from": "human", "timestamps": 1659.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.211, 0.0, 0.981, 1.0]: sit."}, {"from": "human", "timestamps": 1668.0, "value": "For the person at bbox [0.553, 0.15, 0.937, 0.935], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.553, 0.15, 0.937, 0.935]: sit.", "From 1666.0s to 1667.0s, [0.398, 0.016, 0.987, 1.0]->[0.551, 0.156, 0.836, 0.918]: sit, watch (e.g., TV).", "From 1664.0s to 1665.0s, [0.286, 0.017, 0.99, 0.997]->[0.417, 0.0, 1.0, 1.0]: stand.", "At 1663.0s, [0.361, 0.0, 0.998, 0.984]: sit, stand.", "From 1661.0s to 1662.0s, [0.395, 0.003, 1.0, 0.982]->[0.378, 0.0, 0.996, 1.0]: stand.", "From 1658.0s to 1660.0s, [0.282, 0.014, 0.992, 0.999]->[0.211, 0.0, 0.981, 1.0]->[0.251, 0.015, 0.992, 1.0]: sit.", "From 1655.0s to 1657.0s, [0.454, 0.139, 0.821, 0.923]->[0.457, 0.131, 0.818, 0.922]->[0.54, 0.153, 0.827, 0.931]: sit, watch (e.g., TV).", "From 1652.0s to 1654.0s, [0.578, 0.105, 0.904, 0.953]->[0.501, 0.116, 0.919, 0.896]->[0.451, 0.138, 0.934, 0.904]: sit.", "At 1651.0s, [0.607, 0.127, 0.909, 0.875]: sit, put down.", "From 1648.0s to 1650.0s, [0.57, 0.184, 0.843, 0.928]->[0.578, 0.148, 0.845, 0.921]->[0.561, 0.162, 0.849, 0.927]: sit.", "At 1647.0s, [0.597, 0.136, 0.842, 0.905]: sit, watch (e.g., TV).", "At 
1646.0s, [0.589, 0.166, 0.837, 0.894]: sit, carry/hold (an object), watch (e.g., TV).", "At 1645.0s, [0.607, 0.171, 0.852, 0.904]: sit, listen (e.g., to music).", "At 1644.0s, [0.592, 0.166, 0.846, 0.896]: sit.", "From 1642.0s to 1643.0s, [0.597, 0.161, 0.817, 0.89]->[0.595, 0.172, 0.809, 0.901]: sit, carry/hold (an object), watch (e.g., TV).", "At 1641.0s, [0.585, 0.156, 0.884, 0.877]: sit, carry/hold (an object).", "From 1638.0s to 1640.0s, [0.574, 0.154, 0.894, 0.872]->[0.57, 0.15, 0.836, 0.897]->[0.581, 0.164, 0.872, 0.898]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1637.0s, [0.574, 0.155, 0.903, 0.872]: sit, carry/hold (an object).", "At 1636.0s, [0.487, 0.164, 0.881, 0.921]: sit, drink.", "At 1635.0s, [0.538, 0.273, 0.854, 0.98]: sit, carry/hold (an object).", "At 1634.0s, [0.596, 0.256, 0.871, 0.982]: sit, stand, carry/hold (an object).", "At 1633.0s, [0.604, 0.255, 0.876, 0.962]: sit, carry/hold (an object).", "At 1632.0s, [0.401, 0.313, 0.728, 0.979]: bend/bow (at the waist), carry/hold (an object).", "At 1631.0s, [0.222, 0.099, 0.593, 1.0]: stand, carry/hold (an object)."]}]}, {"video": "x-6CtPWVi6E.mkv", "clip": [1616, 1732], "conversations": [{"from": "human", "timestamps": 1646.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.589, 0.166, 0.837, 0.894]: carry/hold (an object), sit, watch (e.g., TV)."}, {"from": "human", "timestamps": 1732.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.016, 0.065, 0.418, 0.939]?"}, {"from": "gpt", "value": ["At Now, [0.016, 0.065, 0.418, 0.939]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1731.0s, [0.016, 0.103, 0.49, 0.965]: stand, carry/hold (an object), give/serve (an object) to (a person), listen to (a person).", "At 1730.0s, [0.012, 0.074, 0.427, 0.971]: stand, carry/hold (an object), give/serve (an object) to (a person), listen to (a person), watch (a person).", "At 1729.0s, [0.23, 0.109, 0.704, 0.987]: walk, carry/hold (an object), listen to (a person).", "At 1728.0s, [0.091, 0.028, 0.911, 0.994]: stand, carry/hold (an object).", "From 1726.0s to 1727.0s, [0.028, 0.002, 0.691, 0.999]->[0.029, 0.0, 0.69, 0.997]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "x-6CtPWVi6E.mkv", "clip": [1677, 1714], "conversations": [{"from": "human", "timestamps": 1707.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.055, 0.317, 0.344, 0.986]: watch (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1714.0, "value": "What actions did the person at bbox [0.096, 0.0, 0.66, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.096, 0.0, 0.66, 1.0]: stand, listen to (a person).", "At 1713.0s, [0.078, 0.0, 0.655, 1.0]: stand, listen to (a person), watch (a person)."]}]}, {"video": "x-6CtPWVi6E.mkv", "clip": [1741, 1793], "conversations": [{"from": "human", "timestamps": 1771.0, "value": "Describe the actions of the person at bbox [0.011, 0.015, 0.655, 0.998] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.011, 0.015, 0.655, 0.998]: sit.", "At 1770.0s, [0.26, 0.007, 0.986, 0.952]: sit, touch (an object)."]}, {"from": "human", "timestamps": 1793.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.134, 
0.059, 0.496, 0.975]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.479, 0.088, 0.878, 0.989]: listen to (a person), sit."}]}, {"video": "xJmRNZVDDCY.mkv", "clip": [897, 949], "conversations": [{"from": "human", "timestamps": 927.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.046, 0.008, 0.851, 0.973]?"}, {"from": "gpt", "value": ["At Now, [0.046, 0.008, 0.851, 0.973]: talk to (e.g., self, a person, a group).", "At 926.0s, [0.072, 0.014, 0.857, 0.98]: sit, talk to (e.g., self, a person, a group).", "At 925.0s, [0.04, 0.02, 0.82, 0.979]: sit."]}, {"from": "human", "timestamps": 949.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.066, 0.004, 0.931, 0.98]: listen to (a person), sit, watch (a person)."}]}, {"video": "xJmRNZVDDCY.mkv", "clip": [920, 983], "conversations": [{"from": "human", "timestamps": 950.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.106, 0.0, 0.932, 0.975]: watch (a person), sit."}, {"from": "human", "timestamps": 983.0, "value": "Describe the actions of the person at bbox [0.11, 0.008, 0.919, 0.979] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.11, 0.008, 0.919, 0.979]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 981.0s to 982.0s, [0.093, 0.013, 0.945, 0.977]->[0.138, 0.032, 0.905, 0.967]: sit, talk to (e.g., self, a person, a group).", "At 980.0s, [0.189, 0.009, 0.958, 0.96]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "xJmRNZVDDCY.mkv", "clip": [980, 1013], "conversations": [{"from": "human", "timestamps": 1010.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.039, 0.009, 0.44, 1.0]: carry/hold (an object), sit, watch (e.g., TV)."}, {"from": "human", "timestamps": 1013.0, "value": "What actions is the person in bbox [0.03, 0.116, 0.43, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.03, 0.116, 0.43, 1.0]: sit, carry/hold (an object), watch (e.g., TV).", "From 1009.0s to 1012.0s, [0.038, 0.016, 0.41, 1.0]->[0.039, 0.009, 0.44, 1.0]->[0.038, 0.068, 0.448, 0.983]->[0.042, 0.068, 0.455, 0.989]: sit, carry/hold (an object), watch (e.g., TV).", "From 1007.0s to 1008.0s, [0.058, 0.032, 0.955, 0.966]->[0.076, 0.018, 0.966, 0.977]: sit.", "From 1004.0s to 1005.0s, [0.045, 0.013, 0.966, 0.975]->[0.079, 0.015, 0.971, 0.967]: sit.", "From 1002.0s to 1003.0s, [0.079, 0.02, 0.865, 0.993]->[0.044, 0.047, 0.798, 0.988]: stand.", "From 999.0s to 1001.0s, [0.054, 0.007, 0.815, 1.0]->[0.059, 0.025, 0.785, 1.0]->[0.067, 0.006, 0.822, 1.0]: sit, stand.", "From 997.0s to 998.0s, [0.033, 0.0, 0.742, 1.0]->[0.041, 0.03, 0.793, 0.984]: stand."]}]}, {"video": "xJmRNZVDDCY.mkv", "clip": [1132, 1179], "conversations": [{"from": "human", "timestamps": 1162.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.068, 0.227, 0.494, 0.98]: carry/hold (an object), sit, watch (a person), listen to (a person); [0.519, 0.039, 0.968, 0.973]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1179.0, "value": "What actions did the person at bbox [0.449, 0.024, 0.953, 0.992] perform now and earlier?"}, 
{"from": "gpt", "value": ["In the current frame, [0.449, 0.024, 0.953, 0.992]: sit, talk to (e.g., self, a person, a group).", "From 1169.0s to 1178.0s, [0.451, 0.028, 0.959, 0.985]->[0.449, 0.043, 0.96, 0.995]->[0.427, 0.042, 0.962, 0.986]->[0.451, 0.02, 0.96, 0.988]->[0.442, 0.034, 0.957, 0.989]->[0.45, 0.027, 0.958, 0.973]->[0.455, 0.028, 0.961, 0.996]->[0.452, 0.031, 0.961, 0.99]->[0.375, 0.039, 0.96, 0.971]->[0.442, 0.051, 0.965, 0.998]: sit, talk to (e.g., self, a person, a group).", "From 1163.0s to 1168.0s, [0.488, 0.032, 0.969, 0.97]->[0.483, 0.041, 0.96, 0.976]->[0.506, 0.032, 0.973, 0.968]->[0.47, 0.035, 0.974, 0.993]->[0.43, 0.022, 0.953, 0.984]->[0.416, 0.056, 0.965, 0.989]: sit, listen to (a person).", "From 1160.0s to 1162.0s, [0.432, 0.017, 0.963, 1.0]->[0.494, 0.01, 0.966, 0.988]->[0.519, 0.039, 0.968, 0.973]: sit, talk to (e.g., self, a person, a group).", "At 1159.0s, [0.43, 0.023, 0.954, 0.982]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "From 1156.0s to 1158.0s, [0.37, 0.029, 0.954, 0.983]->[0.45, 0.025, 0.961, 0.985]->[0.452, 0.027, 0.972, 0.975]: sit, listen to (a person).", "At 1155.0s, [0.485, 0.031, 0.959, 0.985]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "From 1153.0s to 1154.0s, [0.453, 0.039, 0.953, 0.971]->[0.491, 0.029, 0.965, 0.978]: sit, talk to (e.g., self, a person, a group).", "At 1152.0s, [0.491, 0.023, 0.966, 0.98]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1151.0s, [0.508, 0.024, 0.97, 0.988]: sit, listen to (a person), watch (a person).", "At 1150.0s, [0.483, 0.027, 0.965, 0.979]: stand, listen to (a person).", "From 1148.0s to 1149.0s, [0.479, 0.038, 0.961, 0.983]->[0.487, 0.032, 0.961, 0.997]: sit, listen to (a person).", "From 1145.0s to 1147.0s, [0.457, 0.02, 0.979, 0.968]->[0.482, 0.012, 0.963, 0.974]->[0.434, 0.024, 0.966, 0.995]: sit.", "At 1144.0s, [0.432, 0.005, 0.966, 0.972]: sit, watch (e.g., TV)."]}]}, {"video": "xJmRNZVDDCY.mkv", "clip": [1144, 1247], "conversations": [{"from": "human", "timestamps": 1174.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.059, 0.235, 0.534, 0.974]: listen to (a person), sit, watch (a person), talk to (e.g., self, a person, a group); [0.45, 0.027, 0.958, 0.973]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1247.0, "value": "Describe the actions of the person at bbox [0.273, 0.022, 0.959, 0.953] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.273, 0.022, 0.959, 0.953]: sit, drive (e.g., a car, a truck).", "At 1246.0s, [0.451, 0.0, 0.962, 1.0]: sit, drive (e.g., a car, a truck).", "At 1245.0s, [0.406, 0.017, 0.976, 0.975]: sit, carry/hold (an object)."]}]}, {"video": "xJmRNZVDDCY.mkv", "clip": [1221, 1253], "conversations": [{"from": "human", "timestamps": 1251.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.422, 0.001, 0.976, 0.972]: sit, drive (e.g., a car, a truck)."}, {"from": "human", "timestamps": 1253.0, "value": "What actions did the person at bbox [0.346, 0.0, 0.98, 0.981] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.346, 0.0, 0.98, 0.981]: sit, drive (e.g., a car, a truck).", "From 1246.0s to 1252.0s, [0.451, 0.0, 0.962, 1.0]->[0.273, 0.022, 0.959, 0.953]->[0.2, 0.014, 0.972, 0.98]->[0.426, 0.0, 0.958, 0.973]->[0.103, 0.037, 
0.951, 0.99]->[0.422, 0.001, 0.976, 0.972]->[0.361, 0.002, 0.966, 0.962]: sit, drive (e.g., a car, a truck).", "At 1245.0s, [0.406, 0.017, 0.976, 0.975]: sit, carry/hold (an object)."]}]}, {"video": "xJmRNZVDDCY.mkv", "clip": [1354, 1425], "conversations": [{"from": "human", "timestamps": 1384.0, "value": "What actions is the person in bbox [0.788, 0.376, 0.847, 0.69] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.788, 0.376, 0.847, 0.69]: walk.", "At 1383.0s, [0.771, 0.394, 0.828, 0.691]: walk."]}, {"from": "human", "timestamps": 1425.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.49, 0.184, 0.561, 0.661]: watch (a person), stand; [0.628, 0.327, 0.765, 0.649]: watch (a person), sit."}]}, {"video": "xJmRNZVDDCY.mkv", "clip": [1389, 1464], "conversations": [{"from": "human", "timestamps": 1419.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.179, 0.11, 0.394, 0.981]: walk."}, {"from": "human", "timestamps": 1464.0, "value": "What actions is the person in bbox [0.276, 0.005, 0.919, 0.968] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.276, 0.005, 0.919, 0.968]: sit, answer phone.", "From 1462.0s to 1463.0s, [0.254, 0.015, 0.904, 0.974]->[0.259, 0.009, 0.918, 0.976]: sit, stand, answer phone.", "From 1460.0s to 1461.0s, [0.22, 0.013, 0.915, 0.982]->[0.254, 0.015, 0.909, 0.965]: stand, answer phone.", "At 1459.0s, [0.199, 0.012, 0.905, 0.985]: stand, answer phone, carry/hold (an object).", "From 1448.0s to 1458.0s, [0.34, 0.066, 0.919, 0.932]->[0.341, 0.082, 0.904, 0.92]->[0.379, 0.085, 0.935, 0.912]->[0.292, 0.082, 0.938, 0.945]->[0.364, 0.055, 0.874, 0.928]->[0.351, 0.066, 0.915, 0.914]->[0.355, 0.066, 0.897, 0.896]->[0.369, 0.062, 0.915, 0.929]->[0.339, 0.071, 0.918, 0.922]->[0.338, 0.069, 0.909, 0.921]->[0.233, 0.016, 0.911, 0.981]: sit, answer phone."]}]}, {"video": "xJmRNZVDDCY.mkv", "clip": [1429, 1478], "conversations": [{"from": "human", "timestamps": 1459.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.199, 0.012, 0.905, 0.985]: carry/hold (an object), stand, answer phone."}, {"from": "human", "timestamps": 1478.0, "value": "What actions did the person at bbox [0.323, 0.053, 0.93, 0.986] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.323, 0.053, 0.93, 0.986]: stand, listen to (a person).", "At 1477.0s, [0.338, 0.042, 0.937, 0.985]: stand."]}]}, {"video": "xJmRNZVDDCY.mkv", "clip": [1500, 1539], "conversations": [{"from": "human", "timestamps": 1530.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.419, 0.077, 0.916, 0.901]: sit, touch (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1539.0, "value": "Describe the actions of the person at bbox [0.159, 0.053, 0.948, 0.966] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.159, 0.053, 0.948, 0.966]: sit.", "From 1533.0s to 1538.0s, [0.211, 0.008, 0.955, 0.977]->[0.208, 0.013, 0.948, 0.978]->[0.025, 0.018, 0.951, 0.981]->[0.08, 0.023, 0.946, 0.981]->[0.328, 0.034, 0.955, 0.971]->[0.329, 0.009, 0.948, 0.974]: sit."]}]}, {"video": 
"xJmRNZVDDCY.mkv", "clip": [1480, 1607], "conversations": [{"from": "human", "timestamps": 1510.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.175, 0.073, 0.635, 0.911]: carry/hold (an object), sit, eat."}, {"from": "human", "timestamps": 1607.0, "value": "Describe the actions of the person at bbox [0.177, 0.012, 0.956, 0.974] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.177, 0.012, 0.956, 0.974]: sit, stand, answer phone.", "From 1598.0s to 1606.0s, [0.253, 0.003, 0.954, 0.954]->[0.26, 0.01, 0.963, 0.938]->[0.243, 0.004, 0.97, 0.986]->[0.234, 0.0, 0.967, 0.982]->[0.233, 0.007, 0.963, 0.979]->[0.276, 0.016, 0.965, 0.969]->[0.231, 0.011, 0.957, 0.978]->[0.2, 0.0, 0.977, 0.972]->[0.25, 0.003, 0.96, 0.979]: stand, answer phone.", "At 1597.0s, [0.249, 0.013, 0.95, 0.956]: stand, answer phone, talk to (e.g., self, a person, a group).", "From 1595.0s to 1596.0s, [0.362, 0.0, 0.957, 0.989]->[0.379, 0.0, 0.954, 0.987]: stand.", "From 1593.0s to 1594.0s, [0.356, 0.022, 0.925, 0.989]->[0.371, 0.042, 0.96, 0.986]: sit, stand.", "From 1589.0s to 1592.0s, [0.222, 0.004, 0.953, 0.96]->[0.276, 0.0, 0.948, 0.99]->[0.286, 0.0, 0.956, 0.988]->[0.263, 0.0, 0.952, 0.99]: sit.", "At 1588.0s, [0.171, 0.011, 0.954, 0.958]: sit, stand.", "From 1580.0s to 1587.0s, [0.196, 0.0, 0.965, 0.997]->[0.19, 0.002, 0.949, 0.958]->[0.168, 0.007, 0.979, 0.987]->[0.175, 0.012, 0.946, 0.947]->[0.306, 0.005, 0.958, 0.999]->[0.235, 0.0, 0.966, 0.991]->[0.252, 0.014, 0.94, 0.965]->[0.268, 0.008, 0.957, 0.947]: sit.", "At 1578.0s, [0.144, 0.017, 0.95, 0.962]: sit.", "At 1577.0s, [0.123, 0.0, 0.972, 0.991]: sit, stand.", "From 1574.0s to 1576.0s, [0.189, 0.012, 0.965, 0.991]->[0.29, 0.013, 0.955, 0.986]->[0.215, 0.002, 0.965, 0.992]: sit.", "From 1572.0s to 1573.0s, [0.13, 0.045, 0.94, 0.965]->[0.129, 0.042, 0.944, 0.973]: stand.", "From 1566.0s to 1571.0s, [0.23, 0.048, 0.948, 0.995]->[0.141, 0.058, 0.918, 0.972]->[0.124, 0.065, 0.939, 0.963]->[0.262, 0.044, 0.934, 0.987]->[0.103, 0.07, 0.945, 0.968]->[0.136, 0.04, 0.944, 0.962]: sit.", "At 1565.0s, [0.228, 0.053, 0.951, 0.986]: sit, watch (a person).", "At 1564.0s, [0.358, 0.064, 0.945, 0.969]: sit, listen to (a person), watch (a person).", "At 1563.0s, [0.133, 0.099, 0.913, 0.95]: sit, watch (a person)."]}]}, {"video": "xJmRNZVDDCY.mkv", "clip": [1541, 1583], "conversations": [{"from": "human", "timestamps": 1571.0, "value": "Describe the actions of the person at bbox [0.136, 0.04, 0.944, 0.962] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.136, 0.04, 0.944, 0.962]: sit.", "From 1566.0s to 1570.0s, [0.23, 0.048, 0.948, 0.995]->[0.141, 0.058, 0.918, 0.972]->[0.124, 0.065, 0.939, 0.963]->[0.262, 0.044, 0.934, 0.987]->[0.103, 0.07, 0.945, 0.968]: sit.", "At 1565.0s, [0.228, 0.053, 0.951, 0.986]: sit, watch (a person).", "At 1564.0s, [0.358, 0.064, 0.945, 0.969]: sit, listen to (a person), watch (a person).", "At 1563.0s, [0.133, 0.099, 0.913, 0.95]: sit, watch (a person)."]}, {"from": "human", "timestamps": 1583.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.175, 0.012, 0.946, 0.947]: sit."}]}, {"video": "xO4ABy2iOQA.mp4", "clip": [982, 1017], "conversations": [{"from": "human", "timestamps": 1012.0, "value": "What actions did the person at bbox [0.287, 0.114, 0.837, 0.974] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.287, 
0.114, 0.837, 0.974]: stand, play musical instrument, sing to (e.g., self, a person, a group), watch (a person).", "From 1009.0s to 1011.0s, [0.387, 0.156, 0.896, 1.0]->[0.333, 0.112, 0.738, 0.967]->[0.29, 0.101, 0.602, 0.942]: stand, play musical instrument, sing to (e.g., self, a person, a group).", "At 1008.0s, [0.313, 0.176, 0.869, 0.95]: stand, play musical instrument, sing to (e.g., self, a person, a group), watch (a person).", "At 1007.0s, [0.412, 0.194, 0.659, 0.99]: stand, carry/hold (an object), play musical instrument, sing to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1017.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.004, 0.086, 0.167, 0.887]: dance; [0.007, 0.006, 0.197, 0.313]: stand, play musical instrument."}]}, {"video": "xO4ABy2iOQA.mp4", "clip": [1065, 1101], "conversations": [{"from": "human", "timestamps": 1095.0, "value": "Describe the actions of the person at bbox [0.49, 0.402, 0.666, 0.976] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.49, 0.402, 0.666, 0.976]: dance, watch (a person).", "At 1094.0s, [0.505, 0.339, 0.691, 0.927]: dance."]}, {"from": "human", "timestamps": 1101.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.002, 0.528, 0.218, 0.975]: watch (a person), dance; [0.068, 0.384, 0.202, 0.611]: watch (a person), dance; [0.173, 0.384, 0.495, 0.999]: watch (a person), dance; [0.28, 0.216, 0.373, 0.49]: watch (a person), stand; [0.384, 0.181, 0.528, 0.437]: watch (a person), stand; [0.384, 0.346, 0.557, 0.952]: watch (a person), hug (a person), dance; [0.501, 0.377, 0.742, 0.999]: listen (e.g., to music), dance, watch (a person); [0.66, 0.214, 0.764, 0.584]: watch (a person), stand; [0.742, 0.253, 0.866, 0.596]: watch (a person), sit, smoke."}]}, {"video": "xO4ABy2iOQA.mp4", "clip": [1138, 1192], "conversations": [{"from": "human", "timestamps": 1168.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.141, 0.072, 0.978, 0.962]: watch (a person), stand."}, {"from": "human", "timestamps": 1192.0, "value": "For the person at bbox [0.866, 0.268, 1.0, 0.981], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.866, 0.268, 1.0, 0.981]: sit, listen to (a person), watch (a person).", "At 1191.0s, [0.602, 0.013, 0.968, 0.758]: sit, listen to (a person).", "At 1190.0s, [0.31, 0.0, 0.663, 0.652]: sit."]}]}, {"video": "xO4ABy2iOQA.mp4", "clip": [1179, 1235], "conversations": [{"from": "human", "timestamps": 1209.0, "value": "Describe the actions of the person at bbox [0.124, 0.071, 0.58, 0.974] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.124, 0.071, 0.58, 0.974]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1208.0s, [0.066, 0.068, 0.533, 0.973]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1207.0s, [0.073, 0.071, 0.541, 0.974]: sit, listen to (a person).", "At 1206.0s, [0.083, 0.081, 0.546, 0.976]: sit, touch (an object), listen to (a person), watch (a person).", "From 1204.0s to 1205.0s, [0.047, 0.103, 0.493, 0.973]->[0.063, 0.112, 0.522, 0.977]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1235.0, "value": "For 
the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.498, 0.315, 0.676, 0.987]: carry/hold (an object), talk to (e.g., self, a person, a group), walk; [0.628, 0.364, 0.708, 0.591]: watch (a person), stand, grab (a person)."}]}, {"video": "xO4ABy2iOQA.mp4", "clip": [1231, 1351], "conversations": [{"from": "human", "timestamps": 1261.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.032, 0.118, 0.496, 0.976]: sit, read, give/serve (an object) to (a person), carry/hold (an object), talk to (e.g., self, a person, a group); [0.454, 0.469, 0.989, 0.984]: carry/hold (an object), sit, read, listen to (a person)."}, {"from": "human", "timestamps": 1351.0, "value": "Describe the actions of the person at bbox [0.0, 0.055, 0.788, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.055, 0.788, 1.0]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1350.0s, [0.001, 0.015, 0.57, 1.0]: walk, talk to (e.g., self, a person, a group).", "At 1349.0s, [0.0, 0.044, 0.91, 0.979]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1348.0s, [0.086, 0.019, 1.0, 0.992]: stand, talk to (e.g., self, a person, a group).", "At 1347.0s, [0.026, 0.031, 1.0, 0.984]: stand.", "At 1346.0s, [0.011, 0.038, 1.0, 0.983]: stand, listen to (a person), watch (a person)."]}]}, {"video": "xO4ABy2iOQA.mp4", "clip": [1304, 1373], "conversations": [{"from": "human", "timestamps": 1334.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.202, 0.037, 0.985, 0.989]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1373.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.021, 0.045, 0.942, 0.98]?"}, {"from": "gpt", "value": ["At Now, [0.021, 0.045, 0.942, 0.98]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1372.0s, [0.023, 0.064, 0.967, 0.976]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "xO4ABy2iOQA.mp4", "clip": [1363, 1423], "conversations": [{"from": "human", "timestamps": 1393.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.561, 0.065, 0.993, 0.981]?"}, {"from": "gpt", "value": ["In the current frame, [0.561, 0.065, 0.993, 0.981]: walk, talk to (e.g., self, a person, a group).", "At 1392.0s, [0.44, 0.03, 0.98, 0.984]: stand, talk to (e.g., self, a person, a group).", "At 1390.0s, [0.406, 0.019, 0.994, 0.974]: stand, listen to (a person)."]}, {"from": "human", "timestamps": 1423.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.118, 0.119, 0.413, 0.977]: listen to (a person), stand, smoke; [0.41, 0.023, 0.879, 0.987]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "xO4ABy2iOQA.mp4", "clip": [1364, 1495], "conversations": [{"from": "human", "timestamps": 1394.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.358, 0.166, 0.983, 0.986]: talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1495.0, "value": "What actions did the person at bbox [0.008, 0.04, 0.857, 0.969] perform now and 
earlier?"}, {"from": "gpt", "value": ["At Now, [0.008, 0.04, 0.857, 0.969]: walk.", "At 1494.0s, [0.001, 0.016, 0.916, 0.975]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1493.0s, [0.0, 0.025, 0.973, 0.97]: stand.", "From 1491.0s to 1492.0s, [0.014, 0.013, 0.963, 0.991]->[0.004, 0.017, 0.88, 0.997]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "xO4ABy2iOQA.mp4", "clip": [1423, 1467], "conversations": [{"from": "human", "timestamps": 1453.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.532, 0.199, 0.747, 0.954]: watch (a person), listen to (a person), carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.72, 0.226, 0.995, 0.972]: listen to (a person), stand, touch (an object)."}, {"from": "human", "timestamps": 1467.0, "value": "What actions is the person in bbox [0.357, 0.229, 0.718, 0.968] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.357, 0.229, 0.718, 0.968]: walk.", "From 1465.0s to 1466.0s, [0.516, 0.086, 0.999, 0.969]->[0.161, 0.134, 0.796, 0.969]: walk.", "At 1464.0s, [0.349, 0.008, 0.995, 0.984]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1463.0s, [0.683, 0.013, 1.0, 0.976]: stand, watch (a person)."]}]}, {"video": "xO4ABy2iOQA.mp4", "clip": [1482, 1568], "conversations": [{"from": "human", "timestamps": 1512.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.0, 0.174, 0.544, 1.0]: listen to (a person), stand, watch (a person); [0.325, 0.296, 0.474, 0.607]: watch (a person), stand; [0.465, 0.024, 1.0, 0.987]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1568.0, "value": "Describe the actions of the person at bbox [0.636, 0.193, 0.707, 0.35] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.636, 0.193, 0.707, 0.35]: walk, carry/hold (an object), listen to (a person).", "At 1567.0s, [0.707, 0.183, 0.826, 0.455]: stand, listen to (a person), watch (a person)."]}]}, {"video": "xO4ABy2iOQA.mp4", "clip": [1529, 1577], "conversations": [{"from": "human", "timestamps": 1559.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.027, 0.124, 0.391, 1.0]: carry/hold (an object), walk; [0.37, 0.068, 0.873, 0.987]: walk."}, {"from": "human", "timestamps": 1577.0, "value": "What actions did the person at bbox [0.0, 0.062, 0.489, 0.99] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.062, 0.489, 0.99]: stand, carry/hold (an object), listen to (a person).", "At 1576.0s, [0.006, 0.081, 0.408, 0.994]: stand, carry/hold (an object).", "From 1574.0s to 1575.0s, [0.0, 0.114, 0.226, 1.0]->[0.0, 0.104, 0.139, 1.0]: stand, carry/hold (an object), watch (a person).", "At 1573.0s, [0.0, 0.11, 0.266, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "xO4ABy2iOQA.mp4", "clip": [1589, 1633], "conversations": [{"from": "human", "timestamps": 1619.0, "value": "Describe the actions of the person at bbox [0.339, 0.044, 0.824, 0.969] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.339, 0.044, 0.824, 0.969]: walk, carry/hold (an object).", "At 1618.0s, [0.069, 0.0, 0.664, 0.995]: walk, carry/hold (an object).", 
"At 1617.0s, [0.004, 0.0, 0.509, 0.989]: stand, read, talk to (e.g., self, a person, a group).", "From 1615.0s to 1616.0s, [0.0, 0.0, 0.506, 0.978]->[0.006, 0.018, 0.62, 0.981]: stand, carry/hold (an object), read, talk to (e.g., self, a person, a group).", "At 1614.0s, [0.012, 0.013, 0.633, 0.985]: stand, carry/hold (an object), read.", "At 1613.0s, [0.01, 0.004, 0.577, 0.978]: stand, carry/hold (an object), read, listen to (a person).", "At 1612.0s, [0.0, 0.01, 0.52, 0.984]: stand, read, listen to (a person).", "At 1611.0s, [0.006, 0.0, 0.574, 0.974]: stand, carry/hold (an object), listen to (a person).", "At 1610.0s, [0.0, 0.01, 0.516, 0.978]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1606.0s to 1608.0s, [0.016, 0.013, 0.492, 0.984]->[0.012, 0.008, 0.651, 0.979]->[0.009, 0.004, 0.663, 0.986]: stand, carry/hold (an object), listen to (a person).", "At 1605.0s, [0.003, 0.006, 0.554, 0.998]: stand, read, listen to (a person)."]}, {"from": "human", "timestamps": 1633.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.041, 0.394, 0.139, 0.592]: sit, play board game; [0.132, 0.358, 0.241, 0.55]: watch (a person), sit; [0.412, 0.301, 0.636, 0.974]: watch (a person), stand."}]}, {"video": "xO4ABy2iOQA.mp4", "clip": [1678, 1731], "conversations": [{"from": "human", "timestamps": 1708.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.128, 0.481, 0.989]: listen to (a person), stand, talk to (e.g., self, a person, a group); [0.396, 0.19, 0.559, 0.473]: stand, eat; [0.436, 0.011, 1.0, 0.987]: listen to (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1731.0, "value": "Describe the actions of the person at bbox [0.449, 0.117, 0.732, 0.839] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.449, 0.117, 0.732, 0.839]: stand, listen to (a person), watch (a person).", "From 1729.0s to 1730.0s, [0.431, 0.146, 0.668, 0.716]->[0.417, 0.135, 0.684, 0.61]: stand, listen to (a person), watch (a person).", "At 1728.0s, [0.514, 0.15, 0.734, 0.923]: stand, watch (a person)."]}]}, {"video": "xmqSaQPzL1E.mkv", "clip": [875, 955], "conversations": [{"from": "human", "timestamps": 905.0, "value": "Describe the actions of the person at bbox [0.55, 0.064, 0.988, 0.92] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.55, 0.064, 0.988, 0.92]: stand, listen to (a person), watch (a person).", "From 902.0s to 904.0s, [0.495, 0.189, 0.697, 0.926]->[0.542, 0.19, 0.705, 0.927]->[0.571, 0.2, 0.758, 0.927]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 955.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.781, 0.314, 0.901, 0.78]: talk to (e.g., self, a person, a group), walk."}]}, {"video": "xmqSaQPzL1E.mkv", "clip": [901, 1000], "conversations": [{"from": "human", "timestamps": 931.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.39, 0.381, 0.485, 0.823]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.571, 0.345, 0.697, 0.833]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, 
{"from": "human", "timestamps": 1000.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.231, 0.083, 0.773, 0.94]?"}, {"from": "gpt", "value": ["In the current frame, [0.231, 0.083, 0.773, 0.94]: stand, watch (a person).", "At 999.0s, [0.339, 0.065, 0.951, 0.945]: stand, watch (a person)."]}]}, {"video": "xmqSaQPzL1E.mkv", "clip": [1018, 1058], "conversations": [{"from": "human", "timestamps": 1048.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.059, 0.176, 0.614, 0.931]?"}, {"from": "gpt", "value": ["At Now, [0.059, 0.176, 0.614, 0.931]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1047.0s, [0.075, 0.165, 0.608, 0.919]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1046.0s, [0.02, 0.166, 0.477, 0.932]: sit, stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1044.0s to 1045.0s, [0.001, 0.191, 0.475, 0.929]->[0.015, 0.167, 0.478, 0.933]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1040.0s to 1043.0s, [0.019, 0.098, 0.555, 0.948]->[0.0, 0.149, 0.276, 0.911]->[0.0, 0.145, 0.313, 0.923]->[0.034, 0.036, 0.641, 0.932]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1039.0s, [0.016, 0.099, 0.494, 0.945]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1058.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.076, 0.196, 0.366, 0.922]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group); [0.433, 0.09, 0.912, 0.93]: listen to (a person), watch (a person), bend/bow (at the waist)."}]}, {"video": "xmqSaQPzL1E.mkv", "clip": [995, 1091], "conversations": [{"from": "human", "timestamps": 1025.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.556, 0.094, 0.918, 0.93]: watch (a person), walk."}, {"from": "human", "timestamps": 1091.0, "value": "What actions did the person at bbox [0.315, 0.359, 0.47, 0.936] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.315, 0.359, 0.47, 0.936]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1090.0s, [0.317, 0.359, 0.468, 0.928]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "xmqSaQPzL1E.mkv", "clip": [1138, 1175], "conversations": [{"from": "human", "timestamps": 1168.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.294, 0.245, 0.518, 0.817]: carry/hold (an object), run/jog, talk to (e.g., self, a person, a group); [0.534, 0.164, 0.669, 0.838]: watch (a person), stand."}, {"from": "human", "timestamps": 1175.0, "value": "What actions did the person at bbox [0.432, 0.358, 0.553, 0.918] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.432, 0.358, 0.553, 0.918]: sit, carry/hold (an object), sing to (e.g., self, a person, a group), talk to (e.g., self, a person, a group).", "At 1174.0s, [0.394, 0.412, 0.549, 0.921]: sit, carry/hold (an object).", "At 1172.0s, [0.486, 0.137, 0.597, 0.605]: sit, carry/hold (an object), sing to (e.g., self, a person, a group).", "From 1170.0s to 1171.0s, [0.45, 0.314, 0.569, 0.802]->[0.405, 
0.326, 0.549, 0.918]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "xmqSaQPzL1E.mkv", "clip": [1151, 1203], "conversations": [{"from": "human", "timestamps": 1181.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.349, 0.164, 0.604, 0.653]: carry/hold (an object), sit."}, {"from": "human", "timestamps": 1203.0, "value": "Describe the actions of the person at bbox [0.543, 0.436, 0.602, 0.561] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.543, 0.436, 0.602, 0.561]: run/jog, listen to (a person).", "At 1202.0s, [0.521, 0.437, 0.555, 0.54]: run/jog."]}]}, {"video": "xmqSaQPzL1E.mkv", "clip": [1260, 1315], "conversations": [{"from": "human", "timestamps": 1290.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.056, 0.112, 0.387, 0.93]: listen to (a person), stand, watch (a person); [0.495, 0.08, 0.923, 0.961]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1315.0, "value": "What actions did the person at bbox [0.184, 0.076, 0.779, 0.939] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.184, 0.076, 0.779, 0.939]: stand, talk to (e.g., self, a person, a group).", "From 1302.0s to 1314.0s, [0.604, 0.072, 0.987, 0.956]->[0.525, 0.042, 0.878, 0.944]->[0.266, 0.084, 0.845, 0.931]->[0.248, 0.072, 0.825, 0.923]->[0.248, 0.053, 0.826, 0.94]->[0.0, 0.025, 0.762, 0.945]->[0.179, 0.073, 0.926, 0.936]->[0.211, 0.079, 0.876, 0.925]->[0.232, 0.065, 0.872, 0.933]->[0.195, 0.074, 0.831, 0.937]->[0.23, 0.081, 0.809, 0.934]->[0.25, 0.079, 0.831, 0.93]->[0.057, 0.067, 0.871, 0.924]: stand, talk to (e.g., self, a person, a group).", "At 1301.0s, [0.566, 0.072, 0.923, 0.958]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1300.0s, [0.479, 0.079, 0.931, 0.944]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1299.0s, [0.455, 0.094, 0.986, 0.95]: stand, talk to (e.g., self, a person, a group).", "From 1297.0s to 1298.0s, [0.503, 0.109, 0.842, 0.946]->[0.485, 0.106, 0.848, 0.953]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1295.0s to 1296.0s, [0.448, 0.076, 0.874, 0.958]->[0.441, 0.088, 0.836, 0.935]: stand, talk to (e.g., self, a person, a group).", "At 1293.0s, [0.426, 0.141, 0.859, 0.943]: stand, talk to (e.g., self, a person, a group).", "From 1290.0s to 1292.0s, [0.495, 0.08, 0.923, 0.961]->[0.457, 0.083, 0.892, 0.958]->[0.473, 0.109, 0.862, 0.947]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1289.0s, [0.571, 0.063, 0.896, 0.918]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1288.0s, [0.548, 0.068, 0.947, 0.97]: stand, listen to (a person), watch (a person).", "At 1286.0s, [0.529, 0.086, 0.94, 0.939]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1285.0s, [0.572, 0.082, 0.885, 0.934]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1284.0s, [0.565, 0.06, 0.987, 0.953]: stand, listen to (a person), watch (a person).", "At 1283.0s, [0.544, 0.071, 0.977, 0.959]: stand, listen to (a person).", "At 1282.0s, [0.349, 0.231, 0.781, 0.787]: stand, throw, talk to (e.g., self, a person, a group).", "At 1281.0s, [0.393, 0.233, 0.738, 0.656]: sit, 
talk to (e.g., self, a person, a group)."]}]}, {"video": "xmqSaQPzL1E.mkv", "clip": [1275, 1329], "conversations": [{"from": "human", "timestamps": 1305.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.248, 0.072, 0.825, 0.923]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1329.0, "value": "What actions did the person at bbox [0.157, 0.211, 0.327, 0.82] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.157, 0.211, 0.327, 0.82]: stand, listen to (a person).", "At 1328.0s, [0.132, 0.219, 0.32, 0.824]: stand, listen to (a person), watch (a person).", "From 1326.0s to 1327.0s, [0.055, 0.125, 0.798, 0.921]->[0.056, 0.111, 0.792, 0.936]: stand, talk to (e.g., self, a person, a group).", "At 1324.0s, [0.094, 0.15, 0.754, 0.936]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "xmqSaQPzL1E.mkv", "clip": [1560, 1597], "conversations": [{"from": "human", "timestamps": 1590.0, "value": "What actions is the person in bbox [0.15, 0.113, 0.622, 0.928] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.15, 0.113, 0.622, 0.928]: sit, carry/hold (an object), give/serve (an object) to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1589.0s, [0.162, 0.088, 0.643, 0.931]: sit, carry/hold (an object), give/serve (an object) to (a person), sing to (e.g., self, a person, a group), watch (a person).", "At 1588.0s, [0.153, 0.053, 0.558, 0.956]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1585.0s to 1587.0s, [0.508, 0.151, 0.708, 0.695]->[0.508, 0.09, 0.7, 0.628]->[0.508, 0.093, 0.743, 0.695]: sit, carry/hold (an object), listen to (a person)."]}, {"from": "human", "timestamps": 1597.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.066, 0.968, 0.954]: watch (a person), sit, give/serve (an object) to (a person), talk to (e.g., self, a person, a group); [0.806, 0.089, 0.999, 0.941]: sit."}]}, {"video": "xmqSaQPzL1E.mkv", "clip": [1615, 1663], "conversations": [{"from": "human", "timestamps": 1645.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.008, 0.033, 0.772, 0.951]: hug (a person), bend/bow (at the waist); [0.482, 0.086, 0.97, 0.946]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1663.0, "value": "What actions is the person in bbox [0.283, 0.052, 0.86, 0.932] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.283, 0.052, 0.86, 0.932]: stand, watch (a person).", "At 1662.0s, [0.321, 0.064, 0.837, 0.958]: stand.", "At 1660.0s, [0.341, 0.096, 0.765, 0.948]: stand, watch (a person)."]}]}, {"video": "xmqSaQPzL1E.mkv", "clip": [1636, 1686], "conversations": [{"from": "human", "timestamps": 1666.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.231, 0.079, 0.672, 0.928]: watch (a person), talk to (e.g., self, a person, a group), bend/bow (at the waist); [0.466, 0.169, 0.7, 0.902]: watch (a person), sit, touch (an object)."}, {"from": "human", "timestamps": 1686.0, "value": "For the person at bbox 
[0.222, 0.194, 0.481, 0.95], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.222, 0.194, 0.481, 0.95]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1685.0s, [0.228, 0.176, 0.48, 0.95]: sit, talk to (e.g., self, a person, a group).", "From 1681.0s to 1684.0s, [0.217, 0.188, 0.489, 0.951]->[0.222, 0.178, 0.5, 0.943]->[0.227, 0.183, 0.477, 0.945]->[0.226, 0.193, 0.494, 0.952]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1680.0s, [0.218, 0.187, 0.485, 0.951]: sit, listen to (a person), watch (a person).", "From 1678.0s to 1679.0s, [0.214, 0.187, 0.491, 0.96]->[0.216, 0.182, 0.485, 0.949]: sit, listen to (a person).", "From 1675.0s to 1677.0s, [0.221, 0.183, 0.49, 0.946]->[0.22, 0.177, 0.487, 0.949]->[0.217, 0.184, 0.486, 0.948]: sit, listen to (a person), watch (a person).", "From 1668.0s to 1674.0s, [0.21, 0.177, 0.481, 0.951]->[0.223, 0.188, 0.481, 0.945]->[0.22, 0.202, 0.467, 0.933]->[0.224, 0.19, 0.51, 0.938]->[0.223, 0.185, 0.479, 0.947]->[0.223, 0.184, 0.481, 0.949]->[0.223, 0.186, 0.487, 0.946]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1667.0s, [0.193, 0.184, 0.474, 0.943]: sit, watch (a person).", "At 1666.0s, [0.231, 0.079, 0.672, 0.928]: bend/bow (at the waist), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "xmqSaQPzL1E.mkv", "clip": [1661, 1703], "conversations": [{"from": "human", "timestamps": 1691.0, "value": "For the person at bbox [0.443, 0.072, 0.984, 0.931], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.443, 0.072, 0.984, 0.931]: sit, listen to (a person), watch (a person).", "At 1690.0s, [0.423, 0.214, 0.649, 0.887]: sit, listen to (a person), watch (a person).", "From 1688.0s to 1689.0s, [0.429, 0.211, 0.647, 0.891]->[0.43, 0.205, 0.646, 0.887]: sit, listen to (a person).", "From 1684.0s to 1687.0s, [0.45, 0.216, 0.687, 0.876]->[0.436, 0.221, 0.684, 0.901]->[0.443, 0.203, 0.681, 0.895]->[0.424, 0.201, 0.647, 0.888]: sit, listen to (a person), watch (a person).", "At 1683.0s, [0.449, 0.225, 0.685, 0.901]: sit, listen to (a person)."]}, {"from": "human", "timestamps": 1703.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.29, 0.104, 0.591, 0.933]: sit, talk to (e.g., self, a person, a group); [0.509, 0.138, 0.763, 0.894]: listen to (a person), sit."}]}, {"video": "xmqSaQPzL1E.mkv", "clip": [1736, 1780], "conversations": [{"from": "human", "timestamps": 1766.0, "value": "For the person at bbox [0.439, 0.37, 0.53, 0.863], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.439, 0.37, 0.53, 0.863]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1764.0s to 1765.0s, [0.363, 0.388, 0.461, 0.837]->[0.41, 0.382, 0.504, 0.863]: walk, answer phone.", "At 1763.0s, [0.325, 0.36, 0.42, 0.803]: walk, answer phone, listen to (a person).", "From 1759.0s to 1762.0s, [0.204, 0.331, 0.288, 0.687]->[0.228, 0.342, 0.306, 0.708]->[0.252, 0.356, 0.335, 0.729]->[0.282, 0.352, 0.369, 0.749]: walk, answer phone.", "From 1755.0s to 1758.0s, [0.265, 0.263, 0.557, 0.948]->[0.286, 0.249, 0.565, 0.947]->[0.29, 0.243, 0.581, 0.954]->[0.299, 0.211, 0.581, 0.947]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1753.0s to 1754.0s, [0.259, 0.26, 
0.558, 0.95]->[0.192, 0.259, 0.573, 0.946]: sit, talk to (e.g., self, a person, a group).", "At 1752.0s, [0.254, 0.302, 0.595, 0.931]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1751.0s, [0.282, 0.279, 0.557, 0.951]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1749.0s to 1750.0s, [0.253, 0.267, 0.561, 0.941]->[0.284, 0.269, 0.558, 0.948]: sit, talk to (e.g., self, a person, a group).", "From 1747.0s to 1748.0s, [0.237, 0.267, 0.564, 0.926]->[0.275, 0.255, 0.557, 0.96]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1742.0s to 1746.0s, [0.236, 0.258, 0.557, 0.93]->[0.243, 0.292, 0.553, 0.923]->[0.242, 0.264, 0.562, 0.938]->[0.239, 0.264, 0.542, 0.925]->[0.24, 0.268, 0.562, 0.944]: sit, listen to (a person), watch (a person).", "At 1741.0s, [0.244, 0.261, 0.549, 0.947]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1738.0s to 1740.0s, [0.217, 0.073, 0.898, 0.938]->[0.198, 0.1, 0.892, 0.945]->[0.246, 0.267, 0.553, 0.93]: sit, talk to (e.g., self, a person, a group).", "At 1737.0s, [0.2, 0.065, 0.918, 0.958]: stand, talk to (e.g., self, a person, a group).", "At 1736.0s, [0.224, 0.074, 0.877, 0.941]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1780.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.125, 0.184, 0.255, 0.84]: watch (a person), walk; [0.212, 0.153, 0.35, 0.848]: watch (a person), talk to (e.g., self, a person, a group), walk; [0.623, 0.566, 0.79, 0.866]: carry/hold (an object), sit, crouch/kneel, listen to (a person); [0.663, 0.488, 0.755, 0.669]: listen to (a person), sit, crouch/kneel."}]}, {"video": "xp67EC-Hvwk.mkv", "clip": [902, 946], "conversations": [{"from": "human", "timestamps": 932.0, "value": "Describe the actions of the person at bbox [0.329, 0.186, 0.46, 0.409] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.329, 0.186, 0.46, 0.409]: stand, carry/hold (an object), enter.", "At 931.0s, [0.321, 0.164, 0.436, 0.46]: stand, carry/hold (an object)."]}, {"from": "human", "timestamps": 946.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.365, 0.095, 0.727, 0.839]: open (e.g., a window, a car door), bend/bow (at the waist)."}]}, {"video": "xp67EC-Hvwk.mkv", "clip": [997, 1040], "conversations": [{"from": "human", "timestamps": 1027.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.446, 0.111, 0.583, 0.467]: carry/hold (an object), listen to (a person), walk; [0.518, 0.135, 0.68, 0.476]: carry/hold (an object), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1040.0, "value": "What actions did the person at bbox [0.095, 0.069, 0.588, 0.997] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.095, 0.069, 0.588, 0.997]: stand, carry/hold (an object), read.", "At 1039.0s, [0.095, 0.057, 0.595, 0.985]: stand, carry/hold (an object).", "At 1038.0s, [0.0, 0.042, 0.785, 0.992]: bend/bow (at the waist), carry/hold (an object).", "At 1037.0s, [0.366, 0.212, 0.758, 0.753]: bend/bow (at the waist), carry/hold (an object), touch (an object)."]}]}, {"video": "xp67EC-Hvwk.mkv", "clip": [1078, 1126], "conversations": 
[{"from": "human", "timestamps": 1108.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.457, 0.043, 0.837, 0.987]?"}, {"from": "gpt", "value": ["At Now, [0.457, 0.043, 0.837, 0.987]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1107.0s, [0.426, 0.031, 0.802, 0.987]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1105.0s to 1106.0s, [0.349, 0.027, 0.747, 0.983]->[0.333, 0.052, 0.732, 0.99]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1104.0s, [0.348, 0.065, 0.748, 0.983]: stand, touch (an object).", "At 1103.0s, [0.328, 0.043, 0.738, 0.98]: stand, carry/hold (an object).", "At 1102.0s, [0.321, 0.06, 0.67, 0.987]: stand, open (e.g., a window, a car door).", "At 1101.0s, [0.41, 0.056, 0.65, 0.976]: stand, open (e.g., a window, a car door), push (an object).", "At 1100.0s, [0.482, 0.1, 0.686, 0.976]: stand, carry/hold (an object), open (e.g., a window, a car door).", "At 1099.0s, [0.508, 0.028, 0.647, 0.951]: stand, open (e.g., a window, a car door)."]}, {"from": "human", "timestamps": 1126.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.467, 0.517, 0.657, 0.673]: watch (a person), talk to (e.g., self, a person, a group), swim."}]}, {"video": "xp67EC-Hvwk.mkv", "clip": [1126, 1196], "conversations": [{"from": "human", "timestamps": 1156.0, "value": "What actions did the person at bbox [0.318, 0.238, 0.599, 0.775] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.318, 0.238, 0.599, 0.775]: stand, listen to (a person), watch (a person).", "At 1155.0s, [0.331, 0.263, 0.587, 0.752]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1154.0s, [0.476, 0.241, 0.662, 0.62]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1153.0s, [0.51, 0.232, 0.624, 0.537]: walk, carry/hold (an object), listen to (a person)."]}, {"from": "human", "timestamps": 1196.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.062, 0.184, 0.341, 0.965]: carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.303, 0.172, 0.599, 0.715]: listen to (a person), stand, watch (a person); [0.569, 0.027, 0.838, 0.999]: listen to (a person), stand, watch (a person)."}]}, {"video": "xp67EC-Hvwk.mkv", "clip": [1141, 1205], "conversations": [{"from": "human", "timestamps": 1171.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.049, 0.012, 0.691, 0.985]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1205.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.12, 0.01, 0.751, 0.99]?"}, {"from": "gpt", "value": ["In the current frame, [0.12, 0.01, 0.751, 0.99]: stand, talk to (e.g., self, a person, a group).", "From 1201.0s to 1204.0s, [0.134, 0.0, 0.765, 0.97]->[0.116, 0.015, 0.769, 0.975]->[0.084, 0.002, 0.741, 0.981]->[0.115, 0.018, 0.75, 0.966]: stand, talk to (e.g., self, a person, a group).", "At 1200.0s, [0.069, 0.205, 0.357, 0.967]: stand, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1197.0s to 1199.0s, [0.055, 0.202, 0.343, 0.967]->[0.06, 0.191, 0.341, 0.984]->[0.069, 0.196, 0.361, 0.987]: 
stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1196.0s, [0.062, 0.184, 0.341, 0.965]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1193.0s to 1195.0s, [0.076, 0.198, 0.407, 0.988]->[0.075, 0.195, 0.382, 0.964]->[0.062, 0.209, 0.368, 0.988]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1192.0s, [0.082, 0.202, 0.397, 0.989]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1191.0s, [0.079, 0.193, 0.405, 0.951]: stand, touch (an object), listen to (a person), watch (a person).", "At 1190.0s, [0.113, 0.019, 0.757, 0.976]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "xp67EC-Hvwk.mkv", "clip": [1244, 1342], "conversations": [{"from": "human", "timestamps": 1274.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.041, 0.012, 0.693, 0.976]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1342.0, "value": "What actions did the person at bbox [0.016, 0.005, 0.37, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.016, 0.005, 0.37, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1337.0s to 1341.0s, [0.009, 0.025, 0.306, 0.998]->[0.012, 0.017, 0.305, 1.0]->[0.007, 0.0, 0.307, 0.983]->[0.01, 0.012, 0.325, 0.999]->[0.019, 0.007, 0.4, 0.967]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1336.0s, [0.012, 0.005, 0.31, 0.997]: stand, talk to (e.g., self, a person, a group).", "From 1331.0s to 1335.0s, [0.018, 0.037, 0.341, 0.863]->[0.02, 0.025, 0.384, 0.955]->[0.017, 0.007, 0.376, 0.945]->[0.014, 0.016, 0.286, 0.996]->[0.013, 0.026, 0.311, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1328.0s to 1330.0s, [0.018, 0.164, 0.352, 0.994]->[0.014, 0.063, 0.352, 0.961]->[0.016, 0.043, 0.338, 0.972]: stand, listen to (a person), watch (a person)."]}]}, {"video": "xp67EC-Hvwk.mkv", "clip": [1330, 1361], "conversations": [{"from": "human", "timestamps": 1360.0, "value": "What actions did the person at bbox [0.033, 0.005, 0.708, 0.984] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.033, 0.005, 0.708, 0.984]: stand, talk to (e.g., self, a person, a group).", "From 1353.0s to 1359.0s, [0.017, 0.032, 0.704, 0.984]->[0.052, 0.021, 0.707, 0.977]->[0.043, 0.012, 0.735, 0.985]->[0.065, 0.024, 0.706, 0.981]->[0.041, 0.026, 0.734, 0.983]->[0.038, 0.022, 0.711, 0.983]->[0.042, 0.035, 0.737, 0.974]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1361.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.016, 0.94, 0.985]: listen to (a person), sit, watch (a person)."}]}, {"video": "xp67EC-Hvwk.mkv", "clip": [1354, 1400], "conversations": [{"from": "human", "timestamps": 1384.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.053, 0.195, 0.421, 0.982]: carry/hold (an object), listen to (a person), walk; [0.386, 0.135, 0.681, 0.981]: carry/hold (an object), grab (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1400.0, "value": "For the person at bbox [0.2, 0.062, 0.521, 0.996], what are their actions in the current frame and in prior frames?"}, {"from": 
"gpt", "value": ["In the current frame, [0.2, 0.062, 0.521, 0.996]: stand, carry/hold (an object), close (e.g., a door, a box).", "At 1399.0s, [0.167, 0.083, 0.47, 0.989]: stand, carry/hold (an object), close (e.g., a door, a box), listen to (a person), watch (a person).", "At 1398.0s, [0.173, 0.098, 0.419, 0.987]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1396.0s to 1397.0s, [0.17, 0.085, 0.41, 0.987]->[0.171, 0.072, 0.45, 0.988]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1395.0s, [0.154, 0.076, 0.393, 0.967]: stand, carry/hold (an object), listen to (a person).", "From 1392.0s to 1394.0s, [0.188, 0.157, 0.409, 0.97]->[0.187, 0.155, 0.412, 0.966]->[0.187, 0.144, 0.412, 0.957]: stand, listen to (a person), watch (a person).", "At 1391.0s, [0.192, 0.068, 0.428, 0.967]: stand, carry/hold (an object), open (e.g., a window, a car door), listen to (a person), watch (a person).", "At 1390.0s, [0.219, 0.056, 0.469, 0.973]: stand, open (e.g., a window, a car door), talk to (e.g., self, a person, a group), watch (a person).", "At 1389.0s, [0.284, 0.078, 0.577, 0.982]: stand, carry/hold (an object), open (e.g., a window, a car door), talk to (e.g., self, a person, a group), watch (a person).", "At 1388.0s, [0.314, 0.082, 0.598, 0.992]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1387.0s, [0.351, 0.095, 0.6, 0.974]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1386.0s, [0.384, 0.217, 0.591, 0.977]: walk, talk to (e.g., self, a person, a group)."]}]}, {"video": "xp67EC-Hvwk.mkv", "clip": [1415, 1472], "conversations": [{"from": "human", "timestamps": 1445.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.442, 0.173, 0.58, 0.712]: carry/hold (an object), talk to (e.g., self, a person, a group), walk; [0.571, 0.199, 0.697, 0.709]: walk."}, {"from": "human", "timestamps": 1472.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.098, 0.079, 0.382, 0.991]?"}, {"from": "gpt", "value": ["In the current frame, [0.098, 0.079, 0.382, 0.991]: walk, talk to (e.g., self, a person, a group).", "From 1467.0s to 1471.0s, [0.241, 0.085, 0.498, 0.991]->[0.207, 0.075, 0.476, 1.0]->[0.253, 0.115, 0.513, 0.994]->[0.148, 0.007, 0.528, 0.989]->[0.275, 0.096, 0.52, 0.982]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1466.0s, [0.162, 0.102, 0.535, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1464.0s to 1465.0s, [0.017, 0.093, 0.31, 1.0]->[0.097, 0.107, 0.405, 0.991]: stand, listen to (a person), watch (a person).", "At 1463.0s, [0.007, 0.061, 0.317, 0.996]: walk, listen to (a person), watch (a person).", "At 1462.0s, [0.014, 0.005, 0.381, 1.0]: walk, listen to (a person).", "From 1460.0s to 1461.0s, [0.02, 0.089, 0.662, 0.988]->[0.057, 0.013, 0.497, 0.996]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1459.0s, [0.072, 0.103, 0.419, 0.99]: walk, talk to (e.g., self, a person, a group).", "At 1458.0s, [0.204, 0.082, 0.523, 1.0]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1455.0s to 1457.0s, [0.32, 0.085, 0.49, 0.985]->[0.236, 0.099, 0.531, 0.982]->[0.229, 0.122, 0.489, 1.0]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1454.0s, [0.363, 
0.122, 0.536, 0.954]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1452.0s to 1453.0s, [0.227, 0.26, 0.4, 0.96]->[0.257, 0.078, 0.432, 0.886]: walk, talk to (e.g., self, a person, a group), watch (a person).", "At 1451.0s, [0.323, 0.127, 0.475, 0.76]: walk, talk to (e.g., self, a person, a group).", "At 1450.0s, [0.332, 0.105, 0.48, 0.74]: walk, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "xp67EC-Hvwk.mkv", "clip": [1525, 1597], "conversations": [{"from": "human", "timestamps": 1555.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.02, 0.26, 0.277, 0.999]: listen to (a person), stand, talk to (e.g., self, a person, a group); [0.181, 0.004, 0.385, 0.555]: listen to (a person), stand, watch (a person); [0.388, 0.022, 0.578, 0.549]: carry/hold (an object), stand, listen to (a person); [0.473, 0.174, 0.716, 0.963]: listen to (a person), stand, watch (a person); [0.585, 0.212, 0.904, 0.98]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1597.0, "value": "What actions did the person at bbox [0.131, 0.193, 0.396, 0.982] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.131, 0.193, 0.396, 0.982]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1596.0s, [0.153, 0.199, 0.367, 0.968]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "xp67EC-Hvwk.mkv", "clip": [1591, 1661], "conversations": [{"from": "human", "timestamps": 1621.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.158, 0.113, 0.33, 0.609]: carry/hold (an object), listen to (a person), walk; [0.32, 0.491, 0.814, 0.854]: lie/sleep; [0.544, 0.023, 0.74, 0.619]: listen to (a person), stand, watch (a person); [0.701, 0.033, 0.979, 0.849]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1661.0, "value": "Describe the actions of the person at bbox [0.035, 0.131, 0.442, 0.977] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.035, 0.131, 0.442, 0.977]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1660.0s, [0.009, 0.144, 0.432, 0.995]: stand, carry/hold (an object).", "At 1659.0s, [0.012, 0.128, 0.447, 0.978]: stand, carry/hold (an object), watch (a person).", "At 1658.0s, [0.298, 0.068, 0.456, 0.551]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1657.0s, [0.297, 0.076, 0.471, 0.553]: stand, carry/hold (an object), watch (a person)."]}]}, {"video": "xp67EC-Hvwk.mkv", "clip": [1611, 1684], "conversations": [{"from": "human", "timestamps": 1641.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.195, 0.04, 0.807, 0.988]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1684.0, "value": "What actions is the person in bbox [0.618, 0.093, 0.887, 0.954] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.618, 0.093, 0.887, 0.954]: stand, sing to (e.g., self, a person, a group).", "From 1675.0s to 1683.0s, [0.674, 0.0, 0.984, 0.826]->[0.589, 
0.081, 0.89, 0.805]->[0.591, 0.075, 0.88, 0.976]->[0.595, 0.098, 0.885, 0.896]->[0.59, 0.089, 0.887, 0.887]->[0.594, 0.094, 0.885, 0.967]->[0.589, 0.101, 0.891, 0.963]->[0.596, 0.089, 0.891, 0.975]->[0.601, 0.102, 0.893, 0.934]: stand, sing to (e.g., self, a person, a group)."]}]}, {"video": "xp67EC-Hvwk.mkv", "clip": [1648, 1688], "conversations": [{"from": "human", "timestamps": 1678.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.33, 0.334, 0.427, 0.462]?"}, {"from": "gpt", "value": ["In the current frame, [0.33, 0.334, 0.427, 0.462]: sit, listen to (a person), watch (a person).", "From 1676.0s to 1677.0s, [0.332, 0.327, 0.429, 0.462]->[0.324, 0.33, 0.431, 0.465]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1688.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.044, 0.001, 0.914, 0.985]: stand, sing to (e.g., self, a person, a group)."}]}, {"video": "y7ncweROe9U.mkv", "clip": [882, 959], "conversations": [{"from": "human", "timestamps": 912.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.362, 0.163, 0.899, 1.0]: stand, smoke."}, {"from": "human", "timestamps": 959.0, "value": "What actions is the person in bbox [0.21, 0.568, 0.401, 0.784] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.21, 0.568, 0.401, 0.784]: sit, listen to (a person), watch (a person).", "At 958.0s, [0.233, 0.53, 0.427, 0.752]: sit, drive (e.g., a car, a truck), listen to (a person), watch (a person).", "At 957.0s, [0.179, 0.295, 0.421, 0.776]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 956.0s, [0.199, 0.285, 0.556, 0.791]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 953.0s to 955.0s, [0.172, 0.313, 0.427, 0.794]->[0.127, 0.3, 0.412, 0.839]->[0.189, 0.292, 0.427, 0.813]: sit, listen to (a person), watch (a person).", "At 952.0s, [0.166, 0.313, 0.432, 0.777]: sit, touch (an object), listen to (a person), watch (a person).", "From 950.0s to 951.0s, [0.191, 0.322, 0.426, 0.789]->[0.19, 0.317, 0.429, 0.778]: sit, listen to (a person), watch (a person).", "From 947.0s to 949.0s, [0.205, 0.314, 0.436, 0.75]->[0.205, 0.314, 0.43, 0.764]->[0.194, 0.315, 0.429, 0.771]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "y7ncweROe9U.mkv", "clip": [878, 992], "conversations": [{"from": "human", "timestamps": 908.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.362, 0.15, 0.987, 0.992]: stand, smoke."}, {"from": "human", "timestamps": 992.0, "value": "What actions did the person at bbox [0.406, 0.3, 0.523, 0.852] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.406, 0.3, 0.523, 0.852]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 991.0s, [0.491, 0.297, 0.641, 0.827]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 989.0s to 990.0s, [0.502, 0.301, 0.624, 0.835]->[0.497, 0.293, 0.618, 0.824]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 988.0s, [0.51, 0.297, 0.672, 0.84]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 987.0s, [0.51, 0.302, 0.678, 0.838]: stand, talk to (e.g., 
self, a person, a group), watch (a person).", "At 986.0s, [0.513, 0.3, 0.708, 0.833]: stand, put down, listen to (a person).", "From 984.0s to 985.0s, [0.486, 0.297, 0.621, 0.827]->[0.478, 0.3, 0.62, 0.824]: stand, carry/hold (an object), listen to (a person).", "From 981.0s to 983.0s, [0.498, 0.294, 0.617, 0.824]->[0.497, 0.296, 0.615, 0.839]->[0.49, 0.297, 0.615, 0.822]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 980.0s, [0.504, 0.292, 0.626, 0.832]: stand, carry/hold (an object), watch (a person).", "At 979.0s, [0.486, 0.302, 0.71, 0.852]: stand, carry/hold (an object).", "From 976.0s to 978.0s, [0.344, 0.426, 0.438, 0.655]->[0.358, 0.407, 0.416, 0.634]->[0.377, 0.305, 0.563, 0.806]: walk, carry/hold (an object).", "From 974.0s to 975.0s, [0.253, 0.409, 0.34, 0.793]->[0.272, 0.406, 0.369, 0.801]: stand, carry/hold (an object)."]}]}, {"video": "y7ncweROe9U.mkv", "clip": [1000, 1060], "conversations": [{"from": "human", "timestamps": 1030.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.126, 0.176, 0.504, 0.977]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.349, 0.153, 0.788, 0.981]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1060.0, "value": "What actions did the person at bbox [0.257, 0.181, 0.584, 0.968] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.257, 0.181, 0.584, 0.968]: stand.", "At 1059.0s, [0.255, 0.21, 0.594, 0.976]: stand, talk to (e.g., self, a person, a group).", "From 1057.0s to 1058.0s, [0.242, 0.268, 0.572, 0.983]->[0.254, 0.154, 0.589, 0.963]: stand.", "At 1056.0s, [0.238, 0.269, 0.565, 0.988]: stand, listen to (a person).", "From 1051.0s to 1055.0s, [0.254, 0.123, 0.51, 0.982]->[0.244, 0.232, 0.525, 0.975]->[0.244, 0.038, 0.564, 0.976]->[0.263, 0.264, 0.556, 0.982]->[0.259, 0.102, 0.565, 0.977]: stand, listen to (a person), watch (a person).", "At 1050.0s, [0.24, 0.257, 0.537, 0.977]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1046.0s to 1049.0s, [0.17, 0.26, 0.528, 0.988]->[0.206, 0.268, 0.559, 0.983]->[0.217, 0.28, 0.575, 0.992]->[0.228, 0.282, 0.572, 0.985]: stand, talk to (e.g., self, a person, a group).", "At 1045.0s, [0.138, 0.293, 0.592, 0.989]: walk, talk to (e.g., self, a person, a group).", "At 1044.0s, [0.205, 0.225, 0.727, 0.975]: walk.", "At 1043.0s, [0.336, 0.184, 0.795, 1.0]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "At 1042.0s, [0.369, 0.164, 0.803, 0.998]: stand, listen to (a person), watch (a person).", "At 1041.0s, [0.379, 0.175, 0.787, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1039.0s to 1040.0s, [0.403, 0.164, 0.779, 0.986]->[0.364, 0.175, 0.783, 0.989]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1038.0s, [0.393, 0.18, 0.783, 0.995]: stand, carry/hold (an object), listen to (a person).", "At 1037.0s, [0.398, 0.184, 0.783, 0.99]: stand, carry/hold (an object), touch (an object), listen to (a person), watch (a person)."]}]}, {"video": "y7ncweROe9U.mkv", "clip": [1049, 1102], "conversations": [{"from": "human", "timestamps": 1079.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.475, 0.514, 0.631, 0.987]: run/jog, talk to (e.g., self, a person, a 
group)."}, {"from": "human", "timestamps": 1102.0, "value": "What actions did the person at bbox [0.48, 0.172, 0.712, 0.989] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.48, 0.172, 0.712, 0.989]: stand, watch (a person).", "At 1101.0s, [0.48, 0.157, 0.706, 0.985]: stand, watch (a person).", "At 1100.0s, [0.311, 0.161, 0.764, 0.986]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1099.0s, [0.31, 0.192, 0.776, 0.981]: stand, touch (an object).", "At 1098.0s, [0.306, 0.238, 0.801, 0.982]: stand, carry/hold (an object), touch (an object).", "At 1097.0s, [0.279, 0.309, 0.809, 0.985]: stand, touch (an object)."]}]}, {"video": "y7ncweROe9U.mkv", "clip": [1080, 1123], "conversations": [{"from": "human", "timestamps": 1110.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.586, 0.441, 0.643, 0.679]: hand wave, stand, watch (a person); [0.645, 0.456, 0.696, 0.671]: watch (a person), stand; [0.713, 0.225, 0.998, 0.968]: watch (a person), stand."}, {"from": "human", "timestamps": 1123.0, "value": "What actions did the person at bbox [0.132, 0.238, 0.326, 0.977] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.132, 0.238, 0.326, 0.977]: walk, watch (a person).", "At 1122.0s, [0.125, 0.323, 0.426, 0.992]: bend/bow (at the waist), smoke, talk to (e.g., self, a person, a group), watch (a person).", "At 1121.0s, [0.121, 0.446, 0.488, 0.998]: bend/bow (at the waist), smoke, touch (an object), listen to (a person), watch (a person).", "From 1119.0s to 1120.0s, [0.119, 0.48, 0.499, 1.0]->[0.116, 0.474, 0.506, 0.998]: bend/bow (at the waist), carry/hold (an object), smoke, listen to (a person).", "At 1118.0s, [0.106, 0.467, 0.475, 0.967]: bend/bow (at the waist), smoke, touch (an object)."]}]}, {"video": "y7ncweROe9U.mkv", "clip": [1265, 1302], "conversations": [{"from": "human", "timestamps": 1295.0, "value": "For the person at bbox [0.0, 0.244, 0.287, 0.997], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.244, 0.287, 0.997]: stand, listen to (a person), watch (a person).", "At 1294.0s, [0.0, 0.255, 0.292, 1.0]: stand, listen to (a person), watch (a person).", "At 1293.0s, [0.0, 0.266, 0.282, 0.997]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1292.0s, [0.0, 0.262, 0.284, 0.998]: stand, touch (an object), listen to (a person), watch (a person).", "At 1291.0s, [0.0, 0.254, 0.281, 0.984]: stand, listen to (a person).", "At 1290.0s, [0.0, 0.246, 0.289, 1.0]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1302.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.26, 0.276, 0.995]: listen to (a person), stand, touch (an object), watch (a person); [0.164, 0.252, 0.378, 0.876]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.493, 0.403, 0.798, 0.877]: listen to (a person), sit, watch (a person); [0.741, 0.108, 0.999, 0.982]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "y7ncweROe9U.mkv", "clip": [1234, 1341], "conversations": [{"from": "human", "timestamps": 1264.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current 
frame, [0.003, 0.529, 0.144, 0.986]: listen to (a person), sit, watch (a person); [0.123, 0.155, 0.361, 0.98]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.624, 0.15, 0.821, 0.98]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1341.0, "value": "For the person at bbox [0.211, 0.278, 0.453, 0.993], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.211, 0.278, 0.453, 0.993]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1340.0s, [0.241, 0.273, 0.502, 0.984]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "y7ncweROe9U.mkv", "clip": [1305, 1343], "conversations": [{"from": "human", "timestamps": 1335.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.265, 0.34, 0.467, 0.987]: carry/hold (an object), run/jog; [0.706, 0.329, 0.994, 0.983]: watch (a person), run/jog."}, {"from": "human", "timestamps": 1343.0, "value": "What actions is the person in bbox [0.212, 0.257, 0.463, 0.982] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.212, 0.257, 0.463, 0.982]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1342.0s, [0.235, 0.255, 0.46, 1.0]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1341.0s, [0.211, 0.278, 0.453, 0.993]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1340.0s, [0.241, 0.273, 0.502, 0.984]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "y7ncweROe9U.mkv", "clip": [1471, 1524], "conversations": [{"from": "human", "timestamps": 1501.0, "value": "What actions did the person at bbox [0.258, 0.271, 0.697, 0.653] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.258, 0.271, 0.697, 0.653]: sit, drive (e.g., a car, a truck).", "From 1499.0s to 1500.0s, [0.29, 0.289, 0.744, 0.691]->[0.304, 0.296, 0.648, 0.67]: sit, drive (e.g., a car, a truck), talk to (e.g., self, a person, a group).", "At 1498.0s, [0.237, 0.291, 0.616, 0.662]: sit, drive (e.g., a car, a truck), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1524.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.424, 0.492, 0.565, 0.697]: carry/hold (an object), sit, smoke."}]}, {"video": "y7ncweROe9U.mkv", "clip": [1491, 1562], "conversations": [{"from": "human", "timestamps": 1521.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.133, 0.433, 0.31, 0.649]: carry/hold (an object), sit, drive (e.g., a car, a truck); [0.405, 0.203, 0.598, 0.659]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1562.0, "value": "What actions did the person at bbox [0.582, 0.319, 0.862, 0.991] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.582, 0.319, 0.862, 0.991]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1560.0s to 1561.0s, [0.556, 0.411, 0.84, 
0.994]->[0.559, 0.379, 0.835, 0.998]: stand, talk to (e.g., self, a person, a group).", "From 1558.0s to 1559.0s, [0.551, 0.449, 0.776, 0.997]->[0.557, 0.441, 0.837, 0.998]: stand, listen to (a person), watch (a person)."]}]}, {"video": "y7ncweROe9U.mkv", "clip": [1559, 1617], "conversations": [{"from": "human", "timestamps": 1589.0, "value": "Describe the actions of the person at bbox [0.194, 0.366, 0.553, 0.996] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.194, 0.366, 0.553, 0.996]: sit, carry/hold (an object), read, listen to (a person).", "From 1586.0s to 1588.0s, [0.171, 0.315, 0.582, 0.996]->[0.183, 0.381, 0.627, 0.976]->[0.167, 0.369, 0.561, 0.991]: sit, carry/hold (an object), read, listen to (a person).", "At 1585.0s, [0.193, 0.368, 0.622, 0.994]: stand, read, listen to (a person).", "At 1584.0s, [0.2, 0.364, 0.623, 0.992]: carry/hold (an object), listen to (a person).", "At 1583.0s, [0.17, 0.359, 0.597, 0.987]: sit, carry/hold (an object), listen to (a person).", "At 1582.0s, [0.111, 0.367, 0.527, 0.977]: sit, carry/hold (an object).", "From 1580.0s to 1581.0s, [0.163, 0.506, 0.522, 0.996]->[0.163, 0.454, 0.523, 1.0]: sit, carry/hold (an object), listen to (a person).", "At 1579.0s, [0.118, 0.553, 0.499, 0.998]: sit, stand, listen to (a person).", "At 1578.0s, [0.062, 0.543, 0.446, 0.994]: sit, listen to (a person).", "At 1577.0s, [0.058, 0.341, 0.526, 0.992]: stand, take (an object) from (a person), talk to (e.g., self, a person, a group).", "At 1576.0s, [0.033, 0.169, 0.497, 0.976]: stand, carry/hold (an object), take (an object) from (a person), talk to (e.g., self, a person, a group).", "At 1575.0s, [0.086, 0.186, 0.576, 0.98]: stand, take (an object) from (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1568.0s to 1574.0s, [0.094, 0.185, 0.461, 0.981]->[0.076, 0.182, 0.466, 0.988]->[0.079, 0.181, 0.461, 0.997]->[0.075, 0.18, 0.465, 0.988]->[0.077, 0.167, 0.452, 0.988]->[0.061, 0.179, 0.447, 0.993]->[0.053, 0.181, 0.454, 1.0]: stand, listen to (a person), watch (a person).", "From 1565.0s to 1567.0s, [0.139, 0.172, 0.584, 0.992]->[0.01, 0.178, 0.458, 0.995]->[0.103, 0.189, 0.458, 0.991]: stand.", "At 1564.0s, [0.154, 0.168, 0.596, 0.995]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1563.0s, [0.154, 0.163, 0.593, 0.993]: stand, carry/hold (an object), listen to (a person).", "At 1562.0s, [0.147, 0.206, 0.618, 0.99]: bend/bow (at the waist), carry/hold (an object), listen to (a person).", "From 1560.0s to 1561.0s, [0.135, 0.211, 0.59, 0.988]->[0.118, 0.272, 0.59, 0.995]: stand, carry/hold (an object), listen to (a person).", "At 1559.0s, [0.135, 0.243, 0.597, 0.991]: stand, carry/hold (an object).", "At 1558.0s, [0.155, 0.23, 0.573, 0.994]: stand, carry/hold (an object), open (e.g., a window, a car door), talk to (e.g., self, a person, a group).", "From 1556.0s to 1557.0s, [0.388, 0.174, 0.709, 0.989]->[0.213, 0.16, 0.665, 0.997]: walk, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1555.0s, [0.536, 0.162, 0.808, 0.978]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1554.0s, [0.553, 0.195, 0.807, 0.985]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1551.0s to 1553.0s, [0.64, 0.151, 0.841, 0.982]->[0.639, 0.153, 0.843, 0.982]->[0.604, 0.169, 0.808, 0.991]: stand, carry/hold (an object), listen to (a person), watch 
(a person).", "From 1548.0s to 1550.0s, [0.647, 0.141, 0.843, 0.987]->[0.645, 0.14, 0.846, 0.992]->[0.642, 0.145, 0.842, 0.979]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1547.0s, [0.651, 0.142, 0.844, 0.99]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1546.0s, [0.645, 0.14, 0.894, 0.976]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1545.0s, [0.641, 0.14, 0.84, 0.99]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1544.0s, [0.648, 0.152, 0.854, 0.989]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1540.0s to 1543.0s, [0.527, 0.146, 0.852, 0.981]->[0.484, 0.157, 0.853, 1.0]->[0.503, 0.15, 0.855, 1.0]->[0.515, 0.157, 0.856, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1539.0s, [0.661, 0.132, 0.854, 0.992]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1538.0s, [0.66, 0.145, 0.85, 0.999]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1537.0s, [0.607, 0.159, 0.811, 0.992]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1536.0s, [0.636, 0.193, 0.827, 0.986]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1534.0s to 1535.0s, [0.729, 0.25, 0.985, 0.986]->[0.747, 0.195, 0.995, 0.98]: walk, carry/hold (an object), listen to (a person).", "At 1533.0s, [0.647, 0.311, 0.812, 0.942]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1532.0s, [0.622, 0.362, 0.756, 0.812]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1528.0s to 1531.0s, [0.779, 0.452, 0.872, 0.654]->[0.661, 0.441, 0.755, 0.648]->[0.595, 0.415, 0.685, 0.747]->[0.601, 0.398, 0.685, 0.711]: walk, carry/hold (an object), watch (a person).", "At 1527.0s, [0.796, 0.454, 0.864, 0.668]: walk, carry/hold (an object).", "At 1526.0s, [0.744, 0.453, 0.83, 0.585]: stand, carry/hold (an object), push (an object), watch (a person).", "At 1525.0s, [0.733, 0.448, 0.83, 0.595]: stand, carry/hold (an object), close (e.g., a door, a box)."]}, {"from": "human", "timestamps": 1617.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.436, 0.229, 0.877, 0.989]: stand, close (e.g., a door, a box); [0.726, 0.277, 0.956, 0.492]: close (e.g., a door, a box), bend/bow (at the waist)."}]}, {"video": "y7ncweROe9U.mkv", "clip": [1639, 1671], "conversations": [{"from": "human", "timestamps": 1669.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.357, 0.212, 0.841, 0.994]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1671.0, "value": "Describe the actions of the person at bbox [0.127, 0.087, 0.528, 0.986] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.127, 0.087, 0.528, 0.986]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1670.0s, [0.0, 0.0, 0.213, 0.981]: walk, listen to (a person), watch (a person)."]}]}, {"video": 
"y7ncweROe9U.mkv", "clip": [1646, 1720], "conversations": [{"from": "human", "timestamps": 1676.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.216, 0.455, 1.0]: carry/hold (an object), sit, listen to (a person); [0.359, 0.16, 0.889, 0.997]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1720.0, "value": "What actions did the person at bbox [0.036, 0.169, 0.436, 0.997] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.036, 0.169, 0.436, 0.997]: stand, listen to (a person).", "At 1719.0s, [0.016, 0.182, 0.396, 0.996]: stand, listen to (a person).", "At 1718.0s, [0.03, 0.226, 0.449, 0.992]: stand, talk to (e.g., self, a person, a group).", "At 1717.0s, [0.069, 0.221, 0.471, 0.985]: walk, talk to (e.g., self, a person, a group).", "At 1716.0s, [0.124, 0.211, 0.492, 0.997]: walk.", "At 1715.0s, [0.0, 0.146, 0.448, 0.997]: stand.", "At 1714.0s, [0.006, 0.169, 0.453, 0.991]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1713.0s, [0.007, 0.167, 0.459, 0.985]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1711.0s to 1712.0s, [0.004, 0.157, 0.46, 1.0]->[0.005, 0.158, 0.475, 0.988]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1710.0s, [0.001, 0.159, 0.456, 1.0]: stand, listen to (a person), watch (a person).", "From 1707.0s to 1709.0s, [0.019, 0.146, 0.492, 0.995]->[0.014, 0.159, 0.476, 0.99]->[0.01, 0.176, 0.452, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1705.0s to 1706.0s, [0.013, 0.147, 0.484, 0.99]->[0.01, 0.163, 0.469, 0.984]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1702.0s to 1704.0s, [0.029, 0.152, 0.484, 0.985]->[0.027, 0.16, 0.481, 0.987]->[0.02, 0.134, 0.481, 0.998]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1701.0s, [0.031, 0.159, 0.491, 1.0]: stand, listen to (a person), watch (a person).", "From 1696.0s to 1700.0s, [0.091, 0.201, 0.492, 0.996]->[0.05, 0.214, 0.494, 0.994]->[0.046, 0.152, 0.473, 0.989]->[0.03, 0.165, 0.485, 0.999]->[0.03, 0.15, 0.489, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1695.0s, [0.101, 0.164, 0.5, 0.993]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "y7ncweROe9U.mkv", "clip": [1693, 1726], "conversations": [{"from": "human", "timestamps": 1723.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.032, 0.155, 0.438, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.391, 0.251, 0.818, 0.995]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1726.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.551, 0.253, 1.0, 0.993]?"}, {"from": "gpt", "value": ["At Now, [0.551, 0.253, 1.0, 0.993]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1724.0s to 1725.0s, [0.392, 0.258, 0.807, 0.994]->[0.383, 0.263, 0.825, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1723.0s, [0.391, 0.251, 0.818, 0.995]: stand, listen to (a person), watch (a person).", "At 1722.0s, [0.381, 0.255, 0.818, 0.984]: stand, listen to (a person), talk to (e.g., self, a person, a 
group), watch (a person).", "From 1719.0s to 1721.0s, [0.412, 0.256, 0.797, 0.992]->[0.413, 0.261, 0.814, 0.997]->[0.435, 0.236, 0.822, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1717.0s to 1718.0s, [0.783, 0.36, 1.0, 1.0]->[0.456, 0.311, 0.865, 1.0]: stand, listen to (a person), watch (a person)."]}]}, {"video": "yMtGmGa8KZ0.mkv", "clip": [1121, 1182], "conversations": [{"from": "human", "timestamps": 1151.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.202, 0.075, 0.516, 0.982]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1182.0, "value": "For the person at bbox [0.334, 0.0, 0.981, 0.969], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.334, 0.0, 0.981, 0.969]: stand, fishing, talk to (e.g., self, a person, a group).", "At 1181.0s, [0.291, 0.054, 0.971, 0.939]: stand, carry/hold (an object), fishing, watch (a person).", "At 1180.0s, [0.26, 0.085, 0.997, 0.995]: fishing."]}]}, {"video": "yMtGmGa8KZ0.mkv", "clip": [1119, 1210], "conversations": [{"from": "human", "timestamps": 1149.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.207, 0.077, 0.499, 0.983]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1210.0, "value": "What actions did the person at bbox [0.263, 0.228, 0.983, 0.997] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.263, 0.228, 0.983, 0.997]: walk.", "At 1209.0s, [0.351, 0.308, 0.961, 0.993]: walk."]}]}, {"video": "yMtGmGa8KZ0.mkv", "clip": [1196, 1231], "conversations": [{"from": "human", "timestamps": 1226.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.143, 0.049, 0.811, 0.987]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1231.0, "value": "What actions is the person in bbox [0.14, 0.041, 0.812, 0.993] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.14, 0.041, 0.812, 0.993]: stand, talk to (e.g., self, a person, a group).", "From 1229.0s to 1230.0s, [0.129, 0.062, 0.824, 0.978]->[0.135, 0.065, 0.818, 0.984]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "yMtGmGa8KZ0.mkv", "clip": [1248, 1288], "conversations": [{"from": "human", "timestamps": 1278.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.049, 0.002, 0.642, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.387, 0.025, 1.0, 0.986]: carry/hold (an object), stand, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1288.0, "value": "For the person at bbox [0.0, 0.021, 0.456, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.021, 0.456, 1.0]: stand, listen to (a person).", "From 1284.0s to 1287.0s, [0.007, 0.033, 0.523, 1.0]->[0.005, 0.027, 0.539, 1.0]->[0.002, 0.03, 0.487, 1.0]->[0.026, 0.045, 0.593, 1.0]: stand, listen to (a person), watch (a person).", "At 1283.0s, [0.106, 0.055, 0.962, 0.996]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1281.0s to 1282.0s, [0.097, 0.062, 0.953, 0.991]->[0.101, 0.053, 0.953, 
0.998]: stand, talk to (e.g., self, a person, a group).", "From 1276.0s to 1280.0s, [0.029, 0.026, 0.624, 1.0]->[0.033, 0.018, 0.605, 1.0]->[0.049, 0.002, 0.642, 1.0]->[0.0, 0.015, 0.484, 1.0]->[0.099, 0.057, 0.97, 0.993]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1275.0s, [0.016, 0.02, 0.596, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1274.0s, [0.047, 0.0, 0.589, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1273.0s, [0.035, 0.008, 0.586, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 1266.0s to 1272.0s, [0.029, 0.0, 0.587, 1.0]->[0.0, 0.0, 0.387, 1.0]->[0.008, 0.0, 0.529, 1.0]->[0.023, 0.0, 0.557, 1.0]->[0.0, 0.0, 0.412, 0.996]->[0.0, 0.0, 0.536, 1.0]->[0.031, 0.021, 0.572, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1264.0s to 1265.0s, [0.034, 0.0, 0.521, 1.0]->[0.033, 0.0, 0.538, 0.991]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1262.0s to 1263.0s, [0.017, 0.0, 0.448, 1.0]->[0.02, 0.0, 0.468, 1.0]: stand, listen to (a person), watch (a person).", "From 1258.0s to 1261.0s, [0.014, 0.014, 0.532, 1.0]->[0.026, 0.0, 0.504, 1.0]->[0.037, 0.0, 0.529, 1.0]->[0.004, 0.0, 0.396, 0.986]: stand, watch (a person).", "At 1257.0s, [0.0, 0.024, 0.407, 1.0]: stand, listen to (a person), watch (a person).", "From 1255.0s to 1256.0s, [0.002, 0.0, 0.349, 0.996]->[0.01, 0.007, 0.481, 1.0]: stand, watch (a person)."]}]}, {"video": "yMtGmGa8KZ0.mkv", "clip": [1270, 1328], "conversations": [{"from": "human", "timestamps": 1300.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.049, 0.024, 0.788, 0.978]: stand."}, {"from": "human", "timestamps": 1328.0, "value": "What actions is the person in bbox [0.393, 0.198, 0.62, 0.589] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.393, 0.198, 0.62, 0.589]: sit, row boat.", "At 1327.0s, [0.036, 0.025, 0.937, 0.974]: stand, watch (a person).", "At 1326.0s, [0.069, 0.032, 0.953, 0.985]: stand."]}]}, {"video": "yMtGmGa8KZ0.mkv", "clip": [1338, 1377], "conversations": [{"from": "human", "timestamps": 1368.0, "value": "What actions is the person in bbox [0.025, 0.067, 0.472, 0.987] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.025, 0.067, 0.472, 0.987]: stand, carry/hold (an object), watch (a person).", "At 1367.0s, [0.019, 0.077, 0.451, 0.986]: stand, carry/hold (an object), drink, listen to (a person).", "At 1366.0s, [0.014, 0.075, 0.445, 0.994]: stand, drink, listen to (a person).", "At 1365.0s, [0.018, 0.067, 0.425, 0.982]: stand, drink.", "At 1364.0s, [0.041, 0.025, 0.476, 1.0]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 1359.0s to 1363.0s, [0.002, 0.014, 0.428, 0.975]->[0.0, 0.042, 0.407, 0.996]->[0.011, 0.027, 0.429, 0.985]->[0.013, 0.038, 0.429, 0.98]->[0.045, 0.029, 0.462, 0.998]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1358.0s, [0.023, 0.048, 0.444, 0.99]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1356.0s to 1357.0s, [0.061, 0.09, 0.444, 0.981]->[0.033, 0.072, 0.464, 0.965]: stand, drink, listen to (a person).", "At 1355.0s, [0.033, 0.066, 0.446, 0.994]: 
stand, drink.", "At 1354.0s, [0.021, 0.097, 0.422, 0.989]: stand, drink, listen to (a person).", "At 1353.0s, [0.048, 0.109, 0.478, 0.971]: stand, carry/hold (an object), listen to (a person), take (an object) from (a person).", "At 1352.0s, [0.015, 0.09, 0.505, 0.996]: stand, listen to (a person), take (an object) from (a person).", "At 1351.0s, [0.03, 0.075, 0.415, 0.989]: stand, listen to (a person), talk to (e.g., self, a person, a group).", "From 1349.0s to 1350.0s, [0.019, 0.039, 0.393, 0.995]->[0.023, 0.043, 0.405, 1.0]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1346.0s to 1348.0s, [0.062, 0.051, 0.458, 0.991]->[0.045, 0.012, 0.424, 1.0]->[0.042, 0.025, 0.418, 1.0]: stand, listen to (a person), watch (a person).", "At 1345.0s, [0.003, 0.06, 0.391, 0.985]: stand."]}, {"from": "human", "timestamps": 1377.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.017, 0.08, 0.499, 0.976]: carry/hold (an object), stand, watch (a person), listen to (a person); [0.544, 0.078, 1.0, 0.972]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "yMtGmGa8KZ0.mkv", "clip": [1381, 1439], "conversations": [{"from": "human", "timestamps": 1411.0, "value": "Describe the actions of the person at bbox [0.106, 0.102, 0.618, 0.971] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.106, 0.102, 0.618, 0.971]: stand, carry/hold (an object), take (an object) from (a person).", "At 1410.0s, [0.09, 0.097, 0.477, 0.983]: stand, carry/hold (an object), take (an object) from (a person), watch (a person).", "From 1408.0s to 1409.0s, [0.031, 0.067, 0.372, 0.996]->[0.0, 0.05, 0.454, 1.0]: stand, dress/put on clothing, talk to (e.g., self, a person, a group), watch (a person).", "From 1406.0s to 1407.0s, [0.001, 0.026, 0.335, 1.0]->[0.0, 0.106, 0.334, 0.988]: stand, dress/put on clothing, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1405.0s, [0.023, 0.041, 0.512, 1.0]: stand, dress/put on clothing, watch (a person).", "At 1404.0s, [0.026, 0.096, 0.44, 0.991]: stand, dress/put on clothing, talk to (e.g., self, a person, a group), watch (a person).", "At 1403.0s, [0.073, 0.121, 0.611, 1.0]: stand, dress/put on clothing, listen to (a person), watch (a person).", "At 1402.0s, [0.005, 0.177, 0.406, 1.0]: stand, dress/put on clothing, listen to (a person).", "At 1401.0s, [0.007, 0.088, 0.481, 0.981]: stand, dress/put on clothing."]}, {"from": "human", "timestamps": 1439.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.083, 0.029, 0.417, 0.989]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.423, 0.106, 0.823, 0.988]: listen to (a person), stand, watch (a person)."}]}, {"video": "yMtGmGa8KZ0.mkv", "clip": [1401, 1475], "conversations": [{"from": "human", "timestamps": 1431.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.11, 0.036, 0.455, 0.992]: carry/hold (an object), stand, talk to (e.g., self, a person, a group); [0.424, 0.099, 0.867, 0.981]: carry/hold (an object), stand, watch (a person)."}, {"from": "human", "timestamps": 1475.0, "value": "For the person at bbox [0.028, 0.056, 0.634, 0.961], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the 
current frame, [0.028, 0.056, 0.634, 0.961]: stand, listen to (a person), watch (a person).", "At 1474.0s, [0.0, 0.061, 0.6, 0.974]: stand, listen to (a person), watch (a person).", "At 1473.0s, [0.0, 0.053, 0.512, 0.959]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1469.0s to 1472.0s, [0.0, 0.035, 0.436, 0.948]->[0.0, 0.023, 0.467, 0.93]->[0.0, 0.005, 0.427, 1.0]->[0.0, 0.017, 0.469, 0.977]: stand, listen to (a person), watch (a person).", "At 1468.0s, [0.0, 0.01, 0.418, 0.971]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1467.0s, [0.0, 0.063, 0.535, 0.966]: stand, listen to (a person), watch (a person).", "At 1466.0s, [0.004, 0.036, 0.507, 0.995]: stand, listen to (a person).", "From 1464.0s to 1465.0s, [0.045, 0.099, 0.616, 0.994]->[0.035, 0.041, 0.524, 0.994]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "yMtGmGa8KZ0.mkv", "clip": [1446, 1497], "conversations": [{"from": "human", "timestamps": 1476.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.003, 0.047, 0.515, 0.996]: watch (a person), stand, touch (an object); [0.309, 0.11, 0.997, 0.965]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1497.0, "value": "Describe the actions of the person at bbox [0.123, 0.071, 0.527, 0.991] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.123, 0.071, 0.527, 0.991]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1492.0s to 1496.0s, [0.108, 0.103, 0.59, 0.993]->[0.097, 0.074, 0.541, 1.0]->[0.071, 0.084, 0.542, 0.987]->[0.169, 0.086, 0.531, 0.982]->[0.132, 0.078, 0.557, 0.992]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1491.0s, [0.051, 0.081, 0.511, 0.989]: stand, carry/hold (an object), take (an object) from (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1490.0s, [0.06, 0.051, 0.779, 0.999]: stand, take (an object) from (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1483.0s to 1489.0s, [0.0, 0.0, 0.467, 0.98]->[0.0, 0.041, 0.425, 0.982]->[0.004, 0.065, 0.42, 0.976]->[0.002, 0.042, 0.407, 1.0]->[0.0, 0.067, 0.392, 0.98]->[0.013, 0.035, 0.553, 0.991]->[0.071, 0.039, 0.704, 1.0]: stand, listen to (a person), watch (a person).", "At 1482.0s, [0.0, 0.02, 0.428, 0.961]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1478.0s to 1481.0s, [0.008, 0.009, 0.529, 0.991]->[0.0, 0.013, 0.315, 0.94]->[0.0, 0.044, 0.332, 0.923]->[0.0, 0.0, 0.404, 1.0]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1477.0s, [0.015, 0.048, 0.483, 0.985]: stand, watch (a person).", "At 1476.0s, [0.003, 0.047, 0.515, 0.996]: stand, touch (an object), watch (a person).", "From 1474.0s to 1475.0s, [0.0, 0.061, 0.6, 0.974]->[0.028, 0.056, 0.634, 0.961]: stand, listen to (a person), watch (a person).", "At 1473.0s, [0.0, 0.053, 0.512, 0.959]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1469.0s to 1472.0s, [0.0, 0.035, 0.436, 0.948]->[0.0, 0.023, 0.467, 0.93]->[0.0, 0.005, 0.427, 1.0]->[0.0, 0.017, 0.469, 0.977]: stand, listen to (a person), watch (a person).", "At 1468.0s, [0.0, 0.01, 0.418, 0.971]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1467.0s, [0.0, 0.063, 0.535, 0.966]: stand, listen to (a person), 
watch (a person).", "At 1466.0s, [0.004, 0.036, 0.507, 0.995]: stand, listen to (a person).", "From 1464.0s to 1465.0s, [0.045, 0.099, 0.616, 0.994]->[0.035, 0.041, 0.524, 0.994]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "yMtGmGa8KZ0.mkv", "clip": [1483, 1534], "conversations": [{"from": "human", "timestamps": 1513.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.503, 0.122, 0.729, 0.92]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1534.0, "value": "What actions is the person in bbox [0.215, 0.133, 0.789, 0.985] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.215, 0.133, 0.789, 0.985]: stand.", "From 1530.0s to 1533.0s, [0.207, 0.163, 0.804, 0.997]->[0.227, 0.172, 0.78, 0.99]->[0.274, 0.179, 0.736, 0.985]->[0.232, 0.157, 0.73, 0.99]: stand."]}]}, {"video": "yo-Kg2YxlZs.mkv", "clip": [908, 945], "conversations": [{"from": "human", "timestamps": 938.0, "value": "What actions is the person in bbox [0.019, 0.051, 0.589, 0.992] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.019, 0.051, 0.589, 0.992]: stand, listen to (a person).", "At 937.0s, [0.106, 0.018, 0.528, 0.994]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 945.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.116, 0.081, 0.745, 0.967]: stand, touch (an object), talk to (e.g., self, a person, a group); [0.647, 0.224, 0.98, 0.99]: listen to (a person), stand, watch (a person)."}]}, {"video": "yo-Kg2YxlZs.mkv", "clip": [917, 1016], "conversations": [{"from": "human", "timestamps": 947.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.105, 0.089, 0.756, 0.985]: stand, touch (an object), talk to (e.g., self, a person, a group); [0.641, 0.232, 0.981, 0.988]: listen to (a person), stand."}, {"from": "human", "timestamps": 1016.0, "value": "What actions did the person at bbox [0.032, 0.035, 0.779, 0.983] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.032, 0.035, 0.779, 0.983]: sit, talk to (e.g., self, a person, a group).", "From 1013.0s to 1015.0s, [0.034, 0.081, 0.667, 0.987]->[0.036, 0.081, 0.659, 0.995]->[0.028, 0.058, 0.669, 0.988]: sit, talk to (e.g., self, a person, a group).", "At 1012.0s, [0.031, 0.084, 0.683, 0.986]: sit, stand, talk to (e.g., self, a person, a group).", "At 1011.0s, [0.053, 0.085, 0.69, 0.986]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "yo-Kg2YxlZs.mkv", "clip": [971, 1018], "conversations": [{"from": "human", "timestamps": 1001.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.027, 0.084, 0.726, 0.993]?"}, {"from": "gpt", "value": ["In the current frame, [0.027, 0.084, 0.726, 0.993]: sit, listen to (a person), watch (a person).", "At 1000.0s, [0.051, 0.071, 0.719, 0.989]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1018.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.091, 0.105, 0.504, 0.893]: listen to (a person), sit, watch (a person); [0.429, 0.182, 0.929, 0.806]: listen to (a person), sit."}]}, {"video": "yo-Kg2YxlZs.mkv", "clip": [1127, 1166], 
"conversations": [{"from": "human", "timestamps": 1157.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.06, 0.048, 0.944, 0.996]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1166.0, "value": "Describe the actions of the person at bbox [0.069, 0.035, 0.967, 0.986] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.069, 0.035, 0.967, 0.986]: sit, talk to (e.g., self, a person, a group).", "At 1165.0s, [0.073, 0.061, 0.949, 0.983]: sit, talk to (e.g., self, a person, a group).", "At 1164.0s, [0.023, 0.085, 0.951, 0.981]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1162.0s to 1163.0s, [0.038, 0.056, 0.973, 0.974]->[0.024, 0.112, 0.991, 1.0]: sit, talk to (e.g., self, a person, a group).", "From 1159.0s to 1161.0s, [0.072, 0.052, 0.957, 0.986]->[0.054, 0.05, 0.97, 1.0]->[0.05, 0.044, 0.984, 0.987]: sit.", "From 1154.0s to 1158.0s, [0.02, 0.039, 0.926, 0.982]->[0.045, 0.044, 0.943, 0.996]->[0.033, 0.042, 0.942, 1.0]->[0.06, 0.048, 0.944, 0.996]->[0.034, 0.054, 0.992, 1.0]: sit, talk to (e.g., self, a person, a group).", "From 1152.0s to 1153.0s, [0.034, 0.034, 0.946, 0.981]->[0.028, 0.032, 0.981, 0.983]: sit, smoke, talk to (e.g., self, a person, a group)."]}]}, {"video": "yo-Kg2YxlZs.mkv", "clip": [1154, 1200], "conversations": [{"from": "human", "timestamps": 1184.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.012, 0.034, 0.912, 0.977]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1200.0, "value": "Describe the actions of the person at bbox [0.045, 0.067, 0.959, 0.992] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.045, 0.067, 0.959, 0.992]: sit, talk to (e.g., self, a person, a group).", "From 1196.0s to 1199.0s, [0.053, 0.078, 0.917, 0.974]->[0.034, 0.068, 0.951, 0.989]->[0.053, 0.068, 0.958, 0.981]->[0.038, 0.077, 0.959, 0.984]: sit, talk to (e.g., self, a person, a group).", "From 1194.0s to 1195.0s, [0.037, 0.056, 0.953, 0.98]->[0.023, 0.049, 0.918, 0.974]: sit.", "From 1190.0s to 1193.0s, [0.04, 0.082, 0.939, 0.981]->[0.063, 0.12, 0.961, 0.98]->[0.061, 0.079, 0.966, 0.979]->[0.029, 0.06, 0.95, 0.981]: sit, talk to (e.g., self, a person, a group).", "At 1189.0s, [0.046, 0.061, 0.931, 0.973]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1188.0s, [0.038, 0.06, 0.896, 0.979]: sit, listen to (a person), watch (a person).", "At 1187.0s, [0.0, 0.097, 0.976, 0.995]: sit, talk to (e.g., self, a person, a group).", "At 1186.0s, [0.02, 0.061, 0.958, 0.99]: sit, carry/hold (an object), talk to (e.g., self, a person, a group)."]}]}, {"video": "yo-Kg2YxlZs.mkv", "clip": [1254, 1286], "conversations": [{"from": "human", "timestamps": 1284.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.152, 0.108, 0.411, 0.932]?"}, {"from": "gpt", "value": ["In the current frame, [0.152, 0.108, 0.411, 0.932]: stand, carry/hold (an object), touch (an object), watch (a person).", "At 1283.0s, [0.274, 0.15, 0.41, 0.904]: walk, carry/hold (an object), touch (an object)."]}, {"from": "human", "timestamps": 1286.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.374, 0.027, 0.944, 0.995]: watch 
(a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "yo-Kg2YxlZs.mkv", "clip": [1398, 1431], "conversations": [{"from": "human", "timestamps": 1428.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.25, 0.096, 0.986, 0.979]?"}, {"from": "gpt", "value": ["At Now, [0.25, 0.096, 0.986, 0.979]: sit, listen to (a person).", "At 1427.0s, [0.304, 0.063, 1.0, 0.981]: sit, listen to (a person).", "At 1426.0s, [0.32, 0.056, 0.989, 0.99]: sit, listen to (a person), talk to (e.g., self, a person, a group).", "At 1425.0s, [0.337, 0.073, 0.981, 1.0]: sit, listen to (a person).", "At 1424.0s, [0.319, 0.331, 0.583, 0.981]: sit, touch (an object), listen to (a person).", "At 1423.0s, [0.317, 0.369, 0.57, 0.978]: sit, carry/hold (an object), touch (an object), listen to (a person).", "At 1422.0s, [0.355, 0.357, 0.599, 0.99]: sit, carry/hold (an object), listen to (a person).", "At 1421.0s, [0.352, 0.369, 0.613, 0.992]: sit, read, listen to (a person).", "At 1420.0s, [0.38, 0.35, 0.608, 1.0]: sit, carry/hold (an object), listen to (a person).", "From 1417.0s to 1419.0s, [0.374, 0.347, 0.585, 0.891]->[0.398, 0.345, 0.604, 0.961]->[0.391, 0.34, 0.605, 0.994]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1416.0s, [0.414, 0.353, 0.599, 0.902]: sit, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "At 1415.0s, [0.416, 0.388, 0.6, 0.932]: sit, carry/hold (an object), listen to (a person), watch (a person).", "At 1413.0s, [0.469, 0.404, 0.646, 0.926]: sit, carry/hold (an object), touch (an object), listen to (a person).", "From 1410.0s to 1412.0s, [0.664, 0.404, 0.921, 0.888]->[0.532, 0.421, 0.691, 0.65]->[0.537, 0.389, 0.672, 0.925]: sit, carry/hold (an object), read, listen to (a person)."]}, {"from": "human", "timestamps": 1431.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.017, 0.104, 0.779, 1.0]: watch (a person), talk to (e.g., self, a person, a group), bend/bow (at the waist)."}]}, {"video": "yo-Kg2YxlZs.mkv", "clip": [1380, 1455], "conversations": [{"from": "human", "timestamps": 1410.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.013, 0.325, 0.327, 0.924]: listen to (a person), sit, watch (a person); [0.289, 0.391, 0.525, 0.646]: listen to (a person), sit, watch (a person), talk to (e.g., self, a person, a group); [0.411, 0.341, 0.674, 0.859]: listen to (a person), sit, watch (a person); [0.664, 0.404, 0.921, 0.888]: carry/hold (an object), sit, read, listen to (a person)."}, {"from": "human", "timestamps": 1455.0, "value": "What actions did the person at bbox [0.315, 0.093, 0.979, 0.965] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.315, 0.093, 0.979, 0.965]: sit.", "At 1454.0s, [0.278, 0.089, 0.98, 0.981]: sit.", "At 1453.0s, [0.477, 0.204, 0.999, 0.981]: sit, watch (a person).", "From 1447.0s to 1451.0s, [0.598, 0.588, 0.984, 1.0]->[0.471, 0.505, 0.974, 0.993]->[0.409, 0.38, 0.921, 0.972]->[0.327, 0.28, 0.959, 1.0]->[0.438, 0.326, 0.957, 0.998]: sit."]}]}, {"video": "yo-Kg2YxlZs.mkv", "clip": [1509, 1543], "conversations": [{"from": "human", "timestamps": 1539.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.002, 0.106, 0.385, 0.979]: listen to (a person), sit, watch (a 
person); [0.631, 0.231, 0.952, 0.749]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1543.0, "value": "What actions is the person in bbox [0.003, 0.06, 0.402, 0.964] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.003, 0.06, 0.402, 0.964]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1542.0s, [0.021, 0.094, 0.416, 1.0]: sit, touch (an object), listen to (a person), watch (a person).", "From 1536.0s to 1541.0s, [0.02, 0.104, 0.423, 1.0]->[0.012, 0.104, 0.426, 0.989]->[0.024, 0.103, 0.433, 0.999]->[0.002, 0.106, 0.385, 0.979]->[0.017, 0.096, 0.388, 1.0]->[0.019, 0.102, 0.39, 1.0]: sit, listen to (a person), watch (a person).", "From 1534.0s to 1535.0s, [0.0, 0.111, 0.371, 0.971]->[0.013, 0.106, 0.447, 0.985]: sit, touch (an object), listen to (a person), watch (a person).", "From 1529.0s to 1533.0s, [0.017, 0.11, 0.449, 0.977]->[0.025, 0.1, 0.433, 1.0]->[0.021, 0.103, 0.382, 1.0]->[0.009, 0.097, 0.397, 1.0]->[0.013, 0.11, 0.363, 1.0]: sit, listen to (a person), watch (a person).", "From 1527.0s to 1528.0s, [0.024, 0.099, 0.44, 1.0]->[0.029, 0.103, 0.446, 1.0]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1520.0s to 1526.0s, [0.022, 0.106, 0.43, 1.0]->[0.02, 0.104, 0.429, 1.0]->[0.027, 0.106, 0.475, 1.0]->[0.027, 0.104, 0.475, 1.0]->[0.027, 0.11, 0.458, 0.996]->[0.024, 0.105, 0.458, 1.0]->[0.013, 0.105, 0.475, 0.981]: sit, listen to (a person), watch (a person).", "At 1519.0s, [0.022, 0.106, 0.43, 1.0]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1516.0s to 1518.0s, [0.028, 0.073, 0.522, 1.0]->[0.01, 0.097, 0.429, 1.0]->[0.013, 0.096, 0.444, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1515.0s, [0.009, 0.122, 0.413, 0.974]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1514.0s, [0.019, 0.096, 0.383, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "yo-Kg2YxlZs.mkv", "clip": [1652, 1692], "conversations": [{"from": "human", "timestamps": 1682.0, "value": "What actions is the person in bbox [0.196, 0.05, 0.9, 0.984] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.196, 0.05, 0.9, 0.984]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1680.0s to 1681.0s, [0.216, 0.066, 0.906, 0.989]->[0.214, 0.07, 0.898, 0.985]: sit, talk to (e.g., self, a person, a group).", "At 1678.0s, [0.19, 0.054, 0.903, 0.984]: sit, talk to (e.g., self, a person, a group).", "At 1677.0s, [0.168, 0.039, 0.871, 0.975]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1676.0s, [0.114, 0.052, 0.844, 0.974]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1692.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.191, 0.063, 0.898, 0.98]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "yo-Kg2YxlZs.mkv", "clip": [1715, 1778], "conversations": [{"from": "human", "timestamps": 1745.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.002, 0.208, 0.941]?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.002, 0.208, 0.941]: listen to (a person), watch (a person).", "At 1744.0s, [0.0, 0.003, 0.343, 1.0]: stand, listen to (a 
person), watch (a person)."]}, {"from": "human", "timestamps": 1778.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.472, 0.272, 0.558, 0.615]: carry/hold (an object), open (e.g., a window, a car door), push (another person), walk; [0.542, 0.27, 0.645, 0.593]: carry/hold (an object), push (another person), watch (a person), walk."}]}, {"video": "yqImJuC5UzI.mp4", "clip": [899, 941], "conversations": [{"from": "human", "timestamps": 929.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.124, 0.286, 0.307, 0.982]: listen to (a person), stand, watch (a person); [0.426, 0.005, 0.833, 0.993]: watch (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 941.0, "value": "What actions is the person in bbox [0.647, 0.091, 0.876, 0.961] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.647, 0.091, 0.876, 0.961]: stand, listen to (a person), watch (a person).", "At 940.0s, [0.652, 0.104, 0.873, 0.986]: stand, listen to (a person), watch (a person).", "At 939.0s, [0.663, 0.102, 0.876, 0.976]: stand, listen to (a person)."]}]}, {"video": "yqImJuC5UzI.mp4", "clip": [895, 989], "conversations": [{"from": "human", "timestamps": 925.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.124, 0.093, 0.496, 0.985]: walk."}, {"from": "human", "timestamps": 989.0, "value": "What actions is the person in bbox [0.441, 0.088, 0.845, 0.981] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.441, 0.088, 0.845, 0.981]: walk.", "At 988.0s, [0.52, 0.203, 0.806, 0.97]: walk."]}]}, {"video": "yqImJuC5UzI.mp4", "clip": [1083, 1123], "conversations": [{"from": "human", "timestamps": 1113.0, "value": "Describe the actions of the person at bbox [0.212, 0.14, 0.625, 0.949] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.212, 0.14, 0.625, 0.949]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1112.0s, [0.24, 0.15, 0.611, 0.962]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1111.0s, [0.199, 0.201, 0.62, 0.984]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1123.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.244, 0.1, 0.613, 0.961]: watch (a person), sit."}]}, {"video": "yqImJuC5UzI.mp4", "clip": [1122, 1189], "conversations": [{"from": "human", "timestamps": 1152.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.648, 0.373, 0.873, 0.948]?"}, {"from": "gpt", "value": ["At Now, [0.648, 0.373, 0.873, 0.948]: sit, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1151.0s, [0.628, 0.387, 0.874, 0.97]: sit, touch (an object), watch (a person)."]}, {"from": "human", "timestamps": 1189.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.124, 0.0, 0.736, 0.991]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "yqImJuC5UzI.mp4", "clip": 
[1123, 1246], "conversations": [{"from": "human", "timestamps": 1153.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.316, 0.157, 0.644, 0.9]: watch (a person), sit, touch (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1246.0, "value": "What actions did the person at bbox [0.149, 0.218, 0.526, 0.989] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.149, 0.218, 0.526, 0.989]: stand, listen to (a person).", "At 1243.0s, [0.223, 0.429, 0.38, 0.983]: sit."]}]}, {"video": "yqImJuC5UzI.mp4", "clip": [1306, 1375], "conversations": [{"from": "human", "timestamps": 1336.0, "value": "For the person at bbox [0.2, 0.0, 0.615, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.2, 0.0, 0.615, 1.0]: walk, carry/hold (an object).", "From 1332.0s to 1335.0s, [0.216, 0.034, 0.714, 0.995]->[0.21, 0.134, 0.756, 0.915]->[0.188, 0.038, 0.641, 1.0]->[0.285, 0.0, 0.794, 1.0]: walk, carry/hold (an object).", "From 1330.0s to 1331.0s, [0.41, 0.161, 0.852, 0.971]->[0.434, 0.285, 0.842, 0.979]: walk."]}, {"from": "human", "timestamps": 1375.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.127, 0.175, 0.222, 0.599]: run/jog; [0.142, 0.196, 0.337, 0.765]: sit; [0.267, 0.195, 0.386, 0.723]: watch (a person), stand; [0.369, 0.259, 0.498, 0.702]: run/jog."}]}, {"video": "yqImJuC5UzI.mp4", "clip": [1371, 1468], "conversations": [{"from": "human", "timestamps": 1401.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.138, 0.0, 0.374, 0.964]: walk."}, {"from": "human", "timestamps": 1468.0, "value": "What actions did the person at bbox [0.144, 0.0, 0.57, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.144, 0.0, 0.57, 1.0]: stand, listen to (a person), watch (a person).", "From 1465.0s to 1467.0s, [0.125, 0.003, 0.551, 1.0]->[0.132, 0.0, 0.553, 1.0]->[0.142, 0.0, 0.565, 1.0]: stand, listen to (a person).", "From 1463.0s to 1464.0s, [0.128, 0.02, 0.53, 1.0]->[0.124, 0.0, 0.471, 1.0]: stand.", "At 1462.0s, [0.148, 0.029, 0.693, 0.945]: walk.", "At 1461.0s, [0.196, 0.006, 0.824, 0.969]: stand.", "From 1459.0s to 1460.0s, [0.146, 0.023, 0.772, 0.934]->[0.188, 0.0, 0.756, 0.949]: stand, watch (a person).", "At 1458.0s, [0.16, 0.002, 0.738, 0.979]: stand, listen to (a person), watch (a person)."]}]}, {"video": "yqImJuC5UzI.mp4", "clip": [1472, 1540], "conversations": [{"from": "human", "timestamps": 1502.0, "value": "Describe the actions of the person at bbox [0.352, 0.0, 0.876, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.352, 0.0, 0.876, 1.0]: stand, touch (an object).", "From 1500.0s to 1501.0s, [0.278, 0.03, 0.862, 0.971]->[0.284, 0.054, 0.867, 0.974]: stand, touch (an object)."]}, {"from": "human", "timestamps": 1540.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.135, 0.0, 0.721, 0.979]: stand."}]}, {"video": "yqImJuC5UzI.mp4", "clip": [1538, 1577], "conversations": [{"from": "human", "timestamps": 1568.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.124, 0.0, 0.587, 
0.993]: watch (a person), walk; [0.298, 0.063, 0.751, 0.97]: watch (a person), stand."}, {"from": "human", "timestamps": 1577.0, "value": "What actions is the person in bbox [0.445, 0.117, 0.826, 0.979] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.445, 0.117, 0.826, 0.979]: stand, watch (a person).", "At 1576.0s, [0.582, 0.358, 0.659, 0.654]: stand, carry/hold (an object), watch (a person).", "At 1575.0s, [0.542, 0.53, 0.611, 0.792]: stand, carry/hold (an object)."]}]}, {"video": "yqImJuC5UzI.mp4", "clip": [1660, 1718], "conversations": [{"from": "human", "timestamps": 1690.0, "value": "What actions is the person in bbox [0.319, 0.013, 0.867, 0.987] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.319, 0.013, 0.867, 0.987]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1689.0s, [0.371, 0.0, 0.873, 0.981]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1718.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.169, 0.031, 0.581, 0.832]: sit, listen to (a person), watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "yqImJuC5UzI.mp4", "clip": [1729, 1780], "conversations": [{"from": "human", "timestamps": 1759.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.309, 0.254, 0.5, 0.856]: stand."}, {"from": "human", "timestamps": 1780.0, "value": "What actions is the person in bbox [0.352, 0.029, 0.824, 0.981] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.352, 0.029, 0.824, 0.981]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1778.0s to 1779.0s, [0.445, 0.247, 0.693, 0.966]->[0.428, 0.234, 0.703, 0.983]: stand, listen to (a person), watch (a person).", "From 1770.0s to 1777.0s, [0.408, 0.21, 0.692, 0.977]->[0.386, 0.213, 0.676, 0.98]->[0.467, 0.22, 0.685, 0.99]->[0.429, 0.22, 0.695, 0.986]->[0.41, 0.215, 0.697, 0.978]->[0.429, 0.232, 0.71, 0.982]->[0.467, 0.259, 0.702, 0.989]->[0.422, 0.219, 0.701, 0.975]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "yvgCGJ6vfkY.mkv", "clip": [896, 954], "conversations": [{"from": "human", "timestamps": 926.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.599, 0.339, 0.997, 0.75]?"}, {"from": "gpt", "value": ["In the current frame, [0.599, 0.339, 0.997, 0.75]: sit, touch (an object).", "From 923.0s to 925.0s, [0.576, 0.351, 0.99, 0.772]->[0.569, 0.329, 0.989, 0.744]->[0.564, 0.331, 0.987, 0.734]: sit, touch (an object)."]}, {"from": "human", "timestamps": 954.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.607, 0.212, 0.71, 0.387]: carry/hold (an object), sit."}]}, {"video": "yvgCGJ6vfkY.mkv", "clip": [938, 1015], "conversations": [{"from": "human", "timestamps": 968.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.084, 0.369, 0.412, 0.999]: listen to (a person), stand, watch (a person); [0.495, 0.061, 0.954, 0.986]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 
1015.0, "value": "Describe the actions of the person at bbox [0.214, 0.08, 0.636, 0.955] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.214, 0.08, 0.636, 0.955]: stand, listen to (a person), watch (a person).", "From 1012.0s to 1014.0s, [0.245, 0.063, 0.664, 0.983]->[0.251, 0.082, 0.847, 0.989]->[0.276, 0.075, 0.692, 0.982]: stand, listen to (a person), watch (a person)."]}]}, {"video": "yvgCGJ6vfkY.mkv", "clip": [1005, 1069], "conversations": [{"from": "human", "timestamps": 1035.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.223, 0.236, 0.835, 0.982]: bend/bow (at the waist); [0.703, 0.196, 0.988, 0.914]: martial art, fight/hit (a person)."}, {"from": "human", "timestamps": 1069.0, "value": "Describe the actions of the person at bbox [0.916, 0.486, 0.982, 0.721] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.916, 0.486, 0.982, 0.721]: walk, carry/hold (an object), watch (a person).", "At 1068.0s, [0.929, 0.476, 0.986, 0.715]: walk, carry/hold (an object)."]}]}, {"video": "yvgCGJ6vfkY.mkv", "clip": [1085, 1117], "conversations": [{"from": "human", "timestamps": 1115.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.786, 0.359, 0.93, 0.552]?"}, {"from": "gpt", "value": ["In the current frame, [0.786, 0.359, 0.93, 0.552]: walk, carry/hold (an object).", "From 1113.0s to 1114.0s, [0.788, 0.363, 0.886, 0.551]->[0.763, 0.358, 0.849, 0.536]: stand, carry/hold (an object).", "At 1112.0s, [0.791, 0.368, 0.918, 0.605]: stand, cook.", "At 1110.0s, [0.885, 0.39, 0.992, 0.578]: walk, carry/hold (an object)."]}, {"from": "human", "timestamps": 1117.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.498, 0.585, 0.639, 0.905]: carry/hold (an object), walk."}]}, {"video": "yvgCGJ6vfkY.mkv", "clip": [1111, 1144], "conversations": [{"from": "human", "timestamps": 1141.0, "value": "For the person at bbox [0.059, 0.0, 1.0, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.059, 0.0, 1.0, 1.0]: stand.", "From 1139.0s to 1140.0s, [0.002, 0.032, 1.0, 0.99]->[0.056, 0.0, 0.997, 1.0]: stand.", "At 1138.0s, [0.145, 0.005, 1.0, 0.998]: stand, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1144.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.169, 0.218, 0.716, 0.955]: sit, touch (an object)."}]}, {"video": "yvgCGJ6vfkY.mkv", "clip": [1151, 1252], "conversations": [{"from": "human", "timestamps": 1181.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.049, 0.253, 0.445, 0.957]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.34, 0.166, 0.971, 0.945]: listen to (a person), sit, touch (an object), watch (a person)."}, {"from": "human", "timestamps": 1252.0, "value": "What actions did the person at bbox [0.037, 0.222, 0.161, 0.854] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.037, 0.222, 0.161, 0.854]: walk, carry/hold (an object), fight/hit (a person), watch (a person).", "From 1250.0s to 1251.0s, [0.256, 0.29, 0.45, 0.732]->[0.17, 0.301, 0.401, 0.817]: bend/bow (at the waist), 
carry/hold (an object), fight/hit (a person).", "At 1249.0s, [0.227, 0.169, 0.426, 0.725]: stand, carry/hold (an object), fight/hit (a person)."]}]}, {"video": "yvgCGJ6vfkY.mkv", "clip": [1266, 1319], "conversations": [{"from": "human", "timestamps": 1296.0, "value": "Describe the actions of the person at bbox [0.428, 0.215, 0.757, 0.996] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.428, 0.215, 0.757, 0.996]: stand, fight/hit (a person), watch (a person).", "At 1295.0s, [0.202, 0.266, 0.653, 0.983]: fight/hit (a person), watch (a person), martial art."]}, {"from": "human", "timestamps": 1319.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.088, 0.18, 0.385, 0.987]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "yvgCGJ6vfkY.mkv", "clip": [1271, 1344], "conversations": [{"from": "human", "timestamps": 1301.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.043, 0.083, 0.835, 0.982]: watch (a person), martial art, fight/hit (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1344.0, "value": "For the person at bbox [0.135, 0.089, 0.796, 0.982], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.135, 0.089, 0.796, 0.982]: stand, touch (an object).", "At 1343.0s, [0.055, 0.03, 0.782, 0.977]: bend/bow (at the waist), touch (an object).", "At 1342.0s, [0.346, 0.174, 0.996, 0.993]: bend/bow (at the waist), push (an object).", "At 1341.0s, [0.067, 0.06, 0.571, 0.993]: run/jog.", "At 1340.0s, [0.036, 0.465, 0.608, 0.983]: bend/bow (at the waist), fight/hit (a person).", "From 1338.0s to 1339.0s, [0.062, 0.356, 0.742, 0.994]->[0.036, 0.39, 0.731, 0.996]: bend/bow (at the waist), listen to (a person).", "From 1336.0s to 1337.0s, [0.061, 0.332, 0.789, 0.998]->[0.04, 0.416, 0.749, 0.998]: bend/bow (at the waist), fight/hit (a person), listen to (a person).", "From 1334.0s to 1335.0s, [0.035, 0.341, 0.631, 0.994]->[0.032, 0.185, 0.552, 0.952]: bend/bow (at the waist), listen to (a person).", "At 1333.0s, [0.048, 0.185, 0.344, 0.983]: stand, fight/hit (a person), listen to (a person).", "At 1332.0s, [0.035, 0.229, 0.356, 0.993]: stand, fight/hit (a person).", "At 1331.0s, [0.089, 0.239, 0.364, 1.0]: stand, fight/hit (a person), watch (a person), martial art."]}]}, {"video": "yvgCGJ6vfkY.mkv", "clip": [1373, 1418], "conversations": [{"from": "human", "timestamps": 1403.0, "value": "For the person at bbox [0.022, 0.081, 0.478, 0.976], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.022, 0.081, 0.478, 0.976]: sit, listen to (a person), watch (a person).", "At 1402.0s, [0.02, 0.125, 0.453, 0.995]: sit, listen to (a person), watch (a person).", "At 1401.0s, [0.206, 0.286, 0.704, 0.992]: sit, listen to (a person).", "At 1400.0s, [0.39, 0.125, 0.748, 0.993]: bend/bow (at the waist), listen to (a person), watch (a person).", "At 1399.0s, [0.398, 0.052, 0.611, 0.852]: walk, listen to (a person).", "At 1398.0s, [0.255, 0.048, 0.47, 0.909]: walk, listen to (a person), watch (a person).", "At 1397.0s, [0.172, 0.004, 0.37, 0.939]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1418.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, 
[0.039, 0.064, 0.626, 0.962]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.64, 0.012, 0.994, 1.0]: listen to (a person), stand, watch (a person)."}]}, {"video": "yvgCGJ6vfkY.mkv", "clip": [1377, 1497], "conversations": [{"from": "human", "timestamps": 1407.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.025, 0.048, 0.59, 0.98]: listen to (a person), sit, watch (a person); [0.716, 0.0, 0.998, 0.99]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1497.0, "value": "For the person at bbox [0.284, 0.01, 0.913, 0.989], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.284, 0.01, 0.913, 0.989]: talk to (e.g., self, a person, a group), martial art.", "From 1493.0s to 1496.0s, [0.24, 0.0, 0.816, 0.971]->[0.136, 0.043, 0.786, 0.966]->[0.123, 0.013, 0.712, 1.0]->[0.043, 0.0, 0.721, 0.986]: martial art.", "At 1492.0s, [0.053, 0.0, 0.456, 1.0]: talk to (e.g., self, a person, a group), martial art.", "From 1484.0s to 1491.0s, [0.412, 0.207, 0.773, 0.999]->[0.434, 0.11, 0.963, 0.968]->[0.353, 0.094, 0.825, 0.955]->[0.057, 0.025, 0.592, 0.967]->[0.122, 0.053, 0.645, 0.993]->[0.023, 0.041, 0.391, 1.0]->[0.086, 0.0, 0.579, 0.968]->[0.037, 0.19, 0.302, 0.969]: martial art.", "At 1483.0s, [0.266, 0.163, 0.505, 0.988]: walk, talk to (e.g., self, a person, a group).", "At 1482.0s, [0.224, 0.129, 0.471, 0.975]: stand, listen to (a person), watch (a person).", "At 1481.0s, [0.256, 0.084, 0.504, 0.992]: stand, listen to (a person).", "At 1480.0s, [0.198, 0.036, 0.435, 0.969]: walk, listen to (a person), watch (a person).", "At 1479.0s, [0.245, 0.038, 0.453, 0.939]: walk, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "yvgCGJ6vfkY.mkv", "clip": [1500, 1542], "conversations": [{"from": "human", "timestamps": 1530.0, "value": "Describe the actions of the person at bbox [0.579, 0.076, 0.843, 0.97] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.579, 0.076, 0.843, 0.97]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1527.0s to 1529.0s, [0.559, 0.081, 0.796, 0.975]->[0.571, 0.07, 0.802, 0.978]->[0.604, 0.073, 0.818, 0.989]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1526.0s, [0.551, 0.093, 0.802, 0.967]: stand, carry/hold (an object), listen to (a person).", "At 1525.0s, [0.518, 0.093, 0.813, 0.986]: stand, carry/hold (an object), listen to (a person), take (an object) from (a person).", "At 1524.0s, [0.622, 0.09, 0.852, 0.982]: stand, listen to (a person), watch (a person).", "At 1523.0s, [0.595, 0.116, 0.863, 0.978]: stand, listen to (a person).", "At 1522.0s, [0.618, 0.133, 0.876, 0.982]: stand, listen to (a person), watch (a person).", "At 1521.0s, [0.655, 0.144, 0.87, 0.983]: listen to (a person), watch (a person).", "At 1520.0s, [0.665, 0.141, 0.906, 0.979]: stand, listen to (a person), watch (a person).", "From 1517.0s to 1519.0s, [0.791, 0.078, 0.994, 0.976]->[0.69, 0.123, 0.914, 0.972]->[0.649, 0.137, 0.891, 0.984]: walk, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1542.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.031, 0.221, 0.384, 1.0]: carry/hold (an object), stand, watch (a person), talk to 
(e.g., self, a person, a group)."}]}, {"video": "yvgCGJ6vfkY.mkv", "clip": [1551, 1599], "conversations": [{"from": "human", "timestamps": 1581.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.111, 0.092, 0.773, 0.998]: carry/hold (an object), sit, watch (a person), listen to (a person)."}, {"from": "human", "timestamps": 1599.0, "value": "What actions did the person at bbox [0.486, 0.362, 0.854, 0.987] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.486, 0.362, 0.854, 0.987]: sit, carry/hold (an object), listen to (a person).", "At 1598.0s, [0.496, 0.177, 0.726, 0.925]: bend/bow (at the waist), carry/hold (an object), listen to (a person), watch (a person).", "At 1597.0s, [0.789, 0.004, 0.994, 0.941]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1593.0s to 1596.0s, [0.674, 0.132, 0.932, 1.0]->[0.631, 0.187, 0.823, 0.944]->[0.561, 0.143, 0.707, 0.711]->[0.512, 0.152, 0.634, 0.609]: walk, carry/hold (an object), watch (a person)."]}]}, {"video": "yvgCGJ6vfkY.mkv", "clip": [1657, 1707], "conversations": [{"from": "human", "timestamps": 1687.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.029, 0.199, 0.682, 0.99]: watch (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1707.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.667, 0.103, 0.997, 0.988]?"}, {"from": "gpt", "value": ["In the current frame, [0.667, 0.103, 0.997, 0.988]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1704.0s to 1706.0s, [0.597, 0.038, 0.985, 0.97]->[0.638, 0.098, 1.0, 0.973]->[0.672, 0.076, 0.991, 0.99]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1703.0s, [0.677, 0.039, 0.995, 0.979]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1700.0s to 1702.0s, [0.667, 0.077, 1.0, 1.0]->[0.679, 0.068, 0.994, 0.97]->[0.671, 0.059, 0.994, 0.947]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1698.0s to 1699.0s, [0.672, 0.054, 0.994, 0.991]->[0.635, 0.045, 0.984, 0.973]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1693.0s to 1697.0s, [0.844, 0.112, 0.999, 0.666]->[0.822, 0.116, 0.993, 0.981]->[0.821, 0.095, 0.998, 0.993]->[0.821, 0.092, 0.995, 0.948]->[0.824, 0.095, 0.994, 0.943]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "z3kgrh0L_80.mkv", "clip": [884, 938], "conversations": [{"from": "human", "timestamps": 914.0, "value": "For the person at bbox [0.127, 0.006, 0.985, 1.0], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.127, 0.006, 0.985, 1.0]: stand, talk to (e.g., self, a person, a group).", "From 912.0s to 913.0s, [0.19, 0.0, 0.987, 0.994]->[0.275, 0.0, 1.0, 0.987]: stand, talk to (e.g., self, a person, a group).", "At 911.0s, [0.207, 0.005, 0.99, 0.983]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 908.0s to 910.0s, [0.165, 0.0, 0.998, 0.995]->[0.206, 0.0, 0.981, 0.983]->[0.175, 0.009, 1.0, 0.989]: stand, talk to (e.g., self, a person, a group).", "From 903.0s to 907.0s, [0.206, 0.0, 1.0, 0.986]->[0.867, 0.038, 1.0, 0.983]->[0.784, 0.02, 1.0, 
1.0]->[0.784, 0.045, 1.0, 1.0]->[0.195, 0.0, 0.987, 0.964]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 902.0s, [0.116, 0.008, 0.883, 0.982]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 938.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.064, 0.0, 0.879, 0.989]: listen to (a person), sit, watch (a person)."}]}, {"video": "z3kgrh0L_80.mkv", "clip": [973, 1011], "conversations": [{"from": "human", "timestamps": 1003.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.001, 0.0, 0.643, 0.975]: watch (a person), sit, eat, talk to (e.g., self, a person, a group); [0.329, 0.119, 0.843, 0.995]: listen to (a person), sit, eat, watch (a person)."}, {"from": "human", "timestamps": 1011.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.282, 0.084, 0.842, 0.921]?"}, {"from": "gpt", "value": ["At Now, [0.282, 0.084, 0.842, 0.921]: sit, eat, listen to (a person), talk to (e.g., self, a person, a group).", "At 1010.0s, [0.292, 0.063, 0.833, 0.954]: sit, eat, listen to (a person).", "At 1009.0s, [0.332, 0.042, 0.85, 0.963]: sit, eat."]}]}, {"video": "z3kgrh0L_80.mkv", "clip": [1013, 1059], "conversations": [{"from": "human", "timestamps": 1043.0, "value": "What actions did the person at bbox [0.014, 0.112, 0.643, 0.981] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.014, 0.112, 0.643, 0.981]: stand, listen to (a person), watch (a person).", "At 1042.0s, [0.006, 0.087, 0.628, 0.999]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1041.0s, [0.013, 0.0, 0.477, 0.991]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1059.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.019, 0.002, 0.731, 0.989]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "z3kgrh0L_80.mkv", "clip": [1047, 1108], "conversations": [{"from": "human", "timestamps": 1077.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.145, 0.015, 0.773, 0.98]: sit, listen to (a person), carry/hold (an object), talk to (e.g., self, a person, a group), take (an object) from (a person); [0.722, 0.014, 1.0, 1.0]: sit, watch (a person), give/serve (an object) to (a person), carry/hold (an object), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1108.0, "value": "Describe the actions of the person at bbox [0.258, 0.193, 0.91, 0.995] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.258, 0.193, 0.91, 0.995]: sit, talk to (e.g., self, a person, a group).", "At 1107.0s, [0.149, 0.09, 0.709, 0.979]: sit, talk to (e.g., self, a person, a group).", "From 1105.0s to 1106.0s, [0.16, 0.154, 0.735, 1.0]->[0.172, 0.114, 0.762, 0.995]: sit, touch (an object).", "From 1103.0s to 1104.0s, [0.149, 0.042, 0.772, 0.991]->[0.166, 0.117, 0.736, 0.962]: sit, talk to (e.g., self, a person, a group).", "From 1101.0s to 1102.0s, [0.143, 0.043, 0.768, 0.956]->[0.148, 0.039, 0.741, 0.955]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1100.0s, [0.119, 0.043, 0.711, 0.975]: sit, talk to (e.g., self, a person, a group).", "At 
1099.0s, [0.106, 0.098, 0.678, 0.988]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1098.0s, [0.097, 0.068, 0.776, 0.968]: sit, talk to (e.g., self, a person, a group).", "From 1094.0s to 1097.0s, [0.002, 0.017, 0.267, 0.94]->[0.104, 0.008, 0.73, 0.974]->[0.084, 0.009, 0.691, 0.967]->[0.086, 0.028, 0.683, 0.965]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1093.0s, [0.0, 0.0, 0.321, 0.995]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1092.0s, [0.0, 0.021, 0.38, 0.991]: sit, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1091.0s, [0.135, 0.031, 0.687, 0.977]: sit, carry/hold (an object), eat, talk to (e.g., self, a person, a group).", "At 1090.0s, [0.15, 0.026, 0.701, 0.931]: sit, carry/hold (an object), eat, talk to (e.g., self, a person, a group), watch (a person).", "At 1089.0s, [0.132, 0.022, 0.729, 0.963]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "z3kgrh0L_80.mkv", "clip": [1084, 1134], "conversations": [{"from": "human", "timestamps": 1114.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.205, 0.017, 0.85, 0.987]: watch (a person), stand."}, {"from": "human", "timestamps": 1134.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.0, 0.185, 0.316, 0.995]?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.185, 0.316, 0.995]: stand, listen to (a person), watch (a person).", "At 1133.0s, [0.055, 0.161, 0.522, 0.987]: stand, listen to (a person), watch (a person).", "At 1132.0s, [0.052, 0.167, 0.456, 0.991]: stand, listen to (a person).", "From 1130.0s to 1131.0s, [0.05, 0.147, 0.424, 0.991]->[0.061, 0.154, 0.468, 0.985]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1129.0s, [0.014, 0.159, 0.435, 0.987]: stand, listen to (a person), watch (a person).", "From 1125.0s to 1128.0s, [0.0, 0.205, 0.182, 0.963]->[0.043, 0.138, 0.421, 0.991]->[0.025, 0.17, 0.431, 0.984]->[0.052, 0.193, 0.474, 0.984]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "z3kgrh0L_80.mkv", "clip": [1156, 1188], "conversations": [{"from": "human", "timestamps": 1186.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.069, 0.262, 0.359, 0.983]: listen to (a person), stand, grab (a person), watch (a person); [0.322, 0.262, 0.569, 0.977]: stand, fight/hit (a person), talk to (e.g., self, a person, a group); [0.511, 0.049, 1.0, 0.912]: listen to (a person), watch (a person), walk; [0.52, 0.267, 0.731, 0.809]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1188.0, "value": "What actions did the person at bbox [0.0, 0.116, 0.352, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.0, 0.116, 0.352, 1.0]: stand, grab (a person), listen to (a person), watch (a person).", "At 1187.0s, [0.011, 0.179, 0.474, 0.966]: stand, grab (a person), listen to (a person), watch (a person)."]}]}, {"video": "z3kgrh0L_80.mkv", "clip": [1134, 1221], "conversations": [{"from": "human", "timestamps": 1164.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.044, 0.453, 0.963]: listen to (a person), 
sit, watch (a person); [0.329, 0.022, 0.894, 0.946]: ride (e.g., a bike, a car, a horse), sit, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1221.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.001, 0.042, 0.488, 0.99]?"}, {"from": "gpt", "value": ["At Now, [0.001, 0.042, 0.488, 0.99]: stand, listen to (a person), watch (a person).", "At 1220.0s, [0.006, 0.0, 0.46, 0.998]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1219.0s, [0.0, 0.013, 0.296, 0.996]: stand, talk to (e.g., self, a person, a group).", "From 1217.0s to 1218.0s, [0.355, 0.053, 1.0, 0.965]->[0.019, 0.02, 0.544, 0.877]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1216.0s, [0.374, 0.054, 1.0, 0.987]: stand, talk to (e.g., self, a person, a group)."]}]}, {"video": "z3kgrh0L_80.mkv", "clip": [1187, 1239], "conversations": [{"from": "human", "timestamps": 1217.0, "value": "Describe the actions of the person at bbox [0.003, 0.0, 0.521, 0.976] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.003, 0.0, 0.521, 0.976]: stand, listen to (a person), watch (a person).", "At 1216.0s, [0.0, 0.0, 0.526, 0.972]: stand, listen to (a person), watch (a person).", "From 1214.0s to 1215.0s, [0.019, 0.01, 0.538, 1.0]->[0.001, 0.002, 0.55, 0.995]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1212.0s to 1213.0s, [0.001, 0.0, 0.329, 0.995]->[0.0, 0.031, 0.481, 0.981]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1239.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.038, 0.669, 0.987]: listen to (a person), stand, watch (a person); [0.627, 0.009, 1.0, 0.962]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "z3kgrh0L_80.mkv", "clip": [1236, 1346], "conversations": [{"from": "human", "timestamps": 1266.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.009, 0.051, 0.499, 0.975]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.42, 0.06, 0.995, 0.905]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1346.0, "value": "For the person at bbox [0.249, 0.023, 0.862, 0.948], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.249, 0.023, 0.862, 0.948]: stand, talk to (e.g., self, a person, a group).", "At 1345.0s, [0.248, 0.027, 0.86, 0.959]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "z3kgrh0L_80.mkv", "clip": [1364, 1491], "conversations": [{"from": "human", "timestamps": 1394.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.095, 0.055, 0.577, 0.983]: listen to (a person), stand, watch (a person), talk to (e.g., self, a person, a group); [0.496, 0.15, 1.0, 1.0]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1491.0, "value": "What actions is the person in bbox [0.735, 0.0, 1.0, 0.971] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.735, 0.0, 1.0, 0.971]: stand, carry/hold (an object), listen to (a person), 
watch (a person).", "At 1490.0s, [0.722, 0.0, 0.994, 0.993]: stand, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "z3kgrh0L_80.mkv", "clip": [1490, 1556], "conversations": [{"from": "human", "timestamps": 1520.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.071, 0.013, 0.638, 0.983]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1556.0, "value": "What actions is the person in bbox [0.012, 0.082, 0.765, 0.917] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.012, 0.082, 0.765, 0.917]: sit, drink, touch (an object).", "At 1555.0s, [0.026, 0.083, 0.737, 0.943]: sit, drink, touch (an object).", "From 1550.0s to 1554.0s, [0.019, 0.122, 0.633, 0.957]->[0.038, 0.141, 0.755, 0.969]->[0.02, 0.158, 0.774, 0.969]->[0.023, 0.149, 0.742, 0.986]->[0.021, 0.124, 0.754, 0.94]: sit, eat, touch (an object).", "From 1546.0s to 1549.0s, [0.023, 0.072, 0.768, 0.94]->[0.029, 0.098, 0.75, 0.938]->[0.056, 0.086, 0.75, 0.919]->[0.014, 0.108, 0.771, 0.941]: sit, eat, touch (an object), talk to (e.g., self, a person, a group).", "From 1538.0s to 1545.0s, [0.012, 0.057, 0.617, 0.895]->[0.019, 0.067, 0.749, 0.938]->[0.0, 0.057, 0.766, 0.925]->[0.027, 0.079, 0.757, 0.987]->[0.014, 0.079, 0.788, 0.958]->[0.028, 0.104, 0.765, 0.933]->[0.116, 0.082, 0.784, 0.924]->[0.014, 0.093, 0.79, 0.934]: sit, eat, touch (an object)."]}]}, {"video": "z3kgrh0L_80.mkv", "clip": [1535, 1566], "conversations": [{"from": "human", "timestamps": 1565.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.092, 0.232, 0.748, 0.991]: sit, eat, touch (an object), talk to (e.g., self, a person, a group); [0.424, 0.0, 0.997, 0.995]: listen to (a person), touch (an object), watch (a person), bend/bow (at the waist)."}, {"from": "human", "timestamps": 1566.0, "value": "Describe the actions of the person at bbox [0.451, 0.012, 1.0, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.451, 0.012, 1.0, 1.0]: bend/bow (at the waist), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1563.0s to 1565.0s, [0.423, 0.018, 0.873, 1.0]->[0.462, 0.0, 0.958, 1.0]->[0.424, 0.0, 0.997, 0.995]: bend/bow (at the waist), touch (an object), listen to (a person), watch (a person).", "At 1562.0s, [0.461, 0.052, 1.0, 1.0]: bend/bow (at the waist), touch (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1561.0s, [0.434, 0.026, 0.984, 1.0]: bend/bow (at the waist), touch (an object), listen to (a person), watch (a person).", "From 1558.0s to 1560.0s, [0.445, 0.0, 0.975, 1.0]->[0.474, 0.033, 0.995, 1.0]->[0.461, 0.014, 0.987, 0.998]: bend/bow (at the waist), touch (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "z3kgrh0L_80.mkv", "clip": [1638, 1681], "conversations": [{"from": "human", "timestamps": 1668.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.134, 0.039, 0.643, 0.998]: carry/hold (an object), stand, talk to (e.g., self, a person, a group), listen to (a person); [0.79, 0.022, 1.0, 0.973]: listen to (a person), stand, watch (a 
person)."}, {"from": "human", "timestamps": 1681.0, "value": "Describe the actions of the person at bbox [0.141, 0.089, 0.874, 0.98] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.141, 0.089, 0.874, 0.98]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1679.0s to 1680.0s, [0.761, 0.023, 1.0, 0.967]->[0.758, 0.003, 1.0, 0.975]: stand, listen to (a person), watch (a person).", "At 1678.0s, [0.822, 0.012, 1.0, 0.963]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1677.0s, [0.815, 0.007, 1.0, 0.977]: stand, listen to (a person), watch (a person).", "From 1675.0s to 1676.0s, [0.134, 0.029, 0.88, 0.981]->[0.148, 0.014, 0.893, 0.963]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1671.0s to 1674.0s, [0.829, 0.018, 1.0, 0.98]->[0.724, 0.012, 0.997, 0.981]->[0.818, 0.005, 1.0, 0.984]->[0.803, 0.02, 0.999, 0.972]: stand, listen to (a person), watch (a person).", "At 1670.0s, [0.794, 0.009, 1.0, 0.989]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1669.0s, [0.751, 0.024, 1.0, 0.998]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1668.0s, [0.79, 0.022, 1.0, 0.973]: stand, listen to (a person), watch (a person).", "From 1666.0s to 1667.0s, [0.099, 0.006, 0.785, 0.983]->[0.238, 0.004, 0.953, 0.986]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1665.0s, [0.182, 0.071, 0.855, 0.995]: stand, talk to (e.g., self, a person, a group).", "At 1663.0s, [0.838, 0.014, 1.0, 0.967]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "From 1659.0s to 1662.0s, [0.138, 0.075, 0.822, 0.965]->[0.158, 0.084, 0.848, 0.977]->[0.312, 0.0, 1.0, 0.99]->[0.749, 0.04, 1.0, 0.974]: stand, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "z3kgrh0L_80.mkv", "clip": [1733, 1789], "conversations": [{"from": "human", "timestamps": 1763.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.131, 0.0, 0.975, 0.986]?"}, {"from": "gpt", "value": ["In the current frame, [0.131, 0.0, 0.975, 0.986]: stand, answer phone.", "From 1758.0s to 1762.0s, [0.05, 0.0, 0.95, 0.981]->[0.389, 0.0, 1.0, 0.969]->[0.021, 0.013, 0.724, 0.961]->[0.066, 0.007, 0.982, 0.977]->[0.01, 0.001, 1.0, 0.946]: stand, answer phone."]}, {"from": "human", "timestamps": 1789.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.161, 0.0, 0.798, 0.968]: walk."}]}, {"video": "zG7mx8KiavA.mp4", "clip": [891, 955], "conversations": [{"from": "human", "timestamps": 921.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.035, 0.154, 0.41, 0.983]: listen (e.g., to music), stand; [0.253, 0.133, 0.923, 0.981]: dance, play musical instrument."}, {"from": "human", "timestamps": 955.0, "value": "What actions is the person in bbox [0.0, 0.1, 0.358, 0.998] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.1, 0.358, 0.998]: listen (e.g., to music), dance, listen to (a person).", "At 954.0s, [0.054, 0.105, 0.464, 0.928]: listen (e.g., to music), dance, listen to (a person), sing to (e.g., self, a person, a group).", "At 953.0s, [0.005, 0.122, 0.372, 0.977]: dance, sing to (e.g., self, a person, a 
group).", "At 952.0s, [0.07, 0.122, 0.47, 0.844]: dance, listen to (a person), sing to (e.g., self, a person, a group).", "From 950.0s to 951.0s, [0.065, 0.099, 0.47, 0.929]->[0.0, 0.116, 0.372, 1.0]: listen (e.g., to music), dance, listen to (a person).", "From 948.0s to 949.0s, [0.061, 0.098, 0.469, 0.975]->[0.006, 0.111, 0.366, 1.0]: stand, listen (e.g., to music), listen to (a person).", "At 947.0s, [0.0, 0.111, 0.397, 0.992]: listen (e.g., to music), dance, listen to (a person).", "At 946.0s, [0.036, 0.102, 0.434, 0.991]: stand, listen (e.g., to music).", "From 944.0s to 945.0s, [0.0, 0.09, 0.448, 0.981]->[0.0, 0.102, 0.409, 0.981]: stand, listen (e.g., to music), dance.", "At 943.0s, [0.0, 0.102, 0.423, 0.981]: listen (e.g., to music), dance.", "At 942.0s, [0.0, 0.103, 0.393, 0.99]: listen (e.g., to music), dance, listen to (a person).", "From 940.0s to 941.0s, [0.0, 0.112, 0.395, 0.989]->[0.015, 0.106, 0.457, 0.983]: listen (e.g., to music), dance.", "At 939.0s, [0.0, 0.096, 0.388, 0.994]: stand, listen (e.g., to music), watch (a person).", "At 938.0s, [0.0, 0.116, 0.39, 0.987]: stand, listen (e.g., to music), sing to (e.g., self, a person, a group).", "At 937.0s, [0.061, 0.08, 0.432, 0.977]: listen (e.g., to music), dance, sing to (e.g., self, a person, a group).", "At 936.0s, [0.0, 0.137, 0.387, 0.998]: listen (e.g., to music), dance, listen to (a person), sing to (e.g., self, a person, a group).", "At 935.0s, [0.119, 0.1, 0.487, 0.976]: listen (e.g., to music), dance, listen to (a person).", "At 934.0s, [0.0, 0.129, 0.368, 0.982]: stand, listen (e.g., to music), dance, listen to (a person).", "From 932.0s to 933.0s, [0.021, 0.122, 0.42, 0.991]->[0.055, 0.133, 0.461, 0.982]: listen (e.g., to music), dance, listen to (a person).", "At 931.0s, [0.023, 0.123, 0.421, 0.935]: stand, listen (e.g., to music), dance, listen to (a person), sing to (e.g., self, a person, a group).", "At 930.0s, [0.074, 0.129, 0.476, 0.965]: stand, listen (e.g., to music), sing to (e.g., self, a person, a group).", "At 929.0s, [0.0, 0.135, 0.398, 0.993]: dance, listen to (a person), sing to (e.g., self, a person, a group).", "At 928.0s, [0.085, 0.127, 0.473, 0.955]: stand, listen to (a person).", "At 927.0s, [0.001, 0.135, 0.381, 0.979]: listen (e.g., to music), dance, listen to (a person).", "From 925.0s to 926.0s, [0.016, 0.135, 0.394, 0.982]->[0.103, 0.12, 0.511, 0.959]: dance, listen to (a person), watch (a person).", "At 924.0s, [0.051, 0.128, 0.467, 0.966]: listen (e.g., to music), dance, touch (an object), listen to (a person), watch (a person).", "At 923.0s, [0.028, 0.138, 0.401, 0.982]: stand, listen (e.g., to music), listen to (a person).", "From 920.0s to 922.0s, [0.095, 0.144, 0.472, 0.95]->[0.035, 0.154, 0.41, 0.983]->[0.096, 0.129, 0.496, 0.969]: stand, listen (e.g., to music).", "At 919.0s, [0.014, 0.163, 0.398, 1.0]: listen (e.g., to music), dance.", "At 918.0s, [0.059, 0.13, 0.467, 0.976]: dance, listen to (a person).", "At 917.0s, [0.0, 0.135, 0.4, 0.989]: stand.", "From 915.0s to 916.0s, [0.0, 0.159, 0.456, 0.992]->[0.0, 0.173, 0.407, 1.0]: stand, listen to (a person).", "At 914.0s, [0.0, 0.158, 0.402, 0.996]: dance, listen to (a person).", "At 913.0s, [0.031, 0.153, 0.467, 0.978]: dance, listen to (a person), sing to (e.g., self, a person, a group).", "At 912.0s, [0.0, 0.155, 0.42, 0.982]: stand, listen to (a person), sing to (e.g., self, a person, a group).", "From 910.0s to 911.0s, [0.006, 0.154, 0.449, 0.972]->[0.0, 0.164, 0.396, 0.981]: stand, listen to (a person).", "From 907.0s to 
909.0s, [0.0, 0.152, 0.397, 0.94]->[0.03, 0.151, 0.475, 0.932]->[0.0, 0.16, 0.377, 0.938]: stand, listen (e.g., to music), listen to (a person).", "At 906.0s, [0.036, 0.153, 0.464, 0.91]: stand, listen (e.g., to music), dance.", "From 904.0s to 905.0s, [0.019, 0.158, 0.463, 0.98]->[0.0, 0.143, 0.416, 0.957]: stand, listen (e.g., to music).", "At 903.0s, [0.0, 0.137, 0.403, 0.981]: listen (e.g., to music), dance.", "At 902.0s, [0.001, 0.147, 0.45, 0.997]: stand, listen (e.g., to music)."]}]}, {"video": "zG7mx8KiavA.mp4", "clip": [876, 984], "conversations": [{"from": "human", "timestamps": 906.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.036, 0.153, 0.464, 0.91]: listen (e.g., to music), stand, dance; [0.324, 0.12, 0.982, 0.977]: stand, sing to (e.g., self, a person, a group), dance, play musical instrument."}, {"from": "human", "timestamps": 984.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.402, 0.154, 0.954, 0.955]?"}, {"from": "gpt", "value": ["At Now, [0.402, 0.154, 0.954, 0.955]: stand, play musical instrument, listen to (a person).", "From 982.0s to 983.0s, [0.394, 0.132, 0.95, 0.973]->[0.327, 0.093, 0.97, 0.975]: stand, play musical instrument, listen to (a person).", "From 978.0s to 981.0s, [0.402, 0.165, 0.966, 0.994]->[0.299, 0.163, 0.863, 0.98]->[0.366, 0.17, 0.987, 0.957]->[0.276, 0.125, 0.9, 0.987]: dance, play musical instrument, listen to (a person).", "From 975.0s to 977.0s, [0.275, 0.141, 0.818, 0.991]->[0.376, 0.129, 0.942, 0.99]->[0.273, 0.14, 0.828, 0.99]: dance, play musical instrument.", "At 974.0s, [0.334, 0.133, 0.942, 0.983]: listen (e.g., to music), dance, play musical instrument, listen to (a person).", "At 973.0s, [0.264, 0.152, 0.855, 0.984]: stand, play musical instrument, listen to (a person), sing to (e.g., self, a person, a group).", "At 972.0s, [0.285, 0.133, 0.82, 0.983]: stand, dance, play musical instrument, sing to (e.g., self, a person, a group).", "From 967.0s to 971.0s, [0.376, 0.094, 0.948, 0.964]->[0.236, 0.108, 0.84, 0.974]->[0.36, 0.11, 0.921, 0.99]->[0.282, 0.123, 0.859, 0.979]->[0.261, 0.129, 0.888, 1.0]: stand, play musical instrument, sing to (e.g., self, a person, a group).", "From 964.0s to 966.0s, [0.218, 0.104, 0.859, 0.977]->[0.371, 0.106, 0.954, 0.994]->[0.243, 0.104, 0.875, 0.981]: dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 963.0s, [0.375, 0.075, 0.913, 0.969]: stand, play musical instrument, sing to (e.g., self, a person, a group).", "At 962.0s, [0.253, 0.085, 0.869, 0.957]: stand, dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 961.0s, [0.336, 0.091, 0.913, 0.955]: stand, play musical instrument, listen to (a person), sing to (e.g., self, a person, a group).", "At 960.0s, [0.254, 0.08, 0.892, 0.985]: stand, dance, play musical instrument, listen to (a person).", "At 959.0s, [0.337, 0.074, 0.905, 0.973]: stand, dance, play musical instrument, sing to (e.g., self, a person, a group).", "From 957.0s to 958.0s, [0.239, 0.097, 0.833, 0.983]->[0.343, 0.075, 0.898, 0.963]: stand, play musical instrument, sing to (e.g., self, a person, a group).", "At 956.0s, [0.341, 0.074, 0.902, 0.986]: dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 955.0s, [0.24, 0.079, 0.85, 0.976]: stand, play musical instrument, sing to (e.g., self, a person, a group).", "From 953.0s to 954.0s, [0.252, 0.077, 0.862, 
0.996]->[0.358, 0.062, 0.965, 0.955]: stand, play musical instrument, listen to (a person).", "At 952.0s, [0.381, 0.09, 0.939, 0.956]: dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 951.0s, [0.276, 0.081, 0.846, 0.995]: dance, play musical instrument, listen to (a person), sing to (e.g., self, a person, a group).", "From 948.0s to 950.0s, [0.351, 0.072, 0.943, 0.972]->[0.262, 0.07, 0.907, 0.969]->[0.382, 0.065, 0.957, 0.984]: dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 947.0s, [0.306, 0.061, 0.901, 0.961]: stand, play musical instrument, sing to (e.g., self, a person, a group).", "At 946.0s, [0.368, 0.057, 0.943, 0.958]: stand, play musical instrument.", "At 945.0s, [0.326, 0.088, 0.931, 0.987]: dance, play musical instrument.", "From 942.0s to 944.0s, [0.254, 0.07, 0.935, 0.965]->[0.338, 0.078, 0.93, 0.961]->[0.324, 0.065, 0.925, 0.97]: stand, play musical instrument.", "At 941.0s, [0.353, 0.071, 0.929, 0.949]: dance, play musical instrument.", "From 939.0s to 940.0s, [0.3, 0.092, 0.923, 0.963]->[0.288, 0.077, 0.929, 0.968]: stand, play musical instrument.", "At 938.0s, [0.212, 0.09, 0.927, 0.978]: dance, play musical instrument.", "At 937.0s, [0.369, 0.075, 0.977, 0.961]: dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 936.0s, [0.255, 0.1, 0.902, 0.974]: stand, play musical instrument, listen to (a person), sing to (e.g., self, a person, a group).", "At 935.0s, [0.353, 0.104, 0.965, 0.957]: dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 934.0s, [0.277, 0.096, 0.92, 1.0]: stand, dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 933.0s, [0.39, 0.078, 0.972, 0.981]: dance, play musical instrument, sing to (e.g., self, a person, a group), talk to (e.g., self, a person, a group).", "At 932.0s, [0.277, 0.095, 0.938, 0.984]: dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 931.0s, [0.316, 0.071, 0.944, 1.0]: stand, play musical instrument, listen to (a person).", "At 930.0s, [0.377, 0.071, 0.975, 0.957]: dance, play musical instrument, listen to (a person), sing to (e.g., self, a person, a group).", "At 929.0s, [0.241, 0.085, 0.918, 0.988]: stand, play musical instrument, sing to (e.g., self, a person, a group).", "At 928.0s, [0.378, 0.089, 0.952, 0.96]: dance, play musical instrument, listen to (a person), sing to (e.g., self, a person, a group).", "At 927.0s, [0.282, 0.098, 0.923, 0.975]: dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 926.0s, [0.383, 0.086, 0.988, 0.985]: stand, play musical instrument, sing to (e.g., self, a person, a group).", "From 924.0s to 925.0s, [0.351, 0.098, 0.942, 0.965]->[0.27, 0.096, 0.907, 0.975]: dance, play musical instrument, sing to (e.g., self, a person, a group).", "From 922.0s to 923.0s, [0.354, 0.113, 0.979, 0.973]->[0.279, 0.097, 0.931, 0.964]: stand, play musical instrument.", "From 919.0s to 921.0s, [0.248, 0.124, 0.905, 0.96]->[0.37, 0.107, 0.963, 0.963]->[0.253, 0.133, 0.923, 0.981]: dance, play musical instrument.", "At 918.0s, [0.385, 0.103, 0.944, 0.981]: stand, play musical instrument.", "At 917.0s, [0.308, 0.114, 0.925, 0.982]: stand, play musical instrument, listen to (a person).", "At 916.0s, [0.34, 0.147, 0.965, 0.987]: stand, carry/hold (an object), play musical instrument.", "At 915.0s, [0.325, 0.148, 0.943, 1.0]: stand, play musical instrument.", "From 911.0s to 914.0s, [0.262, 0.143, 0.892, 0.995]->[0.328, 0.128, 0.952, 
1.0]->[0.369, 0.113, 0.949, 0.994]->[0.261, 0.15, 0.953, 1.0]: dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 910.0s, [0.383, 0.131, 0.974, 0.988]: stand, play musical instrument, sing to (e.g., self, a person, a group).", "From 908.0s to 909.0s, [0.348, 0.109, 0.978, 0.978]->[0.225, 0.137, 0.893, 0.994]: dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 907.0s, [0.227, 0.119, 0.893, 0.98]: stand, play musical instrument, sing to (e.g., self, a person, a group).", "At 906.0s, [0.324, 0.12, 0.982, 0.977]: stand, dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 905.0s, [0.249, 0.13, 0.891, 0.972]: stand, play musical instrument.", "At 904.0s, [0.344, 0.113, 0.978, 0.993]: listen (e.g., to music), play musical instrument.", "At 903.0s, [0.284, 0.118, 0.872, 0.983]: dance, play musical instrument.", "At 902.0s, [0.314, 0.111, 0.925, 0.985]: stand, play musical instrument."]}]}, {"video": "zG7mx8KiavA.mp4", "clip": [970, 1019], "conversations": [{"from": "human", "timestamps": 1000.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.093, 0.166, 0.55, 0.944]: listen to (a person), dance; [0.439, 0.14, 0.99, 0.98]: stand, sing to (e.g., self, a person, a group), play musical instrument."}, {"from": "human", "timestamps": 1019.0, "value": "For the person at bbox [0.107, 0.11, 0.525, 0.978], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.107, 0.11, 0.525, 0.978]: listen (e.g., to music), listen to (a person).", "At 1018.0s, [0.016, 0.108, 0.44, 0.972]: stand, dance, listen to (a person).", "At 1017.0s, [0.082, 0.075, 0.498, 0.968]: stand, listen to (a person).", "At 1016.0s, [0.014, 0.103, 0.458, 1.0]: stand, listen (e.g., to music), listen to (a person).", "At 1015.0s, [0.088, 0.101, 0.479, 0.973]: stand, listen to (a person).", "At 1014.0s, [0.012, 0.14, 0.459, 0.977]: dance, listen to (a person).", "At 1013.0s, [0.052, 0.105, 0.478, 1.0]: stand, listen (e.g., to music), dance, listen to (a person).", "At 1012.0s, [0.082, 0.104, 0.494, 0.856]: listen (e.g., to music), dance, sing to (e.g., self, a person, a group).", "From 1010.0s to 1011.0s, [0.051, 0.103, 0.495, 0.991]->[0.026, 0.117, 0.465, 0.98]: listen (e.g., to music), dance, listen to (a person), sing to (e.g., self, a person, a group).", "At 1009.0s, [0.007, 0.126, 0.439, 0.945]: listen (e.g., to music), dance, listen to (a person).", "At 1008.0s, [0.134, 0.095, 0.539, 0.994]: stand, dance, listen to (a person).", "At 1007.0s, [0.023, 0.132, 0.439, 0.982]: dance, listen to (a person).", "At 1006.0s, [0.129, 0.082, 0.549, 0.984]: stand, listen to (a person), sing to (e.g., self, a person, a group).", "At 1005.0s, [0.022, 0.113, 0.458, 1.0]: listen (e.g., to music), dance, sing to (e.g., self, a person, a group).", "At 1004.0s, [0.114, 0.102, 0.518, 0.96]: stand, listen (e.g., to music), sing to (e.g., self, a person, a group).", "At 1003.0s, [0.004, 0.157, 0.43, 0.974]: stand, listen (e.g., to music), listen to (a person).", "From 1001.0s to 1002.0s, [0.045, 0.191, 0.464, 0.969]->[0.122, 0.155, 0.535, 0.967]: listen (e.g., to music), dance, listen to (a person).", "At 1000.0s, [0.093, 0.166, 0.55, 0.944]: dance, listen to (a person).", "At 999.0s, [0.01, 0.161, 0.461, 0.98]: stand, sing to (e.g., self, a person, a group).", "At 998.0s, [0.103, 0.135, 0.49, 0.98]: dance, 
sing to (e.g., self, a person, a group).", "At 997.0s, [0.002, 0.138, 0.451, 0.984]: stand, listen to (a person).", "At 996.0s, [0.006, 0.123, 0.467, 0.965]: dance, listen to (a person).", "From 992.0s to 995.0s, [0.0, 0.13, 0.453, 0.999]->[0.0, 0.138, 0.585, 0.982]->[0.0, 0.113, 0.412, 0.993]->[0.001, 0.124, 0.479, 0.985]: listen (e.g., to music), dance, listen to (a person).", "At 991.0s, [0.0, 0.132, 0.473, 0.979]: listen (e.g., to music), dance.", "At 990.0s, [0.0, 0.142, 0.445, 0.971]: stand, dance, listen to (a person).", "From 988.0s to 989.0s, [0.009, 0.12, 0.477, 0.983]->[0.0, 0.161, 0.431, 0.984]: listen (e.g., to music), dance, listen to (a person).", "From 986.0s to 987.0s, [0.002, 0.149, 0.476, 0.976]->[0.0, 0.147, 0.42, 0.974]: dance, listen to (a person).", "At 985.0s, [0.458, 0.119, 0.994, 0.951]: stand, dance, play musical instrument, listen to (a person).", "From 982.0s to 984.0s, [0.394, 0.132, 0.95, 0.973]->[0.327, 0.093, 0.97, 0.975]->[0.402, 0.154, 0.954, 0.955]: stand, play musical instrument, listen to (a person).", "From 978.0s to 981.0s, [0.402, 0.165, 0.966, 0.994]->[0.299, 0.163, 0.863, 0.98]->[0.366, 0.17, 0.987, 0.957]->[0.276, 0.125, 0.9, 0.987]: dance, play musical instrument, listen to (a person).", "From 975.0s to 977.0s, [0.275, 0.141, 0.818, 0.991]->[0.376, 0.129, 0.942, 0.99]->[0.273, 0.14, 0.828, 0.99]: dance, play musical instrument.", "At 974.0s, [0.334, 0.133, 0.942, 0.983]: listen (e.g., to music), dance, play musical instrument, listen to (a person).", "At 973.0s, [0.264, 0.152, 0.855, 0.984]: stand, play musical instrument, listen to (a person), sing to (e.g., self, a person, a group).", "At 972.0s, [0.285, 0.133, 0.82, 0.983]: stand, dance, play musical instrument, sing to (e.g., self, a person, a group).", "From 967.0s to 971.0s, [0.376, 0.094, 0.948, 0.964]->[0.236, 0.108, 0.84, 0.974]->[0.36, 0.11, 0.921, 0.99]->[0.282, 0.123, 0.859, 0.979]->[0.261, 0.129, 0.888, 1.0]: stand, play musical instrument, sing to (e.g., self, a person, a group).", "From 964.0s to 966.0s, [0.218, 0.104, 0.859, 0.977]->[0.371, 0.106, 0.954, 0.994]->[0.243, 0.104, 0.875, 0.981]: dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 963.0s, [0.375, 0.075, 0.913, 0.969]: stand, play musical instrument, sing to (e.g., self, a person, a group).", "At 962.0s, [0.253, 0.085, 0.869, 0.957]: stand, dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 961.0s, [0.336, 0.091, 0.913, 0.955]: stand, play musical instrument, listen to (a person), sing to (e.g., self, a person, a group).", "At 960.0s, [0.254, 0.08, 0.892, 0.985]: stand, dance, play musical instrument, listen to (a person).", "At 959.0s, [0.337, 0.074, 0.905, 0.973]: stand, dance, play musical instrument, sing to (e.g., self, a person, a group).", "From 957.0s to 958.0s, [0.239, 0.097, 0.833, 0.983]->[0.343, 0.075, 0.898, 0.963]: stand, play musical instrument, sing to (e.g., self, a person, a group).", "At 956.0s, [0.341, 0.074, 0.902, 0.986]: dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 955.0s, [0.24, 0.079, 0.85, 0.976]: stand, play musical instrument, sing to (e.g., self, a person, a group).", "From 953.0s to 954.0s, [0.252, 0.077, 0.862, 0.996]->[0.358, 0.062, 0.965, 0.955]: stand, play musical instrument, listen to (a person).", "At 952.0s, [0.381, 0.09, 0.939, 0.956]: dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 951.0s, [0.276, 0.081, 0.846, 0.995]: dance, play musical instrument, listen to 
(a person), sing to (e.g., self, a person, a group).", "From 948.0s to 950.0s, [0.351, 0.072, 0.943, 0.972]->[0.262, 0.07, 0.907, 0.969]->[0.382, 0.065, 0.957, 0.984]: dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 947.0s, [0.306, 0.061, 0.901, 0.961]: stand, play musical instrument, sing to (e.g., self, a person, a group).", "At 946.0s, [0.368, 0.057, 0.943, 0.958]: stand, play musical instrument.", "At 945.0s, [0.326, 0.088, 0.931, 0.987]: dance, play musical instrument.", "From 942.0s to 944.0s, [0.254, 0.07, 0.935, 0.965]->[0.338, 0.078, 0.93, 0.961]->[0.324, 0.065, 0.925, 0.97]: stand, play musical instrument.", "At 941.0s, [0.353, 0.071, 0.929, 0.949]: dance, play musical instrument.", "From 939.0s to 940.0s, [0.3, 0.092, 0.923, 0.963]->[0.288, 0.077, 0.929, 0.968]: stand, play musical instrument.", "At 938.0s, [0.212, 0.09, 0.927, 0.978]: dance, play musical instrument.", "At 937.0s, [0.369, 0.075, 0.977, 0.961]: dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 936.0s, [0.255, 0.1, 0.902, 0.974]: stand, play musical instrument, listen to (a person), sing to (e.g., self, a person, a group).", "At 935.0s, [0.353, 0.104, 0.965, 0.957]: dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 934.0s, [0.277, 0.096, 0.92, 1.0]: stand, dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 933.0s, [0.39, 0.078, 0.972, 0.981]: dance, play musical instrument, sing to (e.g., self, a person, a group), talk to (e.g., self, a person, a group).", "At 932.0s, [0.277, 0.095, 0.938, 0.984]: dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 931.0s, [0.316, 0.071, 0.944, 1.0]: stand, play musical instrument, listen to (a person).", "At 930.0s, [0.377, 0.071, 0.975, 0.957]: dance, play musical instrument, listen to (a person), sing to (e.g., self, a person, a group).", "At 929.0s, [0.241, 0.085, 0.918, 0.988]: stand, play musical instrument, sing to (e.g., self, a person, a group).", "At 928.0s, [0.378, 0.089, 0.952, 0.96]: dance, play musical instrument, listen to (a person), sing to (e.g., self, a person, a group).", "At 927.0s, [0.282, 0.098, 0.923, 0.975]: dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 926.0s, [0.383, 0.086, 0.988, 0.985]: stand, play musical instrument, sing to (e.g., self, a person, a group).", "From 924.0s to 925.0s, [0.351, 0.098, 0.942, 0.965]->[0.27, 0.096, 0.907, 0.975]: dance, play musical instrument, sing to (e.g., self, a person, a group).", "From 922.0s to 923.0s, [0.354, 0.113, 0.979, 0.973]->[0.279, 0.097, 0.931, 0.964]: stand, play musical instrument.", "From 919.0s to 921.0s, [0.248, 0.124, 0.905, 0.96]->[0.37, 0.107, 0.963, 0.963]->[0.253, 0.133, 0.923, 0.981]: dance, play musical instrument.", "At 918.0s, [0.385, 0.103, 0.944, 0.981]: stand, play musical instrument.", "At 917.0s, [0.308, 0.114, 0.925, 0.982]: stand, play musical instrument, listen to (a person).", "At 916.0s, [0.34, 0.147, 0.965, 0.987]: stand, carry/hold (an object), play musical instrument.", "At 915.0s, [0.325, 0.148, 0.943, 1.0]: stand, play musical instrument.", "From 911.0s to 914.0s, [0.262, 0.143, 0.892, 0.995]->[0.328, 0.128, 0.952, 1.0]->[0.369, 0.113, 0.949, 0.994]->[0.261, 0.15, 0.953, 1.0]: dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 910.0s, [0.383, 0.131, 0.974, 0.988]: stand, play musical instrument, sing to (e.g., self, a person, a group).", "From 908.0s to 909.0s, [0.348, 0.109, 
0.978, 0.978]->[0.225, 0.137, 0.893, 0.994]: dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 907.0s, [0.227, 0.119, 0.893, 0.98]: stand, play musical instrument, sing to (e.g., self, a person, a group).", "At 906.0s, [0.324, 0.12, 0.982, 0.977]: stand, dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 905.0s, [0.249, 0.13, 0.891, 0.972]: stand, play musical instrument.", "At 904.0s, [0.344, 0.113, 0.978, 0.993]: listen (e.g., to music), play musical instrument.", "At 903.0s, [0.284, 0.118, 0.872, 0.983]: dance, play musical instrument.", "At 902.0s, [0.314, 0.111, 0.925, 0.985]: stand, play musical instrument."]}]}, {"video": "zG7mx8KiavA.mp4", "clip": [1011, 1058], "conversations": [{"from": "human", "timestamps": 1041.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.189, 0.015, 0.882, 0.996]: sit."}, {"from": "human", "timestamps": 1058.0, "value": "For the person at bbox [0.307, 0.095, 0.931, 0.993], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.307, 0.095, 0.931, 0.993]: dance, play musical instrument, sing to (e.g., self, a person, a group).", "From 1056.0s to 1057.0s, [0.251, 0.092, 0.918, 0.967]->[0.263, 0.105, 0.854, 0.995]: dance, play musical instrument, listen to (a person).", "From 1052.0s to 1055.0s, [0.277, 0.085, 0.92, 0.995]->[0.25, 0.1, 0.858, 0.981]->[0.298, 0.089, 0.912, 0.966]->[0.237, 0.12, 0.931, 0.983]: dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 1051.0s, [0.244, 0.1, 0.864, 0.96]: stand, dance, play musical instrument, sing to (e.g., self, a person, a group).", "At 1050.0s, [0.266, 0.109, 0.954, 0.975]: dance, play musical instrument, sing to (e.g., self, a person, a group)."]}]}, {"video": "zG7mx8KiavA.mp4", "clip": [1079, 1111], "conversations": [{"from": "human", "timestamps": 1109.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.1, 0.142, 0.527, 0.921]: listen (e.g., to music), stand, dance, listen to (a person); [0.406, 0.117, 0.996, 0.989]: sing to (e.g., self, a person, a group), dance, play musical instrument."}, {"from": "human", "timestamps": 1111.0, "value": "What actions did the person at bbox [0.096, 0.114, 0.507, 0.914] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.096, 0.114, 0.507, 0.914]: stand, listen (e.g., to music), listen to (a person).", "At 1110.0s, [0.0, 0.136, 0.388, 0.976]: listen (e.g., to music), dance, listen to (a person).", "At 1109.0s, [0.1, 0.142, 0.527, 0.921]: stand, listen (e.g., to music), dance, listen to (a person).", "At 1108.0s, [0.014, 0.127, 0.476, 0.993]: stand, listen (e.g., to music), listen to (a person), sing to (e.g., self, a person, a group).", "At 1107.0s, [0.108, 0.115, 0.527, 0.93]: dance, listen to (a person), sing to (e.g., self, a person, a group).", "From 1105.0s to 1106.0s, [0.057, 0.122, 0.49, 0.957]->[0.003, 0.133, 0.47, 0.968]: dance, listen to (a person).", "From 1103.0s to 1104.0s, [0.074, 0.133, 0.498, 0.933]->[0.0, 0.131, 0.401, 0.976]: stand, listen (e.g., to music), listen to (a person).", "At 1102.0s, [0.0, 0.164, 0.451, 0.946]: listen (e.g., to music), dance, listen to (a person).", "At 1101.0s, [0.109, 0.151, 0.519, 0.948]: stand, listen (e.g., to music), sing to (e.g., self, a person, a 
group).", "At 1100.0s, [0.0, 0.136, 0.456, 0.963]: listen (e.g., to music), dance, listen to (a person), sing to (e.g., self, a person, a group).", "At 1099.0s, [0.003, 0.157, 0.456, 0.971]: stand, listen (e.g., to music), listen to (a person), sing to (e.g., self, a person, a group).", "From 1097.0s to 1098.0s, [0.0, 0.163, 0.457, 0.968]->[0.004, 0.145, 0.441, 0.977]: stand, listen to (a person).", "At 1096.0s, [0.0, 0.135, 0.537, 0.97]: stand, listen (e.g., to music), listen to (a person).", "At 1094.0s, [0.0, 0.128, 0.596, 0.994]: dance, listen to (a person)."]}]}, {"video": "zG7mx8KiavA.mp4", "clip": [1131, 1185], "conversations": [{"from": "human", "timestamps": 1161.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.008, 0.1, 0.424, 0.996]: stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1185.0, "value": "For the person at bbox [0.004, 0.038, 0.733, 0.995], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.004, 0.038, 0.733, 0.995]: sit, touch (an object), talk to (e.g., self, a person, a group).", "At 1184.0s, [0.007, 0.036, 0.732, 0.992]: sit, talk to (e.g., self, a person, a group).", "At 1183.0s, [0.011, 0.04, 0.727, 0.987]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1180.0s to 1182.0s, [0.01, 0.036, 0.724, 0.993]->[0.014, 0.029, 0.727, 0.988]->[0.009, 0.039, 0.724, 0.987]: sit, talk to (e.g., self, a person, a group).", "At 1179.0s, [0.011, 0.027, 0.729, 1.0]: sit, watch (e.g., TV), talk to (e.g., self, a person, a group).", "From 1174.0s to 1178.0s, [0.007, 0.04, 0.724, 0.984]->[0.008, 0.031, 0.721, 0.985]->[0.01, 0.056, 0.72, 0.987]->[0.004, 0.037, 0.729, 0.987]->[0.0, 0.052, 0.724, 0.993]: sit, talk to (e.g., self, a person, a group).", "At 1173.0s, [0.006, 0.054, 0.725, 0.986]: sit, watch (e.g., TV), talk to (e.g., self, a person, a group).", "From 1167.0s to 1172.0s, [0.004, 0.039, 0.698, 1.0]->[0.0, 0.04, 0.705, 0.992]->[0.0, 0.039, 0.703, 0.991]->[0.007, 0.051, 0.698, 0.999]->[0.0, 0.031, 0.698, 0.998]->[0.0, 0.034, 0.704, 1.0]: sit, talk to (e.g., self, a person, a group).", "From 1164.0s to 1166.0s, [0.002, 0.04, 0.668, 0.998]->[0.014, 0.029, 0.701, 0.999]->[0.004, 0.028, 0.71, 0.993]: sit, watch (e.g., TV), talk to (e.g., self, a person, a group).", "At 1163.0s, [0.004, 0.016, 0.603, 0.988]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "zG7mx8KiavA.mp4", "clip": [1176, 1219], "conversations": [{"from": "human", "timestamps": 1206.0, "value": "What actions did the person at bbox [0.163, 0.139, 0.979, 0.978] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.163, 0.139, 0.979, 0.978]: stand, talk to (e.g., self, a person, a group).", "At 1205.0s, [0.158, 0.163, 0.968, 0.983]: stand.", "At 1204.0s, [0.161, 0.167, 0.997, 0.98]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1219.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.541, 0.415, 0.696, 0.636]: get up; [0.646, 0.357, 0.802, 0.616]: carry/hold (an object), watch (a person), bend/bow (at the waist)."}]}, {"video": "zG7mx8KiavA.mp4", "clip": [1265, 1311], "conversations": [{"from": "human", "timestamps": 1295.0, "value": "For the person at bbox [0.43, 0.132, 0.56, 0.952], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.43, 0.132, 0.56, 0.952]: stand, 
carry/hold (an object).", "At 1294.0s, [0.359, 0.088, 0.484, 0.886]: stand, carry/hold (an object).", "From 1290.0s to 1292.0s, [0.358, 0.166, 0.505, 0.623]->[0.354, 0.173, 0.521, 0.616]->[0.303, 0.205, 0.484, 0.599]: stand, carry/hold (an object).", "At 1289.0s, [0.353, 0.222, 0.474, 0.625]: stand.", "At 1288.0s, [0.344, 0.3, 0.473, 0.613]: bend/bow (at the waist).", "At 1287.0s, [0.338, 0.303, 0.472, 0.746]: bend/bow (at the waist), touch (an object).", "At 1286.0s, [0.312, 0.354, 0.46, 0.75]: bend/bow (at the waist), carry/hold (an object), listen to (a person).", "At 1285.0s, [0.3, 0.392, 0.463, 0.857]: stand, carry/hold (an object)."]}, {"from": "human", "timestamps": 1311.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.424, 0.099, 0.732, 0.672]: carry/hold (an object)."}]}, {"video": "zG7mx8KiavA.mp4", "clip": [1240, 1377], "conversations": [{"from": "human", "timestamps": 1270.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.117, 0.806, 0.991]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1377.0, "value": "For the person at bbox [0.084, 0.13, 0.241, 0.481], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.084, 0.13, 0.241, 0.481]: stand, watch (a person).", "From 1374.0s to 1376.0s, [0.082, 0.128, 0.249, 0.481]->[0.083, 0.13, 0.241, 0.481]->[0.084, 0.133, 0.24, 0.482]: stand, watch (a person).", "At 1373.0s, [0.09, 0.123, 0.259, 0.488]: stand, carry/hold (an object), watch (a person).", "From 1369.0s to 1372.0s, [0.098, 0.128, 0.275, 0.491]->[0.093, 0.124, 0.264, 0.491]->[0.091, 0.123, 0.262, 0.48]->[0.092, 0.125, 0.259, 0.493]: stand, watch (a person).", "From 1367.0s to 1368.0s, [0.112, 0.122, 0.269, 0.479]->[0.103, 0.125, 0.268, 0.484]: stand, listen to (a person), watch (a person)."]}]}, {"video": "zG7mx8KiavA.mp4", "clip": [1299, 1376], "conversations": [{"from": "human", "timestamps": 1329.0, "value": "Describe the actions of the person at bbox [0.695, 0.382, 0.877, 0.994] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.695, 0.382, 0.877, 0.994]: walk, carry/hold (an object).", "From 1327.0s to 1328.0s, [0.405, 0.422, 0.571, 0.999]->[0.445, 0.45, 0.701, 0.991]: walk, carry/hold (an object).", "At 1326.0s, [0.38, 0.159, 0.579, 0.91]: stand, carry/hold (an object)."]}, {"from": "human", "timestamps": 1376.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.084, 0.133, 0.24, 0.482]: watch (a person), stand; [0.23, 0.171, 0.385, 0.491]: carry/hold (an object), stand, watch (a person); [0.456, 0.103, 0.739, 0.991]: stand."}]}, {"video": "zG7mx8KiavA.mp4", "clip": [1359, 1405], "conversations": [{"from": "human", "timestamps": 1389.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.106, 0.122, 0.26, 0.471]: watch (a person), stand."}, {"from": "human", "timestamps": 1405.0, "value": "What actions is the person in bbox [0.065, 0.118, 0.222, 0.482] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.065, 0.118, 0.222, 0.482]: stand, watch (a person).", "At 1404.0s, [0.066, 0.133, 0.231, 0.481]: stand, talk to (e.g., self, a person, a group), watch (a person).", 
"From 1399.0s to 1403.0s, [0.047, 0.119, 0.206, 0.481]->[0.059, 0.124, 0.207, 0.481]->[0.076, 0.143, 0.224, 0.484]->[0.07, 0.148, 0.221, 0.482]->[0.067, 0.141, 0.228, 0.483]: stand, watch (a person).", "At 1398.0s, [0.049, 0.12, 0.204, 0.481]: stand, listen to (a person), watch (a person).", "At 1397.0s, [0.043, 0.122, 0.195, 0.472]: stand, watch (a person).", "At 1396.0s, [0.101, 0.133, 0.355, 0.606]: bend/bow (at the waist), stand, carry/hold (an object), watch (a person).", "At 1395.0s, [0.096, 0.127, 0.259, 0.482]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1389.0s to 1394.0s, [0.106, 0.122, 0.26, 0.471]->[0.098, 0.123, 0.229, 0.448]->[0.072, 0.126, 0.223, 0.462]->[0.078, 0.125, 0.211, 0.467]->[0.076, 0.126, 0.209, 0.477]->[0.073, 0.135, 0.222, 0.467]: stand, watch (a person).", "At 1388.0s, [0.105, 0.121, 0.27, 0.478]: stand, listen to (a person), watch (a person).", "At 1387.0s, [0.109, 0.122, 0.272, 0.494]: stand, watch (a person).", "At 1386.0s, [0.1, 0.122, 0.268, 0.479]: stand, listen to (a person), watch (a person).", "At 1385.0s, [0.098, 0.118, 0.25, 0.485]: stand, carry/hold (an object), watch (a person).", "From 1383.0s to 1384.0s, [0.095, 0.125, 0.261, 0.469]->[0.109, 0.12, 0.278, 0.47]: stand, listen to (a person), watch (a person).", "At 1381.0s, [0.106, 0.122, 0.271, 0.468]: stand, listen to (a person), watch (a person).", "At 1380.0s, [0.086, 0.136, 0.251, 0.484]: stand, watch (a person).", "At 1379.0s, [0.089, 0.129, 0.254, 0.481]: stand, listen to (a person), watch (a person).", "From 1374.0s to 1378.0s, [0.082, 0.128, 0.249, 0.481]->[0.083, 0.13, 0.241, 0.481]->[0.084, 0.133, 0.24, 0.482]->[0.084, 0.13, 0.241, 0.481]->[0.088, 0.126, 0.248, 0.48]: stand, watch (a person).", "At 1373.0s, [0.09, 0.123, 0.259, 0.488]: stand, carry/hold (an object), watch (a person).", "From 1369.0s to 1372.0s, [0.098, 0.128, 0.275, 0.491]->[0.093, 0.124, 0.264, 0.491]->[0.091, 0.123, 0.262, 0.48]->[0.092, 0.125, 0.259, 0.493]: stand, watch (a person).", "From 1367.0s to 1368.0s, [0.112, 0.122, 0.269, 0.479]->[0.103, 0.125, 0.268, 0.484]: stand, listen to (a person), watch (a person)."]}]}, {"video": "zG7mx8KiavA.mp4", "clip": [1356, 1473], "conversations": [{"from": "human", "timestamps": 1386.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.1, 0.122, 0.268, 0.479]: listen to (a person), stand, watch (a person); [0.273, 0.125, 0.535, 0.886]: carry/hold (an object), stand, watch (a person), push (another person); [0.451, 0.102, 0.642, 0.899]: carry/hold (an object), stand."}, {"from": "human", "timestamps": 1473.0, "value": "What actions is the person in bbox [0.619, 0.141, 1.0, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.619, 0.141, 1.0, 1.0]: sit, listen to (a person), watch (a person).", "From 1467.0s to 1472.0s, [0.618, 0.135, 1.0, 1.0]->[0.616, 0.144, 1.0, 1.0]->[0.616, 0.145, 0.999, 1.0]->[0.62, 0.146, 0.999, 1.0]->[0.624, 0.149, 1.0, 1.0]->[0.621, 0.141, 1.0, 1.0]: sit, listen to (a person), watch (a person).", "From 1465.0s to 1466.0s, [0.621, 0.123, 1.0, 1.0]->[0.622, 0.12, 1.0, 1.0]: sit, listen to (a person).", "At 1464.0s, [0.606, 0.124, 0.994, 1.0]: sit, listen to (a person), watch (a person)."]}]}, {"video": "zG7mx8KiavA.mp4", "clip": [1448, 1497], "conversations": [{"from": "human", "timestamps": 1478.0, "value": "Can you tell me the current and past actions of the person located in bbox 
[0.009, 0.148, 0.481, 0.998]?"}, {"from": "gpt", "value": ["At Now, [0.009, 0.148, 0.481, 0.998]: sit, listen to (a person).", "At 1477.0s, [0.005, 0.144, 0.517, 1.0]: sit, listen to (a person).", "From 1467.0s to 1476.0s, [0.015, 0.142, 0.48, 1.0]->[0.011, 0.127, 0.488, 1.0]->[0.008, 0.136, 0.504, 1.0]->[0.008, 0.125, 0.508, 1.0]->[0.004, 0.133, 0.512, 1.0]->[0.001, 0.14, 0.528, 1.0]->[0.003, 0.133, 0.507, 1.0]->[0.009, 0.137, 0.536, 1.0]->[0.028, 0.18, 0.493, 1.0]->[0.017, 0.143, 0.537, 0.995]: sit, listen to (a person), watch (a person).", "From 1465.0s to 1466.0s, [0.017, 0.147, 0.485, 1.0]->[0.017, 0.14, 0.475, 1.0]: sit, listen to (a person).", "At 1464.0s, [0.014, 0.144, 0.504, 0.997]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1497.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.006, 0.133, 0.566, 1.0]: listen to (a person), sit, watch (a person); [0.362, 0.245, 0.679, 0.992]: sit, touch (an object), talk to (e.g., self, a person, a group); [0.594, 0.157, 0.996, 0.991]: listen to (a person), sit."}]}, {"video": "zG7mx8KiavA.mp4", "clip": [1479, 1525], "conversations": [{"from": "human", "timestamps": 1509.0, "value": "Describe the actions of the person at bbox [0.617, 0.142, 1.0, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.617, 0.142, 1.0, 1.0]: sit, listen to (a person), watch (a person).", "From 1505.0s to 1508.0s, [0.603, 0.143, 0.991, 0.991]->[0.614, 0.144, 1.0, 1.0]->[0.614, 0.14, 1.0, 1.0]->[0.613, 0.142, 0.997, 1.0]: sit, listen to (a person), watch (a person).", "At 1504.0s, [0.601, 0.141, 0.998, 1.0]: sit, listen to (a person).", "From 1498.0s to 1503.0s, [0.608, 0.143, 0.99, 0.998]->[0.61, 0.141, 0.992, 0.996]->[0.613, 0.133, 0.994, 0.997]->[0.614, 0.138, 0.985, 0.995]->[0.606, 0.141, 0.995, 1.0]->[0.613, 0.136, 0.997, 1.0]: sit, listen to (a person), watch (a person).", "From 1496.0s to 1497.0s, [0.611, 0.143, 0.996, 1.0]->[0.594, 0.157, 0.996, 0.991]: sit, listen to (a person).", "From 1493.0s to 1495.0s, [0.615, 0.143, 0.998, 0.999]->[0.615, 0.138, 0.994, 1.0]->[0.594, 0.143, 0.995, 0.997]: sit, listen to (a person), watch (a person).", "At 1492.0s, [0.615, 0.136, 0.998, 1.0]: sit, listen to (a person).", "From 1467.0s to 1491.0s, [0.618, 0.135, 1.0, 1.0]->[0.616, 0.144, 1.0, 1.0]->[0.616, 0.145, 0.999, 1.0]->[0.62, 0.146, 0.999, 1.0]->[0.624, 0.149, 1.0, 1.0]->[0.621, 0.141, 1.0, 1.0]->[0.619, 0.141, 1.0, 1.0]->[0.624, 0.141, 1.0, 1.0]->[0.615, 0.138, 0.999, 1.0]->[0.613, 0.147, 1.0, 1.0]->[0.625, 0.158, 1.0, 0.991]->[0.62, 0.152, 1.0, 1.0]->[0.629, 0.149, 1.0, 1.0]->[0.618, 0.149, 1.0, 1.0]->[0.612, 0.144, 0.999, 1.0]->[0.617, 0.148, 1.0, 1.0]->[0.619, 0.148, 1.0, 1.0]->[0.623, 0.149, 1.0, 1.0]->[0.609, 0.147, 1.0, 1.0]->[0.617, 0.144, 1.0, 1.0]->[0.601, 0.142, 0.989, 0.991]->[0.618, 0.144, 1.0, 1.0]->[0.618, 0.144, 1.0, 1.0]->[0.617, 0.149, 1.0, 1.0]->[0.62, 0.148, 0.997, 1.0]: sit, listen to (a person), watch (a person).", "From 1465.0s to 1466.0s, [0.621, 0.123, 1.0, 1.0]->[0.622, 0.12, 1.0, 1.0]: sit, listen to (a person).", "At 1464.0s, [0.606, 0.124, 0.994, 1.0]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1525.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.005, 0.057, 0.641, 0.991]: watch (e.g., TV), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "zG7mx8KiavA.mp4", "clip": [1559, 
1599], "conversations": [{"from": "human", "timestamps": 1589.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.059, 0.165, 0.604, 0.97]?"}, {"from": "gpt", "value": ["In the current frame, [0.059, 0.165, 0.604, 0.97]: sit, talk to (e.g., self, a person, a group).", "From 1576.0s to 1588.0s, [0.001, 0.178, 0.409, 0.982]->[0.001, 0.179, 0.423, 0.986]->[0.0, 0.171, 0.439, 1.0]->[0.011, 0.174, 0.461, 0.992]->[0.013, 0.172, 0.468, 0.988]->[0.026, 0.169, 0.475, 0.989]->[0.042, 0.174, 0.498, 0.996]->[0.054, 0.169, 0.501, 0.989]->[0.058, 0.171, 0.621, 0.978]->[0.052, 0.174, 0.502, 0.977]->[0.065, 0.174, 0.585, 0.977]->[0.063, 0.163, 0.506, 0.989]->[0.056, 0.166, 0.522, 0.969]: sit, talk to (e.g., self, a person, a group).", "At 1575.0s, [0.0, 0.179, 0.425, 0.996]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1574.0s, [0.0, 0.182, 0.408, 0.99]: sit, talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1599.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.062, 0.154, 0.532, 0.996]: sit, talk to (e.g., self, a person, a group); [0.371, 0.135, 0.807, 0.969]: watch (e.g., TV), sit, touch (an object), listen to (a person)."}]}, {"video": "zG7mx8KiavA.mp4", "clip": [1655, 1693], "conversations": [{"from": "human", "timestamps": 1685.0, "value": "For the person at bbox [0.021, 0.28, 0.344, 0.94], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.021, 0.28, 0.344, 0.94]: dance.", "At 1684.0s, [0.084, 0.046, 0.268, 0.912]: dance.", "At 1683.0s, [0.081, 0.069, 0.523, 0.79]: dance, watch (a person).", "At 1682.0s, [0.088, 0.086, 0.547, 0.794]: stand, watch (a person).", "At 1681.0s, [0.673, 0.333, 0.911, 0.862]: crouch/kneel, talk to (e.g., self, a person, a group), watch (a person).", "At 1680.0s, [0.662, 0.441, 0.911, 0.845]: crouch/kneel, put down, listen to (a person)."]}, {"from": "human", "timestamps": 1693.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.084, 0.051, 0.303, 0.956]: watch (a person), dance; [0.343, 0.05, 0.817, 0.957]: dance."}]}, {"video": "zR725veL-DI.mkv", "clip": [909, 959], "conversations": [{"from": "human", "timestamps": 939.0, "value": "Describe the actions of the person at bbox [0.415, 0.262, 0.582, 0.884] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.415, 0.262, 0.582, 0.884]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 938.0s, [0.492, 0.066, 0.632, 0.912]: stand, carry/hold (an object), talk to (e.g., self, a person, a group).", "From 936.0s to 937.0s, [0.23, 0.0, 0.828, 0.992]->[0.551, 0.082, 0.681, 0.688]: walk, carry/hold (an object).", "At 935.0s, [0.268, 0.002, 0.809, 0.989]: walk, carry/hold (an object), watch (a person).", "At 934.0s, [0.25, 0.063, 0.704, 0.94]: walk, watch (a person).", "At 933.0s, [0.371, 0.093, 0.657, 0.676]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 932.0s, [0.281, 0.124, 0.692, 0.935]: stand, watch (a person)."]}, {"from": "human", "timestamps": 959.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.0, 0.0, 0.456, 1.0]: listen to (a person), sit, watch (a person); [0.387, 0.136, 
0.637, 0.929]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "zR725veL-DI.mkv", "clip": [898, 971], "conversations": [{"from": "human", "timestamps": 928.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.243, 0.1, 0.624, 0.964]: carry/hold (an object), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 971.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.098, 0.053, 0.554, 0.985]?"}, {"from": "gpt", "value": ["At Now, [0.098, 0.053, 0.554, 0.985]: sit, watch (e.g., TV).", "From 969.0s to 970.0s, [0.26, 0.029, 0.582, 0.922]->[0.252, 0.021, 0.563, 0.939]: sit, carry/hold (an object).", "At 968.0s, [0.263, 0.019, 0.576, 0.959]: sit, carry/hold (an object), watch (e.g., TV).", "From 964.0s to 967.0s, [0.103, 0.0, 0.903, 0.991]->[0.021, 0.0, 0.892, 0.984]->[0.077, 0.0, 0.839, 1.0]->[0.057, 0.0, 0.834, 1.0]: sit.", "From 962.0s to 963.0s, [0.368, 0.264, 0.549, 0.875]->[0.415, 0.269, 0.5, 0.493]: sit, carry/hold (an object), watch (a person).", "At 961.0s, [0.0, 0.0, 0.453, 1.0]: sit, listen to (a person), watch (a person).", "At 960.0s, [0.0, 0.0, 0.449, 1.0]: sit, watch (e.g., TV), listen to (a person), watch (a person).", "From 958.0s to 959.0s, [0.0, 0.0, 0.452, 1.0]->[0.0, 0.0, 0.456, 1.0]: sit, listen to (a person), watch (a person).", "From 954.0s to 957.0s, [0.03, 0.0, 0.825, 0.97]->[0.009, 0.0, 0.805, 0.982]->[0.0, 0.0, 0.782, 0.984]->[0.0, 0.0, 0.786, 0.986]: sit, talk to (e.g., self, a person, a group).", "At 953.0s, [0.001, 0.0, 0.756, 0.97]: sit, watch (a person)."]}]}, {"video": "zR725veL-DI.mkv", "clip": [954, 1015], "conversations": [{"from": "human", "timestamps": 984.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.166, 0.0, 0.949, 1.0]: sit, work on a computer, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1015.0, "value": "For the person at bbox [0.113, 0.076, 0.498, 0.991], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.113, 0.076, 0.498, 0.991]: walk, carry/hold (an object), listen to (a person), watch (a person).", "From 1012.0s to 1014.0s, [0.096, 0.139, 0.427, 0.994]->[0.009, 0.126, 0.45, 0.993]->[0.0, 0.17, 0.473, 0.993]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1011.0s, [0.113, 0.171, 0.405, 0.987]: walk, carry/hold (an object), listen to (a person).", "At 1010.0s, [0.249, 0.172, 0.518, 0.986]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 1007.0s to 1009.0s, [0.412, 0.324, 0.544, 0.991]->[0.355, 0.277, 0.515, 0.988]->[0.312, 0.203, 0.537, 0.984]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1006.0s, [0.248, 0.291, 0.468, 0.986]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1005.0s, [0.306, 0.316, 0.522, 1.0]: walk, carry/hold (an object), listen to (a person).", "At 1004.0s, [0.358, 0.26, 0.533, 0.988]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1003.0s, [0.29, 0.161, 0.461, 0.993]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1002.0s, 
[0.266, 0.212, 0.405, 0.99]: walk, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "zR725veL-DI.mkv", "clip": [1003, 1037], "conversations": [{"from": "human", "timestamps": 1033.0, "value": "What actions is the person in bbox [0.005, 0.0, 0.415, 1.0] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.005, 0.0, 0.415, 1.0]: stand, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1032.0s, [0.014, 0.0, 0.387, 0.996]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1022.0s to 1031.0s, [0.107, 0.092, 0.4, 0.988]->[0.107, 0.093, 0.396, 0.972]->[0.013, 0.006, 0.54, 0.975]->[0.019, 0.0, 0.543, 0.971]->[0.096, 0.087, 0.388, 0.986]->[0.098, 0.092, 0.386, 0.994]->[0.011, 0.0, 0.53, 0.983]->[0.017, 0.002, 0.52, 0.979]->[0.008, 0.0, 0.481, 0.978]->[0.007, 0.0, 0.447, 0.984]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1021.0s, [0.096, 0.08, 0.378, 0.981]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1016.0s to 1020.0s, [0.025, 0.092, 0.359, 0.986]->[0.058, 0.077, 0.373, 0.999]->[0.105, 0.093, 0.407, 0.981]->[0.049, 0.024, 0.513, 0.985]->[0.057, 0.031, 0.513, 0.984]: stand, carry/hold (an object), listen to (a person), watch (a person).", "From 1012.0s to 1015.0s, [0.096, 0.139, 0.427, 0.994]->[0.009, 0.126, 0.45, 0.993]->[0.0, 0.17, 0.473, 0.993]->[0.113, 0.076, 0.498, 0.991]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1011.0s, [0.113, 0.171, 0.405, 0.987]: walk, carry/hold (an object), listen to (a person).", "At 1010.0s, [0.249, 0.172, 0.518, 0.986]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group).", "From 1007.0s to 1009.0s, [0.412, 0.324, 0.544, 0.991]->[0.355, 0.277, 0.515, 0.988]->[0.312, 0.203, 0.537, 0.984]: walk, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1006.0s, [0.248, 0.291, 0.468, 0.986]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1005.0s, [0.306, 0.316, 0.522, 1.0]: walk, carry/hold (an object), listen to (a person).", "At 1004.0s, [0.358, 0.26, 0.533, 0.988]: walk, carry/hold (an object), listen to (a person), watch (a person).", "At 1003.0s, [0.29, 0.161, 0.461, 0.993]: walk, carry/hold (an object), listen to (a person), talk to (e.g., self, a person, a group), watch (a person).", "At 1002.0s, [0.266, 0.212, 0.405, 0.99]: walk, carry/hold (an object), listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1037.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.252, 0.105, 0.537, 0.988]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group); [0.592, 0.233, 0.794, 0.938]: listen to (a person), stand, watch (a person)."}]}, {"video": "zR725veL-DI.mkv", "clip": [1023, 1098], "conversations": [{"from": "human", "timestamps": 1053.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "In the current frame, [0.018, 0.307, 0.268, 0.992]: carry/hold (an object), watch (a person), talk to (e.g., self, a person, a group), walk; [0.317, 0.114, 0.672, 0.979]: carry/hold (an object), listen to (a person), watch (a 
person), walk."}, {"from": "human", "timestamps": 1098.0, "value": "For the person at bbox [0.326, 0.186, 0.621, 0.99], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.326, 0.186, 0.621, 0.99]: stand, talk to (e.g., self, a person, a group), watch (a person).", "At 1097.0s, [0.308, 0.178, 0.619, 0.996]: stand, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "zR725veL-DI.mkv", "clip": [1111, 1198], "conversations": [{"from": "human", "timestamps": 1141.0, "value": "For the person at bbox [0.431, 0.008, 0.814, 0.993], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.431, 0.008, 0.814, 0.993]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1140.0s, [0.604, 0.22, 0.713, 0.981]: stand, watch (a person).", "From 1138.0s to 1139.0s, [0.403, 0.021, 0.771, 0.985]->[0.505, 0.199, 0.733, 0.99]: walk, watch (a person)."]}, {"from": "human", "timestamps": 1198.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "At now, [0.275, 0.003, 0.845, 0.992]: watch (a person), stand, talk to (e.g., self, a person, a group)."}]}, {"video": "zR725veL-DI.mkv", "clip": [1141, 1220], "conversations": [{"from": "human", "timestamps": 1171.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.102, 0.319, 0.434, 0.981]: listen to (a person), sit, eat, watch (a person); [0.259, 0.088, 0.44, 0.702]: carry/hold (an object), stand, talk to (e.g., self, a person, a group), listen to (a person); [0.542, 0.461, 0.764, 0.995]: carry/hold (an object), sit, watch (a person); [0.727, 0.593, 0.856, 0.944]: watch (a person), sit."}, {"from": "human", "timestamps": 1220.0, "value": "Describe the actions of the person at bbox [0.0, 0.0, 0.729, 1.0] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.0, 0.0, 0.729, 1.0]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1217.0s to 1219.0s, [0.096, 0.0, 0.888, 0.993]->[0.118, 0.0, 0.92, 0.986]->[0.058, 0.003, 0.814, 0.973]: sit, talk to (e.g., self, a person, a group).", "From 1212.0s to 1216.0s, [0.277, 0.0, 0.919, 0.98]->[0.023, 0.009, 0.825, 0.968]->[0.028, 0.0, 0.748, 0.957]->[0.211, 0.0, 0.79, 0.99]->[0.016, 0.0, 0.809, 0.995]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "zR725veL-DI.mkv", "clip": [1180, 1257], "conversations": [{"from": "human", "timestamps": 1210.0, "value": "Describe the actions of the person at bbox [0.28, 0.0, 0.928, 0.982] in the current frame and the previous frames."}, {"from": "gpt", "value": ["At Now, [0.28, 0.0, 0.928, 0.982]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1209.0s, [0.448, 0.003, 0.979, 0.971]: sit, listen to (a person), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1257.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.25, 0.121, 0.695, 0.985]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "zR725veL-DI.mkv", "clip": [1233, 1301], "conversations": [{"from": "human", "timestamps": 1263.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", 
"value": "In the current frame, [0.119, 0.488, 0.362, 0.864]: watch (a person), sit, talk to (e.g., self, a person, a group); [0.662, 0.104, 0.821, 0.984]: listen to (a person), walk."}, {"from": "human", "timestamps": 1301.0, "value": "What actions did the person at bbox [0.059, 0.161, 0.258, 0.761] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.059, 0.161, 0.258, 0.761]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1300.0s, [0.054, 0.192, 0.342, 0.754]: sit, carry/hold (an object), touch (an object), watch (a person).", "From 1297.0s to 1299.0s, [0.061, 0.15, 0.254, 0.745]->[0.05, 0.154, 0.253, 0.76]->[0.046, 0.16, 0.269, 0.773]: sit, carry/hold (an object), listen to (a person), watch (a person)."]}]}, {"video": "zR725veL-DI.mkv", "clip": [1306, 1339], "conversations": [{"from": "human", "timestamps": 1336.0, "value": "Describe the actions of the person at bbox [0.492, 0.215, 0.822, 0.997] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.492, 0.215, 0.822, 0.997]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1335.0s, [0.65, 0.223, 0.834, 0.897]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1339.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.205, 0.14, 0.375, 0.99]: listen to (a person), stand, watch (a person); [0.485, 0.079, 0.958, 0.976]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}]}, {"video": "zR725veL-DI.mkv", "clip": [1376, 1407], "conversations": [{"from": "human", "timestamps": 1406.0, "value": "What actions is the person in bbox [0.509, 0.0, 1.0, 0.987] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.509, 0.0, 1.0, 0.987]: stand, listen to (a person), watch (a person).", "At 1405.0s, [0.503, 0.0, 1.0, 0.985]: stand, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1407.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.0, 0.018, 0.367, 0.99]: watch (a person), stand, talk to (e.g., self, a person, a group); [0.498, 0.0, 1.0, 0.989]: listen to (a person), stand, watch (a person)."}]}, {"video": "zR725veL-DI.mkv", "clip": [1426, 1486], "conversations": [{"from": "human", "timestamps": 1456.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.532, 0.327, 0.588, 0.59]: carry/hold (an object), play with pets, walk; [0.624, 0.378, 0.651, 0.549]: watch (a person), talk to (e.g., self, a person, a group), walk."}, {"from": "human", "timestamps": 1486.0, "value": "What actions did the person at bbox [0.644, 0.342, 0.779, 0.917] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.644, 0.342, 0.779, 0.917]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1485.0s, [0.641, 0.341, 0.778, 0.912]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "zR725veL-DI.mkv", "clip": [1511, 1547], "conversations": [{"from": "human", "timestamps": 1541.0, "value": "What actions is the person in bbox [0.484, 0.314, 0.689, 0.99] doing in the current frame, and what were the person 
previous actions?"}, {"from": "gpt", "value": ["At Now, [0.484, 0.314, 0.689, 0.99]: sit, watch (a person).", "At 1540.0s, [0.482, 0.302, 0.677, 0.984]: sit, watch (a person)."]}, {"from": "human", "timestamps": 1547.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.158, 0.168, 0.421, 1.0]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group); [0.399, 0.02, 0.7, 0.995]: listen to (a person), sit, watch (a person); [0.464, 0.288, 0.702, 1.0]: listen to (a person), sit, watch (a person)."}]}, {"video": "zR725veL-DI.mkv", "clip": [1498, 1601], "conversations": [{"from": "human", "timestamps": 1528.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.01, 0.0, 0.687, 1.0]: carry/hold (an object), sit, watch (a person), listen to (a person); [0.519, 0.16, 0.867, 0.932]: listen to (a person), sit, watch (a person)."}, {"from": "human", "timestamps": 1601.0, "value": "What actions is the person in bbox [0.044, 0.038, 0.725, 0.999] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["At Now, [0.044, 0.038, 0.725, 0.999]: stand.", "At 1600.0s, [0.079, 0.016, 0.766, 0.985]: stand."]}]}, {"video": "zR725veL-DI.mkv", "clip": [1590, 1653], "conversations": [{"from": "human", "timestamps": 1620.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.461, 0.413, 0.542, 0.619]?"}, {"from": "gpt", "value": ["In the current frame, [0.461, 0.413, 0.542, 0.619]: sit.", "From 1618.0s to 1619.0s, [0.458, 0.412, 0.543, 0.62]->[0.463, 0.411, 0.543, 0.618]: sit, watch (a person).", "At 1617.0s, [0.466, 0.413, 0.547, 0.621]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1653.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.397, 0.336, 0.546, 0.639]: listen to (a person), sit, watch (a person); [0.446, 0.433, 0.533, 0.606]: listen to (a person), sit, watch (a person); [0.526, 0.355, 0.792, 0.989]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "zR725veL-DI.mkv", "clip": [1687, 1737], "conversations": [{"from": "human", "timestamps": 1717.0, "value": "What actions did the person at bbox [0.577, 0.056, 1.0, 0.987] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.577, 0.056, 1.0, 0.987]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1716.0s, [0.212, 0.003, 0.961, 0.973]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1715.0s, [0.19, 0.0, 0.956, 0.98]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 1712.0s to 1714.0s, [0.039, 0.0, 0.872, 0.985]->[0.053, 0.0, 0.896, 0.992]->[0.121, 0.0, 0.901, 0.997]: sit, talk to (e.g., self, a person, a group).", "At 1711.0s, [0.027, 0.0, 0.85, 0.989]: sit, eat, talk to (e.g., self, a person, a group).", "At 1710.0s, [0.0, 0.0, 0.873, 0.974]: sit, eat, listen to (a person).", "At 1709.0s, [0.022, 0.0, 0.87, 0.983]: sit, eat."]}, {"from": "human", "timestamps": 1737.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.243, 0.023, 1.0, 0.982]: stand, talk to (e.g., self, a person, a group)."}]}, {"video": "zlVkeKC6Ha8.mp4", "clip": [876, 957], "conversations": [{"from": 
"human", "timestamps": 906.0, "value": "For the person at bbox [0.133, 0.048, 0.836, 0.981], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.133, 0.048, 0.836, 0.981]: sit, stand.", "At 905.0s, [0.13, 0.048, 0.856, 0.976]: sit, stand, watch (a person).", "At 904.0s, [0.118, 0.046, 0.828, 0.973]: stand.", "At 903.0s, [0.113, 0.046, 0.834, 0.974]: stand, watch (a person)."]}, {"from": "human", "timestamps": 957.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "At now, [0.022, 0.051, 0.696, 0.982]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "zlVkeKC6Ha8.mp4", "clip": [872, 1007], "conversations": [{"from": "human", "timestamps": 902.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.299, 0.056, 0.965, 0.971]: watch (a person), stand, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1007.0, "value": "What actions did the person at bbox [0.223, 0.047, 0.827, 0.972] perform now and earlier?"}, {"from": "gpt", "value": ["At Now, [0.223, 0.047, 0.827, 0.972]: sit, stand, talk to (e.g., self, a person, a group).", "From 1005.0s to 1006.0s, [0.288, 0.051, 1.0, 1.0]->[0.225, 0.053, 0.965, 0.986]: sit, eat.", "From 1002.0s to 1004.0s, [0.643, 0.127, 0.999, 0.965]->[0.603, 0.13, 0.995, 0.976]->[0.617, 0.128, 1.0, 0.976]: sit, eat, listen to (a person), watch (a person).", "From 997.0s to 1001.0s, [0.041, 0.068, 0.909, 0.974]->[0.318, 0.069, 0.997, 0.988]->[0.333, 0.061, 0.996, 0.983]->[0.312, 0.057, 0.979, 0.985]->[0.284, 0.05, 0.983, 0.979]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 996.0s, [0.717, 0.23, 0.985, 0.974]: sit, eat, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 993.0s to 995.0s, [0.649, 0.218, 0.989, 0.981]->[0.645, 0.219, 0.988, 0.985]->[0.671, 0.228, 0.983, 0.983]: sit, eat, touch (an object), listen to (a person), watch (a person).", "At 992.0s, [0.634, 0.227, 0.982, 0.974]: sit, eat, touch (an object), listen to (a person)."]}]}, {"video": "zlVkeKC6Ha8.mp4", "clip": [931, 1008], "conversations": [{"from": "human", "timestamps": 961.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.014, 0.032, 0.694, 1.0]: sit."}, {"from": "human", "timestamps": 1008.0, "value": "For the person at bbox [0.223, 0.039, 0.981, 0.996], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["At Now, [0.223, 0.039, 0.981, 0.996]: sit, talk to (e.g., self, a person, a group).", "At 1007.0s, [0.223, 0.047, 0.827, 0.972]: sit, stand, talk to (e.g., self, a person, a group).", "From 1005.0s to 1006.0s, [0.288, 0.051, 1.0, 1.0]->[0.225, 0.053, 0.965, 0.986]: sit, eat.", "From 1002.0s to 1004.0s, [0.643, 0.127, 0.999, 0.965]->[0.603, 0.13, 0.995, 0.976]->[0.617, 0.128, 1.0, 0.976]: sit, eat, listen to (a person), watch (a person).", "From 997.0s to 1001.0s, [0.041, 0.068, 0.909, 0.974]->[0.318, 0.069, 0.997, 0.988]->[0.333, 0.061, 0.996, 0.983]->[0.312, 0.057, 0.979, 0.985]->[0.284, 0.05, 0.983, 0.979]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 996.0s, [0.717, 0.23, 0.985, 0.974]: sit, eat, touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "From 993.0s to 995.0s, [0.649, 0.218, 0.989, 0.981]->[0.645, 0.219, 0.988, 0.985]->[0.671, 
0.228, 0.983, 0.983]: sit, eat, touch (an object), listen to (a person), watch (a person).", "At 992.0s, [0.634, 0.227, 0.982, 0.974]: sit, eat, touch (an object), listen to (a person)."]}]}, {"video": "zlVkeKC6Ha8.mp4", "clip": [1046, 1139], "conversations": [{"from": "human", "timestamps": 1076.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.002, 0.033, 0.862, 0.986]: watch (a person), sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1139.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.004, 0.058, 0.843, 0.988]?"}, {"from": "gpt", "value": ["In the current frame, [0.004, 0.058, 0.843, 0.988]: sit, talk to (e.g., self, a person, a group), watch (a person).", "From 1130.0s to 1138.0s, [0.008, 0.046, 0.813, 0.989]->[0.009, 0.044, 0.811, 0.988]->[0.012, 0.037, 0.823, 0.983]->[0.01, 0.042, 0.831, 0.983]->[0.01, 0.037, 0.823, 0.98]->[0.005, 0.032, 0.844, 1.0]->[0.004, 0.054, 0.823, 0.986]->[0.008, 0.051, 0.838, 0.993]->[0.009, 0.043, 0.84, 0.989]: sit, talk to (e.g., self, a person, a group).", "At 1129.0s, [0.007, 0.044, 0.818, 0.987]: sit, stand, talk to (e.g., self, a person, a group).", "From 1125.0s to 1128.0s, [0.005, 0.035, 0.819, 0.986]->[0.008, 0.033, 0.807, 0.987]->[0.009, 0.044, 0.814, 0.987]->[0.013, 0.039, 0.816, 0.987]: sit, talk to (e.g., self, a person, a group).", "From 1123.0s to 1124.0s, [0.008, 0.038, 0.818, 0.978]->[0.015, 0.038, 0.824, 0.971]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "zlVkeKC6Ha8.mp4", "clip": [1055, 1120], "conversations": [{"from": "human", "timestamps": 1085.0, "value": "Describe the actions of the person at bbox [0.171, 0.062, 0.976, 0.985] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.171, 0.062, 0.976, 0.985]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1084.0s, [0.093, 0.033, 0.988, 0.978]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1083.0s, [0.084, 0.015, 1.0, 0.975]: sit, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group).", "At 1082.0s, [0.056, 0.016, 1.0, 0.989]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1081.0s, [0.137, 0.026, 0.945, 1.0]: sit, carry/hold (an object), eat, talk to (e.g., self, a person, a group).", "From 1078.0s to 1080.0s, [0.02, 0.045, 0.918, 0.992]->[0.06, 0.036, 0.943, 0.978]->[0.063, 0.042, 1.0, 0.984]: sit, carry/hold (an object), talk to (e.g., self, a person, a group).", "At 1077.0s, [0.061, 0.047, 1.0, 0.969]: sit, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}, {"from": "human", "timestamps": 1120.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "At now, [0.21, 0.049, 0.981, 0.979]: watch (a person), sit, talk to (e.g., self, a person, a group)."}]}, {"video": "zlVkeKC6Ha8.mp4", "clip": [1119, 1177], "conversations": [{"from": "human", "timestamps": 1149.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.073, 0.397, 0.986]: listen to (a person), sit, watch (a person); [0.449, 0.093, 0.969, 0.983]: carry/hold (an object), sit, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 
1177.0, "value": "What actions is the person in bbox [0.18, 0.006, 0.974, 0.985] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.18, 0.006, 0.974, 0.985]: sit, talk to (e.g., self, a person, a group).", "From 1175.0s to 1176.0s, [0.281, 0.006, 1.0, 1.0]->[0.151, 0.031, 1.0, 0.969]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "zlVkeKC6Ha8.mp4", "clip": [1150, 1255], "conversations": [{"from": "human", "timestamps": 1180.0, "value": "What actions is each person performing, and where are they positioned in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.197, 0.019, 0.993, 0.983]: sit, talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1255.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.236, 0.025, 0.972, 0.98]?"}, {"from": "gpt", "value": ["At Now, [0.236, 0.025, 0.972, 0.98]: sit, talk to (e.g., self, a person, a group).", "From 1250.0s to 1254.0s, [0.144, 0.01, 0.945, 0.979]->[0.144, 0.004, 0.954, 0.994]->[0.156, 0.002, 0.968, 0.993]->[0.128, 0.006, 0.95, 0.989]->[0.106, 0.015, 0.949, 0.992]: sit, talk to (e.g., self, a person, a group).", "At 1249.0s, [0.22, 0.004, 0.959, 0.996]: sit, stand, talk to (e.g., self, a person, a group).", "At 1248.0s, [0.195, 0.019, 0.975, 0.983]: sit, talk to (e.g., self, a person, a group)."]}]}, {"video": "zlVkeKC6Ha8.mp4", "clip": [1199, 1240], "conversations": [{"from": "human", "timestamps": 1229.0, "value": "Describe the locations and actions of each person in the current frame."}, {"from": "gpt", "value": "In the current frame, [0.0, 0.012, 0.874, 0.986]: watch (a person), sit, talk to (e.g., self, a person, a group), stand."}, {"from": "human", "timestamps": 1240.0, "value": "What actions is the person in bbox [0.226, 0.021, 1.0, 0.976] doing in the current frame, and what were the person previous actions?"}, {"from": "gpt", "value": ["At Now, [0.226, 0.021, 1.0, 0.976]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1239.0s, [0.259, 0.004, 0.994, 0.991]: sit, talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "zlVkeKC6Ha8.mp4", "clip": [1280, 1362], "conversations": [{"from": "human", "timestamps": 1310.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.131, 0.0, 0.994, 0.883]: carry/hold (an object), stand, watch (a person), talk to (e.g., self, a person, a group)."}, {"from": "human", "timestamps": 1362.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.651, 0.016, 0.973, 0.969]?"}, {"from": "gpt", "value": ["In the current frame, [0.651, 0.016, 0.973, 0.969]: stand, carry/hold (an object), listen to (a person), watch (a person).", "At 1361.0s, [0.645, 0.01, 0.966, 0.983]: stand, listen to (a person), watch (a person).", "At 1360.0s, [0.642, 0.034, 0.972, 0.961]: stand, touch (an object), listen to (a person), watch (a person).", "From 1355.0s to 1359.0s, [0.392, 0.127, 0.85, 0.791]->[0.397, 0.115, 0.847, 0.791]->[0.4, 0.122, 0.855, 0.795]->[0.404, 0.132, 0.878, 0.812]->[0.633, 0.028, 0.972, 0.956]: stand, carry/hold (an object), touch (an object), listen to (a person), watch (a person).", "At 1354.0s, [0.588, 0.0, 0.985, 0.975]: stand, carry/hold (an object), touch (an object), talk to (e.g., self, a person, a group), watch (a person).", "At 1353.0s, 
[0.618, 0.0, 0.963, 0.982]: stand, carry/hold (an object), talk to (e.g., self, a person, a group), watch (a person)."]}]}, {"video": "zlVkeKC6Ha8.mp4", "clip": [1352, 1386], "conversations": [{"from": "human", "timestamps": 1382.0, "value": "Describe the actions of the person at bbox [0.206, 0.102, 0.666, 0.872] in the current frame and the previous frames."}, {"from": "gpt", "value": ["In the current frame, [0.206, 0.102, 0.666, 0.872]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1381.0s, [0.202, 0.109, 0.661, 0.877]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1378.0s to 1380.0s, [0.208, 0.103, 0.654, 0.876]->[0.216, 0.111, 0.657, 0.889]->[0.216, 0.116, 0.654, 0.888]: stand, touch (an object), talk to (e.g., self, a person, a group).", "At 1377.0s, [0.225, 0.119, 0.657, 0.891]: sit, touch (an object), talk to (e.g., self, a person, a group).", "From 1375.0s to 1376.0s, [0.222, 0.114, 0.659, 0.899]->[0.22, 0.113, 0.661, 0.878]: stand, touch (an object), talk to (e.g., self, a person, a group)."]}, {"from": "human", "timestamps": 1386.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.217, 0.108, 0.654, 0.888]: stand, touch (an object), talk to (e.g., self, a person, a group)."}]}, {"video": "zlVkeKC6Ha8.mp4", "clip": [1516, 1558], "conversations": [{"from": "human", "timestamps": 1546.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "At now, [0.22, 0.03, 0.857, 0.98]: listen to (a person), stand, watch (a person)."}, {"from": "human", "timestamps": 1558.0, "value": "What actions did the person at bbox [0.053, 0.029, 0.914, 1.0] perform now and earlier?"}, {"from": "gpt", "value": ["In the current frame, [0.053, 0.029, 0.914, 1.0]: stand, watch (a person).", "At 1557.0s, [0.256, 0.013, 0.991, 0.991]: stand, watch (a person)."]}]}, {"video": "zlVkeKC6Ha8.mp4", "clip": [1588, 1623], "conversations": [{"from": "human", "timestamps": 1618.0, "value": "What actions is the person in bbox [0.181, 0.108, 0.374, 0.822] doing in the current frame, and what were the person's previous actions?"}, {"from": "gpt", "value": ["In the current frame, [0.181, 0.108, 0.374, 0.822]: stand, carry/hold (an object), watch (a person).", "From 1616.0s to 1617.0s, [0.045, 0.052, 0.674, 0.983]->[0.052, 0.039, 0.869, 0.979]: stand, carry/hold (an object), watch (a person).", "At 1615.0s, [0.0, 0.042, 0.731, 0.985]: stand, watch (a person)."]}, {"from": "human", "timestamps": 1623.0, "value": "Where is each person positioned, and what actions are they performing in the current frame?"}, {"from": "gpt", "value": "In the current frame, [0.184, 0.05, 0.863, 0.961]: lie/sleep."}]}, {"video": "zlVkeKC6Ha8.mp4", "clip": [1662, 1721], "conversations": [{"from": "human", "timestamps": 1692.0, "value": "Can you tell me the current and past actions of the person located in bbox [0.263, 0.026, 0.988, 0.985]?"}, {"from": "gpt", "value": ["At Now, [0.263, 0.026, 0.988, 0.985]: stand, talk to (e.g., self, a person, a group).", "At 1691.0s, [0.259, 0.015, 0.988, 0.986]: stand, talk to (e.g., self, a person, a group), watch (a person).", "From 1689.0s to 1690.0s, [0.655, 0.086, 0.843, 0.995]->[0.652, 0.108, 0.858, 0.993]: stand, listen to (a person).", "At 1688.0s, [0.644, 0.093, 0.877, 0.998]: stand, listen to (a person), watch (a person).", "At 1687.0s, [0.637, 0.111, 0.885, 0.985]: walk, listen to (a person), talk to (e.g., self, a person, a 
group), watch (a person).", "From 1685.0s to 1686.0s, [0.426, 0.109, 0.745, 0.989]->[0.643, 0.097, 0.838, 0.958]: stand, listen to (a person), watch (a person).", "At 1684.0s, [0.249, 0.149, 0.519, 0.883]: stand, watch (a person).", "At 1683.0s, [0.143, 0.165, 0.395, 0.808]: stand, touch (an object), watch (a person)."]}, {"from": "human", "timestamps": 1721.0, "value": "For the current frame, show each individual's position and actions."}, {"from": "gpt", "value": "At now, [0.037, 0.046, 0.785, 0.994]: sit, talk to (e.g., self, a person, a group)."}]}, {"video": "zlVkeKC6Ha8.mp4", "clip": [1711, 1751], "conversations": [{"from": "human", "timestamps": 1741.0, "value": "For the person at bbox [0.052, 0.063, 0.82, 0.982], what are their actions in the current frame and in prior frames?"}, {"from": "gpt", "value": ["In the current frame, [0.052, 0.063, 0.82, 0.982]: sit, talk to (e.g., self, a person, a group), watch (a person).", "At 1740.0s, [0.042, 0.073, 0.804, 0.993]: sit, listen to (a person), watch (a person)."]}, {"from": "human", "timestamps": 1751.0, "value": "In the current frame, list each person's location and actions."}, {"from": "gpt", "value": "In the current frame, [0.132, 0.124, 0.639, 1.0]: carry/hold (an object), sit, talk to (e.g., self, a person, a group)."}]}] \ No newline at end of file