diff --git "a/livesqlbench_data.jsonl" "b/livesqlbench_data.jsonl" new file mode 100644--- /dev/null +++ "b/livesqlbench_data.jsonl" @@ -0,0 +1,600 @@ +{"instance_id": "solar_panel_1", "selected_database": "solar_panel", "query": "How likely is the 'solar plant west davidport' (matching the name regardless of case) to be down when we need it? Give me its system unavailability score, just the number, to four decimal points.", "normal_query": "For the solar plant labeled 'solar plant west davidport' (case-insensitive match), calculate its system unavailability. Display the result as a scalar value, rounded to 4 decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 4, "distinct": false, "order": false}} +{"instance_id": "solar_panel_2", "selected_database": "solar_panel", "query": "I need to know the financial hit from plants with recurring warranty issues—the ones whose warranty status is 'claimed' and have had three or more claims logged against them. Can you figure out the total lifetime revenue loss for them, but only count ones where we know their go-live date and degradation? Just assume they all have 15 years left, produce 500,000 kwh a year, and we sell the power at 12 cents. Give me the grand total.", "normal_query": "Calculate the total projected lifetime revenue loss for all plants that are flagged for Warranty Claim Risk. For this calculation, only include plants where the commissioning date and cumulative degradation are known. For the projection, assume a remaining lifetime of 15 years, an average annual energy production of 500,000 kwh, and an energy price of $0.12/kwh. Present the total loss as a single value.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "solar_panel_3", "selected_database": "solar_panel", "query": "If we could magically cool the panels for snapshot pv945724 down to 25 degrees celsius, what would its power output be? Give me the temperature-corrected performance in watts, with two decimal points.", "normal_query": "For the snapshot 'pv945724', calculate the temperature-corrected performance. Use a reference temperature of 25°c. Display the result in watts, rounded to two decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "solar_panel_4", "selected_database": "solar_panel", "query": "For the maintenance event pv937101, did the repair cost more than the revenue we lost during the downtime? To figure that out, you'll have to clean up the revenue loss text by stripping out any '$' or ',' characters. Tell me the maintenance cost to revenue impact ratio, just the number, rounded to two decimals.", "normal_query": "What is the maintenance cost to revenue impact ratio for the snapshot 'pv937101'? The calculation requires cleaning the revenue loss text by removing dollar signs and commas to convert it to a numeric value. 
Calculate it and return a single numeric value rounded to 2 decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "solar_panel_5", "selected_database": "solar_panel", "query": "How many of our plants are real lemons, both losing more than a quarter of their potential power and being offline for more than one day out of every twenty? Make sure you only use records that have all the numbers needed for the math. Just give me the total count.", "normal_query": "What is the total count of plants that are classified as both an underperforming asset, meaning its performance ratio is less than three-quarters, and a chronic downtime asset, meaning its availability is below nineteen-twentieths? Only include snapshots where all data necessary for the calculations is available and valid. Return a single integer.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "solar_panel_6", "selected_database": "solar_panel", "query": "Using the latest data for each plant, find the one that costs the most to run for its size, and tell me how much power it loses internally. I need the system power loss ratio for whichever plant has the biggest operational expenditure index. Give me the number to 4 decimal places, and only consider plants and snapshots with all the necessary and valid data to make the calculation crash-proof.", "normal_query": "For the plant with the highest operational expenditure index based on its most recent snapshot, what is its system power loss ratio, presented to 4 decimal places? Only plants with a known, non-zero power capacity and snapshots with known power values should be considered, and the logic must prevent division-by-zero errors.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 4, "distinct": false, "order": true}} +{"instance_id": "solar_panel_7", "selected_database": "solar_panel", "query": "When our panel busbars are as corroded as they can get, how much does the quality drop? Calculate the average fill factor degradation for all panels in the worst category for corrosion (regardless of case), but only use data where we have both a before and after fill factor. Give me the result to 3 decimal places.", "normal_query": "What is the average fill factor degradation for panels where the busbar corrosion has reached the highest level of severity (case-insensitive)? Only include snapshots where both initial and current fill factors are known. Display the result to 3 decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 3, "distinct": false, "order": false}} +{"instance_id": "solar_panel_8", "selected_database": "solar_panel", "query": "When a plant with hjt panels breaks, what's the average cost to fix it? Calculate the mean repair cost for those plants (matching 'hjt' regardless of case), assuming they've been running for two years straight and have a valid, positive mtbf record. 
Give me the final number, rounded to a whole dollar.", "normal_query": "Determine the mean repair cost for plants using the 'hjt' panel type (case-insensitive), assuming a total operational time of 2 years (17520 hours). Only include snapshots with a known and positive mtbf for the calculation. Provide the result rounded to the nearest dollar.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 0, "distinct": false, "order": false}} +{"instance_id": "solar_panel_9", "selected_database": "solar_panel", "query": "When our electrical systems fail, how much money do we lose? Add up all the revenue loss from every incident with an 'electrical integrity failure', making sure to strip the dollar signs and commas from the text to get the total.", "normal_query": "What is the total revenue loss for snapshots where there is an electrical integrity failure? To perform the sum, the revenue loss text must be cleaned by removing dollar signs and commas. Sum up the cleaned revenue loss for these records.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "solar_panel_10", "selected_database": "solar_panel", "query": "After accounting for all the internal power drains, what's the actual juice each plant is sending to the grid right now? Only using snapshots where we know both the power loss and current output, and their combined total isn't zero, give me a list of plant names and their latest effective power output, rounded to two decimal places, with the most powerful plant at the top.", "normal_query": "For each site, calculate the effective power output using the most recent snapshot. Only include snapshots where both power loss and current power output are known, and their sum is not zero to prevent calculation errors. Display the site label and the calculated power in a table, sorted by the effective power in descending order. Show the result to 2 decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "solar_panel_11", "selected_database": "solar_panel", "query": "For the plants that are aging terribly—meaning their performance drops by more than 0.5% a year—how long does it typically take to fix them? I need the average mean-time-to-repair for these 'accelerated aging assets'. The age calculation needs to be safe for new plants. Give me the answer in hours, rounded to two decimal places.", "normal_query": "Find the average mean time to repair for all plants classified as accelerated aging assets, defined as those with an Annual Degradation Rate greater than 0.5%. The calculation for the degradation rate must handle cases where the plant's age is zero. Round to 2 decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "solar_panel_12", "selected_database": "solar_panel", "query": "How many times have our panels gotten so dirty that they're losing more than three-twentieths of their potential energy? 
Just give me the total count.", "normal_query": "Count the number of snapshots where the power loss from soiling means that for every 200 watts of potential power, more than 30 watts are lost. Return a single integer value.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "solar_panel_13", "selected_database": "solar_panel", "query": "Which of our plants are a recurring headache for warranty claims, with more than just a couple of filings? I need a list of sites whose status is 'claimed' (regardless of case). Show their names and how many claims they've had, from most to least.", "normal_query": "List all plants where the number of warranty claims exceeds the typical initial one or two filings, and their warranty status is 'claimed' (case-insensitive). Show the site label and the number of warranty claims. Sort by the number of claims in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "solar_panel_14", "selected_database": "solar_panel", "query": "Among our plants in the toughest, highest-risk locations, what's the worst we've seen dirt and grime impact performance? I need the highest soiling loss index from any site that's in that top risk category. Give me the percentage.", "normal_query": "What is the highest soiling loss index recorded for a plant that is located in one of our designated top-tier environmental risk zones (case-insensitive)? Return the value as a percentage.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "solar_panel_15", "selected_database": "solar_panel", "query": "Let's get a financial forecast for our worst panels, the ones that degrade so fast they'll lose over 14% of their power in 20 years. What's the total projected revenue loss over their remaining 15-year lifespan? Base the calculation on a standard 400,000 mwh annual output and a sale price of $50 per mwh.", "normal_query": "What is the total lifetime revenue loss projection for all plants using panel models that are projected to lose more than 14% of their output over a 20-year lifespan? Assume an average annual energy production of 400,000 mwh, an energy price of $50/mwh, and a remaining lifetime of 15 years for all plants.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "solar_panel_16", "selected_database": "solar_panel", "query": "How much are the different types of panels losing their voltage punch over time? I need you to group by the panel technology, making sure to ignore case, and then figure out the average voltage degradation factor for each. But hey, only use data where we actually have a valid 'before' and 'after' voltage to compare, and make sure the starting voltage isn't zero. List the panel types and their average voltage loss, with the worst ones first.", "normal_query": "For each distinct panel model type, calculate the average voltage degradation factor. 
This calculation should only use snapshots that contain all the necessary voltage data and where the initial voltage reading is a positive number. The panel type should be converted to lowercase before grouping. Display the panel kind and the average degradation factor, sorted by the factor in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": true, "order": true}} +{"instance_id": "solar_panel_17", "selected_database": "solar_panel", "query": "For the machines that are down more than one day in a 20-day period, what's the average price tag on a single repair? To calculate the mean repair cost, you'll need to figure out how long each machine has been running. Only use data where the mtbf and service time are positive.", "normal_query": "What is the average mean repair cost for assets that are offline more than 5% of the time? The calculation requires the total time in service, which must be derived from the snapshot and go-live dates, and only include snapshots where mtbf and total hours are positive.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "solar_panel_18", "selected_database": "solar_panel", "query": "How many of our plants have a major electrical issue right now? I'm talking about situations where the grounding is shot or the bypass diodes are not running in their normal state. Just give me a count of the unique plants with these problems, and don't worry about the case of the status text.", "normal_query": "Count the number of distinct plants where the electrical integrity is compromised, indicated by either a complete failure of the grounding system or a bypass diode status that is anything other than nominal (checks performed case-insensitively).", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": true, "order": false}} +{"instance_id": "solar_panel_19", "selected_database": "solar_panel", "query": "After accounting for all the power being lost inside the system, what was the actual usable power output for snapshot 'pv945724'? Give me the final number in watts.", "normal_query": "What is the effective power output for snapshot 'pv945724'? Calculate it and return the value in watts.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "solar_panel_20", "selected_database": "solar_panel", "query": "For the panels specifically made by longi (regardless of case), how much has their current output dropped on average? To get a good average, please only use records where you have a valid, positive starting current to compare against. Calculate the mean current degradation factor across all of them.", "normal_query": "What is the average current degradation factor for all panel models from the manufacturer 'longi' (case-insensitive)? 
For an accurate average, include only snapshots that have a valid, positive initial current reading to compare against the current reading.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "solar_panel_M_1", "selected_database": "solar_panel", "query": "Let's make a special table for problems that need immediate attention, call it `high_risk_alerts`. It needs to store the snapshot id, the alert status, both maintenance and replacement priorities, and when it happened. After creating it, fill it with any alert that's so serious we'd need to send our top people out or order a new part right away. Make sure to find these alerts regardless of case. Also, make sure the snapshot id links back to the main plant record table.", "normal_query": "Create a new table `high_risk_alerts` with columns for the snapshot key, alert state, maintenance priority, replacement priority, and the timestamp of the snapshot. Then, populate it by inserting records for any issue that would require either dispatching a senior engineer or ordering a replacement part before the end of the day (checks must be case-insensitive). Add a foreign key constraint on the snapshot key referencing `plant_record`.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "solar_panel_M_2", "selected_database": "solar_panel", "query": "I need a handy summary of how our plants are doing right now. Can you create a view called `v_plant_performance_overview`? It should show the plant's name, when the data was taken, how much power it was making, how much sunlight was hitting it, and the cell temperature. Make sure it only shows the very latest data we have for each plant.", "normal_query": "Create a view named `v_plant_performance_overview`. This view should join data from the `plants`, `electrical_performance`, and `environmental_conditions` tables. It must display the site label, snapshot timestamp, power output, plane-of-array irradiance, and cell temperature for the most recent snapshot of each plant.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "solar_panel_M_3", "selected_database": "solar_panel", "query": "I need a faster way to see yearly energy production. Create a materialized view called `mv_yearly_plant_yield`. It should calculate the total kilowatt-hours produced by each plant for each year and store it, but only use records that actually have a yield value. The view should have the plant's name, the year, and the total yield.", "normal_query": "Create a materialized view named `mv_yearly_plant_yield` which summarizes the total energy yield for each plant for each year. 
It should include the site label, the year, and the total energy yield in kwh, only including records where the energy yield is not null.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "solar_panel_M_4", "selected_database": "solar_panel", "query": "Let's build a cleaning schedule table. Call it `panel_cleaning_schedule`. It needs a unique ID for each entry, the plant's ID, the date it was last cleaned, and the date it's due next. Then, fill it up for all our plants using the latest cleaning info from their mechanical health reports to calculate the next due date.", "normal_query": "Create a new table `panel_cleaning_schedule` with columns `schedule_id` (Primary Key, Serial), `site_key` (Foreign Key to plants), `last_cleaned_date` (Date), and `next_cleaning_due` (Date). Populate it for all plants, setting `last_cleaned_date` to the most recent `last_clean_date` from `mechanical_condition` and `next_cleaning_due` by adding the `cleaning_cycle_days` to that date.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "solar_panel_M_5", "selected_database": "solar_panel", "query": "I want a tool to quickly tell me how old a plant is. Can you create a function called `get_plant_age`? You give it a plant's ID, and it should spit out its current age in years.", "normal_query": "Create a function `get_plant_age` that takes a site key as input and returns the age of the plant in years (as a real number) based on its go-live date and the current date.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "solar_panel_M_6", "selected_database": "solar_panel", "query": "I want a 'hall of fame' for extreme weather events at our plants. Can you make a view called `v_environmental_extremes`? It should find the highest ambient temperature, strongest wind speed, and most intense uv index ever recorded across all sites. For each of these records, show which plant it happened at, what the record-breaking value was, and when it happened.", "normal_query": "Create a view `v_environmental_extremes` which, for each environmental variable, shows the plant site label, the value, and the timestamp for the all-time maximum recorded value. Include ambient temperature, wind speed, and uv index.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "solar_panel_M_7", "selected_database": "solar_panel", "query": "Let's make a log of all our plants that aren't up to code. Create a table called `compliance_issues` with an id, the plant's id, a space for a description, and the date it was logged. After you create it, go through the main plants list and add an entry for every single one that's failed its compliance checks (ignoring case). 
You can just put 'Initial non-compliance record' for the description.", "normal_query": "Create a new table `compliance_issues` with columns for `issue_id`, `plant_sitekey`, `issue_description`, and `date_logged`. Then, insert a record for every plant that has failed to meet its regulatory standards, based on a case-insensitive check of its compliance flag, using the specific description 'Initial non-compliance record'.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "solar_panel_M_8", "selected_database": "solar_panel", "query": "I need a new place to keep track of our plant's health stats. Can you create a table called `plant_kpi_summary`? It should have columns for the site's id, its age in years, its annual performance drop, and its uptime percentage.", "normal_query": "Create a new table named `plant_kpi_summary` to store key performance indicators. The table should include a key for the site (text, primary key), the plant's age in years (real), its annual degradation rate (real), and its system availability (real).", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "solar_panel_M_9", "selected_database": "solar_panel", "query": "Let's make a quick-look list of the absolute worst problems. Create a view, call it `v_critical_alerts_details`, for every alert that's got the highest possible priority for both a maintenance dispatch and a part replacement. Make sure you find them regardless of case. Show me the plant name, when it happened, and the event count.", "normal_query": "Create a view named `v_critical_alerts_details` that lists the site label, the snapshot timestamp, and the alert count for all snapshots where the issue is so severe it has been assigned the maximum priority level for both maintenance and replacement (checks performed case-insensitively).", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "solar_panel_M_10", "selected_database": "solar_panel", "query": "I want to start logging all our repair jobs. Can you set up a new table for me called `maintenance_log`? It needs a unique id for each entry, a reference to the snapshot it's related to, the date of the repair, a description of what was done, and how much it cost. Make sure the snapshot reference actually links to a real record.", "normal_query": "Create a new table `maintenance_log` with columns `log_id` (serial primary key), `snap_reference` (text), `log_date` (date), `action_taken` (text), and `cost` (numeric(10, 2)). Add a foreign key on `snap_reference` to the `plant_record` table.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "hulushows_1", "selected_database": "hulushows", "query": "Let’s check which shows have tons of content across different releases but no written description. Add up their standard content (episodes, clips, etc.) 
across all tiers, keep only the ones with over 500 total, and no annotations. Show each show’s ID, name, and total volume—sorted by volume, highest first.", "normal_query": "I want to identify all Incomplete High-Engagement Titles. Compute the total content volume for each title by summing up standard content quantities across all distribution records. Then check whether the title has any descriptive annotation. Can you only include titles with a high total volume (greater than 500) and no annotations? List each title's ID, name, and total content volume, sorted by volume in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "hulushows_2", "selected_database": "hulushows", "query": "I want to find shows that show up in three or more different subscription tiers. For each show, can you count how many unique tiers it’s available in? First, keep the ones that are in at least three tiers, and then sort the results from the most widely distributed to the last.", "normal_query": "I want to know all Multitier Syndicated Shows. For each show with at least three tiers, show its unique identifier and the number of tiers it appears in. Sort the results by tier count in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "hulushows_3", "selected_database": "hulushows", "query": "Let’s find out which titles are getting strong user scores even though they don’t have any trailers or clips. I want to look across all content and find the highest user rating among those that don’t offer any visual previews but still include a valid score. Just return that one number, rounded to 2 decimals—it tells us how well these visually sparse titles are performing.", "normal_query": "My goal is to identify the Highly Rated but Visually Empty titles in the catalog. Specifically, I want to calculate the highest user rating among all titles that have no available trailers or clips but still include valid user score data.Give me the maximum user score across these titles, rounded to 2 decimals", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "hulushows_4", "selected_database": "hulushows", "query": "I want to find out how long it's been since each show got any new updates. For each show, check the most recent update date. But if there's no update info, just use the launch date instead. Then, I’d like to see how many days it's been since that date, and treat that as the staleness score. If a show is available in multiple tiers, take the smallest one. Can you show the show ID and the number of days it's been stale? Finally, sort the list so the stalest shows—that is, the ones that haven't been updated in the longest time—come first.", "normal_query": "For each show, I need to measure the Temporal Staleness Index (TSI). Please determine how many days have passed since the show last had any updates. If no update timestamp is available, use the launch date as a fallback. 
I’d like to see the show ID along with its staleness index, and the minimum value of this index across all its distribution tiers. Sort the results so that the shows with the highest staleness appear first.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "hulushows_5", "selected_database": "hulushows", "query": "How many titles are spread across over six nested genre tags and lean more on short clips, including both general clips and film-related clips, than full-length features?", "normal_query": "Count how many shows meet the Over-Fragmented Offering classification in the catalog.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "hulushows_6", "selected_database": "hulushows", "query": "Let’s all find groups of shows that belong to the same franchise. Can you only include franchises that have at least two shows? For each group, can you show me the franchise ID, how many shows it has, and list the show titles? Also, I need to sort the list so that the biggest franchises with the most shows come first.", "normal_query": "Please find all franchise groups. For each group with at least two shows, list the franchise ID, total show count, and the list of show titles. Sort the results by show count in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "hulushows_7", "selected_database": "hulushows", "query": "I want to find out how many episodes there are on average in each season for every show. Can you look at shows where we know both the total number of episodes and how many seasons they have. For each one, give me the show ID, how many episodes it has, how many seasons, and the average episodes per season. Please skip anything where the season count is missing or zero. Finally, show the ones with the highest average first.", "normal_query": "Please calculate the average number of episodes per season for each show. Can you only include shows with both episode and season counts? For each, list the show ID, total episodes, total seasons, and the episode-to-season ratio. Importantly, exclude entries with missing or zero seasons. Sort results by the ratio in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "hulushows_8", "selected_database": "hulushows", "query": "Let’s figure out what the most frequent top-end maturity rating is across all the shows. Basically, I want to scan all the records, grab the maturity info, and tell me which of those high-end ratings pops up the most. Just return the one that shows up the most often.", "normal_query": "To support catalog analysis, compute the Most Common Peak TV Rating across All Distribution Records. It should consider all available distributiondata, extract their rating information, and determine the single most frequently assigned rating value. 
Give me a single text result representing the most common rating.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "hulushows_9", "selected_database": "hulushows", "query": "Which franchises are producing the most content? Group shows in the same franchise and add up their episodes. Some episode counts may be text or invalid — after trimming whitespace, parse only digit strings (digits 0–9 only) and treat the rest as zero. Show only franchises with more than 100 total episodes, listing the identifier, number of shows, and total episodes from largest to smallest.", "normal_query": "Generate a Franchise Engagement Summary by grouping shows that belong to the same franchise. The episode count field may be stored as text and can include non-numeric values; after trimming whitespace, parse only digit strings (digits 0–9 only) and treat everything else as zero. Only include franchises whose total number of episodes exceeds 100. For each franchise, provide its identifier, the number of shows it contains, and the combined episode count, sorted by total episodes in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "hulushows_10", "selected_database": "hulushows", "query": "Let’s see how our shows are spread out across the different subscription plans. For each plan, I want to know how many titles it has and what chunk of the full catalog that is. Just give me the plan name, the total count of media in it, and what percentage of the catalog that represents. Start with the plans that have the biggest share of content.", "normal_query": "Determine the Tier Distribution Ratio to understand how media content is shared across different access levels. First, sum up the total media volume available under each tier. Then compute the overall media total across all tiers. For each tier, calculate its share of the total by dividing the tier’s media volume by the grand total. List the tier ID, tier type, media total, and its Tier Distribution Ratio. Sort the results by the ratio in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 4, "distinct": false, "order": true}} +{"instance_id": "hulushows_11", "selected_database": "hulushows", "query": "Let’s see which franchises are really making waves across different subscription levels. We’re looking for those that have at least 3 shows, and those shows appear across 3 or more tiers. For each of these franchise powerhouses, show me the franchise ID, how many shows they’ve got, and how many tiers they show up in. Sort the list by number of shows to spotlight the most widely spread ones first.", "normal_query": "To evaluate Syndicated Franchise Engagement, we need to check which franchise groups have both a strong show count and wide distribution. For each franchise, count how many shows belong to it and how many unique distribution tiers those shows appear in. These shows should include franchises with at least 3 shows and presence in 3 or more tiers. 
List the franchise ID, number of shows, and number of tiers, ordered by show count in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "hulushows_12", "selected_database": "hulushows", "query": "Let’s dive into the main genre types that keep popping up in our show catalog. I’m only interested in shows labeled as Drama, Comedy, or Animation and Cartoons. For each of those, can you pull together a quick list that includes the show’s ID, its title, and what genre it’s tagged under? Sort the list by title.", "normal_query": "We want to analyze Primary Genre Classification across our show catalog. For this, filter and retrieve all titles that fall under the Drama, Comedy, or Animation and Cartoons categories. For each matching title, show its unique ID, name, and its primary genre type. Sort the results alphabetically by title.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "hulushows_13", "selected_database": "hulushows", "query": "I want to look at how packed each show’s video library is. Can you pull up a list that shows the total number of video items for each show and group them into three levels? Label them High if they’ve got over 500 videos, Medium if they’re between 200 and 500, and Low if they’re under 200. Let’s sort the list so the shows with the most content show up first, and include the show ID, total count, and the volume level tag.", "normal_query": "For each show, compute its total number of video items and classify it using the Content Volume Level Classification. Return the show ID, total volume, and the resulting volume category, ordered by total volume from highest to lowest.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "hulushows_14", "selected_database": "hulushows", "query": "Which show feels the most crammed with promotional stuff? Just give me the one with the heaviest promo presence overall.", "normal_query": "Find the Maximum Promo Saturation Ratio across all shows in the catalog.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "hulushows_15", "selected_database": "hulushows", "query": "How many shows land in our usual user-score buckets—Low, Medium, or High? Just give me the total.", "normal_query": "Report the total number of shows whose user scores fall into the standard Low, Medium, or High buckets.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "hulushows_16", "selected_database": "hulushows", "query": "I want to find shows that show up in three or more different subscription tiers. For each show, can you count how many unique tiers it’s available in? 
First, keep the ones that are in at least three tiers, and then sort the results from the most widely distributed to the last.", "normal_query": "I want to know all Multitier Syndicated Shows. For each show with at least three tiers, show its unique identifier and the number of tiers it appears in. Sort the results by tier count in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "hulushows_17", "selected_database": "hulushows", "query": "Let’s grab the shows where the bigger of their trailer or feature count is over 100. Show the ID, title, and that number, sorted from highest to lowest.", "normal_query": "Find shows whose Peak Media Load is greater than 100. Give me the show ID, title, and the peak value, sorted from highest to lowest.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "hulushows_18", "selected_database": "hulushows", "query": "I want to see how shows rank based on what viewers think. Just group them by how well they’re rated, ignore anything without a proper score, and tell me the show ID, name, how it scored, and which group it ended up in—start from the highest-rated and go down.", "normal_query": "Analyze show-level user ratings to assign each show to its corresponding Episode Rating Band. Only include shows with valid numeric scores. For each show, return its ID, title, user score, and band, sorted from highest to lowest score.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 8, "distinct": false, "order": true}} +{"instance_id": "hulushows_19", "selected_database": "hulushows", "query": "Which shows actually have film clips? List the ones with the most film-related clips first. For each show, show the title, how many film clips it has, and a quick flag for Has Clips or No Clips.", "normal_query": "I want to check film-clip availability for each show. For every show, return its ID, title, the number of film-related clips, and a flag saying Has Clips if that count is greater than 0, otherwise No Clips. Sort from highest to lowest film-clip count.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "hulushows_20", "selected_database": "hulushows", "query": "Let’s see which shows are loading up on promo messages. For each one, count availability updates, promo messages, alerts, and expiration notices across the free and member tiers. Only include shows with at least one note, and list them starting with the most.", "normal_query": "Show the Promotional Intensity Summary for each show with at least one note. 
Include the show ID and the total count, sorted descending.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "hulushows_M_1", "selected_database": "hulushows", "query": "Let’s drop in a new show using these exact values: make the ID 900001, set the official name to new-show-canonical, call it New Show Title, link it to series 99999999, tag it to studio 8, and add the note ‘This is a newly added show for fall season release.’ For genres, store a JSON with score 4.25, type show, main genre Science Fiction, and breakdown Science Fiction~Space|Adventure. Once that’s saved, return what you added.", "normal_query": "Add a brand-new show with these exact details: ID 900001, official name new-show-canonical, title New Show Title, series 99999999, studio 8, and the note This is a newly added show for fall season release. For its genre info, save a JSON that has a score 4.25, type show, main genre Science Fiction, and a breakdown Science Fiction~Space|Adventure. After saving, show me the inserted record.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "hulushows_M_2", "selected_database": "hulushows", "query": "So, which studios are really cranking out the content? Let’s create a function called calculate_studio_activity_index that tells us how many entries a studio has in the system. Just pass in the studio’s ID, and it’ll return the total number of catalog records linked to that studio—even if some titles repeat. Simple enough, right? Oh, and while we’re at it—find the show with ID 54 and update its official name to ‘updated-family-guy’.", "normal_query": "Create a PostgreSQL function called calculate_studio_activity_index that computes the Studio Activity Index and returns the calculated value. The function takes one parameter: the unique identifier of a studio. It calculates the total number of content records that are associated with the given studio in the catalog, counting all entries regardless of whether the titles repeat. The result is an integer representing the count of all such records. Additionally, update the canonical name of a specific show in the catalog. Locate the show using its unique content key, which is 54, and set its canonical name to 'updated-family-guy'.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "hulushows_M_3", "selected_database": "hulushows", "query": "Let’s check how much content each subscription gets. Just give me the plan name—like “free” or “subscriber”—and I’ll count all the shows linked to it. Don’t worry about casing or spaces; it should match even if someone types it differently.", "normal_query": "Create a function that returns the number of unique shows available under a given subscription plan like \"free\" or \"subscriber\". Match the plan name in a case-insensitive and trimmed way to ensure accurate mapping. 
Return the total number of linked shows.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "hulushows_M_4", "selected_database": "hulushows", "query": "Let’s check how many titles belong to a given series. Just pass in a series ID, and we’ll return the total number of titles linked to that series.", "normal_query": "We need to calculate the number of distinct titles that belong to a specific series to support the Series Entry Count metric. Given a series identifier as input, the system should return a single integer representing how many entries are part of that series.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "hulushows_M_5", "selected_database": "hulushows", "query": "Let’s see how our shows break down by age-appropriateness—like “TV-Y”, “TV-PG”, etc. Just group them and count how many land in each level, making sure different casing or extra spaces are treated the same.", "normal_query": "Could you help me get a quick overview of how shows are distributed across different TV Rating types? For each rating, return how many shows fall under it, normalizing the rating values by lowercasing and trimming to avoid mismatches.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "hulushows_M_6", "selected_database": "hulushows", "query": "I want to know if all shows in a series share the same name? Just use check_series_title_uniformity with the series ID. it returns true if the titles match across the board, false if they don’t.", "normal_query": "A function named check_series_title_uniformity is required. This function determines the Series Title Uniformity Flag for a given series. It checks whether all shows linked to the same series share an identical canonical title. The output is a boolean value—true if all titles match, and false otherwise.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "hulushows_M_7", "selected_database": "hulushows", "query": "Let’s figure out which studios have been the busiest. For each one, can you show me how many titles they’ve worked on? Just include the studios that are actually linked to content, and sort the list so the most active ones show up first. I need this saved as a permanent table called studio_catalog_size.", "normal_query": "We need to create a persistent table of all Studio Catalog Size data for our content analysis. Please set up a table called studio_catalog_size that includes each studio’s unique identifier and the total number of titles linked to that studio. The count should be grouped by studio and sorted from the most prolific to the least. 
Please note only include entries that are explicitly associated with a studio.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "hulushows_M_8", "selected_database": "hulushows", "query": "Let’s figure out which studios have been the busiest in the catalog and save it in a table called title_count_per_studio. For each one, can you show me their ID, name, and how many shows they’ve worked on? Only count the ones that are actually linked to a studio. We’ll need to pull the studio info by joining the show records with the studio list. Then, sort the results so the studios with the most titles show up first.", "normal_query": "Let’s build a persistent table called title_count_per_studio to analyze Title Count per Studio for catalog assessment. This table should include each studio’s unique ID, its canonical name, and the number of titles linked to it. Only include entries where a valid studio association exists. The result must be grouped by studio and sorted so the most prolific studios appear first. Join is required between the show catalog and the studio registry. The output will be a structured table listing studio ID, studio name, and how many titles are attributed to each.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "hulushows_M_9", "selected_database": "hulushows", "query": "Set up a permanent table called avg_title_length_per_studio so we can track how long each studio’s show titles usually are. It should include which studio it is and the average number of characters in the titles of its shows. We’re only defining the structure for avg_title_length_per_studio right now—no data yet.", "normal_query": "Please create a permanent table named avg_title_length_per_studio to track the average length of show titles per production studio. The table must have two columns: (1) the studio’s unique ID and (2) the average number of characters in titles of shows linked to that studio. This step only defines the schema for avg_title_length_per_studio—do not insert any data.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "hulushows_M_10", "selected_database": "hulushows", "query": "Let’s check how busy our release schedule was in a particular year. I need a function that takes in a year and tells me how many shows were launched during that time. It should go through the catalog and count only the shows whose launch dates fall in that year, but only for test titles with srkeys 900001 and 900002. Please don’t include the rest of the system’s data. The result should just be a number showing how many of those selected titles came out in that year.", "normal_query": "Create a function named get_launch_count_by_year that computes the Launch Year Distribution for a specific year. This function analyzes the release history by counting how many titles were launched in the specified year. It operates over the catalog of shows, using each show's recorded launch timestamp, and filters to only include test data with srkeys in (900001, 900002). 
The output is a single integer indicating the number of titles launched in that year.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cybermarket_pattern_1", "selected_database": "cybermarket_pattern", "query": "Give me all platforms sorted by its risk score, most dangerous on top and show 4 digits.", "normal_query": "List each marketplace with its Marketplace Risk Score (MRS), rounded to 4 decimal places, highest first.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 4, "distinct": false, "order": true}} +{"instance_id": "cybermarket_pattern_M_1", "selected_database": "cybermarket_pattern", "query": "Mark every seller who's currently being investigated or getting a lot of attention from authorities as “High” on the compliance scale, leave the already-High ones alone, and give me the IDs that changed.", "normal_query": "Set the compliance category to “High” for all sellers with an active investigation or high attention from authorities, skipping those already at “High”. Return the IDs of the sellers that were updated.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cybermarket_pattern_M_2", "selected_database": "cybermarket_pattern", "query": "Add a daily review entry for each sale the model rates over 70% fraud risk and doesn't already have one.", "normal_query": "Create a daily review entry for every transaction with model-assessed fraud probability above 70% that currently has no review entry.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cybermarket_pattern_M_3", "selected_database": "cybermarket_pattern", "query": "Purge the top-priority alert cases that are resolved and whose next review date is over 180 days old.", "normal_query": "Delete alert cases at the highest escalation level that are resolved and have a next review date more than 180 days ago.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cybermarket_pattern_M_4", "selected_database": "cybermarket_pattern", "query": "Save the current list of sites that meet the security rule, along with their computed rating, into a fresh archive—replace any prior archive.", "normal_query": "Archive the current list of Secure Platforms together with their Marketplace Risk Score, replacing any existing archive if present.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cybermarket_pattern_2", "selected_database": "cybermarket_pattern", "query": "Split shoppers into three risk-per-dollar groups; for each group, show how many shoppers there are, what fraction of their orders go across countries, and how 
often their sessions look highly/medium/low hidden.", "normal_query": "Group buyers into three buckets based on Buyer Risk Dollar Ratio; for each bucket, return the buyer count, the share of their transactions that are cross-border, and the distribution of session anonymity (High/Medium/Low).", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "cybermarket_pattern_3", "selected_database": "cybermarket_pattern", "query": "Give me a list of sellers with their transaction flow scores, plus details about how complicated their shipping networks are.", "normal_query": "List vendors along with their Platform Liquidity Rate (PLR), including metrics related to Shipping Route Complexity.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "cybermarket_pattern_4", "selected_database": "cybermarket_pattern", "query": "Give me how fast each session processed threats, and the levels of login verification for buyers.", "normal_query": "Provide Threat Handling Rate (THR) for each security session, ordered from highest to lowest. Additionally, include metrics related to Buyer Authentication Levels.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "cybermarket_pattern_5", "selected_database": "cybermarket_pattern", "query": "I want to know the keyword-hitting values for all customer and internal chats to identify high-risk patterns. Round to 3 decimal places and show in descending order", "normal_query": "Calculate Suspicion Signal Density (SSD) for every communication thread, rounded to 3 decimal places and shown in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 3, "distinct": false, "order": true}} +{"instance_id": "cybermarket_pattern_M_5", "selected_database": "cybermarket_pattern", "query": "Update table statistics and query plans for the vendors table, focusing on improving efficiency-related query performance.", "normal_query": "Analyze the vendors table to refresh statistics for Compliance Efficiency Index (CEI) queries.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cybermarket_pattern_6", "selected_database": "cybermarket_pattern", "query": "Show me all protected platforms, whether they're up or down, how many serious escalation cases they have, and how bad their current alerts are.", "normal_query": "List all Secure Platforms and their current operational status. 
Also include metrics related to Tier-3 Escalation Case and Alert Severity Levels.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cybermarket_pattern_7", "selected_database": "cybermarket_pattern", "query": "Tell me how many live listings we have in each category, along with which ones have weird descriptions and how many sketchy buyers are interacting with them.", "normal_query": "Count active listings for each Product Category, shown in descending order. Besides, show metrics related to Language Patterns, Suspicious Buyer.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "cybermarket_pattern_8", "selected_database": "cybermarket_pattern", "query": "Break down transactions by how complicated their shipping routes were, then show me the counts with the trickiest routes at the top.", "normal_query": "Show the number of transactions per Shipping Route Complexity label, highest first.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "cybermarket_pattern_9", "selected_database": "cybermarket_pattern", "query": " Tell me how the average security score stacks up across sessions with different privacy levels, rounded to 2 decimal places, from totally open to fully masked connections.", "normal_query": "List average OpSec score for each Session Anonymity Level, rounded to 2 decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "cybermarket_pattern_M_6", "selected_database": "cybermarket_pattern", "query": "I need to optimize the database for cross-border transaction lookups - could you create a dedicated index for those searches?", "normal_query": "Create an index to speed up searches for Cross-Border Transactions.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cybermarket_pattern_10", "selected_database": "cybermarket_pattern", "query": "I want to know the average keyword-hitting values for all customer and internal chats to identify high-risk patterns. 
Round to 3 decimal places.", "normal_query": "Return the average Suspicion Signal Density (SSD) across all communications, rounded to 3 decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 3, "distinct": false, "order": false}} +{"instance_id": "cybermarket_pattern_M_7", "selected_database": "cybermarket_pattern", "query": "Make a table called 'suspicious_buyers_cap' that lists all the shady buyers, but only include ones that hit at least $10 in suspicious activity.", "normal_query": "Create table suspicious_buyers_cap listing Suspicious Buyers with a $10 cap.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cybermarket_pattern_M_8", "selected_database": "cybermarket_pattern", "query": "I need to mandate sessions secured by two factor across the board. Please configure the system to upgrade any active sessions still relying on basic authentication.", "normal_query": "Force Premium Authentication by setting auth_protocol_type to \"2FA\" for every session that is currently using \"Basic\".", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cybermarket_pattern_11", "selected_database": "cybermarket_pattern", "query": "I need the total number of transactions that were both marked as fraud and involved cross-border payments.", "normal_query": "Count Fraud-Flagged Transactions that are Cross-Border.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cybermarket_pattern_12", "selected_database": "cybermarket_pattern", "query": "Calculate how many hours we typically take to close Tier-3 escalations. 
Show the average value, rounded to hundredths.", "normal_query": "Return the average resolve time in hours for Tier-3 Escalation Cases, rounded to 2 decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "cybermarket_pattern_13", "selected_database": "cybermarket_pattern", "query": "How many platforms show as 'active' right now?", "normal_query": "Count platforms currently marked as Active.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cybermarket_pattern_M_9", "selected_database": "cybermarket_pattern", "query": "Show me where our response is slowest—give me a quick breakdown by key groups, a percentile snapshot, and the 50 slowest sessions.", "normal_query": "Analyze connection_security to optimize Threat Handling Rate reports.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cybermarket_pattern_14", "selected_database": "cybermarket_pattern", "query": "How many shoppers are using advanced authentication?", "normal_query": "Count buyers who have Advanced authentication.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cybermarket_pattern_15", "selected_database": "cybermarket_pattern", "query": "What's the overall revenue from digital goods? Round the result to 2 decimal places.", "normal_query": "Sum total sales value for Digital product listings, rounded to 2 decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "cybermarket_pattern_16", "selected_database": "cybermarket_pattern", "query": "What's the average distance traveled for shipments with complex routes? 
Round the result to 2 decimal places.", "normal_query": "Compute the average geographical distance for shipments on complex routes and round the result to two decimals.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "cybermarket_pattern_M_10", "selected_database": "cybermarket_pattern", "query": "Set up the secure-platform snapshot—only create it if it isn't there yet.", "normal_query": "Create the secure-platform summary materialized view if it does not already exist.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cybermarket_pattern_17", "selected_database": "cybermarket_pattern", "query": "How many critical alerts do we have?", "normal_query": "Count alerts with Critical severity level.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cybermarket_pattern_18", "selected_database": "cybermarket_pattern", "query": "What's the ratio of sales that went through escrow? Round to 2 decimal places.", "normal_query": "Calculate the ratio of transactions that used escrow, rounded to 2 decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "cybermarket_pattern_19", "selected_database": "cybermarket_pattern", "query": "How many message threads contain irregular phrasing, sudden language switches, or machine translated text that indicate possible deception?", "normal_query": "Count communication threads with Suspicious language patterns.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cybermarket_pattern_20", "selected_database": "cybermarket_pattern", "query": "How many buyers have unpredictable spending trends?", "normal_query": "Count buyers with Variable spend pattern.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "archeology_scan_1", "selected_database": "archeology_scan", "query": "I'd like to see which of our dig sites have the best scan quality ratings. Could you show me each site's ID and name along with their average quality score, sorted best to worst?", "normal_query": "I'd like to see a quality assessment of scans across our archaeological sites. Show site code, site name, average Scan Quality Score for each site and rank them from highest to lowest quality.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "archeology_scan_2", "selected_database": "archeology_scan", "query": "Which sites need urgent conservation work? 
Please show me each location's ID, name, structural condition, preservation status, and whether they're in a high-risk category.", "normal_query": "Could you help me find archaeological sites that might need urgent conservation attention? I'm particularly interested in identifying sites that fall into Degradation Risk Zones. For each site, I'd like to see their code, name, structural state, and preservation status, along with their Risk Zone Category. This information would help our conservation team prioritize their efforts.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "archeology_scan_3", "selected_database": "archeology_scan", "query": "Where are the best places to do scanning based on weather conditions? Show me each site's ID and name with their average environmental condition score indicating suitability for scanning operations.", "normal_query": "I'm planning our upcoming archaeological scanning sessions and want to understand which sites have the most favorable scanning environments. Could you show me a report with each site's code, name, and its average Environmental Suitability Index? This would help us prioritize locations where we'll get the best scan quality.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "archeology_scan_4", "selected_database": "archeology_scan", "query": "How reliable are our scan alignments? For each alignment record, could you show me the registration accuracy relative to scan resolution and the registration confidence category. I need to see its registration ID, project ID, accuracy measurements, error values, calculated ratio, and the confidence category.", "normal_query": "I'm evaluating the quality of our scan registrations and would like to understand which ones are most reliable for spatial analysis. Could you show me the Registration Accuracy Ratio and Registration Confidence Level for each registration? I'd need to see the registration ID, project ID, accuracy measurements, error values, calculated RAR (rounded to 2 decimal places), and what confidence level that translates to.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "archeology_scan_5", "selected_database": "archeology_scan", "query": "Which archaeological sites have the best digital preservation? Rank our locations showing their ID, designation, and a comprehensive metric for evaluating digital preservation quality, with the best first.", "normal_query": "For our archaeological site evaluation, I need to quantify the Digital Preservation Quality metrics across our collection. Please compute a comprehensive DPQ index for each archaeological location. 
Present the results in descending order of DPQ values, displaying only the site identification code, site designation, and calculated DPQ value (rounded to two decimal places) to facilitate prioritization of our digital preservation resources.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "archeology_scan_6", "selected_database": "archeology_scan", "query": "How good are our 3D models based on the criteria for high-fidelity standard? Please generate a comprehensive report that shows each site's ID, name, total mesh count, high-fidelity mesh count and proportion (as a percentage), average ratio of mesh complexity, average resolution parameters (in mm), average geometric accuracy measurements and Mesh Quality category. Present the data with the highest-fidelity results first.", "normal_query": "Would you generate a comprehensive report categorizing sites based on High Fidelity Mesh standard? For each archaeological location, please include the site code, site name, total mesh count, high-fidelity mesh count and proportion (as a percentage), the average Mesh Complexity Ratio, average resolution parameters (in mm), average geometric accuracy measurements and Mesh Quality Classification. The data should be presented in descending order of high-fidelity percentage.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "archeology_scan_7", "selected_database": "archeology_scan", "query": "What are the scanning conditions like at each site? Show me each location's code and name, along with weather averages (temperature, humidity, and illumination levels), environment suitability score, and corresponding quartile ranking and environmental condition category based on the score.", "normal_query": "Show me each site's code and name, along with the average temperature, humidity, and illumination levels. I'd also like to see the average Environmental Suitability Index for each site, classified into quartiles, to understand the range of conditions. Finally, classify each site into Environmental Condition Classification System according to average ESI value.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 1, "distinct": false, "order": true}} +{"instance_id": "archeology_scan_8", "selected_database": "archeology_scan", "query": "I'd like to analyze how efficiently each scan processing workflow performs and spot any bottlenecks. For every software and stage combination, show me the software, processing stage, average hours needed for processing, average CPU and GPU usage percentages, average data size in GB, the ratio of the processing efficiency, and whether it's running efficiently or hitting bottlenecks ('Bottleneck Detected' if it is qualified as processing bottleneck, 'Efficient' if it is not). Also include how many workflows we're looking at for each combination. Sort the results by bottleneck status first, followed by the ratio value from lowest to highest.", "normal_query": "I want to evaluate each scan processing workflow's Processing Efficiency Ratio and identify whether it qualifies as a Processing Bottleneck. 
For each combination of processing software and stage, please include the software, stage, average processing hours, average CPU and GPU usage percentages, average data size in GB, the average PER value, and the efficiency status ('Bottleneck Detected' if it is qualified as processing bottleneck, 'Efficient' if it is not). Additionally, provide the total count of workflows for each combination. Sort the results by bottleneck status first, followed by the PER value in ascending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 1, "distinct": false, "order": true}} +{"instance_id": "archeology_scan_9", "selected_database": "archeology_scan", "query": "Which sites are best for finding artifacts? Show me each location's ID along with the average ratio between total points and cloud density, and the average efficiency of feature identification. I need all sites included, even if some data might be missing. Sort the results by average feature identification efficiency in descending order.", "normal_query": "For each archaeological site, I need its Point Cloud Density Ratio and Feature Extraction Efficiency to identify sites with high potential for feature extraction. Please include the site code, average PCDR value, and average FEE value. Ensure that all sites are included, even if some data might be missing. Sort the results by average FEE in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "archeology_scan_10", "selected_database": "archeology_scan", "query": "Hey, can you help me figure out how efficient our archaeological scanning gear is? I need to know the equipments' IDs, their efficiency of computing resource utilization (rounded to two decimal places), the average processing time in hours, their efficiency rankings, and their workflow efficiency status. Also, please include CPU usage (named 'cpu_usage'), GPU usage (named 'gpu_usage'), and processing hours (named 'processing_hours') as JSON in the resource details. Make sure to include all equipments, even if the data's incomplete, and sort everything by PRU value from lowest to highest. Thanks!", "normal_query": "My purpose is to analyze the Processing Resource Utilization (PRU) of our archaeological scanning equipment and categorize workflows according to the Workflow Efficiency Classification system. Please provide the equipments' IDs, PRU values (rounded to two decimal places), average processing time in hours, efficiency rankings, workflow efficiency status, and include the CPU usage (named 'cpu_usage'), GPU usage (named 'gpu_usage'), and processing hours (named 'processing_hours') in json format as resource details. I'd like all equipment to be included in the analysis, even those with incomplete data. 
Please sort the results by PRU value in ascending order to help identify the most efficient setups.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "archeology_scan_M_1", "selected_database": "archeology_scan", "query": "For our analysis work, let's create a special, pre-calculated table called high_fidelity_meshes to keep track of our best 3D models. In this table, I want to see the mesh's unique ID, the site it belongs to, the equipment used, the vertex and face counts, its resolution in millimeters, and its geometric accuracy. Also, please add a column for the ratio of its topological complexity to resolution. Only include the high fidelity meshes.", "normal_query": "We need to create a persistent table of all High Fidelity Mesh data for our archaeological analysis. Please set up a materialized view called 'high_fidelity_meshes'. The view should include the mesh's registry ID, site reference, equipment used, vertex and face counts, resolution in millimeters, geometric accuracy, and the calculated MCR value. Only include meshes that meet all the High Fidelity Mesh criteria.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "archeology_scan_M_3", "selected_database": "archeology_scan", "query": "Can you create a view for me called view_premium_quality_scans to identify high-quality archaeological scans? For each of these scans, please display its ID, project and site refs, the scan timestamp, scan resolution (mm), point density (points/m²), coverage percentage, overlap percentage, and noise level (dB). The main thing is to only include scans that meet our standards: high resolution, comprehensive coverage, and the noise level is below 1.5 dB.", "normal_query": "Create a view called view_premium_quality_scans that identifies high-quality archaeological scans. This view should include the Scan ID, Project Reference, Site Reference, Scan Timestamp, Scan Resolution (mm), Point Density (points/m²), Coverage (%), Overlap (%), and Noise Level (dB). The view should identify scans that meet the criteria for both a High Resolution Scan and Comprehensive Coverage, and also have a Noise Level less than 1.5.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "archeology_scan_M_4", "selected_database": "archeology_scan", "query": "I need a way to quickly check how good the scanning conditions were for our different sites. Can you create a view called site_esi that calculates how suitable environmental conditions were for scanning operations? For each site, just show its zone reference ID and the calculated ESI score, rounded to two decimal places.", "normal_query": "A view named site_esi is required. This view should determine the Environmental Suitability Index for each site. 
The output should include the Zone Reference and the calculated ESI value, rounded to two decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "cross_border_1", "selected_database": "cross_border", "query": "Let's check out the top 5 riskiest data flows. For each one, show me the flow ID, how risky it is, and how sensitive the data is. Sort them by the most sensitive data first, and make sure to round everything to two decimal places.", "normal_query": "List the top 5 high-risk data flows, showing each flow's ID, Risk Exposure Score, and Data Sensitivity Index, including all flows even if risk or profile data is missing. Sort by Data Sensitivity Index from highest to lowest, rounding scores to two decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "cross_border_2", "selected_database": "cross_border", "query": "Let’s see how vendors are distributed across different risk tiers. For each tier, tell me the tier name, how many vendors fall into it, and what percentage of the total that is (rounded to two decimals). Sort them so the tier with the most vendors comes first.", "normal_query": "Group all vendors by their Vendor Risk Tier. For each tier, return the tier name, the number of vendors in that tier, and the percentage of total vendors (rounded to two decimals). Sort the results by the number of vendors in each tier from highest to lowest.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "cross_border_3", "selected_database": "cross_border", "query": "Let’s find the top 10 overloaded data flows. For each, show me the flow ID, how much of the available bandwidth is being used compared to the total possible, and how efficient the transfer was based on the success rate and error count. We’ll sort them by bandwidth usage, from highest to lowest, and round the numbers to two decimal places.", "normal_query": "Find the top 10 Overloaded Data Flows, and list each flow's ID, its Bandwidth Saturation Index, and its Data Transfer Efficiency, with both metrics rounded to two decimal places. Sort by BSI from highest to lowest.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "cross_border_4", "selected_database": "cross_border", "query": "Let’s find the 5 data profiles most at risk for sensitive data exposure. For each one, tell me the profile ID, how sensitive the data is, and how strong the security protections are. Round the sensitivity score to two decimals and sort highest-to-lowest sensitivity. Use the scale High=3, Medium=2, Low=1; treat any other label (including 'Critical') as Low.", "normal_query": "Find the top 5 data profiles with potential Sensitive Data Exposure. For each one, show the profile ID, the data sensitivity score, and the security score. Round the sensitivity score to two decimal places and list them from highest to lowest sensitivity. 
Use the existing sensitivity scale (High=3, Medium=2, Low=1); treat any other label (including 'Critical') as Low.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "cross_border_5", "selected_database": "cross_border", "query": "Let’s find the top 10 compliance records where there are issues with data moving between countries—like mismatched or missing origin and destination—and either GDPR or local law compliance is marked as failed. For each, I want the compliance ID, GDPR and local law status, and the data transfer route. Sort them by ID from smallest to biggest.", "normal_query": "Find the top 10 records where data is moving between different countries (the two countries don’t match or one is missing) and either GDPR or local-law status is marked Non-compliant. Show the record ID, the GDPR status, the local-law status, and the transfer route (origin to destination). Sort by ID from smallest to largest.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "cross_border_6", "selected_database": "cross_border", "query": "Let’s find the top 3 months with the highest average severity for audit findings, but only include audits where the severity score was over 0.5. For each month, I need the month (in 'year-month' format), the average severity (rounded to two decimal places), and how severe it was compared to other months. We’ll sort everything from the earliest to the latest month.", "normal_query": "Find the top 3 months with the highest average Audit Finding Severity for audits with a Critical Audit Issue. List each month ('year-month'), the average AFS (rounded to two decimal places), and its severity rank. Sort by month from earliest to latest.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "cross_border_7", "selected_database": "cross_border", "query": "Find audits where the pressure from data subject requests is greater than 50. For each of them, I need the audit ID, the pressure score (rounded to two decimal places), and a breakdown of the request types, such as how many requests for access, deletion, rectification, and portability were made. Sort the results by the pressure score from highest to lowest, and show up to 100 records.", "normal_query": "Find audits with a Data Subject Request Pressure greater than 50. List each audit's ID, the DSRP (rounded to two decimal places), and a breakdown of request types (access, deletion, rectification, portability). Sort by DSRP from highest to lowest, and show up to 100 records.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "cross_border_8", "selected_database": "cross_border", "query": "Let's look at data flows that cross borders and calculate their associated risk based on their volume. 
For each flow, I need to see the flow ID, its risk factor (rounded to two decimal places), the total risk (rounded to two decimal places), and how each flow ranks based on its total risk. Give me the flows where the total risk exceeds 1000, and sort them from highest to lowest. Please limit the results to the top 5 flows.", "normal_query": "For cross-border data flows, calculate the Cross-Border Data Volume Risk and list the flow ID, Cross-Border Risk Factor (rounded to two decimal places), CDVR (rounded to two decimal places), and the rank of CDVR. Show only flows where CDVR is greater than 1000, sort by CDVR from highest to lowest, and limit to the top 5.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "cross_border_9", "selected_database": "cross_border", "query": "Let’s find the data profiles that have failed their integrity checks. For each profile, I need the profile ID, the count of integrity failures, and a list of failure types (like 'Integrity Check' or 'Checksum Verification') in a single string, separated by commas. Sort the profiles by the failure count, starting with the highest, and show me just the top 10.", "normal_query": "Find data profiles with a Data Integrity Failure, and calculate their Integrity Failure Count. List each profile's ID, its IFC, and the types of failures (like 'Integrity Check' or 'Checksum Verification') in a single string, separated by commas. Sort by IFC from highest to lowest, and show only the top 10 profiles.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "cross_border_10", "selected_database": "cross_border", "query": "Let’s find cross-border data flows that are under high audit compliance pressure. Focus on those with slow remediation timelines and remediation deadlines approaching within the next 5 days (assuming today is 2025-04-01). For each of these flows, I need the flow ID, the audit compliance pressure (rounded to two decimal places), and how many days the remediation is overdue. Sort these by the most overdue flows first, followed by audit compliance pressure from highest to lowest. Limit the results to the top 10 flows.", "normal_query": "I want to find cross-border data flows with High Audit Compliance Pressure. Focus on flows with slow remediation timelines and remediation deadlines within the next 5 days (assuming today is 2025-04-01). Show the flow ID, the Audit Compliance Pressure rounded to 2 decimal places, and the days overdue. Sort by days overdue from most overdue to least, then by Audit Compliance Pressure from highest to lowest, and limit to the top 10 flows.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "cross_border_M_1", "selected_database": "cross_border", "query": "Find the systems that work with a lot of sensitive stuff but don’t have strong protection in place. If something fits that risk profile, mark it for review. 
For each one, show its ID, whether we flagged it, and key details about how it’s secured.", "normal_query": "Identify systems that should be flagged for review if they have a high Data Sensitivity Index (DSI) and a low Security Robustness Score (SRS). For each, return the system ID, whether it's marked for review, and key security settings.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cross_border_M_2", "selected_database": "cross_border", "query": "We need to keep an updated summary of how well the data flows are performing. Make sure we have a place to store the record ID, the success rate, the error count, and a timestamp showing when the data was last updated. For every record, calculate how efficient the data transfer was. Then, if we don’t already have a record for it, add a new one, or if it’s already there, update it with the latest success rate, error count, and timestamp.", "normal_query": "We need to maintain a reliable summary that tracks the performance of each data flow. For every data transfer, calculate its Data Transfer Efficiency (DTE) and make sure this value is stored in a dedicated record, along with the original success rate, the number of errors, and the timestamp when this performance summary was last refreshed. If there’s already a summary for a data flow, make sure it gets updated with the latest numbers; if not, create a new one with all the required information. The DTE value should be rounded to two decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "cross_border_M_3", "selected_database": "cross_border", "query": "Let’s find all data transfers that go between two different countries and clearly fail to meet legal requirements. We’ll only consider it a serious compliance gap if either the general data protection rules or the local laws are explicitly marked as not being followed — not just partially done, but fully non-compliant. For each one, show the countries involved, some identifying info about the flow, and who the vendor is if we know it.", "normal_query": "Please create a materialized view named cross_border_compliance_gap_view. This view should act as a pre-computed list identifying all data flows that exhibit a Cross-Border Compliance Gap, defined as flows where the origin and destination countries differ, and where either GDPR compliance or local law compliance is marked as 'Non-compliant'.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "cross_border_M_4", "selected_database": "cross_border", "query": "Let's update the dataflow table by adding a column called transfer_path. For all the data flows that cross borders, I want you to create a string that shows the journey from the origin to the destination country in this format: 'OrigNation -> DestNation'. Make sure the column gets filled in for all the existing records.", "normal_query": "Please modify the dataflow table by adding a new column called transfer_path. 
Once the column is added, populate it for all existing Cross-Border Data Flows by creating their Transfer Path string, which combines the origin and destination nations.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cross_border_M_5", "selected_database": "cross_border", "query": "Let’s go through the audit records. If a record has a lot of critical findings — meaning the number of critical issues is more than half of the total findings — and the remediation deadline has already passed, change its status to 'Overdue'. But only do that if the current status isn’t already set to 'Complete' or 'Overdue'.", "normal_query": "Please update the AuditAndCompliance table. For the purpose of this operation, define a 'Critical Audit Issue' as any audit where the number of critical findings is greater than 50% of total findings. For any such audit record where the remediation due date is earlier than today, set its remediation status to 'Overdue'. This should only apply if the current status is not already marked as 'Complete' or 'Overdue'.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cross_border_11", "selected_database": "cross_border", "query": "Let’s figure out which data flows are really feeling the heat from audits and compliance costs. First, how heavy is the audit load? For each flow, take the number of critical findings, divide by total findings plus one, and multiply that by the total number of data subject requests. Then, how costly is it to stay compliant? Divide the compliance cost by the penalties plus one. Now show me the flows where both numbers are high—specifically, audit load over 10 and cost pressure over 0.8. For each of those, give me the flow ID, the audit load, and the cost pressure, both rounded nicely. Just make sure to link everything properly across the tables using the flow ID.", "normal_query": "I want to identify data flows with both high audit remediation load and high compliance cost pressure. Calculate the remediation load as audit severity (critical findings over findings + 1) times total data subject requests. Compute cost pressure as total compliance cost divided by penalties plus 1. List the flow ID along with both values, rounded to two decimal places, but only include flows where remediation load is over 10 and cost pressure exceeds 0.8. Ensure to join the relevant tables using the flow ID.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "cross_border_12", "selected_database": "cross_border", "query": "I want to find data flows that seem both risky and unreliable. I'm looking at how dangerous they are based on how well protections are working, and how often the data transfers succeed without problems. Just show me the ID, how much risk is involved, and whether they usually work well.", "normal_query": "I want to identify data transfers with high RES and low DFRS. 
Please return the unique identifier, RES, and DFRS for each qualifying transfer.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "cross_border_13", "selected_database": "cross_border", "query": "Let’s create a list that keeps track of data flows with compliance issues when data crosses borders. For each flow that has this compliance gap, we need to include details like the record ID, the flow tag, the countries involved (origin and destination), the compliance status with relevant data protection laws, the status of compliance with local regulations, and the vendor trace ID. This list will be called cross_border_compliance_gap_view.", "normal_query": "Please create a materialized view named cross_border_compliance_gap_view. This view should act as a pre-computed list identifying all data flows exhibiting a Cross-Border Compliance Gap. For each identified data flow, include the following details in the view: the record registry ID, flow tag, origin nation, destination nation, GDPR compliance status, local law compliance status, and the vendor trace ID.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "cross_border_14", "selected_database": "cross_border", "query": "Let’s check out the data flows that might be considered high-risk because they involve sensitive data. For each of these, I need the flow ID, the sensitivity score, and the destination country. The sensitivity score is calculated by multiplying the data size by a factor: if the data is highly sensitive, it gets a 3x factor, otherwise it gets a 1x factor. Show me the flows where the sensitivity score is above 100 and sort them with the highest sensitivity first.", "normal_query": "I want to find data flows that could be considered high-risk based on their sensitivity. For each data flow, show me the flow ID, the calculated sensitivity score (called DSI), and the country where the data is going. The DSI is calculated by taking the data volume (in GB) and multiplying it by a factor based on how sensitive the data is: if the data is marked as 'High' sensitivity, the factor is 3, and for any other sensitivity, it’s 1. Only show the data flows where the DSI is more than 100, and sort them by DSI from highest to lowest.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "cross_border_15", "selected_database": "cross_border", "query": "I want to get a general idea of how trustworthy our vendors are. 
Can you give me a single number that reflects their overall reliability based on things like how secure they seem and whether they’re still actively working with us?", "normal_query": "Calculate the average Vendor Reliability Index (VRI) using the standard definition, across all vendors.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cross_border_16", "selected_database": "cross_border", "query": "Let’s figure out which data profile has the highest sensitivity score. For each profile, we’ll calculate a score by multiplying how much data it has (in GB) with a factor based on how sensitive it is—3 for High sensitivity, 2 for Medium, and 1 for Low. I just need the highest score from all the profiles.", "normal_query": "I’m trying to find out which data profile has the highest sensitivity score based on how much data it holds and how sensitive the data is. Each profile has a volume in gigabytes and a sensitivity level—either High, Medium, or Low. I want to multiply the volume by a factor depending on sensitivity: 3 if it's High, 2 if it's Medium, and 1 if it's Low. Then give me maximum of these calculated values across all data profiles.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cross_border_17", "selected_database": "cross_border", "query": "Let’s see which audits are under the most pressure from user data requests. For each one, add up the number of access, deletion, rectification, and portability requests—use zero if any values are missing—then multiply that total by the average response time. I just want to know which audit ends up with the highest result.", "normal_query": "I’m trying to find the maximum Data Subject Request Pressure (DSRP) from the audit records. To get this, I’ll calculate the Data Subject Request Load (DSRL) by adding up the number of access, deletion, rectification, and portability requests—treating any missing values as zero. Then I’ll multiply that total by the average response time (also defaulting to zero) and return the highest result.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cross_border_18", "selected_database": "cross_border", "query": "Let’s check out which vendors are carrying the biggest compliance burden. First, we’ll measure how bad their audit issues are by dividing critical findings by total findings plus one. Then we turn their security rating into a score—4 for ‘A’, 3 for ‘B’, 2 for ‘C’, and 1 for anything else. To get the compliance burden, we multiply the audit severity by (5 minus the security score). Show me the vendors with a burden over 1.5, and list their ID, compliance burden, and security score—sorted from highest to lowest. Include vendors even if they don’t have audit data.", "normal_query": "I’m looking for vendors with a high Vendor Compliance Burden (VCB). To get that, first compute their Audit Finding Severity (AFS) by dividing critical findings by total findings plus one. Then turn their security rating into a number: 4 for ‘A’, 3 for ‘B’, 2 for ‘C’, and 1 for anything else. 
Multiply AFS by (5 minus the security score) to get the VCB. Show only vendors with a VCB above 1.5, and return their vendor ID, the VCB rounded to two decimals, and their numeric security rating—sorted from highest to lowest. Include all vendors with ratings, even if they don’t have audit data.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "cross_border_19", "selected_database": "cross_border", "query": "Let's look at countries where the data is super sensitive but the encryption's a bit lacking. For each country, I need the number of profiles, the average sensitivity score, average security strength, how well data is encrypted, and how long it’s being kept. Only show me the countries where the sensitivity score is above 100, and the encryption coverage is below 2. Make sure to sort them by encryption first (lowest to highest) and then by sensitivity (highest to lowest), and give me the top 20. You’ll need to work out the sensitivity from data volume, the security score from encryption and access settings, and the coverage ratio by combining both.", "normal_query": "I’m looking to assess countries where data sensitivity is high but encryption coverage is weak. For each destination country, calculate the number of profiles, the average Data Sensitivity Index (DSI), average Security Robustness Score (SRS), average Encryption Coverage Ratio (ECR), and average retention days. Only include destinations where the average DSI is over 100 and the ECR is below 2. Sort the results by ECR in ascending order, then DSI in descending order, and return the top 20. You’ll need to compute DSI from data volume and sensitivity, SRS from encryption and access control settings, and ECR by combining both. Be sure to link the profiles and flow data properly using their shared identifiers.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "cross_border_20", "selected_database": "cross_border", "query": "Let’s find data flows that look risky and involve a lot of sensitive data. For each one, can you tell me when it happened, who sent it, where it went, and which protocol it used? I only want the ones where the risk exposure score is above 0.7 and the data sensitivity index is over 100. Show me the top 50, sorted from highest risk to lowest, and use the sensitivity score as a tiebreaker. You’ll need to pull info from different places using the flow ID, even if some values are missing.", "normal_query": "I want to find data flows with high Risk Exposure Score and high Data Sensitivity Index. For each of these flows, show the timestamp, origin actor, destination country, protocol used, the computed risk exposure (rounded), and data sensitivity (rounded). A flow qualifies if its risk exposure is greater than 0.7 and its sensitivity index exceeds 100. Sort the results by risk exposure and sensitivity, both in descending order, and return the top 50 flows. 
Use the flow identifier to combine data across the necessary tables, even if not all values are present.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "cross_border_M_6", "selected_database": "cross_border", "query": "Let’s create a quick summary view called `DataFlowSummary` for data flows with 'High' or 'Critical' sensitivity levels. For each flow, I need details like the record ID, destination country, the actor that started the flow, the data size, the duration, and the sensitivity level. This is for summarizing only those flows with the specific sensitivity levels.", "normal_query": "I want to create a view called `DataFlowSummary` that summarizes the data flows from the DataFlow record, specifically for flows with 'High' or 'Critical' sensitivity levels. The view should include details like the record identifier, destination country, originating actor, data size, duration, and sensitivity level. This involves filtering based on the sensitivity level of the data flows.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cross_border_M_7", "selected_database": "cross_border", "query": "So, I need a list of data flows that are marked as 'Critical' and have a risk score above 50. For each one, I’d like to see the flow ID, sensitivity level, risk score, mitigation state, encryption status and method, the vendor assessment, and when the contract expires. Make sure the list is sorted by the highest risk score first.", "normal_query": "Generate a report for data flows with a sensitivity level of 'Critical' and a Risk Exposure Score (RES) greater than 50. The report should include the flow identifier, sensitivity level, risk assessment, risk mitigation state, encryption status and method, vendor assessment, and contract expiry date. The results should be ordered by risk assessment in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cross_border_M_8", "selected_database": "cross_border", "query": "Let’s pretend we just had a critical data transfer. It happened on 2025-06-29 at 10:30 AM, 'ActorA' sent 150.5 MB of data to 'ActorB' in the USA over TCP. It took 60 minutes, and the data was marked as “Critical”. Let’s log that with ID 'UUID-1236'.", "normal_query": "Please add a new data exchange event to the system. Use ID 'UUID-1236', timestamp '2025-06-29T10:30:00', initiated by 'ActorA', received by 'ActorB', sent to 'USA', over 'TCP'. The data volume was 150.5 MB, it lasted 60 minutes, and the sensitivity level is 'Critical'.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cross_border_M_10", "selected_database": "cross_border", "query": "Let’s clean up the data a bit. I need you to delete any records where the success rate is under 50% and the sensitivity level is 'Low.' 
But only delete those if they’re also linked to records with a risk score under 20, and if they’re tied to records where the GDPR status is 'Non-compliant.'", "normal_query": "I want to delete records where the success percentage is below 50 and the data sensitivity level is 'Low.' Additionally, only delete these records if they are linked to entries with a risk assessment score under 20 (this is related to the Risk Exposure Score calculation) and if they are also linked to records with non-compliant GDPR status (this refers to GDPR compliance).", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "crypto_exchange_1", "selected_database": "crypto_exchange", "query": "What's the current spread percentage of the midpoint price? Show me the exchange code, the timestamp of the snapshot, and the calculated spread for the latest market data.", "normal_query": "Could you calculate the Spread Percentage for the most recent market snapshot. Show me the exchange code of the most recent market snapshot with the timestamp of the snapshot, and the calculated percentage?", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "crypto_exchange_2", "selected_database": "crypto_exchange", "query": "Show me how much of each order has been filled by checking the most recent execution record. Please include the order ID, total order quantity, remaining quantity, and the calculated rate.", "normal_query": "For each order, calculate the Order Fill Rate based on its latest execution record. Display the order ID, total order quantity, remaining quantity, and the calculated order fill rate.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "crypto_exchange_3", "selected_database": "crypto_exchange", "query": "What's the risk exposure for our top 5 positions right now? Show me the margin-form identifier, the position's notional value, the volatility measure used and the calculated risk value.", "normal_query": "Calculate the Position Value at Risk (PVaR) for the top 5 positions, using their notional value from risk and margin data and the single latest market volatility reading. 
Show me the margin-form identifier, the position's notional value, the volatility measure used, and the calculated PVaR.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "crypto_exchange_4", "selected_database": "crypto_exchange", "query": "Show me the risk and margin pivot ID, the associated order ID, the account balance node ID, the initial margin hold value, the margin account balance, and the percentage of margin being utilized.", "normal_query": "Please display the risk and margin pivot ID, the associated order ID, the account balance node ID, the initial margin hold value, the margin account balance, and the calculated margin utilization.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "crypto_exchange_5", "selected_database": "crypto_exchange", "query": "What's our overall profit ratio when comparing winning and losing trades across all accounts? Display the total sum of positive realized PnL, the total sum of negative realized PnL, and the calculated ratio of profitable trades to losing trades.", "normal_query": "Can you calculate the Profit Factor based on the realized PnL across all account balances? Display the total sum of positive realized PnL, the total sum of negative realized PnL, and the calculated Profit Factor.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "crypto_exchange_6", "selected_database": "crypto_exchange", "query": "How do trading spreads vary with market mood across different currency pairs? Show me the market pair name, the calculated percentage, the overall market sentiment, the buy force, the average percentage for that sentiment, and the percentile rank of the percentage.", "normal_query": "Analyze the Spread Percentage across different markets and correlate it with market sentiment indicators. For each market pair, display the market pair name, the calculated spread percentage, the overall market sentiment, the buy force, the average spread percentage for that sentiment, and the percentile rank of the spread percentage.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "crypto_exchange_7", "selected_database": "crypto_exchange", "query": "How well does smart money predict price changes? I'd like to see the dominance category, the level of 'Whale-Driven Market' activity, the market pair, the average price change over 1 hour, average price change over 4 hours, average price change over 24 hours for different market pairs and calculate the success rate of smart money flow. Please group the results by flow dominance, whale activity, and market pair, and sort them by the successful smart money flow rate, from highest to lowest.", "normal_query": "I want to understand the impact of 'Smart Money Flow' on price movements across different market pairs. 
Can you provide the 'flow dominance' category, the level of 'Whale-Driven Market' activity, the market pair, the average price change over 1 hour, average price change over 4 hours, average price change over 24 hours for different market pairs and calculate the 'smart money accuracy' rate. Please group the results by flow dominance, whale activity, and market pair, and sort them by smart money accuracy, from highest to lowest.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "crypto_exchange_8", "selected_database": "crypto_exchange", "query": "I want to know the real leverage traders are using. Can you provide the notional value of position, position leverage multiplier, the total wallet balance, and the resulting effective leverage for each relevant position?", "normal_query": "To analyze the 'Effective Leverage' for positions, please provide the notional value of position, position leverage multiplier, the total wallet balance, and the resulting effective leverage for each relevant position.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "crypto_exchange_9", "selected_database": "crypto_exchange", "query": "I want to determine the strength of technical signals in the market. Please provide the RSI(14) value, MACD line value, Bollinger Band width, the technical meter direction, and the calculated strength.", "normal_query": "I want to determine the 'Technical Signal Strength' in the market. Please provide the RSI(14) value, MACD line value, Bollinger Band width, the technical meter direction, and the calculated technical signal strength.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "crypto_exchange_10", "selected_database": "crypto_exchange", "query": "I need to identify the large orders that could significantly impact market prices. Please include the order ID, the trade side (Buy or Sell), the order quantity, and the depth volume in units of both bid and ask.", "normal_query": "Help me find the Whale Orders, including the order ID, the trade side (Buy or Sell), the order quantity, and the depth volume in units of both bid and ask for any order that qualifies as a Whale Order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "crypto_exchange_M_1", "selected_database": "crypto_exchange", "query": "Clean up our executed orders data by removing all records for orders that were cancelled.", "normal_query": "We need to clean up our 'orderExecutions' table by removing all orders with a 'Cancelled' orderflow status. 
Can you create such query?", "preprocess_sql": ["CREATE table \"orderexecutions_bak\" as select * from \"orderExecutions\";"], "clean_up_sqls": ["\nINSERT INTO \"orderExecutions\"\nSELECT * FROM \"orderexecutions_bak\"\nWHERE ordersmark IN (\n SELECT RecordVault\n FROM \"orders\"\n WHERE LOWER(TRIM(order_attributes->>'status')) = 'cancelled') AND (order_attributes->>'quantity')::real > 5;"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "crypto_exchange_M_2", "selected_database": "crypto_exchange", "query": "Make a function called 'calc_effective_leverage' that figures out how leveraged a position really is by comparing its size to the trader's wallet balance.", "normal_query": "Create a function called 'calc_effective_leverage' that takes position leverage (as text), position value, and wallet balance to calculate Effective Leverage.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "crypto_exchange_M_3", "selected_database": "crypto_exchange", "query": "Can you help me create a recalc_market_impact_cost procedure that grabs the current market impact factor and calculates impact costs for all our 'New' orders, then saves the results with timestamps? We'll need a special log table for this, named market_impact_cost_log, which should have columns for a unique auto-incrementing ID (primary key), the order's reference text field, the calculated impact cost as a number, and when it was calculated with timezone info defaulting to current time. We don't need to run the procedure just yet.", "normal_query": "We need to track and calculate Market Impact Cost for all new orders. Please create a procedure called 'recalc_market_impact_cost' that gets the current market impact factor, calculates MIC for all orders with 'New' status using the formula, and logs the results with timestamps. Besides, create a log table 'market_impact_cost_log' to store the impact costs with columns for ID, order reference, calculated MIC, and timestamp (log_id SERIAL PRIMARY KEY, ordersmark TEXT, mic NUMERIC, calculated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()). No need to call the procedure now.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "crypto_exchange_M_4", "selected_database": "crypto_exchange", "query": "Make a view called 'whale_orders' that flags really big orders by comparing their size to the market's available liquidity, showing the order ID, market note, order quantity, and available liquidity.", "normal_query": "Could you create a view called 'whale_orders' that identifies all Whale Orders in our system? 
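As a rough illustration of crypto_exchange_M_3 above (the gold SQL itself is withheld by the benchmark), a sketch of the requested log table; the column list is taken directly from the record's own specification, while the recalc_market_impact_cost procedure is omitted because the Market Impact Cost formula is defined in the benchmark's external knowledge:

    CREATE TABLE IF NOT EXISTS market_impact_cost_log (
        log_id        SERIAL PRIMARY KEY,                     -- surrogate key, as specified
        ordersmark    TEXT,                                   -- order reference field named in the record
        mic           NUMERIC,                                -- calculated Market Impact Cost
        calculated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()  -- when the value was computed
    );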
We need to see the order ID, market note, order quantity, and available liquidity for orders.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "crypto_exchange_M_5", "selected_database": "crypto_exchange", "query": "Add a new field called 'spread_percentage' to show the spread percentage calculation of all market data records by updating their JSON fields for orderbook metrics.", "normal_query": "Please update all market data records to include the Spread Percentage as a new field 'spread_percentage' in the orderbook_metrics JSON in table 'marketdata'.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "crypto_exchange_11", "selected_database": "crypto_exchange", "query": "I need to understand our platform's overall risk level. Can you tell me, on average, what percentage of their available margin our users have currently tied up in positions?", "normal_query": "Help me calculate the platform-wide average for 'Margin Utilization'.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "crypto_exchange_12", "selected_database": "crypto_exchange", "query": "I need a quick risk assessment of how many of our users are in the danger zone of getting a margin call.", "normal_query": "Generate a count of all accounts that are currently at 'Margin Call Risk'.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "crypto_exchange_13", "selected_database": "crypto_exchange", "query": "Can you count how many enormous trades have occurred on our platform? 
I'm looking for the total number of single orders that were so large they were more than 10% of the market's depth at that moment.", "normal_query": "Provide a total count of all orders that are classified as a 'Whale Order'.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "crypto_exchange_14", "selected_database": "crypto_exchange", "query": "Can you calculate the average spread as a percentage of the midpoint price?", "normal_query": "What is the average 'Spread Percentage' across all of our markets?", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "crypto_exchange_15", "selected_database": "crypto_exchange", "query": "How risky is order OR6015391 in terms of getting liquidated?", "normal_query": "What is the Liquidation Risk Level for order OR6015391?", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "crypto_exchange_16", "selected_database": "crypto_exchange", "query": "If we execute order OR6015391 right now, what is the cost of its impact on the market? Please round it to 2 decimals", "normal_query": "What is the Market Impact Cost for order OR6015391, rounded to 2 decimals?", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "crypto_exchange_17", "selected_database": "crypto_exchange", "query": "Is the EX203 market drying up right now? Tell me if we're in a liquidity crunch where it's hard to trade without moving prices by returning the categorical status 'Liquidity Crisis' or 'Normal Market Conditions'.", "normal_query": "Our trading strategy requires large transactions in liquid markets. Is market EX203 experiencing a Liquidity Crisis? Return the categorical status 'Liquidity Crisis' or 'Normal Market Conditions'.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "crypto_exchange_18", "selected_database": "crypto_exchange", "query": "How good are the average returns on order OR6015391 adjusted for risk exposure?", "normal_query": "What are the average Risk-Adjusted Returns for order OR6015391?", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "crypto_exchange_19", "selected_database": "crypto_exchange", "query": "Our arbitrage strategy robot needs to identify cross-market spread opportunities. Does EX203 have significant arbitrage opportunities across markets? Return 'Arbitrage Opportunity' if the value exceeds the threshold, otherwise 'Normal Market'.", "normal_query": "Our arbitrage strategy robot needs to identify cross-market spread opportunities. 
According to our arbitrage strategy, when the cross-market spread exceeds the threshold, an Arbitrage Window exists, triggering automated trading. Please determine whether EX203 presents an arbitrage opportunity. Return 'Arbitrage Opportunity' if the value exceeds the threshold, otherwise 'Normal Market'.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "crypto_exchange_20", "selected_database": "crypto_exchange", "query": "What percentage of order OR6015391 has been filled?", "normal_query": "What is the Order Fill Rate for order OR6015391?", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "crypto_exchange_M_6", "selected_database": "crypto_exchange", "query": "Clean up old execution records that have passed their expiration date, but only for those quick-fire orders that either fill immediately or cancel.", "normal_query": "Purge expired execution records for IOC/FOK orders where expireSpot timestamp is before current time.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "crypto_exchange_M_7", "selected_database": "crypto_exchange", "query": "Build a live liquidity dashboard 'market_liquidity_dashboard' showing the exchange spot market symbol, snapshot timestamp, and the corresponding liquidity ratio.", "normal_query": "Create a view market_liquidity_dashboard showing the exchange spot market symbol, the timestamp when the snapshot was taken, and the corresponding liquidity ratio for each market.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "crypto_exchange_M_8", "selected_database": "crypto_exchange", "query": "Can you make a helper calc_spread_pct() that takes the JSONB for fast order-book analytics and returns the calculated spread percentage?", "normal_query": "Create function calc_spread_pct() that takes the JSONB for fast order-book analytics and returns the calculated spread percentage.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "crypto_exchange_M_9", "selected_database": "crypto_exchange", "query": "I want to create a trg_margin_util that automatically update how much percentage of margin being utilized whenever their risk profile changes. Please store the result in a new JSONB key named margin_util_pct inside the margin_risk_profile column.", "normal_query": "Create trigger trg_margin_util that auto-calculates Margin Utilization whenever margin profile changes. 
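To make crypto_exchange_M_8 above concrete (again, not the withheld gold SQL), a sketch of calc_spread_pct; the JSONB key names 'best_bid' and 'best_ask' are assumptions, since the real order-book keys and the exact Spread Percentage definition come from the benchmark's schema and external knowledge:

    CREATE OR REPLACE FUNCTION calc_spread_pct(orderbook JSONB)
    RETURNS NUMERIC AS $$
    DECLARE
        bid NUMERIC := (orderbook->>'best_bid')::NUMERIC;  -- assumed key name
        ask NUMERIC := (orderbook->>'best_ask')::NUMERIC;  -- assumed key name
    BEGIN
        IF bid IS NULL OR ask IS NULL OR (bid + ask) = 0 THEN
            RETURN NULL;  -- incomplete book: no spread to report
        END IF;
        -- spread expressed as a percentage of the midpoint price
        RETURN (ask - bid) / ((ask + bid) / 2.0) * 100;
    END;
    $$ LANGUAGE plpgsql;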
The result should be stored in a new JSONB key named margin_util_pct inside the margin_risk_profile column.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "crypto_exchange_M_10", "selected_database": "crypto_exchange", "query": "Emergency brake on dangerous bets! Cancel orders classified as Critically Over-Leveraged Position and set the cancellation reason to 'Critical Leverage'.", "normal_query": "Please cancel executions for positions classified as Critically Over-Leveraged Position and set the cancellation reason to 'Critical Leverage'.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "polar_equipment_1", "selected_database": "polar_equipment", "query": "Let's compare how efficient our equipment is versus how safe it is. Can you show me a list with the equipment type, its code, its efficiency score, and its safety score? Then, for each equipment type, rank them by efficiency and by safety. I also want to see how big the gap is between those two ranks. Sort everything by type, and then by the best efficiency score.", "normal_query": "Show me the equipment type, equipment code, equipment efficiency rating, safety index, efficiency rank, safety rank, and the absolute rank difference between them. Sort the results by equipment type and then by EER in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "polar_equipment_2", "selected_database": "polar_equipment", "query": "I need to know which of our gear is ready for a bad storm. Can you check everything and give me a list of all equipment that's up to our 'extreme weather readiness' standard? For each item, I want to see its code and type, whether the heater, insulation, and emergency lights are good to go, its structural safety score, and the final 'Ready' or 'Not Ready' label.", "normal_query": "Could you identify all equipment that meets the extreme weather readiness criteria in our polar database? Show me the equipment code, equipment type, heater status, insulation status, emergency light status, the calculated structural safety factor, and the extreme weather readiness status. Make sure to include all equipment with available structural safety data, even if some equipment might be missing cabin environment, lighting safety, or thermal insulation information.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "polar_equipment_3", "selected_database": "polar_equipment", "query": "Time for a safety check on our life support gear. Can you create a report for me? I need to see the equipment's code and type, its current life support status, and its calculated reliability score. Based on that score, tell me if it's 'High', 'Moderate', or 'Low' reliability. 
Also, toss in a little JSON summary showing the status of the oxygen, medical, and safety systems with fields names: 'oxygen_status', 'medical_status', 'safety_system_status'. Let's just focus on the 'Safety' type equipment and sort it by the best reliability score.", "normal_query": "For our polar safety assessment, I need to evaluate the safety equipment's life support system reliability. Please provide a report showing the equipment code, equipment type, life support status, calculated LSSR score (rounded to 2 decimal places), and reliability classification based on life support reliability classification. Also include a JSON summary of oxygen status , medical status, and safety system status as support systems status with fields names: 'oxygen_status', 'medical_status', 'safety_system_status'. Focus only on safety equipment and sort the results by LSSR in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "polar_equipment_4", "selected_database": "polar_equipment", "query": "How green are our stations? I want a report showing each station's type and name, how many pieces of gear are there, and how much they rely on renewable energy. Please show the percentage of renewable use, the total renewable power in watts, and a simple classification according to the classification system of energy sustainability. Only look at stations with solar or wind data, and please sort them to show the greenest stations first.", "normal_query": "Provide the location type, station name, number of equipment at each station, their renewable energy contribution values (rounded to 2 decimal places), total renewable energy output in watts, and how they're categorized according to the energy sustainability classification System? Only include equipment that has measurable solar or wind output data, and sort the results from highest to lowest REC value.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "polar_equipment_5", "selected_database": "polar_equipment", "query": "Let's get a handle on our water situation at each station. For each station, can you tell me its name and location type? I need to see the average water quality score, the average water management score, and a count of how many systems are in 'conservation needed' mode. Also, give me a simple classification for both the water quality and the overall management status. Sort the list with the best-managed stations at the top.", "normal_query": "For each combination of station name and location type, I need to see station names, location types, average water quality indices, average water resource management index scores (both rounded to 2 decimal places), count of systems with water conservation requirement, water quality classification, and water resource management status. Sort by highest WRMI first, then by water quality.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "polar_equipment_6", "selected_database": "polar_equipment", "query": "I need to check how ready our equipment is. 
Can you go through all the maintenance records and calculate the score for its operational readiness for each one? Just show me a list with the record ID, its operating hours, maintenance cycle hours, its current status, and the final readiness score.", "normal_query": "Could you calculate the operational readiness score for all our equipment maintenance records? I'd like to see the registry ID, operation hours, maintenance cycle hours, operational status, and the calculated ORS value for each record.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "polar_equipment_7", "selected_database": "polar_equipment", "query": "Let's figure out how sustainable our power gear is. Can you calculate the index of energy sustainability for every power device? I need a list showing the device's code, its energy efficiency percentage, what its power source is, and the final index score you calculated.", "normal_query": "I want to calculate the energy sustainability index for each power device in our database. Please retrieve the equipment reference code, energy efficiency percentage, power source, and then calculate the corresponding ESI value for each device.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "polar_equipment_8", "selected_database": "polar_equipment", "query": "How stable are our comms systems? I need a report that calculates the stability index for each communication unit. Can you show me the unit's ID, antenna status, signal strength, and network lag? Then, using that, calculate both the simple reliability index and the more complex stability index. Please round the numbers to make them easier to read.", "normal_query": "I would like to assess our polar base communication systems by calculating the base station communication stability index for each communication unit. Please extract the registry ID, antenna status, radio signal strength, and network latency from our communication records, then calculate both the communication reliability index and BSCSI for each unit. Make sure to round all values to two decimal places for clarity in reporting.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "polar_equipment_9", "selected_database": "polar_equipment", "query": "I need a list of our safest and best-performing equipment. Can you find all the gear with a top-tier performance index of overall safety-say, anything over 0.75? For each item on the list, show me its equipment code, its calculated efficiency rating, and the final safety/performance score.", "normal_query": "Could you list all equipment with high overall safety performance index scores greater than 0.75? 
Please display the equipment code, calculate the equipment efficiency rating, and show the OSPI value for each item.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "polar_equipment_10", "selected_database": "polar_equipment", "query": "Let's assess how our vehicles are performing. Can you calculate the coefficient for the vehicle's performance for every chassis we have? I just need a simple report with the chassis ID and its calculated performance score. Make sure to check all of them, even if some have missing engine data.", "normal_query": "For each chassis in our database, calculate the vehicle performance coefficient. I need a report showing the chassis registry ID first, followed by the calculated VPC value. Please include all chassis records in your analysis, even those without corresponding engine data.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "polar_equipment_11", "selected_database": "polar_equipment", "query": "I need a quick number: how many of our shelters are actually ready if a big storm hits? Even if we're missing some sensor data for a shelter, it should still be part of the initial check. Just give me the final tally.", "normal_query": "I need to get a total count of all shelters that are prepared for severe weather. Please determine this by applying the extreme weather readiness status standard. The analysis should include all shelters, even if some weather or thermal data is missing. Provide the final result as a single number representing the total count.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "polar_equipment_12", "selected_database": "polar_equipment", "query": "What's our best-case scenario for getting good science data from the Arctic? Looking only at our equipment up north, find the highest chance of success for a mission from any single instrument. Just give me that single, top-line probability score rounded to two decimal places.", "normal_query": "I want to assess our top-end capability for research in the Arctic. Could you please calculate the maximum scientific mission success probability for any single piece of scientific equipment operating in the 'Arctic' region? Please provide the final result as a single value rounded to two decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "polar_equipment_13", "selected_database": "polar_equipment", "query": "Let's find our safest and most efficient truck. Just calculate their safety performance overall, and I want to see the single highest score out of the entire fleet. Just give me that top number, rounded.", "normal_query": "I need to identify the absolute best-performing vehicle in our fleet from a safety perspective. Please calculate the overall safety performance index for every vehicle. 
From all the calculated OSPI scores, find the single maximum value, rounded to two decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "polar_equipment_14", "selected_database": "polar_equipment", "query": "I want to see our best and worst equipment in terms of long-term operational stability. For each category of equipment, can you show me a list of the top 5 most stable machines and the 5 least stable ones? Show me the equipment's ID, its category, and its stability score, and group the results by category, with the best ones on top.", "normal_query": "For each equipment type, please identify the 5 units with the highest long-term operational stability score (LOSS) and the 5 units with the lowest LOSS. Please display the equipment code, its type, and the calculated LOSS, ordered first by equipment type and then by the LOSS score in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "polar_equipment_15", "selected_database": "polar_equipment", "query": "Let's see how much the antenna's condition matters for our comms. Can you group everything by the antenna status—like normal, warning, or error—and tell me the average communication stability for each? Also, show me how many links are in each group. Make sure to only use records where we have all the necessary data, and list the results with the most stable antenna status on top.", "normal_query": "I want to perform an analysis of communication link stability grouped by antenna status. For each antenna status category, please calculate the average base station communication stability index. The final report should display the antenna status, the total number of links for that status, and the average BSCSI rounded to two decimal places. For this analysis, please ensure you are using a complete data set. Sort the results by the average BSCSI in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "polar_equipment_16", "selected_database": "polar_equipment", "query": "I want to rank our most efficient vehicles. For each truck, calculate its overall transportation efficiency number. Please show me a list of the top 100 vehicles, with the vehicle's ID, the coefficient for vehicle performance, the index for energy sustainability, and the overall transportation efficiency number, ordered from most efficient to least.", "normal_query": "I need to generate a comprehensive vehicle efficiency and sustainability report. For all vehicles, please calculate the polar transportation efficiency coefficient. The report should display the equipment ref for each vehicle, along with its calculated VPC, ESI, and the final PTEC. Please round the VPC, ESI and PTEC scores to two decimal places. 
Sort the results by the PTEC in descending order and show only the top 100 vehicles.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "polar_equipment_17", "selected_database": "polar_equipment", "query": "What's the overall reliability score for all the gear we have running right now? I need the average comprehensive score, but only for the active equipment. Make sure you factor in the efficiency, readiness, and communication scores. Just give me that one final number, rounded.", "normal_query": "I need a high-level summary of our fleet's current operational state. Please calculate the average comprehensive operational reliability indicator for all equipment that is currently in an 'Active' operational status. Present the final result as a single value, rounded to two decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "polar_equipment_18", "selected_database": "polar_equipment", "query": "How does the cold affect our batteries? I want to see a breakdown of battery health based on how cold it is outside. Group the gear into a few temperature buckets like 'Extreme Cold,' 'Standard Cold,' and 'Mild Cold,' and for each bucket, show how many pieces of equipment are in it and what their average battery health is.", "normal_query": "I need to analyze battery performance under thermal stress. Please calculate the temperature-zoned average battery health for all equipment. The report should group equipment by the standard external temperature ranges and display the equipment count and the average battery health for each zone, rounded to two decimal places. Order the results by the temperature range.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": true, "order": true}} +{"instance_id": "polar_equipment_19", "selected_database": "polar_equipment", "query": "I want a list of our most unreliable comms hubs. Figure out which stations have consistently bad link resilience. For each of those problem stations, show me the station's name, its average reliability and stability scores rounded by 2 decimals, and a list of all the equipment there so we know what to check. Only use complete data for this, and show me the worst stations at the top of the list.", "normal_query": "Please generate a report on stations with poor communication links. Use the communication network resilience assessment to identify all stations with 'Low Resilience'. For each of these stations, I need to see the station name, the average communication reliability index rounded by 2 decimals, the average base station communication stability index rounded by 2 decimals, and a list of all equipment at station contributing to the low score. Please ensure you use a complete data set for the calculations. 
Order the results by the average BSCSI, with the lowest first.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "polar_equipment_20", "selected_database": "polar_equipment", "query": "I need to see the water situation at all of our stations. Can you give me a list showing each station's name, its water quality score, and the water tank level? Also, add a simple category based on our standard classification system for water quality. Make sure every station shows up, even the ones we don't have water readings for, and list them alphabetically.", "normal_query": "Please generate a comprehensive water quality report for each station. For every station, show its name, the raw water quality index, and the water level percentage. Additionally, apply the water quality classification system to categorize the water. Ensure that all stations are included in the report, even if they have no associated water data. Order the results by station name.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "polar_equipment_M_1", "selected_database": "polar_equipment", "query": "To make things easier, can we build a reusable tool to figure out the index of energy sustainability? I need a function called 'calculate_esi' that takes an efficiency number and a power source name, and then just spits out the ESI score.", "normal_query": "I want to create a function called 'calculate_esi' taking two inputs, efficiency and resource, that returns the energy sustainability index for our equipment. Please make this a reusable PostgreSQL function that our team can call whenever needed.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "polar_equipment_M_2", "selected_database": "polar_equipment", "query": "Our queries filtering scientific equipment by reliability are slow. Can you create a special index called 'idx_scientific_reliability' to speed things up? It should be built directly on the reliability score calculation so we can find our most reliable gear faster.", "normal_query": "Create a function-based index called 'idx_scientific_reliability' to optimize queries that filter scientific equipment based on their scientific equipment reliability. This index should directly implement the SER formula.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "polar_equipment_M_3", "selected_database": "polar_equipment", "query": "Let's reward the well-maintained cabins. 
For any equipment that's in a cabin meeting our 'habitability standard', can you give its reliability index a 15% boost?", "normal_query": "Increase the reliability index by 15% for all equipment associated with cabins that meet our cabin habitability standard.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "polar_equipment_M_4", "selected_database": "polar_equipment", "query": "I need a simple way to check on water usage for our dashboards. Can you create a view called 'water_management_view'? It should show the equipment ID, its calculated water management score, and categorize each of them based on the status classification of water resource management. Let's base it on all equipment that has water level data.", "normal_query": "Create a dashboard view called 'water_management_view' that calculates the water resource management index for all equipment with water level data. The view should display the equipment reference, the calculated WRMI value, and categorize each item according to the water resource management status classification.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "polar_equipment_M_5", "selected_database": "polar_equipment", "query": "We need a standard way to calculate the performance coefficient for vehicles. Can you build a function called 'calculate_vpc' that takes brake wear, track wear, speed, and engine load as inputs? It's important that it's robust, so please make sure it throws an error if any of the input values are out of the expected range.", "normal_query": "For our polar vehicles, we need a utility function 'calculate_vpc' to calculate the vehicle performance coefficient for performance assessment. Create a PostgreSQL function that takes four parameters: brake pad wear percentage (0-100), track wear percentage (0-100), vehicle speed (km/h, non-negative), and engine load percentage (0-100). The function should validate these inputs with clear error messages.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "polar_equipment_M_6", "selected_database": "polar_equipment", "query": "I want a standard way to figure out how reliable our life support systems are. Can you build a reusable function called get_lssr that takes an equipment ID and gives back its life support score? It should be based on our formulas for how ready the gear is and how well its insulation is holding up. Make sure the calculator doesn't break if some of the sensor data is missing; it should just return zero in that case.", "normal_query": "Please create a reusable function named get_lssr to standardize the calculation of the life support system reliability for any given piece of equipment. This function should take an equipment code as input and calculate the LSSR. 
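A hedged sketch of the calculate_vpc helper described in polar_equipment_M_5 above: the four parameters and their valid ranges are spelled out in the record itself, but the returned expression is only a hypothetical placeholder, because the actual Vehicle Performance Coefficient formula belongs to the benchmark's external knowledge and is not reproduced here:

    CREATE OR REPLACE FUNCTION calculate_vpc(
        brake_pad_wear NUMERIC,   -- percent, 0-100
        track_wear     NUMERIC,   -- percent, 0-100
        speed_kmh      NUMERIC,   -- km/h, non-negative
        engine_load    NUMERIC    -- percent, 0-100
    ) RETURNS NUMERIC AS $$
    BEGIN
        -- validate inputs with clear error messages, as the record requires
        IF brake_pad_wear NOT BETWEEN 0 AND 100 THEN
            RAISE EXCEPTION 'brake pad wear must be between 0 and 100, got %', brake_pad_wear;
        END IF;
        IF track_wear NOT BETWEEN 0 AND 100 THEN
            RAISE EXCEPTION 'track wear must be between 0 and 100, got %', track_wear;
        END IF;
        IF speed_kmh < 0 THEN
            RAISE EXCEPTION 'speed must be non-negative, got %', speed_kmh;
        END IF;
        IF engine_load NOT BETWEEN 0 AND 100 THEN
            RAISE EXCEPTION 'engine load must be between 0 and 100, got %', engine_load;
        END IF;
        -- hypothetical placeholder combination; substitute the real VPC formula here
        RETURN (1 - brake_pad_wear / 100.0) * (1 - track_wear / 100.0);
    END;
    $$ LANGUAGE plpgsql;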
The function must also handle cases where component data might be missing, returning 0 in such instances.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "polar_equipment_M_7", "selected_database": "polar_equipment", "query": "Let's find our most problematic equipment. I need a list of all assets that are both performing poorly—let's use an equipment efficiency rating below 40 as the cutoff—and are also more expensive to maintain than other gear in their same category. For each one that meets these criteria, please log its equipment code in our review system table called EquipmentReviewLog, creating it if it doesn't exist, and note the reason it was flagged.", "normal_query": "Please identify all equipment with an equipment efficiency rating below 40 that also have a maintenance cost higher than the average for their specific equipment type. For each identified piece of equipment, create a new record in the EquipmentReviewLog table, creating the table if it doesn't exist. You should also insert its equipment code and a reason for the review.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "polar_equipment_M_8", "selected_database": "polar_equipment", "query": "We need to add a hard safety stop to our system. Can you set things up so that if a piece of gear has failed its inspection, nobody can mark it as 'Active' and put it back in service? The system should block the update and give an error message. We can't have people using equipment that we know is broken.", "normal_query": "I need to implement a critical safety protocol. Please create a failed inspection activation lockout rule in the database. If this condition is met, the transaction should be blocked and an exception raised to prevent using unsafe equipment.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "polar_equipment_M_9", "selected_database": "polar_equipment", "query": "Let's make a standard tool for measuring how well our energy and water systems work together. Can you build a calculator function called get_ewrii? It should take an equipment ID and give back a single score based on its energy sustainability and its water management performance. It's important that this tool is reliable; if it can't find some of the data it needs for a calculation, it should just return zero instead of breaking.", "normal_query": "I need to create a reusable function named get_ewrii to standardize our energy-water resource integration index calculation. The function should accept an equipment code and calculate the EWRII. 
The function must return a value of 0 if any of the underlying data for a calculation component is not found, thereby preventing query failures.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "polar_equipment_M_10", "selected_database": "polar_equipment", "query": "We need a database failsafe to protect our most important gear. Can you set up a trigger named trigger_prevent_delete_active_critical that stops anyone from deleting a piece of critical equipment from the system if it's currently running? The system should throw an error and block the deletion automatically.", "normal_query": "I need to enforce a database-level safety protocol for critical equipment. Please create a trigger named trigger_prevent_delete_active_critical that prevents the deletion of any equipment record that is currently 'Active' and also meets the definition of critical equipment. This trigger should fire before any delete operation on the Equipment table and raise an exception if the conditions are met, ensuring that essential, in-use assets cannot be accidentally removed.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "sports_events_1", "selected_database": "sports_events", "query": "Please show me the average age of all sprint session winners at the time they won. The result should be a single age in years.", "normal_query": "Calculate the average age of all Sprint Winners at the time they won. Show the result as a single value representing the average age in years.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "sports_events_2", "selected_database": "sports_events", "query": "Our team is studying how thin air affects car performance at racing venues. Can you pull up a list of all tracks that are located high enough above sea level to create high-altitude circuit? I need to see the track names and their exact elevations, with the highest altitude venues listed first.", "normal_query": "I need to identify all High-Altitude Circuits in our database for aerodynamics research. Please retrieve the circuit name and elevation for all circuits that qualify as High-Altitude Circuits. Sort the results by elevation in descending order to show the highest circuits first.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "sports_events_3", "selected_database": "sports_events", "query": "To analyze team performance, can you calculate the rate of constructor reliability for each team? I need to see the team names, their total races started, total races finished, the reliability rate as a percentage, and give them a reliability rank. 
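Purely as an illustration of polar_equipment_M_10 a few records back (not its gold solution), a sketch of trigger_prevent_delete_active_critical; the column names operational_status, is_critical, and equipment_code are assumptions, and the real test for critical equipment is defined in the benchmark's external knowledge:

    CREATE OR REPLACE FUNCTION prevent_delete_active_critical()
    RETURNS TRIGGER AS $$
    BEGIN
        -- block deletion of equipment that is both in use and flagged critical (assumed columns)
        IF OLD.operational_status = 'Active' AND OLD.is_critical THEN
            RAISE EXCEPTION 'Cannot delete active critical equipment %', OLD.equipment_code;
        END IF;
        RETURN OLD;
    END;
    $$ LANGUAGE plpgsql;

    CREATE TRIGGER trigger_prevent_delete_active_critical
    BEFORE DELETE ON Equipment
    FOR EACH ROW EXECUTE FUNCTION prevent_delete_active_critical();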
Only include constructors with significant participation so we get meaningful data, and sort them from most reliable to least reliable.", "normal_query": "I need to analyze team performance by calculating the Constructor Reliability Rate for all constructors in our championship database. Please provide a ranking that shows each constructor's name, total races started, total races finished, their reliability rate as a percentage, and their reliability rank. Only include Constructors with Significant Participation to ensure statistical validity. Sort the results by reliability rate from highest to lowest.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "sports_events_4", "selected_database": "sports_events", "query": "I'm curious about the dominant wins in sprint races, which are characterized by large margins. Just give me the total number of these landslide wins.", "normal_query": "Please count how many Dominant Victory events occurred in sprint races. Just return the total count.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "sports_events_5", "selected_database": "sports_events", "query": "I want to see how McLaren's overall team performance develops over seasons - like watching their report card get updated after each race. Just show me the year, race ID, constructor name, and the cumulative constructor's performance score after each race event, so I can track how their performance score changes as the season progresses.", "normal_query": "Our team need to analyze how McLaren's Constructor's Performance Score (CPS) evolves throughout different seasons. Show the year, race ID, constructor name, and the cumulative CPS score after each race event, so I can track how their performance score changes as the season progresses.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "sports_events_6", "selected_database": "sports_events", "query": "I'm curious about how Hamilton's value as a driver changes as he gets older and more experienced. Please give me the race IDs and his performance values.", "normal_query": "I need to analyze Lewis Hamilton's Driver Performance Value throughout his career. Can you show the race ID and the calculated DPV value?", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "sports_events_7", "selected_database": "sports_events", "query": "Can you rank the drivers based on the stability of their average lap time? Please show me each driver's surname and first_name in a JSON format, their average consistency score, and the number of Races Analyzed. Just focus on drivers who have competed in more than five races, and list the most consistent ones at the top.", "normal_query": "Can you rank the drivers based on their Average Lap Time Consistency? 
Please show me each driver's surname and first_name in a JSON format, their average consistency score, and the number of Races Analyzed. Just focus on drivers who have competed in more than five races, and list the most consistent ones at the top.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "sports_events_8", "selected_database": "sports_events", "query": "I'm interested in the achievements of veterans. Could you pull up a list of the race year, official race event name, driver's full name, their podium position, and their age at the time of the race. Please show accomplishments by oldest drivers first, and for same-age drivers, show most recent results first.", "normal_query": "Retrieve all instances of a Veteran's Podium. For each occurrence, please provide the race year, the official race event name, the driver's full name, their specific podium position, and their calculated age at the time of the race. The results should be ordered first in descending order by the driver's age at the time of the race, and then in descending order by the race year.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "sports_events_9", "selected_database": "sports_events", "query": "I need to generate a list that ranks the drivers' overall performance in a Sprint session. The output should include the event name, the driver's ID, and their performance index score. Please make sure the best performances are right at the top.", "normal_query": "I need to generate a report that calculates the Sprint Performance Index for every completed driver result in a sprint session. The output should include the event name, the driver's reference code, and the calculated Sprint Performance Index. Sort the results in descending order based on the index to feature the highest-scoring performances first.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "sports_events_10", "selected_database": "sports_events", "query": "For each qualifying session, calculate the average percentage of qualifying specialists and tell me the average of those percentages across all sessions, rounded to two decimal places.", "normal_query": "For each qualifying session, calculate the average percentage of drivers who meet the Qualifying Specialist criteria, and output the average of those percentages across all sessions, rounded to two decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "sports_events_11", "selected_database": "sports_events", "query": "Using historical data, estimate the average probability that a driver achieves hat trick achievements, given that they start from pole position. 
Use the simplified probability assumptions for calculation, which estimate the chance to win and the chance to set the fastest lap if they're on pole, and the result should be rounded to four decimal places.", "normal_query": "Using historical data, estimate the average probability that a driver achieves a Hat Trick, given that they start from Pole Position. Base your calculation on assumed Pole-Based Race Win Probability and Pole-Based Fastest Lap Probability, and the result should be rounded to four decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 4, "distinct": false, "order": false}} +{"instance_id": "sports_events_12", "selected_database": "sports_events", "query": "Can you calculate how well the top 8 finishers perform on average in sprint sessions? Round the result to two decimal places.", "normal_query": "I need to analyze the average Sprint Performance Index (SPI) across top 8 finishers in sprint sessions. Please round the result to two decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "sports_events_13", "selected_database": "sports_events", "query": "Which constructor has the best track record for finishing races? Calculate the reliability rate among all constructors who have participated in at least 5 races, which shows the races finished out of races started, and return the highest reliability percentage, rounded to two decimal places.", "normal_query": "Which constructor has the best track record for finishing races? Help me find the highest Constructor Reliability Rate among all constructors who have participated in at least 5 races. The result should be rounded to two decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "sports_events_14", "selected_database": "sports_events", "query": "Find drivers with at least 10 laps recorded, and identify the highest stability of a driver's lap times during a race. Give me the top score rounded to two decimals.", "normal_query": "I want to identify the best Lap Time Consistency performance from drivers who have completed at least 10 laps. Please provide me with the highest consistency score rounded to two decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "sports_events_15", "selected_database": "sports_events", "query": "What's the absolute fastest lap time ever recorded in our database, measured in seconds? Make sure to ignore any zero or negative times.", "normal_query": "What is the fastest single Lap Time in Seconds recorded in the database? 
Exclude any zero or negative lap times.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "sports_events_16", "selected_database": "sports_events", "query": "To do the performance analysis, please help me calculate the average duration of our pit stops (in seconds), excluding any records where the duration is not a positive value. I want a single output, rounded to three decimal places.", "normal_query": "For our performance analysis, please calculate the Average Pit Stop Duration (in seconds), excluding any records where the duration is not a positive value. The final output should be a single value, rounded to three decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 3, "distinct": false, "order": false}} +{"instance_id": "sports_events_17", "selected_database": "sports_events", "query": "I need to know if we have any circuits with specific environmental characteristics regarded as 'high-altitude'. Can you just give me a simple 'Yes' or 'No' answer?", "normal_query": "I need to know if we have any circuits that are considered a High-Altitude Circuit. Can you just give me a simple 'Yes' or 'No' answer?", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "sports_events_18", "selected_database": "sports_events", "query": "Please analyze every session that constitutes a championship race weekend and count the occurrences of unavailable date or time information. What's the session name with the highest total count of indeterminate entries?", "normal_query": "Please analyze every session within the standard Race Weekend Structure and count the occurrences of Indeterminate Event Timings for each session type. What's the session name with the highest total count of indeterminate entries?", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "sports_events_19", "selected_database": "sports_events", "query": "Calculate the time difference between each driver's qualifying lap and the pole sitter's lap, and then categorize drivers into three groups based on their qualifying performance. Return driver IDs, average deficits (rounded to 3 decimal places), and their qualifying cluster.", "normal_query": "Calculate each driver's Qualifying Time Deficit to Pole, and then categorize drivers into three groups based on Qualifying Performance Cluster. 
Return driver IDs, average deficits (rounded to 3 decimal places), and their qualifying cluster.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 3, "distinct": false, "order": false}} +{"instance_id": "sports_events_20", "selected_database": "sports_events", "query": "Can you calculate the average stops per car for each event, and just show me the total count of races classified as a 'Single-Stop Race' based on the pit strategy classification criteria?", "normal_query": "Can you calculate the Average Stops Per Car for each event and just show me the total count of races classified as a 'Single-Stop Race' based on Pit Strategy Cluster?", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "sports_events_M_1", "selected_database": "sports_events", "query": "Please add a boolean column to the table that records pit stops to identify whether the pit stops are efficient. The new column should be named 'is_efficient' and contain TRUE for efficient stops and FALSE otherwise. Besides, the value should remain NULL if the millisecond count is NULL.", "normal_query": "Please add a boolean column to the pit_stops table based on the Efficient Pit Stop criteria for our analysis. The new column should be named 'is_efficient' and contain TRUE for efficient stops and FALSE otherwise. Besides, the value should remain NULL if the millisecond count is NULL.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "sports_events_M_2", "selected_database": "sports_events", "query": "Can you create a function named get_driver_age that takes the driver information (JSONB) as input and calculates their current driver age?", "normal_query": "Can you create a function named get_driver_age that takes a driver_identity JSONB parameter as input, extracts the birth_date from it, and returns the driver's current age in years as an INTEGER?", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "sports_events_M_3", "selected_database": "sports_events", "query": "Create a high_altitude_circuits view that shows all circuits that can be classified as high-altitude. I want to see their circuit ID, name, and elevation.", "normal_query": "Create a view called high_altitude_circuits showing all High-Altitude Circuit entries. 
Include the circuit key, name, and elevation from the circuits table.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "sports_events_M_4", "selected_database": "sports_events", "query": "Update the race records with a victory type marker using the sprint results timing data, setting victory_type to 'Dominant Victory' if the criteria are satisfied.", "normal_query": "Update the races table to flag Dominant Victory events in the event_schedule JSONB field (set victory_type to 'Dominant Victory').", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "sports_events_M_5", "selected_database": "sports_events", "query": "Create a stored procedure named award_hat_trick that takes driver ID and race ID as parameters to verify and record the three key achievements in a single race weekend. If all three conditions are met, insert a record with achievement type being 'Hat Trick' into the table that records all achievements.", "normal_query": "Create a stored procedure named award_hat_trick that takes driver_id and race_id as parameters to verify and record if a specified driver accomplished a 'Hat Trick'. If all three conditions are met, insert a Hat Trick record (achievement_type should be 'Hat Trick') into the achievements table.", "preprocess_sql": ["-- Pre-process SQL to create achievements table\nCREATE TABLE IF NOT EXISTS achievements (\n achievement_id SERIAL PRIMARY KEY,\n driver_id INTEGER NOT NULL,\n race_id INTEGER NOT NULL,\n achievement_type VARCHAR(50) NOT NULL,\n recorded_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n FOREIGN KEY (driver_id) REFERENCES drivers(drv_main),\n FOREIGN KEY (race_id) REFERENCES races(rak_id),\n CONSTRAINT unique_achievement UNIQUE (driver_id, race_id, achievement_type)\n);"], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "sports_events_M_6", "selected_database": "sports_events", "query": "Build a view called podium_finishes showing all podium finishes in season standings. Display the driver's last name, race year, and their finishing position.", "normal_query": "Create a view named podium_finishes that displays all Podium Finish achievements in season standings. Show the driver surname (from driver_identity JSONB), race year, and final position.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "sports_events_M_8", "selected_database": "sports_events", "query": "Please add a new true/false column called is_pole_position to the qualifying table with a default value of FALSE, then mark it as TRUE for whoever got the pole position.", "normal_query": "Add a new boolean column named is_pole_position to the qualifying table with a default value of FALSE. 
Then update this column to TRUE for all records having achieved Pole Position.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "sports_events_29", "selected_database": "sports_events", "query": "I need to create a custom domain called championship_points based on the REAL data type that only allows zero or positive numbers for championship points.", "normal_query": "I want to create a custom domain named championship_points based on the REAL data type to store Championship Points System (Race) values. The domain should include a CHECK constraint to ensure all values are non-negative.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "sports_events_30", "selected_database": "sports_events", "query": "I need to clean up our constructor database by handling missing nationality information. Can you find all the teams where we don't know what country they're from and mark those entries as 'Not Recorded' instead of leaving them empty?", "normal_query": "I need to clean up our constructor database by handling missing nationality information. For all constructors where the nationality field shows Data Unavailability, please update these records to explicitly indicate 'Not Recorded' instead of leaving them blank.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "labor_certification_applications_1", "selected_database": "labor_certification_applications", "query": "I'm trying to figure out which visas take the longest to get approved. Can you give me a breakdown of the average wait time for each type of visa? Just show me the ones that actually got certified, and list them from longest to shortest wait time.", "normal_query": "I'm curious about how long it takes for different visa applications to get approved. Could you show me the average Application Processing Time for each of the Visa Classification Types? Please only include applications that were certified, and sort the list to show the visa types that take the longest on top.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "labor_certification_applications_2", "selected_database": "labor_certification_applications", "query": "What's the percentage of H-1B applications that are successful?", "normal_query": "I want to know the Approval Rate for H-1B Visa Classification Types. Can you calculate the percentage of H-1B visa applications that end up being certified?", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "labor_certification_applications_3", "selected_database": "labor_certification_applications", "query": "I'm trying to see if bigger companies have an easier time getting visas approved. 
Can you break down companies based on different employer sizes? Then, for each size, tell me the average application success rate for getting those visas approved. I want to see how many companies are in each size group, and what their average approval rate is, from the highest approval rate to the lowest.", "normal_query": "I'm looking to understand how the size of an employer, based on their application volume, relates to their success in getting visa applications approved. Can you categorize employers into Employer Size Classifications, and then calculate the average Application Success Rate for each of these categories? I'd like to see the number of employers in each size category, and their average success rate, sorted from highest success rate to lowest.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "labor_certification_applications_4", "selected_database": "labor_certification_applications", "query": "I'm curious to know which soc code categories are most frequently associated with successful H-1B visa applications. Can you list the top 5 job soc titles that appear most often in certified H-1B visa cases? I want to see which jobs are most commonly approved for H-1B visas.", "normal_query": "I'm interested in identifying the most frequently certified occupations for H-1B visas. Could you provide a list of the top 5 SOC Code Framework that appear most often in certified H-1B visa applications? The output should include the job title and the number of certified H-1B applications for each title, sorted in descending order by the number of applications.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "labor_certification_applications_5", "selected_database": "labor_certification_applications", "query": "The goal is to calculate the average wage differential ratio for top-level positions that are compensated on an annual basis. When a pay range is provided, the midpoint between the lower and upper amounts should be used. If only a single value is available, that value will be used as the offered amount. This ensures consistency in how compensation is interpreted across all relevant records.", "normal_query": "I want to analyze the Wage Differential Rate specifically for top-tier, high-skill roles that are paid on a yearly basis. For these roles, when a salary range is given, the midpoint between the lower and upper values should be used; if only one value is available, that value should be used directly. The calculation should focus only on entries where all required wage information is clearly provided and valid. 
Finally, I want to compute the average percentage difference between the offered pay and the standard market rate for these positions.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "labor_certification_applications_6", "selected_database": "labor_certification_applications", "query": "Within the custom computer programming services industry where NAICS code equals to 541511, how many annually paid positions qualify as significantly high wage positions? I'm looking for a count of positions where the wage differential ratio exceeds 20%.", "normal_query": "I am analyzing compensation trends within the Custom Computer Programming Services industry, specifically NAICS code 541511. I need to determine the number of Premium Wage Positions that are paid annually. Can you provide a count of the positions within this industry where the Wage Differential Rate exceeds 20%?", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "labor_certification_applications_7", "selected_database": "labor_certification_applications", "query": "I need to identify attorneys with high level performance who specialize in E-3 Australian visas. Can you provide a count of attorneys who meet the criteria of a high performer and for whom E-3 Australian visa cases constitute more than 50% of their total caseload?", "normal_query": "I am interested in identifying attorneys who are highly proficient in handling E-3 Australian visa applications. Can you provide a count of attorneys who qualify as High Performers, based on the Attorney Performance Rating, and for whom E-3 Australian visa cases constitute a significant portion of their practice? Specifically, I need the number of attorneys where more than 50% of their caseload consists of E-3 Australian visa applications.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "labor_certification_applications_8", "selected_database": "labor_certification_applications", "query": "I want to see how competitive the salaries are for software quality assurance analysts and testers jobs that pay yearly. Group these jobs into the wage competitiveness levels, and for each level, show the total number of positions, where positions means the total worker count from the dataset's head count information, not the number of rows. Sort so the most common category is at the top.", "normal_query": "I am preparing to analyze the distribution of Wage Competitiveness Tiers for Software Quality Assurance Analysts and Testers positions. Specifically, I would like to see a breakdown of annually paid positions in this occupation, categorized by their wage competitiveness level. For clarity, positions here are defined strictly as the total number of worker positions calculated by summing the head count information from the dataset, rather than counting individual records. 
Please provide the total number of worker positions in each tier, and sort the results by the number of positions in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "labor_certification_applications_10", "selected_database": "labor_certification_applications", "query": "I'm looking to compare how long visa applications take to process based on how complex they are. Could you sort the applications into application complexity levels and for each group, tell me how many applications there are along with the average processing time in days? Please round the averages to two decimals and list the groups starting with the ones that take the longest.", "normal_query": "I require a comparative analysis of visa application processing times segmented by application complexity. Please classify each application using the Application Complexity Tiers. Then, for each complexity category, compute the number of applications and the average processing time in days. Round the average processing time to two decimal places and present the results in descending order of average processing time.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "labor_certification_applications_11", "selected_database": "labor_certification_applications", "query": "I'm trying to figure out when people are submitting their visa applications to see if they're doing it at the best time. Could you help me break down the applications based on different visa filing window? And I also need to know how many applications are in each category and what percentage of the total they make up and round it to two decimal places. Please sort the results from most to least applications.", "normal_query": "I am eager to create a report detailing the Visa Filing Window Distribution for all visa applications. The report should categorize applications based on Visa Filing Window. The output should include the category name, the number of applications falling into each category, and the percentage of total applications represented by each category, rounded to two decimal places. Please present the results in descending order by the number of applications.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "labor_certification_applications_12", "selected_database": "labor_certification_applications", "query": "Find the jobs where the offered pay is over 10% higher than the going market rate and there are at least 30 applications. Only include cases where both pay figures are available and in the same pay unit, like both per hour or both per year. For each job, list its title, number of applications, average percentage pay difference (rounded to two decimals), and mark it as “Skill Shortage Occupation”. Show only the top five with the biggest average pay differences, starting from the highest.", "normal_query": "For each occupation, identify those that meet the definition of Skill Shortage Occupations — having a Wage Differential Rate (WDR) greater than 10% and at least 30 applications. 
Include only situations where both the offered wage and the prevailing wage are available and measured in the same pay unit. Show the occupation title, total applications, average WDR (rounded to two decimals), and mark them with the category “Skill Shortage Occupation”. List only the top five with the highest average WDR in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "labor_certification_applications_13", "selected_database": "labor_certification_applications", "query": "List the states that have at least 1.5× the national average of visa applications, and tell me how many such hotspot states there are in total.", "normal_query": "For each U.S. state, identify Geographic Application Hotspots based on visa application counts exceeding 1.5 times the national average. The output should include the list of hotspot states and the total count of such hotspot states.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "labor_certification_applications_14", "selected_database": "labor_certification_applications", "query": "Can you figure out which industries depend on visas? I'd like to see the NAICS code, the total number of applications, and the percentage of all applications rounded off to two decimal places. List the results starting with the industries that have the highest percentages.", "normal_query": "Please determine which industries qualify as a Visa-Dependent Industry. The output should include the NAICS code, the total number of applications, and the percentage of total applications (rounded to two decimal places). Sort the results from the highest to the lowest percentage.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "labor_certification_applications_15", "selected_database": "labor_certification_applications", "query": "Can you group attorneys based on the types of visa cases they usually work on? Only include attorneys who’ve worked on at least 5 cases. For each group, show the category name, how many attorneys are in it, their average specialization score rounded off to two decimals, and the average percentage of cases they handle in their main visa type rounded to two decimals. Sort the list so the categories with the most attorneys come first.", "normal_query": "For each attorney, categorize them into Attorney Specialization Categories based on their visa case specialization. Only include attorneys who have handled at least 5 cases. For each category, display the category name, the number of attorneys in each category, the average Attorney Specialization Index (ASI) (rounded to two decimal places, as handled in the SQL), and the average percentage of cases they handle in their dominant visa type (rounded to two decimal places). 
Sort the results by the number of attorneys in each category in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "labor_certification_applications_16", "selected_database": "labor_certification_applications", "query": "I'm trying to figure out which states have the best lawyers when it comes to handling visa cases. Could you help me check the rate of attorney success for different states? I'd like to see: which state they're practicing in, how many cases they've handled in total, how many cases were successful and their success rate as a percentage with 2 decimal points. Let's focus on states where attorneys have handled at least 3 cases, and just show me the top 3 states with the highest success rates.", "normal_query": "Could you analyze the performance of attorneys across different court jurisdictions by calculating their Attorney Success Rate? Please show the jurisdiction state, total number of cases handled, number of certified cases, and success rate as a percentage with 2 decimal places. Only include jurisdictions with at least 3 cases and show the top 3 most successful jurisdictions.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "labor_certification_applications_17", "selected_database": "labor_certification_applications", "query": "I'm curious about the wage levels for H-1B jobs - could you help me break down the numbers? I'd like to know how many applications we have for each wage level, and what percentage of the total they make up with 2 decimal points. And rank them from most common to least common.", "normal_query": "I need an overview of Prevailing Wage Levels distribution in H-1B visa applications with a valid prevailing wage level. Please show each wage level along with its application count and the percentage share of total applications, with percentages shown to 2 decimal places. Sort the results to highlight which wage levels are most commonly requested.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "labor_certification_applications_18", "selected_database": "labor_certification_applications", "query": "Could you help me find out which industries tend to pay more than others? I'd love to see the top 5 highest-paying industries and just show me their NAICS codes, how many job applications each industry has, and average industry wage difference. Make sure we're only looking at entries with valid NAICS codes and consistent wage units, and round the wage differences to 2 decimal places to keep it clean.", "normal_query": "Can you analyze how wages differ across industries by calculating the Industry Wage Differential for each NAICS code where the NAICS code is valid and the wage units are consistent? Please show: the industry NAICS code, number of applications in that industry and average wage differential (rounded to 2 decimal places). 
Show only the top 5 industries with the highest wage differentials.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "labor_certification_applications_19", "selected_database": "labor_certification_applications", "query": "I'm wondering if having a lawyer really boosts your chances of getting a visa approved. Can you compare visa approval rates for applications filed with an attorney versus those filed without one? I want to see the total number of applications in each category, the number that were approved, and the approval rate percentage, rounded to two decimal places. Include all the attorney cases.", "normal_query": "I am working on analyzing the effectiveness of legal representation in visa application outcomes including all the attorney cases. Can you provide a report comparing the approval rate for applications that used an attorney versus those that were self-represented? I'd like to see the total number of applications in each category, the number of certified applications, and the calculated approval rate rounded to two decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "labor_certification_applications_20", "selected_database": "labor_certification_applications", "query": "Which jobs are most in demand for visa applications? Could you show me the top 5 jobs that are most popular? I'd like to see the job title, how many applications there are for each, and some kind of occupational demand index for each job, rounded to two decimal places. Also, let's just stick to jobs that have valid SOC codes.", "normal_query": "I'm interested in understanding the relative demand for different occupations within the visa application process. Could you generate a report showing the top 5 occupations with the highest Occupational Demand Index? The report should include the occupation title, the number of applications for that occupation, and the calculated ODI rounded to two decimal places. Please only include occupations with valid SOC codes.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "labor_certification_applications_M_1", "selected_database": "labor_certification_applications", "query": "Can you make a table called 'employer_analytics' that shows how big each employer is in our visa database? I'm looking to track which companies submit lots of visa applications versus just a few. For each employer, I need their name, their employer scale indicator number, and their employer size category. If this table's already existed, just update it with the newest information.", "normal_query": "Could you create a table called 'employer_analytics' that calculates and stores the Employer Scale Indicator for each employer in our visa application database? I need the table to include the employer name, their ESI value, and categorize them according to the Employer Size Classification framework. 
If the table already exists, please update the records with the latest values.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "labor_certification_applications_M_2", "selected_database": "labor_certification_applications", "query": "Hey, can you help me sort our visa attorneys into different categories based on their specialization patterns? I need to add a new column to our attorney table that shows if each lawyer is a 'Specialist,' 'Hybrid Practitioner,' or 'Generalist' depending on the different attorney specialization classification standard.", "normal_query": "Will you identify and categorize attorneys in our visa database according to their Attorney Specialization Category? This requires adding a new column to the attorney table to store this classification.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "labor_certification_applications_M_3", "selected_database": "labor_certification_applications", "query": "I'm trying to figure out how long our visa applications take to process. Can you make a simple procedure called 'calculate_apt' that works out the time taken for application processing for each case? After you've created the procedure, could you run it to update all our records?", "normal_query": "I intend to implement a procedure to calculate the Application Processing Time for our visa applications database. Could you create a stored procedure called calculate_apt? After creating the procedure, please execute it to update all records.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "labor_certification_applications_M_4", "selected_database": "labor_certification_applications", "query": "I'm trying to build a function for our visa database that figures out the wage differential ratio. Can you make it round to two decimal places and return null if there's no prevailing wage? I need it to take four inputs: the offered wage amount, prevailing wage amount, and both of their payment units.", "normal_query": "I am considering creating a PostgreSQL function that calculates the Wage Differential Rate in our visa application database. Please round the final percentage to two decimal places and return null if the prevailing wage is zero. The function should accept four parameters: offered wage amount, prevailing wage amount, offered wage unit, and prevailing wage unit.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "labor_certification_applications_M_5", "selected_database": "labor_certification_applications", "query": "We want to add a new column that shows how much employers rely on visa workers, but only for those who have submitted any applications. Just sort them into Low, Moderate, or High based on their visa usage. 
Use 20 times their case count to estimate workforce and handle any divide-by-zero issues.", "normal_query": "We need to enhance our employer table by adding an Employer Dependency Level classification column. Please create an enumerated type with three dependency levels (Low, Moderate, High) and update the classification only for employers that have submitted at least one application. For workforce estimation, use a factor of 20 times the distinct case count per employer. Make sure to handle division by zero properly.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "labor_certification_applications_M_6", "selected_database": "labor_certification_applications", "query": "Can you set up something that automatically checks how strong a wage is whenever someone adds or changes one? I want the system to figure out the difference between what’s being offered and what’s typical, then sort it into the right group. Also, make sure it works even if the wages are in different formats, like hourly versus yearly.", "normal_query": "Please build an automated system that assigns each wage entry to a category based on the Wage Competitiveness Tiers framework. This system should activate whenever a new entry is added or an existing one is modified. It needs to calculate the Wage Differential Rate (WDR) and use that value to determine the appropriate category. The process must ensure any required conversions between wage types — such as hourly or annual — are handled correctly during the calculation.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "labor_certification_applications_M_7", "selected_database": "labor_certification_applications", "query": "I am curious about companies that regularly file visa applications throughout the year? I'm looking for these continuous filing employers. I'd like to see each employer's name, how many total applications they filed, in how many different months they submitted applications, and whether they qualify as continuous filers or not. Could you make this into a procedure where I can specify which year I want to look at? If I don't specify a year, just use the current year by default.", "normal_query": "Could you identify all continuous filing employers for the current year? I'd like to see the employer name, their total number of applications, how many months they filed in, and whether they qualify as continuous filers. Please make this as a procedure that can accept a specific year parameter, defaulting to the current year if no year is provided.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "labor_certification_applications_M_8", "selected_database": "labor_certification_applications", "query": "I want to make our visa database smarter by adding a complexity score for each application. Could you add a new column called 'application_complexity_score' to our cases table that starts at zero by default? Then I need you to fill it in by calculating application complexity value. 
Just make sure each factor adds to the score when it applies.", "normal_query": "I need to enhance our visa application database by adding and calculating the Application Complexity Score for each case in our records. Please add a new integer column called 'application_complexity_score' to the cases table with a default value of 0, then populate it based on ACS standard. All these factors should contribute to the total score when positive.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "labor_certification_applications_M_9", "selected_database": "labor_certification_applications", "query": "I'm trying to understand when companies are submitting their visa applications compared to when people actually start working. Could you make a function that looks at the receipt date and start date to figure out the Visa Filing Window category? The function should take in those two dates and spit out which category the application falls into. Just make sure it handles the date formats correctly since they might be in text format.", "normal_query": "I wish to categorize all visa applications based on their Visa Filing Window timing. Create a function that determines how far in advance applications were submitted before the employment start date. The function should take the receipt date and begin date as inputs and classify applications into appropriate categories. Please ensure the function handles date conversions properly and returns the categorical result as text.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "labor_certification_applications_M_10", "selected_database": "labor_certification_applications", "query": "I’d like to know how good different companies are at keeping people in their jobs. Use whatever job history information we have to figure this out, but skip companies where there’s no relevant data.", "normal_query": "We want to enhance our employer records by calculating the Retention Rate, showing the percentage of how often employers keep their workers. This should be based on available employment signals and should only apply to employers that have relevant job history data.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "insider_trading_1", "selected_database": "insider_trading", "query": "Give me all trades for high-risk traders, with how much they traded and their leverage. Make sure to show the biggest trades first.", "normal_query": "Show all trades for high-risk compliance cases, including trader ID, trade amount, and leverage, ordered by the trade amount from highest to lowest.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "insider_trading_2", "selected_database": "insider_trading", "query": "Find me transactions that look suspiciously like insider trading. For these, calculate the Sentiment-Driven Leakage Risk. 
If that risk score is over 1000, show me the transaction ID, trader ID, time, the original leakage score, and the new SDLR score. Cap it at 100 results.", "normal_query": "Please identify transaction records that trigger a Potential Insider Trading Flag. For these flagged transactions, calculate their Sentiment-Driven Leakage Risk score. For transactions where this SDLR score is over 1000, please show the transaction register ID, the trader reference ID, the transaction timestamp, the original information leakage score, and the calculated SDLR score, limited to the top 100 results.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "insider_trading_3", "selected_database": "insider_trading", "query": "Let's compare our different kinds of traders. For each type, what's their average aggression and compliance score? Show me the trader type (in lowercase), their avg aggression, and avg compliance. List the most aggressive types first.", "normal_query": "I need an analysis comparing different types of traders. For each trader type, please calculate the average Aggressive Trading Intensity and the average Compliance Health Score. Display the trader type (all in lower case), the calculated average ATI, and the average CHS. Finally, sort the results by the average ATI in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "insider_trading_4", "selected_database": "insider_trading", "query": "Some traders seem to just copy others in their network. Find compliance cases for this behavior, then figure out an 'investigation intensity' score for them. Give me the top 100, sorted by that score, showing the case ID and the score.", "normal_query": "Please identify all compliance cases associated with traders flagged for Networked Mimicry Risk. For each of these specific cases, calculate the Investigation Intensity Index (III). List the compliance case registration ID and its corresponding Investigation Intensity Index (III). Finally, sort the results by the Investigation Intensity Index in descending order and show only the top 100 cases.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "insider_trading_5", "selected_database": "insider_trading", "query": "Let's find our riskiest manipulators. I mean traders who are either high-frequency with high risk-leverage, or have been confirmed for layering. For that specific group, what's their average 'uniqueness' score? I just need the single number.", "normal_query": "First, identify all traders who qualify as High-Risk Manipulator Candidates. Then, for this specific group of traders, calculate the average Unique Pattern Deviation Ratio based on their transaction history. 
Please provide only this single average value.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "insider_trading_6", "selected_database": "insider_trading", "query": "For our most intense insider trading investigations, what are the usual penalties? Tally them up and show me the list, from most common to least.", "normal_query": "I want to analyze the enforcement outcomes specifically for cases flagged as High-Intensity Insider Investigations. Could you provide a frequency count for each type of Penalty Imposed that resulted from these investigations? Please list the penalty types and their corresponding frequencies, ordered from the most frequent penalty to the least frequent.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "insider_trading_7", "selected_database": "insider_trading", "query": "Are the 'copycat' traders any good? Let's see. Compare their average risk-adjusted win rate to the traders who act independently.", "normal_query": "I want to compare the performance of traders potentially involved in Peer Mimicry Suspicion versus other traders. Please calculate the average Risk-Adjusted Win Rate for these two groups. Display a boolean indicating if the group represents Peer Mimicry Suspicion (True) or not (False), and the corresponding average RAWR for that group.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "insider_trading_8", "selected_database": "insider_trading", "query": "For traders who speculate on volatile events, what's their average Order Modification Intensity? Just give me the one number.", "normal_query": "I need to analyze the order modification behavior of a specific trader group. Please identify all traders classified as Volatile Event Speculators. Then, calculate the average Order Modification Intensity across all transactions associated with this group. Provide just the calculated average OMI.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 4, "distinct": false, "order": false}} +{"instance_id": "insider_trading_9", "selected_database": "insider_trading", "query": "I want to see the cases for high-frequency trades that resulted in a big fine over $100,000. If a trading restriction was applied in those cases, show me the case ID and the exact restriction type.", "normal_query": "List all enforcement actions for cases involving high-frequency trades where the penalty amount exceeded $100,000. 
Only include cases where a trading restriction was applied, and show the enforcement ID and the specific trading restriction period type.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "insider_trading_10", "selected_database": "insider_trading", "query": "What's the difference in average aggression score between trades with 'confirmed' vs 'suspected' layering? Show me the comparison.", "normal_query": "I need to compare the average Aggressive Suspicion Score between transactions where the layering index is 'Confirmed' and those where it is 'Suspected'. Please calculate the average ASS for each of these two groups. Display the layering status (in lower case) and the corresponding average ASS.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 3, "distinct": false, "order": false}} +{"instance_id": "insider_trading_11", "selected_database": "insider_trading", "query": "Give me the stats on how much trader 'TR94368' messes with their orders. I need a single row showing the trade count, and the min, avg, median, and max OMI.", "normal_query": "For the trader with ID 'TR94368', calculate the distribution statistics for their Order Modification Intensity (OMI) based on all their valid transactions. Please return a single row containing the trader's ID, the total count of transactions considered, and the minimum, average, median, and maximum OMI.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "insider_trading_12", "selected_database": "insider_trading", "query": "Time to bulk update risk scores. For all 2024+ cases, recalculate the behavioral risk score with the new suspicious activity index. Cap the score at 100, and use a temp table called `score_updates` to do the update.", "normal_query": "Please create a temporary table named `score_updates` containing new `behav_score` values for all cases with transactions from 2024 onwards. Calculate the new scores using the suspicious activity index, ensuring all calculation components use double precision and the final result is capped at 100. Then, use this temporary table to update the `reg_compliance` table.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "insider_trading_13", "selected_database": "insider_trading", "query": "Find trades from after-hours where the news leaked early and the price got choppy. Specifically, find trades near a 'post-market' announcement with an info leak rate over 0.8 and price acceleration over 3.0. Show me the trade ID, leak rate, and price acceleration.", "normal_query": "Find all trade records (`REC_KEY`) that occurred in proximity to a corporate event with an `announce_time` of 'Post-market hrs before' and an `info_leak_rate` greater than 0.8 score/hour. Additionally, these trades must have a corresponding `price_accel` value greater than 3.0 %/(hour²). 
List the record key, the info leak rate, and the price acceleration.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "insider_trading_14", "selected_database": "insider_trading", "query": "Find trades where liquidity impact was over 80k and there was a 'Strong' ignite signal. Show me the trade ID, liquidity impact, the signal, and the trader's type in lowercase.", "normal_query": "I need to identify trade records where the market experienced a high liquidity impact, defined as `liq_imp` greater than 80,000 USD/min, and where a 'Strong' `ignite_sig` was detected in the manipulation signals. For each of these records, please list the record key (`REC_TAG`), the liquidity impact, the ignite signal, and the associated trader's `typeFlag` (in lower case).", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "insider_trading_15", "selected_database": "insider_trading", "query": "Are our most-connected institutional traders also the least compliant? For every institution, show their ID, their compliance health score, and their network strength. Sort by the worst compliance score first.", "normal_query": "For all traders with a `typeFlag` as 'Institution', calculate two metrics: their Compliance Health Score (CHS) and their Insider Network Strength. Please display the trader's key (`TR_KEY`), the calculated CHS, and the `insider_net_str`. Sort the results by CHS in ascending order (worst first).", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "insider_trading_16", "selected_database": "insider_trading", "query": "Find our cowboys—traders who are aggressive with over 5x leverage, or who trade over half their balance daily. List their key and type.", "normal_query": "Identify all high-risk traders. A trader is considered high-risk if their leverage exposure is over 5.0 and their risk level is 'Aggressive', or if their daily turnover rate is over 0.5. Display their trader key and type flag.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": true, "order": false}} +{"instance_id": "insider_trading_17", "selected_database": "insider_trading", "query": "Find trades that might be insider trading. The flag goes up if leak score is > 50, it's near a corporate event, AND the announcement was pre-market or intraday. Show the record key and trader ID.", "normal_query": "Find trades flagged for potential insider trading. A flag is raised if the information leakage score is over 50, there is an upcoming corporate event, and the announcement time is 'Pre-market hrs before' or 'Intraday hrs before'. 
Return the record key and trader anchor.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "insider_trading_18", "selected_database": "insider_trading", "query": "Find trades that look like layering or spoofing. It's a match if layering is 'Confirmed', OR if spoofing probability is > 75% AND order modification intensity is > 1.0. Just show the record tags.", "normal_query": "Identify trades indicating a layering or spoofing manipulation pattern. A trade is suspect if its layering index is 'Confirmed', or if its spoofing probability is over 75% and its Order Modification Intensity is above 1.0. Display the record tag for each matching trade.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "insider_trading_19", "selected_database": "insider_trading", "query": "Find any cozy trader networks. The ones I want have a circle size > 5, a group score > 60, and use a 'Regular' communication path. Just show the root trader's key.", "normal_query": "Identify potential collusion networks. A network is flagged if its relationship circle size is greater than 5, its group score is over 60, and its communication path is 'Regular'. Show the root trader key for each flagged network.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "insider_trading_20", "selected_database": "insider_trading", "query": "Show me the hot cases. I want records with 'High' or 'Critical' alerts, 'High' investigation priority, and 'Intensive' monitoring. Just the record keys.", "normal_query": "Find compliance records under 'Elevated Regulatory Scrutiny'. This status applies when the alert level is 'High' or 'Critical', investigation priority is 'High', and monitoring is 'Intensive'. Return the compliance record keys.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "insider_trading_M_1", "selected_database": "insider_trading", "query": "Make a reusable list called `high_risk_trader_view` for our high-risk traders. For each, I need their ID, type, balance, daily volume, DTR, TLE, and risk level text.", "normal_query": "Please create a reusable view named high_risk_trader_view that identifies traders fitting the High-Risk Trader Profile. For each trader identified, the view should show their registration ID (tradereg), trader kind (tradekind), account balance (acctbal), daily volume (voldaily), their calculated Daily Turnover Rate (DTR), their extracted Trader Leverage Exposure (TLE), and the text description of their risk level (risk_level_text) from their performance data.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "insider_trading_M_2", "selected_database": "insider_trading", "query": "Time to bulk update risk scores. 
For all 2024+ cases, recalculate the behavioral risk score with the new suspicious activity index. Cap the score at 100, and use a temp table called `score_updates` to do the update.", "normal_query": "Please create a temporary table named `score_updates` containing new `behav_score` values for all cases with transactions from 2024 onwards. Calculate the new scores using the suspicious activity index, ensuring all calculation components use double precision and the final result is capped at 100. Then, use this temporary table to update the `reg_compliance` table.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "insider_trading_M_3", "selected_database": "insider_trading", "query": "Make a trigger to put a safety check on the enforcement actions table. Before an update can change a case's status to 'Resolved', it must check the investigation intensity. If that score is over 150, block the update and throw an error.", "normal_query": "Please create a database trigger function named prevent_premature_resolution. This function should be attached to the enforcement_actions table and fire before any update operation. Its purpose is to implement a Premature Resolution Block, where if the `enf_actions` ->> 'res_state' field is changed to 'Resolved' and the associated intensity score exceeds 150, the update is blocked.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "insider_trading_M_4", "selected_database": "insider_trading", "query": "If a compliance record's file state is 'Missing' or 'Delayed', bump its investigation priority to 'High'. Show me the IDs and new priority for everything you changed.", "normal_query": "Update the `invest_prior` column in the `reg_compliance` table. For every record where the `file_state` is either 'Missing' or 'Delayed', set the `invest_prior` to 'High'. After the update, return the record's primary key (`REC_COMP`) and the new `invest_prior` value for each modified row.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "insider_trading_M_5", "selected_database": "insider_trading", "query": "Let's bump up monitoring for our fast traders. First, find the record IDs for all high-frequency trades that are still on 'standard' monitoring. Then, take that list of IDs and update their monitoring level to 'enhanced'. Let me know which records you changed.", "normal_query": "Please perform an update on the `reg_compliance` table. First, identify all record keys (`REC_COMP`) where the associated trade has a `freq_tag` of 'High' and the record's current `mon_inten` is 'Standard'. 
Then, for all records matching these keys, set their `mon_inten` column to 'Enhanced' and return the `REC_COMP` of all updated rows.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "insider_trading_M_6", "selected_database": "insider_trading", "query": "Find our repeat offenders. I'm looking for traders with more than 3 past violations, a compliance rate of 'C' or 'D', or a recidivism score over 1.0. Show me their trader ID and their calculated recidivism score, which is their previous violations per year.", "normal_query": "Identify traders with a 'Problematic Compliance History'. This status applies to traders with over 3 previous violations, a compliance rate of 'C' or 'D', or a calculated Compliance Recidivism Score (CRS) over 1.0. The CRS is calculated as the number of previous violations per year of the trader's history. Display the trader's key and their calculated CRS.", "preprocess_sql": [], "clean_up_sqls": ["DROP VIEW IF EXISTS V_Problematic_Traders;"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "insider_trading_M_7", "selected_database": "insider_trading", "query": "Show me the top 10 records where news and social media feelings are most split. I need the ID and the score gap.", "normal_query": "For all sentiment analytics records that have both a news score and a social sentiment score, find the top 10 records with the greatest absolute difference between these two scores. Display the record ID and the calculated difference, rounded to 4 decimal places.", "preprocess_sql": [], "clean_up_sqls": ["DROP VIEW IF EXISTS V_Top10_Sentiment_Disagreement;"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": 4, "distinct": false, "order": true}} +{"instance_id": "insider_trading_M_8", "selected_database": "insider_trading", "query": "Let's see which fines really hurt. For all cases with a monetary penalty, calculate the ratio of the fine to the trader's account balance. I only want to see cases where this ratio is over 5%. Show me the enforcement ID, trader key, and the calculated ratio.", "normal_query": "Calculate the 'Enforcement Financial Impact Ratio (EFIR)'. The EFIR is the penalty amount divided by the trader's account balance. Return the enforcement record ID, trader key, and EFIR for cases where the ratio exceeds 0.05.", "preprocess_sql": [], "clean_up_sqls": ["DROP VIEW IF EXISTS V_High_Impact_Fines;"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "insider_trading_M_9", "selected_database": "insider_trading", "query": "I'm looking for gamblers who bet big on corporate news. Find traders where over 30% of their trades are linked to corporate events and who also have an 'Aggressive' risk level. Give me their IDs.", "normal_query": "Identify 'Aggressive Event Speculators'. These are traders with an 'Aggressive' risk level where over 30% of their trades are linked to corporate events. 
List the keys for all qualifying traders.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "insider_trading_M_10", "selected_database": "insider_trading", "query": "I need an overall risk rating for our trades. For each trade, calculate a composite score by averaging its suspicious activity index and its pattern anomaly score. Show me the 10 trades with the highest scores, along with their record key and the score itself.", "normal_query": "Calculate a 'Composite Suspicion Score' for all trade records. This score is the average of the 'Suspicious Activity Index (SAI)' and the 'Pattern Anomaly Score (PAS)'. Return the record key and composite score for the top 10 trades with the highest scores.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "virtual_idol_1", "selected_database": "virtual_idol", "query": "We're looking for our most spontaneous big spenders. Could you pull a list of our top 10 fans who have had at least one session with an extremely high spending rate? Let's rank them by that single best spending-per-minute session. I need to see their nickname, ID, that peak spending number, and their final rank on the list, with the highest listed first.", "normal_query": "Retrieve a ranked list of the top 10 fans classified as a 'Whale Fan' (Peak Monetization Index > 20). The output must include the fan's nickname, their fan ID, the calculated Peak Monetization Index rounded to two decimal places, and their final rank. The ranking should be in descending order of the index.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "virtual_idol_2", "selected_database": "virtual_idol", "query": "I'm curious about the quiet observers on our platform, the ones who consume a lot of content but rarely participate in chats. Could you analyze this group to see what type of content they prefer? I need a list of content categories and how many of these specific fans prefer each, with the most popular categories at the top.", "normal_query": "Generate a report summarizing the content preferences of fans classified as 'Engaged Lurkers' (avg_cci > 0.5 and avg_cs < 0.5). The output should list each content preference and the corresponding count of unique Engaged Lurkers, sorted in descending order of the count.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": true, "order": true}} +{"instance_id": "virtual_idol_3", "selected_database": "virtual_idol", "query": "I want to identify our most influential users on the platform. Can you generate a list of the top 20 people who have a large follower-to-following ratio and a high overall influence score? For each person on this list, I need to see their nickname, their calculated influence score, and their follower ratio. 
Also, please add their rank and sort them with number one at the top.", "normal_query": "Retrieve the top 20 fans who meet the definition of a 'Community Influencer' (FFR > 2.0 and CII > 10000). For each, provide their nickname, their Community Influence Index rounded to two decimals, their Follower-to-Following Ratio rounded to two decimals, and their rank based on the index. The final list must be sorted by rank in ascending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "virtual_idol_4", "selected_database": "virtual_idol", "query": "I'm trying to figure out if the fans who contact support a lot are also the ones we think might leave soon. Could you make a table that shows this breakdown for our entire user base? I want to see a count of fans for each combination: those who are 'at-risk' and contact support a lot, those who are 'at-risk' but don't, and the same for the 'not at-risk' folks.", "normal_query": "Generate a correlation analysis between a fan's support status and their churn risk. For all fans, categorize each into 'High-Maintenance' (SLS > 10) or 'Low-Maintenance' and 'At-Risk' (Churn Risk Flag = 'High') or 'Not At-Risk'. The output should be a table showing the churn status, support status, and the distinct count of fans in each cross-category.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": true, "order": false}} +{"instance_id": "virtual_idol_5", "selected_database": "virtual_idol", "query": "I need a list of our absolute best fans, the kind of people who are both big spenders and show up to all our events. For each person in that elite group, can you show me their nickname, how much they've spent in total, their score for event attendance, and what loyalty tier they're in right now? Please sort the list so the biggest spenders are at the top.", "normal_query": "Generate a profile of all fans who meet the criteria for the 'Idol Superfan' segment. For each qualifying fan, the output should list their nickname, their total spending in USD rounded to two decimals, their Event Participation Score, and their current Loyalty Reward Tier. The results should be sorted by total spending in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "virtual_idol_6", "selected_database": "virtual_idol", "query": "I have a theory that our happiest users are more likely to buy our merchandise. Could we check that? I'd like to see a comparison of the average merchandise spending habits between our biggest advocates—the ones who rate us really highly—and our strongest critics, the ones who give us low scores. Essentially, let's see what percentage of their total spending goes towards merch for each of those two groups.", "normal_query": "Generate a comparative analysis of the average Merchandise Affinity Score (MAS) between fans classified as 'Platform Promoters' (NPS 9-10) and those as 'Detractors' (NPS 0-6). 
The output must display the fan segment name and their corresponding average MAS, rounded to two decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "virtual_idol_7", "selected_database": "virtual_idol", "query": "I want to know which idol genres our most dedicated collectors are into. Could you first identify everyone who owns a large number of items and has a high completion rate for their collections? Then, for that specific group, tell me how many of them interact with idols from each genre. The final list should just show the genre and the number of these collectors, with the most popular genre at the top.", "normal_query": "Identify all fans classified as 'Collector Fans' and determine the popularity of virtual idol genres among this segment. The output should list each idol genre and the distinct count of Collector Fans who have interacted with that genre, sorted in descending order of the fan count.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": true, "order": true}} +{"instance_id": "virtual_idol_9", "selected_database": "virtual_idol", "query": "I need to understand how quickly our best fans start spending money. Can you run an analysis on our 'Idol Superfans'—the ones who are both top spenders and event enthusiasts? I want to see their average cumulative spending at key points after they sign up: specifically at the 7-day, 30-day, and 90-day marks. The output should just show these three milestones and the average total spend for each.", "normal_query": "Perform a cohort analysis on 'Idol Superfans' to determine their spending velocity. For this specific segment, calculate the average cumulative spending at three milestones post-registration: 7 days, 30 days, and 90 days. The output should display each milestone and its corresponding average cumulative spend rounded to two decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "virtual_idol_10", "selected_database": "virtual_idol", "query": "I want to see if our key influencers have a 'ripple effect' on chat conversations. Can you analyze chats where at least one of these influencers is present and measure the overall tone of the messages from everyone else? Specifically, for any chat with an influencer, look at all messages from non-influencers with the same idol in the same session, and give me the total counts of 'Positive', 'Negative', and 'Neutral' messages.", "normal_query": "Measure the Ripple Effect of 'Community Influencers' on chat sentiment. For chat sessions where at least one Community Influencer is present, calculate the total count of 'Positive', 'Negative', and 'Neutral' messages based on Interaction Tone, sent by non-influencer participants. 
The output should be a single row with the total counts for each sentiment.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 0, "distinct": false, "order": false}} +{"instance_id": "virtual_idol_11", "selected_database": "virtual_idol", "query": "I need a deep-dive profile of the fans we think we might lose. For every fan flagged with a high churn risk, can you show me where they stand compared to everyone else? I want to see their percentile rank for a few key behaviors: how sticky their platform usage is, their average spending per minute, and their average content consumption rate. Please list the fan's nickname along with these three percentile ranks, and sort them to show the ones with the worst platform stickiness at the top.", "normal_query": "Generate a behavioral profile for all 'At-Risk Fans'. For each fan with a 'High' Churn Risk Flag, calculate their percentile rank across the entire fan base for three key metrics: Platform Stickiness Score (PSS), average Fan Monetization Index (FMI), and average Content Consumption Index (CCI). The output should list the fan's nickname and their three percentile ranks (rounded to three decimal places), sorted by the lowest stickiness percentile.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 3, "distinct": false, "order": true}} +{"instance_id": "virtual_idol_12", "selected_database": "virtual_idol", "query": "I'm concerned our most socially-connected users might be disengaging. For all the fans who have a large social network and belong to multiple groups, can you find their longest period of inactivity? I'm only interested in seeing people who have been gone for more than two weeks. Please show me a list of their nicknames and the number of days in their longest absence, sorted from the longest time away to the shortest.", "normal_query": "For each fan classified as a 'Social Butterfly' (SCS > 1000 and Group Memberships > 5), calculate their longest inactivity streak in days. The output should list the fan's nickname and their streak duration, but only for fans whose streak is greater than 14 days. Sort the results in descending order of the streak duration.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "virtual_idol_13", "selected_database": "virtual_idol", "query": "I want to do a quadrant analysis to better understand our fan base. Could you categorize every fan based on their financial value to us and their support needs? Specifically, split everyone into a top half and bottom half based on their monthly financial value, and do the same for their support ticket volume. This should give us four groups, like 'High-Value, Low-Support' or 'Low-Value, High-Support'. Please show me the names for these four segments and the number of fans in each, with the largest group listed first.", "normal_query": "Generate a quadrant analysis report by creating four fan segments. These segments are based on a 2x2 grid, dividing all fans into a top and bottom 50% for both Fan Financial Value (FFV) and Support Load Score (SLS). 
The output must list the name of each fan segment and the total number of fans within it, sorted in descending order by the fan count.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "virtual_idol_14", "selected_database": "virtual_idol", "query": "I need a leaderboard of the biggest spenders for each of our idols. Can you go through every idol and, for each one, identify their top three financial supporters? The ranking should be based only on the total value of gifts a fan has given to that specific idol. The output should show the idol's name, the fan's nickname, how much they've given to that idol, and their rank for that idol.", "normal_query": "For each virtual idol, generate a ranked list of their top three contributing 'Whale Fans'. The ranking must be based on the total gift value each fan has given to that specific idol. The output should include the idol's name, the fan's nickname, the total gift value to that idol, and the fan's rank (1, 2, or 3) for that idol. The final list should be sorted by idol name, then by rank.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "virtual_idol_15", "selected_database": "virtual_idol", "query": "I want to understand the journey our users take to become paying members. Can you calculate the average time it takes for a fan to upgrade to a premium account after their first interaction with us? And alongside that, could you also figure out, on average, how many interactions they have with the platform before they decide to subscribe? The final result should just be those two numbers.", "normal_query": "Analyze the conversion funnel for fans who become 'Premium Members' (membership kind is not 'Free'). Calculate two metrics: the average Time to Conversion in days (from first interaction to subscription date) and the average number of interactions that occurred before this conversion. The final output should be a single row containing these two averages.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "virtual_idol_16", "selected_database": "virtual_idol", "query": "I'm worried about the activity patterns of fans who might be about to leave. Can you look at everyone who is flagged as a high churn risk and calculate the average time between their interactions? From that group, I want a list of the top 15 who have the longest average gaps, showing their nickname and that average number of days they wait between activities.", "normal_query": "For the top 15 'At-Risk Fans' (Churn Risk Flag = 'High') with the longest average time gap between interactions, retrieve their nickname and the calculated average number of days between their successive interactions, rounded to two decimal places. 
The ranking must be based on this average gap in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "virtual_idol_17", "selected_database": "virtual_idol", "query": "I need to find our most consistently negative users to understand their issues. Can you generate a list of fans whose chat messages are flagged as 'Negative' more than 70% of the time? Please only include fans who have had at least one interaction with a recorded tone. For each fan on the list, show their nickname, their total number of interactions, their count of negative interactions, and the exact percentage. Sort the list to show the most negative person at the top.", "normal_query": "Identify fans with a consistently negative Interaction Tone. For fans with at least one interaction, generate a list where the 'Negative' tone accounts for over 70% of their total interactions with a recorded tone. The output must include the fan's nickname, total interactions, count of negative interactions, and the calculated negativity percentage, sorted in descending order by this percentage.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "virtual_idol_18", "selected_database": "virtual_idol", "query": "I need a really detailed, multi-dimensional report on our gift spending. Can you please show me the total gift spending broken down by every possible combination of these three things: the idol's genre, the fan's preferred language for content, and whether or not the fan has opted in to marketing? I need all the subtotals included, for example, by genre alone, by language alone, by genre and language together, all the way up to a grand total for everything.", "normal_query": "Generate a multi-dimensional report of total gift spending. The report must calculate the sum of gift values for every possible combination of idol genre, fan content Language Preference Setting, and marketing preference (e.g., opted-in or opted-out). All possible subtotals, including a grand total, must be included in the output using the CUBE operator.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "virtual_idol_19", "selected_database": "virtual_idol", "query": "We need to find our next big content creators before they blow up. Can you help me find users who have a knack for making content that could go viral, but haven't quite hit that top-tier influencer status yet? I'm looking for people with a high viral potential score but a community influence that's still in the medium range. For anyone who fits that description, could you show me their nickname, their exact viral score, and their community influence score? Let's list the ones with the highest viral potential at the very top.", "normal_query": "Identify all fans who are classified as 'Rising Star Influencers' (VPS > 50 and 1000 < CII < 10000). For each of these fans, provide their nickname, their Viral Potential Score (VPS), and their Community Influence Index (CII) rounded to two decimal places. 
The results should be sorted first by VPS in descending order, then by CII in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "virtual_idol_20", "selected_database": "virtual_idol", "query": "I'm wondering if being a dedicated achiever translates to having a good reputation in the community. Can you give me a breakdown of our 'Loyal Achievers'—the ones who consistently earn achievements and loyalty points—by their community reputation level? I'd like to see a table showing each reputation level and the number of these dedicated fans within it, sorted to show which level has the most.", "normal_query": "Analyze the distribution of 'Loyal Achievers' (AD > 0.2 and LPR > 500) across different Reputation Levels. The output should display each Reputation Level and the total count of fans classified as Loyal Achievers within that level. Sort the results in descending order of the count.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "virtual_idol_M_1", "selected_database": "virtual_idol", "query": "To help the marketing team, we need a special list of our top spenders that's easy to access. Can you run a process that creates a new table for this, let's call it WhaleFan_Summary? Once the table is ready, please fill it up by finding all of our Whale Fans. The way we find them is by looking at their single best spending-per-minute session, their Peak Monetization Index. For every fan who makes the cut, I need their ID, nickname, that peak spending value, and the date it happened.", "normal_query": "Execute a data provisioning task. First, create a new table named WhaleFan_Summary with a primary key on fan_id and a foreign key referencing fans.user_registry. The table must include columns for nickname, peak_fmi, peak_fmi_date, and a timestamp of calculation. Second, populate this table by calculating the Peak Monetization Index for every fan. Insert a row for each fan who qualifies as a Whale Fan, containing their fan ID, nickname, the calculated peak FMI value, and the corresponding date of the interaction.", "preprocess_sql": ["DROP TABLE IF EXISTS WhaleFan_Summary;"], "clean_up_sqls": ["DROP TABLE IF EXISTS WhaleFan_Summary;"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "virtual_idol_M_2", "selected_database": "virtual_idol", "query": "To speed up our dashboard reporting, I want you to create a materialized view named Fan_Segment_Analysis. This view should contain our Fan Segments analysis, categorizing users based on their Fan Financial Value (FFV) and Support Load Score (SLS). After creating the view, please also provide the command to refresh it with the latest data.", "normal_query": "Create a materialized view named Fan_Segment_Analysis to pre-calculate and store Fan Segments. The view should categorize all fans into a 2x2 grid based on their relative ranking for Fan Financial Value (FFV) and Support Load Score (SLS). 
After creation, execute a refresh of the view.", "preprocess_sql": ["DROP MATERIALIZED VIEW IF EXISTS Fan_Segment_Analysis;"], "clean_up_sqls": ["DROP MATERIALIZED VIEW IF EXISTS Fan_Segment_Analysis;"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "virtual_idol_M_3", "selected_database": "virtual_idol", "query": "We need to perform some database maintenance to manage our storage. I want you to create a data archival process for old interactions. First, please ensure a table named interactions_archive exists, with the same structure as the main interactions table. Then, you need to move all interaction records older than three years into this archive table, but only for users who meet two specific conditions: they must have an 'Inactive' Fan Status Tier and they must not be a Premium Member. After successfully copying the data to the archive, you must delete those same records from the original interactions table. This entire process, the copy and the delete, must be performed as a single, atomic transaction to ensure data integrity.", "normal_query": "Execute a data archival process within a single transaction. This process must first ensure an interactions_archive table exists. Then, it must copy all interaction records older than three years from fans with an 'Inactive' Fan Status Tier who are not Premium Members into the archive table. Finally, it must delete these same records from the primary interactions table.", "preprocess_sql": ["DROP TABLE IF EXISTS interactions_archive;"], "clean_up_sqls": ["DROP TABLE IF EXISTS interactions_archive;"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "virtual_idol_M_5", "selected_database": "virtual_idol", "query": "I need a way to track daily fan activity stats efficiently. Could you set up a summary table called fan_daily_activity? Then, for the fan 'FAN75581', can you run a process that gathers their total messages, gifts, and gift value for today and either adds it as a new line or just adds to their totals if they're already in there for today?", "normal_query": "Create a table fan_daily_activity if one does not exist. Then, perform an Upsert Operation for fan 'FAN75581'. The operation must aggregate their total messages, total gifts, and total gift value for the current date from the interactions table. If a record for this fan and date already exists in fan_daily_activity, the new values should be added to the existing ones; otherwise, a new record should be inserted.", "preprocess_sql": ["DROP TABLE IF EXISTS fan_daily_activity;"], "clean_up_sqls": ["DROP TABLE IF EXISTS fan_daily_activity;"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "virtual_idol_M_6", "selected_database": "virtual_idol", "query": "Let's automate our fan promotions. I want you to create a trigger named `on_spending_update_grant_vip`. This trigger should automatically change a fan's `status_tag` to 'VIP' in their profile as soon as their total spending in the `membershipandspending` table hits or goes over the $10,000 mark. 
This should work for both new purchases and updates to their spending record.", "normal_query": "Create a database trigger named on_spending_update_grant_vip and its associated function check_and_grant_vip_status. This trigger must automatically update a fan's status_tag to 'VIP', according to the Fan Status Tiers definition, whenever an INSERT or UPDATE on the membershipandspending table causes their total spend_usd to meet or exceed $10,000.", "preprocess_sql": ["DROP TRIGGER IF EXISTS on_spending_update_grant_vip ON membershipandspending;", "DROP FUNCTION IF EXISTS check_and_grant_vip_status();"], "clean_up_sqls": ["DROP TRIGGER IF EXISTS on_spending_update_grant_vip ON membershipandspending;", "DROP FUNCTION IF EXISTS check_and_grant_vip_status();"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "virtual_idol_M_7", "selected_database": "virtual_idol", "query": "To standardize how we handle money values, please create a custom Monetary Domain named monetary_value. This domain should be a numeric type that cannot be negative. After creating the domain, create a new table called transaction_log that uses this new domain for its transaction_amount column.", "normal_query": "Create a custom Monetary Domain named monetary_value which is a NUMERIC(12, 2) type that must be non-negative. Subsequently, create a new table named transaction_log utilizing this domain for the transaction_amount column.", "preprocess_sql": ["DROP TABLE IF EXISTS transaction_log;", "DROP DOMAIN IF EXISTS monetary_value;"], "clean_up_sqls": ["DROP TABLE IF EXISTS transaction_log;", "DROP DOMAIN IF EXISTS monetary_value;"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "virtual_idol_M_8", "selected_database": "virtual_idol", "query": "I've noticed that queries filtering interactions by both an idol and a platform are running slowly. To improve performance, could you create a Composite Index named idx_interactions_idol_platform on the interactions table? It should cover the columns for the idol pivot and the activity platform.", "normal_query": "To improve query performance for analyses related to idol and platform activity, create a Composite Index named idx_interactions_idol_platform on the interactions table. This index should be created on the interact_idol_pivot and act_plat columns.", "preprocess_sql": ["DROP INDEX IF EXISTS idx_interactions_idol_platform;"], "clean_up_sqls": ["DROP INDEX IF EXISTS idx_interactions_idol_platform;"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "virtual_idol_M_9", "selected_database": "virtual_idol", "query": "I need a quick, ad-hoc script to check the health of our top user segment. Can you write a server-side script that calculates the total number of Idol Superfans, then shows me a message with that count and what percentage of our total fans they make up? I don't need a table back, just the notice.", "normal_query": "Execute an anonymous procedural block that calculates the total number of Idol Superfans. The block must then raise a server notice containing this count and the calculated percentage of Idol Superfans relative to the total fan population. 
The script should not return a result set.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "virtual_idol_M_10", "selected_database": "virtual_idol", "query": "To improve our data quality, please add a Data Integrity Constraint to the loyaltyandachievements table. This constraint, named trust_value_is_a_percentage, must ensure that the trust_val column can only contain numbers between 0 and 100, inclusive.", "normal_query": "Add a Data Integrity Constraint named trust_value_is_a_percentage to the loyaltyandachievements table. This constraint must ensure that the trust_val column only accepts values within the inclusive range of 0 to 100.", "preprocess_sql": ["ALTER TABLE loyaltyandachievements DROP CONSTRAINT IF EXISTS trust_value_is_a_percentage;"], "clean_up_sqls": ["ALTER TABLE loyaltyandachievements DROP CONSTRAINT IF EXISTS trust_value_is_a_percentage;"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "organ_transplant_1", "selected_database": "organ_transplant", "query": "Let's dig into the files of patients who are getting positive crossmatch results.\nI need a list of these folks.\nFor each one, show me their ID, their PRA score, and whether they have those donor-specific antibodies.\nThen, based on our official rules for Antibody-Mediated Rejection (AMR) Risk Stratification, tell me if they're considered 'High Risk'.\nOh, and I also want to see the date of the last time we tried to find a match for them.\nSort the whole thing so the most sensitized patients are at the top.", "normal_query": "I want a report on all recipients with a positive crossmatch test to evaluate their risk profile.\nFor each recipient, display their registry ID, their Panel Reactive Antibody (PRA) score, and their donor-specific antibody (DSA) status.\nThen, classify their risk according to the formal Antibody-Mediated Rejection (AMR) Risk Stratification rules, labeling them 'High Risk' or 'Standard Risk'.\nAdditionally, for context, please include the timestamp of their most recent prior matching attempt if one exists.\nOrder the results to show recipients with the highest PRA scores at the top.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "organ_transplant_2", "selected_database": "organ_transplant", "query": "I need the pancreas waiting list, sorted exactly how the official Allocation Policy says we should for all the pending matches. Show me the patient's ID, what region they're in, their exact urgency status, the HLA mismatch number, and their final rank in their local area.", "normal_query": "Generate a report for transplant coordinators that follows the formal Allocation Policy for all pending pancreas matches. 
The report should display the recipient's registry ID, their allocation region, their specific medical urgency, their HLA mismatch count, and their final rank within their region.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "organ_transplant_3", "selected_database": "organ_transplant", "query": "I want to find the lung transplants that were either insanely expensive for the benefit, or a massive bargain.\nCould you calculate the Cost-Effectiveness Ratio for all the lung transplants we've finished? For the QALY part of the formula, just use the patient's quality-of-life score and multiply it by 5 years.\nThen, rank all of them and split the list into 20 groups. I want to see all the details—match, donor, and recipient IDs, and the final cost-effectiveness number rounded to two decimals—for only the absolute worst group and the absolute best group.", "normal_query": "I need to identify cost-effectiveness outliers.\nPlease calculate the Cost-Effectiveness Ratio (CER) for all 'Completed' lung transplants, assuming a Quality-Adjusted Life Year (QALY) gain of 5 years multiplied by the recipient's quality of life score.\nThen, using the `NTILE` window function, divide the results into 20 buckets.\nDisplay the full details (match ID, donor ID, recipient ID, and the final CER rounded to 2 decimal places) for all transplants that fall into the top and bottom buckets.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "organ_transplant_4", "selected_database": "organ_transplant", "query": "Let's see how often we find a perfect match within and between different ethnic groups.\nFirst, you need to identify every single Optimal Donor-Recipient Match we have.\nOnce you have that list of perfect pairs, I want a table that shows the donor's ethnicity down the side and the recipient's ethnicity across the top, with the cells showing the count of how many times each combination happened.", "normal_query": "I want a detailed ethnic compatibility report for all pairings that qualify as an Optimal Donor-Recipient Match.\nFor every such optimal match found, create a cross-tabulation showing the count of matches, with the donor's ethnicity as rows and the recipient's ethnicity as columns.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "organ_transplant_6", "selected_database": "organ_transplant", "query": "Let's check our CMV exposure risk. I want a list of all current and completed transplants where a CMV-positive donor gave an organ to a CMV-negative patient.\nFor each of these risky cases, show me the match ID and the transplant center. I also want to see the patient's Infection Risk score from their chart and, right next to it, the average infection risk—rounded to four decimals—for all the non-mismatched transplants done at that same hospital. I want to see if the scores reflect the risk. 
Please order the results by the hospital's ID.", "normal_query": "I need to perform a viral mismatch risk analysis for all completed and in-progress transplants.\nPlease produce a report that identifies every donor-recipient pair with a Cytomegalovirus (CMV) mismatch, defined as a CMV-positive donor matched with a CMV-negative recipient.\nFor each identified mismatch, display the match registry ID, the center where the transplant occurred, the pre-calculated Infection Risk, and compare this to the average Infection Risk (rounded to 4 decimal places) for all other transplants at that same center that did not have a CMV mismatch. The final report should be ordered by center ID.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 4, "distinct": false, "order": true}} +{"instance_id": "organ_transplant_7", "selected_database": "organ_transplant", "query": "I need a list of our absolute sickest patients—the ones on ECMO or a VAD.\nFor each of these patients, show me their ID and what kind of life support they're on.\nThen, calculate their full Patient Urgency Score. The crucial part is, I want to see their score next to the average score for all the other, more stable patients who are waiting for the same organ, with both scores rounded to four decimals. Let's see how big the gap is. Group the list by organ, and show the sickest patients first within each group.", "normal_query": "I need to assess the urgency of recipients currently on advanced life support.\nPlease identify all pending recipients who are on 'ECMO' or 'VAD' life support.\nFor each of these critical recipients, display their registry ID, the specific life support method, their calculated Patient Urgency Score, and compare this score to the average urgency score of other patients waiting for the same organ who are not on life support. Both scores should be rounded to 4 decimal places. Order the results by organ type, then by the critical patient's urgency score.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 4, "distinct": false, "order": true}} +{"instance_id": "organ_transplant_8", "selected_database": "organ_transplant", "query": "I'm wondering if how we ship organs really makes a difference. Can you run some numbers for me?\nLet's look at all our finished transplants.\nGroup them by how the organ was transported—you know, ground, helicopter, commercial air, all that.\nFor each of those transport types, I want to see the average Total Ischemia Time rounded to two decimals, and the average Expected Graft Survival Score rounded to four decimals.\nSort the results so I can see which transport methods are linked with the best outcomes.", "normal_query": "I need a report on the impact of ischemia time on expected graft survival, broken down by the transportation method used.\nFor every completed transplant, determine the Total Ischemia Time and retrieve the Expected Graft Survival (EGS) Score.\nGroup the results by the `trans_method` from the logistics table, and for each method, calculate the average Total Ischemia Time (rounded to 2 decimal places) and the average EGS Score (rounded to 4 decimal places). 
The report should be sorted by the average EGS Score from highest to lowest.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 4, "distinct": false, "order": true}} +{"instance_id": "organ_transplant_9", "selected_database": "organ_transplant", "query": "I want to know what the most common health problems our patients have and if those problems make surgery riskier.\nCan you go through all the patient files, break apart their list of health conditions, and find the top 5 most common ones?\nThen, for each of those top 5, figure out the average Surgical Risk Score for all patients who have that specific condition. I want to see the condition, how many people have it, and what the average risk score is, rounded to four decimals.", "normal_query": "I need to analyze the prevalence of comorbidities and their impact on surgical risk.\nFirst, analyze each individual condition listed for all recipients.\nThen, identify the top 5 most frequently occurring comorbidities across all patients.\nFinally, for each of these top 5 conditions, calculate the average Surgical Risk Score for the patients who have that comorbidity. Display the comorbidity, its total count, and the calculated average risk score rounded to 4 decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 4, "distinct": false, "order": true}} +{"instance_id": "organ_transplant_10", "selected_database": "organ_transplant", "query": "I need to see who's been stuck on our waiting list the longest. Can you pull a special report for me?\nFor each organ, find the 2% of patients who have been waiting longer than everyone else.\nFor this group, I want to see everything that might be making them hard to match: their patient ID, the organ they need, how many days they've been waiting, their PRA score, and a tally of their other health problems. Please group the list by organ and put the longest-waiting patients at the top of each group.", "normal_query": "Please generate a profile of our longest-waiting patients. For each organ type, identify the top 2% of pending recipients with the longest wait times using the `PERCENT_RANK` window function.\nFor this elite cohort of long-waiters, display their registry ID, organ type, wait time in days, their Panel Reactive Antibody (PRA) score to assess Immunological Sensitization, and a count of their listed comorbidities. Order the results by organ type and then by wait time descending.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "organ_transplant_11", "selected_database": "organ_transplant", "query": "I need to find out which of our hospitals are doing the heaviest lifting. I'm talking about the ones that take on the toughest cases, both in terms of travel and patient health.\nCan you create a ranking? For each hospital, come up with a 'Logistical Challenge Score' based on average distance and organ-on-ice time, and a 'Medical Complexity Score' based on average surgical risk and how many other illnesses the patients have.\nThen, average those two scores together to get a final 'Workhorse Score'. 
I just want to see the top 10 hospitals based on that final score, rounded to four decimals.", "normal_query": "I want to identify our 'workhorse' transplant centers, defined as those that handle a high volume of logistically and medically complex cases.\nFor each center, calculate a 'Logistical Challenge Score' (average distance * 0.4 + average expected ischemia time * 0.6) and a 'Medical Complexity Score' (average surgical risk * 0.7 + average number of recipient comorbidities * 0.3).\nThen, combine these into a final 'Workhorse Score' (Logistical Score * 0.5 + Medical Score * 0.5). Display the top 10 centers ranked by this final score, rounded to 4 decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 4, "distinct": false, "order": true}} +{"instance_id": "organ_transplant_13", "selected_database": "organ_transplant", "query": "I want to know if our fancy Decision Support System is actually helping us pick better matches. Can you check if its score lines up with the EGS score?\nPlease take all our completed transplants and divide them into 5 groups based on their DSS score. For each of these 5 buckets, tell me how many transplants are in it and what their average Expected Graft Survival Score is, rounded to four decimals. I want to see if the average EGS score goes up as the DSS score bucket goes up.", "normal_query": "I need to analyze if our Decision Support System score is aligned with our primary success metric, the Expected Graft Survival score.\nUsing the `WIDTH_BUCKET` function, group all completed transplants into 5 equal buckets based on their Decision Support System score, from the minimum to the maximum score in the dataset.\nFor each bucket, calculate the number of transplants and the average Expected Graft Survival Score, rounded to 4 decimal places. 
This will show if a higher DSS score correlates with a higher predicted graft survival.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 4, "distinct": false, "order": true}} +{"instance_id": "organ_transplant_14", "selected_database": "organ_transplant", "query": "I'm curious if some hospitals are more willing to take a chance on a less-than-perfect genetic match.\nFor every transplant center that has performed at least two transplants, can you calculate their average HLA Mismatch Score?\nAlso, for each of those centers, figure out the standard deviation so we can see if their mismatch numbers are all over the place or pretty consistent.\nThen, just show me the top 10 from that group with the highest average mismatch scores.\nI'll need to see their total number of transplants, that average mismatch score rounded to four decimals, and the standard deviation, also rounded to four decimals.", "normal_query": "I want to identify transplant centers that may have a higher tolerance for immunological risk.\nPlease calculate the average HLA Mismatch Score for all completed transplants at each unique transplant center that has performed two or more transplants.\nConcurrently, calculate the standard deviation of the mismatch scores for each center to understand the variability in their matches.\nDisplay the top 10 centers with the highest average mismatch scores, along with their transplant volume, the average mismatch rounded to 4 decimal places, and the standard deviation rounded to 4 decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 4, "distinct": false, "order": true}} +{"instance_id": "organ_transplant_16", "selected_database": "organ_transplant", "query": "I need to see the trade-offs we're making with our less-than-perfect donor organs.\nCan you pull a list of all matches that fall under our Marginal Donor Acceptance Criteria? That means either the age gap is huge—more than 25 years—or their kidney score is poor, say under 40.\nFor each of those matches, show me the donor and patient IDs, tell me exactly why we're calling the donor 'marginal', and then calculate the patient's standard Patient Urgency Score so I can see just how desperate they are. Round the score to four decimals. Sort it so the most urgent patients are at the top.", "normal_query": "Generate a risk-benefit report for transplants using organs from Marginal Donors.\nFirst, identify all donors who meet the Marginal Donor Acceptance Criteria, defined as having an age difference greater than 25 years with the recipient OR a Renal Function Score below 40.\nFor each of these marginal donor matches, list the donor ID, the recipient ID, the specific marginal criterion met ('Age Difference' or 'Low Renal Score'), and then calculate the standard Patient Urgency Score for the recipient to assess the necessity of using the marginal organ. The final score should be rounded to 4 decimal places. 
Sort the results by the calculated urgency score in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 4, "distinct": false, "order": false}} +{"instance_id": "organ_transplant_17", "selected_database": "organ_transplant", "query": "I want to figure out which transport method is the most efficient at getting organs delivered quickly relative to the distance they have to travel.\nCan you come up with a 'Logistical Efficiency Ratio' for every completed transplant? Just divide the total time the organ was on ice by the distance it traveled.\nThen, for each transport type—ground, air, etc.—I want to see the average, best, and worst efficiency ratio, all rounded to four decimals. Ignore any really short trips, say under 10km. Sort the list by the average efficiency.", "normal_query": "I need to analyze the logistical efficiency of different transport methods. For each completed transplant, calculate a 'Logistical Efficiency Ratio', defined as the Total Ischemia Time in minutes divided by the geographic distance in kilometers.\nA lower ratio indicates better efficiency. Then, for each `trans_method`, calculate the average, minimum, and maximum efficiency ratio, all rounded to 4 decimal places. The report should exclude any trips under 10km as outliers and be sorted by the average efficiency.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 4, "distinct": false, "order": true}} +{"instance_id": "organ_transplant_18", "selected_database": "organ_transplant", "query": "I want to see how our patients are doing based on how hard they were to match immunologically.\nCan you sort all the patients who've received a transplant into four groups based on their PRA score? Let's do 'Low' for 0-10, 'Moderate' for 11-79, 'High' for 80-95, and 'Very High' for 96 and up.\nFor each of these four groups, I want to see the total number of patients, the average HLA mismatch they ended up with rounded to two decimals, how long they had to wait on average to the nearest day, and what their average EGS score was, rounded to four decimals.", "normal_query": "I need a comprehensive profile of patient outcomes across different levels of Immunological Sensitization.\nGroup all recipients of completed transplants into four Panel Reactive Antibody (PRA) score buckets: 'Low' (0-10), 'Moderate' (11-79), 'High' (80-95), and 'Very High' (96-100).\nFor each bucket, calculate the total number of transplants, the average HLA Mismatch Score of their match (rounded to 2 decimal places), their average wait time in days (rounded to the nearest whole day), and the average Expected Graft Survival (EGS) score (rounded to 4 decimal places) for their transplant.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 4, "distinct": false, "order": true}} +{"instance_id": "organ_transplant_19", "selected_database": "organ_transplant", "query": "I'm curious about where our single HLA mismatches are happening. Are they usually on the A, the B, or the DR?\nCan you look at all our completed transplants that had exactly one mismatch? 
For that group, I want you to figure out which specific HLA type was the one that didn't match.\nThen, just give me a count: how many were mismatched at 'A', how many at 'B', and how many at 'DR'.", "normal_query": "I want to analyze potential HLA mismatch patterns. For all completed matches that have exactly one HLA mismatch, I need to determine on which specific locus (A, B, or DR) the mismatch occurred.\nPlease produce a report that counts the number of mismatches that occurred on the 'A-locus', 'B-locus', and 'DR-locus'.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "organ_transplant_20", "selected_database": "organ_transplant", "query": "I want to create a map of where we're struggling the most to find organs.\nCan you calculate a 'demand versus supply ratio' for each region and for each blood type, including the +/-?\nTo get 'demand', just count the number of patients waiting in a region for a certain blood type. For 'supply', count all the donors we've ever had from that region with that blood type.\nShow me a table with the region, the blood type, the number of patients, the number of donors, and the final ratio rounded to four decimals. Put the biggest problem spots at the top.", "normal_query": "I need to find geographic and blood type scarcity hotspots.\nFor each allocation region and for each main blood type (A, B, AB, O, and their Rh variations), calculate a 'Demand-to-Supply Ratio'.\n'Demand' is the number of pending recipients of that blood type in that region. 'Supply' is the total number of unique donors of that blood type from that region available in the entire dataset.\nDisplay the region, blood type, demand count, supply count, and the final ratio rounded to 4 decimal places, sorted to show the highest scarcity ratios first.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 4, "distinct": false, "order": true}} +{"instance_id": "organ_transplant_21", "selected_database": "organ_transplant", "query": "I need to know if we're getting less bang for our buck on the really urgent transplants.\nCan you run a cost-effectiveness analysis for me? For every completed transplant, figure out the CER. Let's just assume the quality-adjusted life year gain is 8 years times whatever their quality-of-life score is.\nThen, I want you to group the results by the patient's medical urgency status. Show me the average CER, rounded to two decimal places, for the Status 1A patients, the Status 1B patients, and so on. Keep them in order of urgency.", "normal_query": "I want to find out if transplanting sicker patients is less cost-effective.\nCalculate the Cost-Effectiveness Ratio (CER) for every completed transplant where cost and quality-of-life data are available. For the QALY gain, use a standard of 8 years multiplied by the patient's quality of life score.\nThen, group these transplants by the recipient's Medical Urgency Status tier ('Status 1A', 'Status 1B', 'Status 2', etc.) and calculate the average CER for each tier, rounded to 2 decimal places. 
The results should be ordered by urgency.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "organ_transplant_22", "selected_database": "organ_transplant", "query": "I'm worried some of our hospitals might be having a rough patch. I want to look for streaks of failed matches.\nCan you go through the data for each transplant center and find every time they had two or more failed matches in a row, based on when the match was created?\nI want a list that shows the hospital ID, the ID of the match that continued the streak, and the time it happened. Just show me the second failure in any given streak.", "normal_query": "I need a report on consecutive match failures at our transplant centers to identify potential systemic issues.\nFor each transplant center, find every instance where at least two consecutive matches (ordered by creation time) both had a final status of 'Failed'.\nThe report should list the center ID, the registry ID of the second failed match in the sequence, and the timestamp of that failure. Only show failures that are preceded by another failure, i.e., streaks of 2 or more.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "organ_transplant_M_1", "selected_database": "organ_transplant", "query": "We keep calculating the Size Compatibility Score over and over.\nCan we just build a tool for it?\nI want a function, let's call it `calculate_size_compatibility`, where I can just plug in a donor's ID and a recipient's ID, and it spits out the score.\nIt needs to find the BMI for both the donor and the recipient and then do the math.\nIf it can't find the BMI for either one, it should just return nothing instead of crashing.", "normal_query": "Create a reusable PostgreSQL function named `calculate_size_compatibility` that computes the Size Compatibility Score.\nThis function must accept a donor's registry ID and a recipient's registry ID as text inputs.\nIt should retrieve the Body Mass Index for both the donor and the recipient, then apply the standard formula for the Size Compatibility Score.\nThe function must be robust and return NULL if the BMI for either individual is missing or zero to prevent division errors.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "organ_transplant_M_2", "selected_database": "organ_transplant", "query": "Finding a perfect organ match is like looking for a needle in a haystack, and I want a special list that only shows these golden tickets.\nCan you build a 'live' list, let's call it `v_optimal_matches`, that shows every single donor-recipient pair that qualifies as an Optimal Donor-Recipient Match?\nI want this list to update itself without locking everything up every time we get a new patient registered in the system.", "normal_query": "I need a system to continuously identify every potential match that qualifies as an Optimal Donor-Recipient Match.\nFirst, create a materialized view named `v_optimal_matches` that contains the donor and recipient registry IDs for every such pair.\nSecond, create a trigger named `trg_refresh_optimal_matches` that 
automatically executes a procedure to refresh this materialized view concurrently whenever a new recipient is inserted into the `recipients_demographics` table.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "organ_transplant_M_3", "selected_database": "organ_transplant", "query": "I want to be able to check a donor's kidney health easily.\nCan you create a function called `get_donor_renal_score` that takes a donor's ID?\nIt should do the math for the Renal Function Score automatically. For the final score, use a weighting of 0.8 for the eGFR part and 0.2 for the creatinine part. I just need it to spit out the single score.", "normal_query": "Create a PostgreSQL function called `get_donor_renal_score` that accepts a donor's registry ID as a TEXT input.\nThis function should calculate the donor's Renal Function Score, using weights of 0.8 for the internal eGFR calculation and 0.2 for serum creatinine, and return it as a REAL value.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "organ_transplant_M_4", "selected_database": "organ_transplant", "query": "Let's create a special watchlist for all of our High-Risk Donors so we can track them easily.\nCan you build a materialized view for this? Call it `v_high_risk_donors`.\nIt should automatically pull in any donor who meets the official definition of high-risk.\nFor each donor on this list, I want to see their ID, their age, and a note on which specific risk factor got them on the list.", "normal_query": "I need a dedicated, up-to-date list of all donors who are classified as a High-Risk Donor based on the established criteria.\nPlease create a materialized view named `v_high_risk_donors`.\nThe view should list the donor's registry ID, their age, and the specific high-risk factor that was identified.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "organ_transplant_M_5", "selected_database": "organ_transplant", "query": "We need to keep track whenever a patient's life support status actually changes.\nCan you set up a log for that? 
First, make a new table called `life_support_audit` that can store a log entry number, the patient's ID, what the status was before and after the change, and when it happened.\nThen, create a trigger called `trg_audit_life_support_changes` that automatically adds a new line to this log only if the life support value is modified to something new.", "normal_query": "I need an audit trail for changes to a recipient's life support status.\nPlease create a new table called `life_support_audit` with columns for `audit_id`, `recipient_id`, `old_status`, `new_status`, and `change_timestamp`.\nThen, create a trigger named `trg_audit_life_support_changes` that fires after an update on the `recipients_immunology` table, inserting a new record into the audit table only when the `life_support` value actually changes.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "organ_transplant_M_6", "selected_database": "organ_transplant", "query": "I want a new summary table that tracks how well our transplant centers are doing.\nLet's call it `transplant_center_performance`.\nIt should show the center's ID, a count of all the transplants they've done, their average EGS score, and a final Center Performance Score.\nOnce the table is made, fill it up by calculating the score for every center.\nFor the score, let's say the number of surgeries they do is the most important part, maybe 70%, and their average graft survival outcome is the other 30%.", "normal_query": "Create a new table named `transplant_center_performance` to store analytical data.\nThis table should have columns for the center's identification code, the total number of transplants performed, the average Expected Graft Survival (EGS) score for that center, and a calculated Center Performance Score.\nAfter creating the table, populate it by analyzing all relevant transplant records.\nThe Center Performance Score should be calculated with a 70% weight on the center's total transplant volume and a 30% weight on its average EGS score.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "organ_transplant_M_7", "selected_database": "organ_transplant", "query": "I want a list of all the donors who died from Anoxia.\nLet's make a view for it called `v_anoxia_donor_profile`.\nJust show me the donor's ID, their age, and their kidney numbers—the creatinine and GFR values. This will help us quickly see if the organs are any good.", "normal_query": "Create a view named `v_anoxia_donor_profile` to help assess organ quality for a specific subset of donors.\nThe view should list all donors whose cause of death is recorded as 'Anoxia'.\nFor each such donor, display their registry ID, age, and their key kidney function indicators: serum creatinine and glomerular filtration rate.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "organ_transplant_M_8", "selected_database": "organ_transplant", "query": "We need to track every time a patient's urgency level actually changes.\nCan you set up an audit trail for that? 
First, make a table called `urgency_status_log` to store the history—it needs a log number, the patient ID, the date it happened, and what the status changed from and to.\nThen, build a trigger called `trg_log_urgency_status_change`. It should watch the clinical table and automatically add a new line to our log only when the medical urgency value for a patient is modified.", "normal_query": "Create a system for logging changes to a patient's Medical Urgency Status.\nFirst, create a new table called `urgency_status_log` with columns to track the log ID, the recipient's registry ID, the date of the change, the old status, and the new status.\nSecond, create a trigger named `trg_log_urgency_status_change` that executes after an update on the `clinical` table. The trigger should fire only if the `med_urgency` column is modified, and it must insert a new record into the log table with the relevant details.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "organ_transplant_M_9", "selected_database": "organ_transplant", "query": "We need to standardize how we calculate our main ranking score.\nCan you build a function called `calculate_composite_allocation_score` that does all the work?\nI want to just give it a match ID.\nIt should then go and figure out all the component scores and combine them using the standard Composite Allocation Score formula, and just return the one final number.", "normal_query": "Create a reusable function named `calculate_composite_allocation_score` that takes a match ID as input.\nThis function must internally calculate and combine the Patient Urgency Score, Immunological Compatibility Score, and Expected Graft Survival (EGS) Score using the standard Composite Allocation Score formula.\nThe function must return the final calculated score as a single REAL value.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "organ_transplant_M_10", "selected_database": "organ_transplant", "query": "Let's make our center performance stats update in real-time.\nFirst, make a simple summary table, call it `center_performance_live`, just to hold the center's ID, a running count of their transplants, and their average EGS score.\nThen, the magic part: build a trigger called `trg_update_center_performance`. Whenever a match is officially marked as 'Completed', this trigger should automatically update that center's numbers in our new summary table. It needs to be smart enough to add the center if it's their first completed transplant.", "normal_query": "I want to automate the updates to our center performance metrics.\nFirst, create a summary table called `center_performance_live` with columns for center ID, total transplants, and the running average for their Expected Graft Survival (EGS) Score.\nThen, create a trigger named `trg_update_center_performance` that fires after an update on `transplant_matching`. When a `match_status` changes to 'Completed', the trigger must find the corresponding transplant center and update its `total_transplants` and `avg_egs_score` in the summary table. 
If the center isn't in the table yet, it should be inserted.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "organ_transplant_M_11", "selected_database": "organ_transplant", "query": "I need a dedicated, ranked list of all the kids waiting for a heart.\nCan you create a view called `v_pediatric_heart_candidates`?\nIt should only include patients under 18. The ranking is critical: the sickest kids—Status 1A—go first. If kids have the same urgency status, the one with the higher Recipient Wait Time Ratio should get priority.\nI need the view to show the kid's ID, their age, their urgency level, and their calculated wait time ratio.", "normal_query": "Create a specialized view named `v_pediatric_heart_candidates`.\nThis view should produce a prioritized list of all recipients under the age of 18 who are waiting for a heart transplant.\nThe list should be ordered first by their Medical Urgency Status (Status 1A being highest), and then by their Recipient Wait Time Ratio in descending order for recipients with the same urgency status.\nThe view should display the recipient's ID, age, medical urgency, and the calculated wait time ratio.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "organ_transplant_M_12", "selected_database": "organ_transplant", "query": "We need to prevent silly mistakes in our data entry. People don't get younger.\nCan you build a trigger called `trg_validate_recipient_age`?\nIt should watch the recipient demographics table. If anyone ever tries to update a patient's age to a number that's lower than what it was before, the trigger must just ignore the change and keep the old age.", "normal_query": "To ensure data integrity, create a trigger that prevents illogical updates to a recipient's age.\nThe trigger, named `trg_validate_recipient_age`, should activate before any update on the `recipients_demographics` table.\nIt must check if the new `age_count` is less than the old `age_count`. 
If a user attempts to decrease a recipient's age, the trigger should silently prevent the change by reverting the age to its original value.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "organ_transplant_M_13", "selected_database": "organ_transplant", "query": "I need a quick way to pull up all the main risk numbers for a specific transplant match.\nCan you make a function called `get_match_risk_profile` that takes a match ID?\nIt should just go into the risk data and pull out the five important scores—Immunological, Infection, Rejection, Readmission, and Mortality—and then give them back to me as a single JSON object.", "normal_query": "Please create a function that provides a holistic risk profile for a given transplant match.\nThe function, named `get_match_risk_profile`, must accept a match ID.\nIt should retrieve and return a single JSONB object containing the five key risk metrics: Immunological Risk, Infection Risk, Rejection Risk, Readmission Risk, and Mortality Risk.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "mental_health_1", "selected_database": "mental_health", "query": "Let's find our most vulnerable patients: those who are high-risk, in facilities under severe stress, and who are also not engaging well with their therapy. I need a list with their patient ID, assessment ID, the date of their latest assessment, their average rounded engagement score, and the stress level of their facility. Please sort by the most recent assessment and just show the top 50.", "normal_query": "I want to identify High-Risk Patients from facilities experiencing Severe Environmental Stress or Severe Life Impact, who also exhibit low Therapy Engagement Scores (average TES is lower than 2). For each patient, include their patient ID, assessment ID, date of their most recent assessment, their average rounded TES score, and the environmental stress or life impact level of the facility they are associated with. Focus only on the most recent assessments and prioritize patients meeting all these criteria. Sort the results by the assessment date in descending order and limit to the top 50 results.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "mental_health_2", "selected_database": "mental_health", "query": "Can you help me check how closely a facility's resources relate to how well patients stick to their treatment? I'd like to see the overall resource adequacy score across all facilities and the correlation between each facility's resource score and their treatment adherence rate. Just skip the places where there's no rate for the treatment adherence.", "normal_query": "For all facilities, I want to explore the Correlation Between Resource Adequacy and Adherence. Include the overall Facility Resource Adequacy Index as a reference and the correlation coefficient between each facility's resource adequacy score and treatment adherence rate. 
Exclude facilities with no applicable TAR.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "mental_health_3", "selected_database": "mental_health", "query": "Show me facilities where patients seem highly engaged in therapy, but their recovery progress is still lagging; basically, places with a possible engagement-outcome disconnect. For each of those facilities, I want to see the Facility ID, their average therapy engagement score, and their index of recovery trajectory, both rounded to two decimal places. Sort the list by Facility ID and keep it to the first 100 results.", "normal_query": "Identify facilities classified as a Facility with Potential Engagement-Outcome Disconnect. Display the facility ID, the average TES, and the RTI for these facilities. Round both TES and RTI to 2 decimal places, sort by facility ID, and limit the output to 100 rows.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "mental_health_4", "selected_database": "mental_health", "query": "Can you show me the top clinicians working in well-supported facilities based on the stability metric of the patients? I want to see each clinician's ID, which facility they work at, their Patient Stability Metric score, and how they rank within their facility (higher PSM means better rank). Just include those in resource-backed facilities, sort by facility and rank, and show only the top 100 results.", "normal_query": "I want to identify the top-performing clinicians in Resource-Supported Facilities based on their Patient Stability Metric. For each clinician, provide their ID, the facility ID, their PSM score, and their rank within the facility. The rank should be based on PSM, with higher PSM scores ranked higher. Only include clinicians from facilities classified as Resource-Supported Facilities. Sort the results by facility ID and then by rank within each facility, limiting the output to the top 100 rows.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "mental_health_5", "selected_database": "mental_health", "query": "Can you show me the patients who seem to have fragile stability? I want to see their ID, how often they miss appointments on average, and what their latest social support effectiveness score is.", "normal_query": "I want to find patients who are exhibiting fragile stability. List each patient's ID, their average missed appointments, and their most recent SSE score.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": true, "order": false}} +{"instance_id": "mental_health_6", "selected_database": "mental_health", "query": "Show me the top 100 primary diagnoses where patients have the highest number of crisis interventions. For each diagnosis, include the name of the diagnosis, how many patients had that diagnosis, and the average crisis intervention frequency, rounded to two decimal places. 
Sort the list by CIF from highest to lowest.", "normal_query": "I want to identify which primary diagnoses are associated with the highest Crisis Intervention Frequency (CIF) across all patients. For each diagnosis, list the diagnosis name, the number of patients with that diagnosis, and the CIF value, rounded to two decimal places. Sort the results by CIF in descending order and limit to the top 100 diagnoses.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": true, "order": true}} +{"instance_id": "mental_health_7", "selected_database": "mental_health", "query": "Show me the top 100 facilities, grouped into performance quadrants. For each one, list its ID, how well patients stick to their treatments (rate of treatment adherence), how stable the patients are, both rounded to two decimals, and which performance quadrant it falls into. Sort the results by quadrant and then by facility ID.", "normal_query": "I want to categorize facilities into performance quadrants. For each facility, list the facility ID, Treatment Adherence Rate (rounded to two decimal places), Patient Stability Metric (rounded to two decimal places), and the performance quadrant. Sort results by performance quadrant and facility ID, limiting to the top 100 facilities.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "mental_health_8", "selected_database": "mental_health", "query": "I want to see how different kinds of therapy changes—like switching therapy type, therapist, or session frequency—affect how engaged patients are. For each type of change, show how often it happens, what the average engagement score was before and after the change, and how much the score changed overall. Sort the results so that the most common changes appear at the top.", "normal_query": "Analyze the impact of therapy changes (modality, therapist, frequency) on the Therapy Engagement Score and calculate the engagement variation for each change type. Show the change type, total occurrences, average scores before (previous encounter of each encounter) and after (current encounter), and average score change from previous score to current score, ordering by total occurrences in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "mental_health_9", "selected_database": "mental_health", "query": "Show me the top 100 facilities where suicide risk is very high—over 20%. For each one, list the facility ID, their PFIS score, FRAI score, and the difference in resource demand, sorted from the highest RDD to the lowest. I want to find places where the need for resources is most urgent.", "normal_query": "For facilities with high Suicide Risk Prevalence over 20%, calculate the Resource-Demand Differential. List the facility ID, PFIS, FRAI, and RDD scores, ordered by RDD from highest to lowest, showing the top 100 facilities. 
This helps identify resource gaps in critical environments.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "mental_health_10", "selected_database": "mental_health", "query": "Find facilities that seem to have an environment that is systemically stressed. For each one, list the facility ID and the differential in resource demand. Just show the top 100 most stressed facilities.", "normal_query": "Identify facilities exhibiting characteristics of a Systemically Stressed Facility Environment. For each facility, return its ID and Resource-Demand Differential value, limited to the top 100 facilities.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "mental_health_11", "selected_database": "mental_health", "query": "Hey, can you pull together a report that shows how care might vary between different ethnic groups? For each patient, I want to see their ethnicity, how severe their symptoms are, and how well they're sticking with their treatment. Also, please include a summary row that combines all ethnicities, so we have a baseline to compare against. And make sure the results are sorted by ethnicity alphabetically. Thanks!", "normal_query": "To support our health equity audit, I need a report that assesses potential care disparities across different ethnic groups. Please generate a table showing each patient's ethnicity, their calculated Symptom Severity Index, and their Engagement-Adherence Score. The report must also include a summary row for 'All Ethnicities' to serve as a baseline. Please sort the results alphabetically by patient ethnicity.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "mental_health_12", "selected_database": "mental_health", "query": "Sarah, our lead case manager, is worried about a pattern she's calling 'the David profile.' These are patients who are constantly in crisis—we're talking about patients who have a low support profile and are in high crisis. She wants to find every other patient who looks just like that right now. Can you pull a list for her? She needs to see their ID and age, plus the exact crisis and support scores that got them on the list. For her team to take action, please also show the date they were last in the hospital and their next appointment, but make that appointment date easy to read. And please, put the people with the most crises at the very top so she knows who to call first.", "normal_query": "For our weekly clinical review, Sarah needs to generate a Vulnerable Patient Watchlist to proactively identify a specific cohort. The focus is on individuals who fit the Patient with High Crisis & Low Support Profile. To make this list actionable for the meeting, please display each unique patient's identifier, their age, the total count of their crisis interventions, and their calculated Social Support Effectiveness score. 
For additional context on their recent trajectory and our next point of contact, also include the date of their last hospitalization and format the date of their next scheduled appointment as 'Mon DD, YYYY'. Finally, please sort the list with the patients having the highest number of crisis interventions at the top to prioritize our discussion.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": true, "order": true}} +{"instance_id": "mental_health_13", "selected_database": "mental_health", "query": "Our director, Dr. Evans, is trying to get more funding for community partnerships, and his whole argument hinges on one idea: that facilities with better resources have patients who are more likely to stick to their treatment plans. He needs a solid number to prove this point. Can you run a statistical analysis to see how strong that connection really is? Basically, do a correlation analysis of resource adequacy against treatment adherence. Just give me the final correlation score, nice and clean, as a single number.", "normal_query": "For a budget proposal, our regional director, Dr. Evans, needs to validate a key hypothesis: that better facility resources improve patient outcomes. He wants to test this by measuring the Correlation Between Resource Adequacy and Adherence. Please calculate this correlation. The final output should be a single numerical value representing this correlation, rounded to four decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 4, "distinct": false, "order": false}} +{"instance_id": "mental_health_14", "selected_database": "mental_health", "query": "Our Director, Dr. Sharma, needs help. We have a lot more patients coming in who are in a really bad place—severely depressed or anxious and also at high risk for suicide. She wants to build a list of our 'go-to' experts for these tough cases. Can you figure out, for each diagnosis like 'Depression', 'Anxiety', etc., which of our clinicians has the most hands-on experience with this exact type of high-risk patient? I need a table that shows the diagnosis, the clinician, how many of these patients they have, and then ranks them, so we can see who is number 1, 2, 3 for each category. Please organize the whole thing by diagnosis, then by the rank.", "normal_query": "To address a surge in complex cases, the Director of Clinical Services is creating an expert consultation roster. The goal is to identify clinicians with the most experience managing the High Severity, High Risk Patient Group. Please generate a report that, for each primary diagnosis, ranks clinicians based on the number of these specific high-risk patients they manage. The output table should include the primary diagnosis, the clinician's identifier, the count of these high-risk patients for that clinician, and their resulting rank within that diagnosis category. 
The final roster should be sorted first by the primary diagnosis and then by the clinician's rank to clearly present the top experts for each specialty.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "mental_health_15", "selected_database": "mental_health", "query": "I'm looking at Patient P871358's file and I'm a bit concerned. He's been diagnosed with Bipolar for about 10 years and has already been in the hospital 3 times. I need to know if we should escalate his care. So, first, can you run that calculation of the patient's risk for annualized hospitalization and tell me if he gets flagged for an immediate coordination of intensive care? If he does, I need to find him a new clinician right away. The new clinician would need to be at the same facility he last visited, be one of our 'High' confidence therapists, and not have their credentials up for review in the next six months or so; let's say any time after June 2024. If he's flagged, show me their IDs and where they work. If his risk score is fine, just let me know by reporting as 'Patient does not meet criteria for Intensive Care Coordination'.", "normal_query": "I'm assessing Patient 'P871358' and need to determine if an Intensive Care Coordination Flag should be raised. First, please calculate his Annualized Hospitalization Risk, then flag it if the criteria are met. If and only if the patient is flagged, I need a list of suitable clinicians for referral. A suitable clinician must be located at the same facility as the patient's most recent encounter, have a 'High' confidence level, and have a next credential review date after June 1st, 2024. Please return the clinician's identifier and their facility. If the patient is not flagged, just report as 'Patient does not meet criteria for Intensive Care Coordination'.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "mental_health_16", "selected_database": "mental_health", "query": "I'm doing a review on patient P883117 and need to check a couple of things for her file. First, she's supposed to be getting at least 5 hours of therapy a month in her program. Can you check her latest therapy exp intensity and tell me if she's meeting that target? Second, her insurance gives her $1,200 a year for treatment. Based on her personal cost-effectiveness rate of 0.0509, what's her total expected quality of life gain for the whole year? And finally, is that number over our 'good value' threshold of 50 points? Just give me a quick summary for all the assessments for this patient, including the patient's ID, and the answers to those three questions.", "normal_query": "I need to perform a case audit for patient 'P883117'. The patient is in a program requiring a minimum therapy intensity of 5 hours per month. Their annual insurance budget for treatment is $1,200, and their current recorded treatment cost-effectiveness is 0.0509 QoL-points per dollar. Please provide a report that answers the following: A boolean value indicating if the patient's current therapy_exp_intensity meets the 5 hours/month target. The calculated Projected Annual QoL Gain, based on their cost-effectiveness rate and the $1,200 budget. 
A boolean value indicating if this projected gain is greater than 50 points. The output should show all the assessments for this patient, with columns for their identifier and these three calculated results.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "mental_health_17", "selected_database": "mental_health", "query": "I need to run a full audit on patient P425079. His insurance gives him $6,000 a year for treatment, but we charge in pounds—£100 an hour. Assuming an exchange rate of 1.25 dollars to the pound, how many hours of therapy can he actually get for his money? Also, his high-acuity plan says he needs 30 hours of therapy a month, but his chart says he's only getting 29.86. Can you confirm if he's meeting that target? Lastly, given his personal cost-effectiveness rate of 0.1197, what's his total potential quality of life gain if he uses his whole $6,000 budget? And is that number over our 'good value' benchmark of 650?", "normal_query": "I am conducting a detailed audit for patient 'P425079'. His insurance plan has a maximum out-of-pocket cost of $6,000 USD per year. Our clinic's therapy rate is £100 GBP per hour, with the current exchange rate at 1.25 USD per GBP. The patient's therapy plan requires a minimum intensity of 30 hours/month, and his last recorded intensity was 29.86 hours/month. His cost-effectiveness rate is 0.1197 QoL-points/dollar. Please provide a report with four calculated fields: The total number of therapy hours his annual budget can afford, rounded to 2 decimal places. A boolean value indicating if his current therapy intensity meets the 30-hour monthly target. The Projected QoL Gain from Max Out-of-Pocket rounded to 2 decimal places, using his cost-effectiveness rate and the $6,000 budget. A boolean value indicating if this projected gain exceeds the 'clinically valuable' threshold of 650 points.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "mental_health_18", "selected_database": "mental_health", "query": "I'm working on a study and need to find a very specific group of patients. I'm looking for people with a Bipolar diagnosis. Once you have that list, I need to check one more thing about their current treatment. Our standard for this group is 15 hours of therapy a month. Can you just show me a table of these patients with their ID, diagnosis duration, and hospitalization count, and then add a true/false column that tells me if they're meeting that 15-hour therapy target?", "normal_query": "For my research paper, I need to analyze a Chronic High-Acuity Bipolar Cohort. After identifying this cohort, I need to check their Intensive Therapy Standard Compliance. 
Please generate a report that lists each patient's identifier, their diagnosis duration in months, their count of previous hospitalizations, and a boolean value indicating if their current therapy intensity is 15 hours per month or more.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "mental_health_19", "selected_database": "mental_health", "query": "I'm trying to decide where to focus our new goal-setting program. I have a hunch that our PTSD patients are struggling more to make progress than our anxiety patients. Can you check this for me? I want to see a comparison: on average, how many recovery goals would a PTSD patient achieve in a full year versus an anxiety patient? I just need those two numbers side-by-side to see if my hunch is right.", "normal_query": "For program development, I need to compare the Annual Goal Achievement between two patient populations. The first population consists of patients with a primary diagnosis of 'PTSD', and the second consists of patients with a primary diagnosis of 'Anxiety'. Please calculate the average Annual Goal Achievement for each group. The final output should be a single row with two columns: one for the PTSD group's average and one for the Anxiety group's average.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "mental_health_20", "selected_database": "mental_health", "query": "I'm Dr. Hanson, a pharmacist. I need to run a safety check on our Anxiety patients at the F533 facility, but only those who've been diagnosed for more than a year. I'm worried about medication side effects. Can you calculate a side effect score for a 12-month period for each of them with just 2 medications? I need a list of these patients showing their ID, the original density number, this new score, and a true/false if their score is over our new safety limit of 0.1. Please put the patients with the highest scores at the top.", "normal_query": "I am conducting a 'Medication Protocol Review' for Anxiety patients at facility F533 who have a diagnosis duration of more than 12 months. I need to calculate their Annualized Side Effect Score, assuming that there are 2 medications. Please provide a report that lists the patient identifier, their original med side eff density, their calculated Annualized Side Effect Score, and a boolean flag indicating if this score is over our protocol's threshold of 0.1. The list should be sorted by the highest score first.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": true, "order": true}} +{"instance_id": "mental_health_M_3", "selected_database": "mental_health", "query": "Hey! Go ahead and clean up the treatmentoutcomes table by deleting any old or stale records, but only for those patients who've been flagged as Non-Compliant. 
Leave the rest untouched!", "normal_query": "Please remove Stale Treatment Outcome Records from the treatmentoutcomes table, but only for patients who have been identified as Non-Compliant Patient.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": true, "order": false}} +{"instance_id": "mental_health_M_4", "selected_database": "mental_health", "query": "Hey! Can you make a reusable database function called calculate_tes? If it already exists, just replace it. This function should take a treatment key, look up the 'engagement' level from the therapy details tied to that treatment, and return the score for therapy engagements as a number.", "normal_query": "Please create (or replace if it exists) a reusable database function named calculate_tes. This function's purpose is to calculate the Therapy Engagement Score for a single treatment record. It should take the treatment key as input, find the corresponding 'engagement' level from the therapy details data, and return the calculated numeric score based on the standard Therapy Engagement Score definition.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "mental_health_M_6", "selected_database": "mental_health", "query": "Hi team, I'm worried we're missing the chance to help patients who are really struggling until it's too late. I want to build a new 'at-risk' watchlist named vulnerable_patient_watchlist that our care coordinators can check every morning. Let's automatically flag any patient who is both having frequent crises and seems to be socially isolated. When the system flags someone, I need the new watchlist to show the watchlist id, their patient ID, who their clinician is, exactly how many crises they've had, what their support score was, and—most importantly—when their next appointment is, and also the insertion date of the watchlist. Make sure you pull that date from the notes of their last visit so we have the most current one.", "normal_query": "As the head of clinical outreach, I am launching a new initiative to reduce critical incidents. To do this, I need to establish a new, permanent data process called 'Vulnerable Patient Watchlist Generation'. This process will create and populate a new table named vulnerable_patient_watchlist. To determine who belongs on this list, we will use our established 'Patient with High Crisis & Low Support Profile'. For every patient who meets these criteria, the new vulnerable_patient_watchlist table must store the following actionable information: a watchlist id, their unique patient ID, the ID of their lead clinician, their total crisis intervention count, their calculated SSE score, and their next scheduled appointment date as recorded during their most recent encounter, and the date of the watchlist's insertion.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "mental_health_M_8", "selected_database": "mental_health", "query": "This is urgent. My patient, P515871, is at high risk for suicide based on the assessment I just filed. 
Our protocol says this should immediately flag me for intensive care coordination so we can get them help now. Can you run a script to do this protocol?", "normal_query": "I've just finished an emergency assessment for patient P515871, who has been flagged with a 'High' suicide risk. To ensure immediate action, I need to invoke our 'High-Risk Escalation Protocol'. Please execute a script to do this protocol.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": true, "order": false}} +{"instance_id": "mental_health_M_9", "selected_database": "mental_health", "query": "We nearly missed a serious situation with a patient named John because nobody saw their hospitalization risk score was getting dangerously high. I want to fix this now by doing a review protocol. Please help me flag those patients who are in the same situation as John in our patient files. First, please add a new boolean column named needs_case_review to the patients table, with a default of FALSE. Then, for all patients that are currently meeting the criteria based on their latest assessment, execute an update to set the flag to TRUE. This will be our new automated safety alert.", "normal_query": "In response to a recent critical incident, our safety board has approved a new 'Mandatory Case Review Protocol'. Some patients are exceeding the hospital risk density recently, like John, and because of this, we must execute this protocol to flag and identify more patients that have the same situation as John. To implement this, I need a two-part script. First, please add a new boolean column named needs_case_review to the patients table, with a default of FALSE. Second, execute an update to set this new flag to TRUE for all patients who currently meet the protocol's criteria based on their latest assessment.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": true, "order": false}} +{"instance_id": "mental_health_M_10", "selected_database": "mental_health", "query": "Hi, I'm Dr. Rossi. I need an urgent change to a patient's file. My patient David, that's P883117, just lost his entire support system because his wife was in a bad accident. His current support plan is useless now. We've agreed he needs to be twice as self-reliant. Can you find his last assessment, look up his 'support utilization rate' which I think is 1.5, and cut it in half? Please change it in the system to 0.75 so his official plan is up to date.", "normal_query": "I have an urgent update for my patient, David (P883117), following a major life event—his primary caregiver is incapacitated. His existing support_util_rate of '1.5 support-level/contact' is no longer viable. We have set a new therapeutic goal to halve his reliance on external support. 
Please execute a script to find his single most recent assessment record and update the support_util_rate value to '0.75 support-level/contact' to reflect this new plan.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "reverse_logistics_1", "selected_database": "reverse_logistics", "query": "Show me how much the whole process, from transport to final disposition, cost us for every return. Round the result to 2 decimal places, biggest cost first.", "normal_query": "List the Total Return Cost (TRC) for each return case, round the value to 2 decimal places, and sort the results by TRC in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "reverse_logistics_2", "selected_database": "reverse_logistics", "query": "Tell me the overall gain or loss from all returns: take what we got back after processing and subtract the combined out-of-pocket amount (transport, getting it sellable again, label fixes, end-of-life handling, fix-up quote). Include the net figure and both parts, rounded to two decimals.", "normal_query": "Estimate the overall net impact across all processed returns by subtracting the total handling cost (transport, reintegration, label correction, end-of-life handling, fix-up estimate) from the amount recaptured after processing; also show both components; round each to two decimals.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "reverse_logistics_3", "selected_database": "reverse_logistics", "query": "Show me the average value recovered per day since sale for the entire portfolio, along with the recovery value and number of days used in the calculation, rounded to 2 decimal places from highest to lowest.", "normal_query": "Compute the portfolio-wide average Recovery Rate per Day, rounded to two decimals; also return the aggregated Recovery Value and the aggregated Days Lapsed used in that calculation.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "reverse_logistics_M_1", "selected_database": "reverse_logistics", "query": "Find store credit refunds that are for zero dollars, mark them as “Pending,” and show me their reference numbers with the new status.", "normal_query": "Only for zero-amount reimbursements issued as store credit, set the status to “Pending” and return each affected reference with its new status.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "reverse_logistics_4", "selected_database": "reverse_logistics", "query": "I want to know the average loss of a return, considering the environmental impact factors. 
Round to two decimal places.", "normal_query": "For all returns, calculate the average Sustainability Adjusted Loss (SAL), rounded to 2 decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "reverse_logistics_5", "selected_database": "reverse_logistics", "query": "Show me each processing site with their average and individual processing times, rounded to one decimal place, and list the slowest sites first.", "normal_query": "List the Average Processing Time (APT) for each processing site, including individual Processing Time values in the result. Round numeric outputs to 1 decimal place. Sort the results by APT in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 1, "distinct": false, "order": true}} +{"instance_id": "reverse_logistics_6", "selected_database": "reverse_logistics", "query": "I want to know the proportion of returns having warranty claims, and round the answer to one decimal point.", "normal_query": "Calculate the Warranty Claim Ratio (WCR) for the whole business, and round the result to 1 decimal place.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 1, "distinct": false, "order": false}} +{"instance_id": "reverse_logistics_7", "selected_database": "reverse_logistics", "query": "For each return, show a severity score equal to the number of signals times the level weight (use 1 for low, 2 for medium, 3 for high). Include both the signal count and the weight, and list the highest scores first.", "normal_query": "For each return, compute the Fraud Flag Severity Score (FFS) as the flag count multiplied by a level weight, using Low=1, Medium=2, High=3; also show the flag count and the applied weight; sort by FFS in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "reverse_logistics_8", "selected_database": "reverse_logistics", "query": "Provide the grand total of all regulatory penalties we've accumulated to date (show only the final number).", "normal_query": "Measure the total Regulatory Compliance Penalty (RCP) incurred to date, only show the final number.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "reverse_logistics_9", "selected_database": "reverse_logistics", "query": "Show the latest 100 items with a relative transport cost versus what's typical for the same way of coming back. Include the way it came back, round to two decimals, and list newest first.", "normal_query": "For the 100 most recent returns, list each item's Return Channel Cost Index (value = its transport charge divided by the historical average for returns that came back the same way). 
Include the way it came back, round to two decimals, and sort by the logging time from newest to oldest.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "reverse_logistics_M_2", "selected_database": "reverse_logistics", "query": "Suspend anyone whose combined severity score (using 1 for low, 2 for medium, 3 for high) is 9 or more, and show their IDs with the new category.", "normal_query": "Suspend all customers whose Fraud Flag Severity Score is at least 9 and the weights are Low=1, Medium=2, High=3. Return each customer ID with the updated segment category.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "reverse_logistics_M_3", "selected_database": "reverse_logistics", "query": "Make the expected expense that bring a defective return back to sellable condition increase by 15% for returns waiting more than 60 days. Show me the case numbers and new estimates (rounded to 2 decimal places).", "normal_query": "Increase the repair estimate by 15 percent for any return that has been waiting more than 60 days, and return the case number with the adjusted amount rounded to 2 decimals.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "reverse_logistics_10", "selected_database": "reverse_logistics", "query": "Show me the average cost of printing and attaching new labels for each product category, rounded to two decimals and sorted from highest to lowest.", "normal_query": "List the Average relabeling cost by product category. Round numeric outputs to 2 decimal places and sort descendingly.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "reverse_logistics_11", "selected_database": "reverse_logistics", "query": "Show overall disposal spend per method, most expensive first.", "normal_query": "Show the Total disposal cost by disposal method. Sort the results by the cost in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "reverse_logistics_12", "selected_database": "reverse_logistics", "query": "Which 5 returns cost most to fix? Show me from priciest to least pricey.", "normal_query": "Display the top 5 returns ranked by repair estimate, ordered from highest to lowest.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "reverse_logistics_13", "selected_database": "reverse_logistics", "query": "How much do we typically recover through each return method? 
Give me the average amounts rounded to pennies.", "normal_query": "List the average recovery value per return channel, rounded numeric outputs to 2 decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "reverse_logistics_M_4", "selected_database": "reverse_logistics", "query": "Find every case marked as finished but never officially closed, mark it closed with today's date, and tell me which case numbers were touched.", "normal_query": "Close every case whose action state is 'Completed' and whose close state is still NULL; set its close state to 'Closed', update the close date to today, and return its case number.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "reverse_logistics_14", "selected_database": "reverse_logistics", "query": "Of all returns, tell me the % flagged with 'high' fraud risk (rounded to hundredths) - show only the percentage.", "normal_query": "Figure out the percentage of returns flagged 'high' in fraud risk levels. Round numeric outputs to 2 decimal places and only show the first one.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "reverse_logistics_15", "selected_database": "reverse_logistics", "query": "Give me the kilograms of carbon dioxide released in different disposal processing activities. Round numeric outputs to 2 decimal places and show from highest to lowest.", "normal_query": "List the Average carbon footprint by disposal method. Round numeric outputs to 2 decimal places. Show the results from highest to lowest.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "reverse_logistics_16", "selected_database": "reverse_logistics", "query": "Show me how many returns we have for each warranty status and return channel combination, sorted by the count from highest to lowest.", "normal_query": "List the Count Returns per Warranty status (CNT) and return channel. Sort the results by CNT in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "reverse_logistics_M_5", "selected_database": "reverse_logistics", "query": "Find all electronics worth over $700, mark their subcategories as 'High Value', and tell me how many got tagged.", "normal_query": "Append the tag 'High Value' to the subcategory of electronics products with unit value greater than 700. 
Return the number of updated rows.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "reverse_logistics_17", "selected_database": "reverse_logistics", "query": "Show me how much money we actually get back (after costs) for each type of refund, along with both the recovered amounts and what we spent processing them.", "normal_query": "Calculate the Net return profit impact per refund method. Display Recovery Value and Total Return Cost in the result.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "reverse_logistics_18", "selected_database": "reverse_logistics", "query": "How dirty and pricey is it to scrap items in different states? Show me the average values, rounded to two decimals.", "normal_query": "Output the Average carbon footprint and disposal cost for each item condition state. Round numeric outputs to 2 decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "reverse_logistics_M_7", "selected_database": "reverse_logistics", "query": "Find all unsatisfied customers (ratings 2 or below), flag their cases for follow-up, and give me the case numbers with their new status.", "normal_query": "Mark needsfollowup as 'Yes' for cases whose satisfaction score is less than or equal to 2. Return casetie and the new needsfollowup value.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "reverse_logistics_19", "selected_database": "reverse_logistics", "query": "Show me how many non-compliant items we have divided by disposal method, along with their average carbon footprint (rounded to 2 decimal places). Sort by the highest count of non-compliant items first.", "normal_query": "Display the count of items whose Regulatory Compliance Status is non-compliant, grouped by disposal method, with the average carbon footprint rounded to 2 decimals. Sort the list by the non-compliant count in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "reverse_logistics_M_8", "selected_database": "reverse_logistics", "query": "Find all supposedly recycled items with heavy carbon footprints (over 50kg), mark them as hazardous waste instead, and tell me how many needed reclassification.", "normal_query": "Change disposal method from 'Recycle' to 'Hazardous Waste' for records with carbon footprint greater than 50 kilograms. 
Return the number of affected rows.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "reverse_logistics_20", "selected_database": "reverse_logistics", "query": "When fraud risk is high and it comes back by courier, how off-normal is the shipping cost? Please round the answer to two decimals.", "normal_query": "When the fraud-risk rating is high and the item comes back by courier, how far above or below average is the shipping cost? Please round the answer to two decimals.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "reverse_logistics_M_10", "selected_database": "reverse_logistics", "query": "Find all automatic approvals for express processing, bump them up to manager approval, and tell me which locations had changes with both the old and new levels.", "normal_query": "Upgrade approval level from 'Automatic' to 'Manager' for processing priority 'Express'. Return loccode, old approval level, and new approval level.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "robot_fault_prediction_1", "selected_database": "robot_fault_prediction", "query": "Let's get a list of all robots that need urgent maintenance and are running too hot, showing their ID, model, urgency score, and hottest joint temp with worst cases first.", "normal_query": "Which robots currently meet the dual criteria of a high Predictive Maintenance Urgency score and a concurrent Thermal Anomaly? Show robot ID, model, maintenance urgency score, and maximum joint temperature. Sort by most critical cases first.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "robot_fault_prediction_3", "selected_database": "robot_fault_prediction", "query": "Find robots doing precision work that aren't accurate enough, showing their ID, job, error amount, and if they need calibration.", "normal_query": "Identify robots used in Precision-Critical Applications that are showing signs of Precision Performance Degradation. Return robot ID, its application type, relative positional error, and current calibration state.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "robot_fault_prediction_4", "selected_database": "robot_fault_prediction", "query": "Show me robots working too hard right now, listing their model, how much payload capacity they're using, and their cycle speed. Make sure the most overloaded ones are at the top of the list.", "normal_query": "List all robots currently operating under an Intensive Workload. 
Include the robot's model, its payload utilization ratio, and its throughput rate, sorted with the most utilized and fastest robots appearing first.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "robot_fault_prediction_5", "selected_database": "robot_fault_prediction", "query": "I need to plan next week's emergency maintenance schedule. Find all robots that are likely to fail sometime next week and which the system also considers a high risk to break. Show their ID, how likely they'll break, and how many days they have left.", "normal_query": "For next week's maintenance planning, identify robots that are projected to fail within the next 7 days and also have a 'High' Fault Prediction Score Tier. For each, show the robot ID, its fault prediction score, and its remaining useful life in days.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "robot_fault_prediction_6", "selected_database": "robot_fault_prediction", "query": "Let's see which manufacturers are struggling the most with their robots' health. For manufacturers with at least two robots getting worse, show me how many are affected and what their average decline rate is. Please show the manufacturer names in lowercase.", "normal_query": "I require an analysis of health degradation trends, grouped by manufacturer with more than one degrading robot, show the total count of such robots and their average health degradation rate (with manufacturer names in lowercase).", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "robot_fault_prediction_7", "selected_database": "robot_fault_prediction", "query": "Let's find robots whose controllers are overworked compared to their peers by flagging any with 'Anomalous Controller Stress'. I need to see the robot's ID, its model (in lowercase), its stress score, and what the average for its model was.", "normal_query": "Identify all robots experiencing 'Anomalous Controller Stress'. For each, display its robot ID, its model (in lowercase), its specific stress score, and the calculated average stress for its model.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "robot_fault_prediction_8", "selected_database": "robot_fault_prediction", "query": "Let's find robots that might be breaking down because their controllers are overworked. Show me any robot where its controller stress is over 20% more than average for its model, and its mechanical wear score is getting into the medium or high range. I need to see the robot's ID, who made it, the model (in lowercase), and both the stress and wear scores. Please group them by the maker and list the most stressed ones first.", "normal_query": "Identify robots exhibiting signs of mechanically significant wear potentially induced by high controller stress. A robot qualifies if it meets two criteria simultaneously: 1. 
Its 'Controller Stress Score' is at least 20% higher than the average score for all robots of the same model series. 2. Its 'Mechanical Wear Score' is classified as either 'High' or 'Medium' severity. For each qualifying robot, return its ID, manufacturer, model (in lowercase), its specific controller stress score, and its mechanical wear score. Sort the results by manufacturer, then by the controller stress score in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "robot_fault_prediction_10", "selected_database": "robot_fault_prediction", "query": "I'm trying to see if our heavy-duty jobs are really taking a toll on the robots. Can you compare the failure rates for two groups? First, all the robots doing high-wear stuff like welding and grinding. Second, everyone else. For each group, just tell me what percentage of them are considered a high reliability risk.", "normal_query": "For a comparative reliability analysis, calculate and contrast the percentage of robots classified with 'High Reliability Risk' between two groups: those assigned to 'High-Wear Applications' and those in all other application types. The final result should display the application group and its corresponding high-risk percentage.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "robot_fault_prediction_11", "selected_database": "robot_fault_prediction", "query": "I need a list of our underperforming robots. Can you find all the bots that are running slower than 180 cycles an hour, and then give me just the top 5 slowest from that list? Show me their ID, maker, model, and their actual cycles per hour score, rounded to two decimals. List the absolute slowest one at the top.", "normal_query": "For a fleet-wide performance review, identify the 5 robots with a production throughput rate lower than 180 cycles per hour. For each of these robots, display its ID, manufacturer, model, and the calculated throughput in cycles per hour, rounded to two decimal places. Sort by the lowest throughput first.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "robot_fault_prediction_12", "selected_database": "robot_fault_prediction", "query": "I'm working on our fleet health dashboard and need a key baseline number. What's the average vibration level if you look at all joints on all our robots? Just give me that one number, rounded to two decimal spots, please.", "normal_query": "To establish a fleet-wide health baseline, calculate the average joint vibration (`vibration_mmps`) across all joints of all robots in the entire fleet. The final result should be a single value, rounded to two decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "robot_fault_prediction_M_1", "selected_database": "robot_fault_prediction", "query": "We've finished the safety check for robot RB5150. 
Can you please zero out all its safety violation counters and also mark its calibration status as pending so we know it needs to be rechecked?", "normal_query": "Following a safety review for robot 'RB5150', reset all of its safety violation counters (zone, speed, emergency stops, and collisions) to zero and set its calibration state to 'Pending' to require re-verification.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "robot_fault_prediction_M_2", "selected_database": "robot_fault_prediction", "query": "We're switching robot RB0042 over from welding to material handling. Can you update its job in the system and put it in manual mode so we can set it up?", "normal_query": "Robot 'RB0042' is being repurposed from 'Welding' to 'Material Handling'. Update its application type accordingly and set its operational mode to 'Manual' for reconfiguration.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "robot_fault_prediction_M_3", "selected_database": "robot_fault_prediction", "query": "I need to adjust the maintenance schedule for some of our heavily used robots. For any robot that does welding or grinding and is due for service in the next two weeks, can you push their maintenance date out by 7 days and set the estimated cost for the job to 1200 dollars?", "normal_query": "For all robots assigned to high-wear applications such as 'Welding' or 'Grinding' whose next maintenance is due in less than 14 days, extend the maintenance interval by an additional 7 days and standardize their estimated upkeep cost to $1200.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "robot_fault_prediction_M_4", "selected_database": "robot_fault_prediction", "query": "It's time to retire the old robot, 'RB1055'. Can you please go through the decommissioning steps in the system? That means marking it as decommissioned, wiping its current program, and putting it into an emergency stop state so it can't be used.", "normal_query": "Initiate the full decommissioning procedure for the legacy robot 'RB1055'. This requires updating its operational mode to 'DECOMMISSIONED', clearing its currently loaded program, and setting its safety state to 'Emergency Stop' as a final precaution.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "robot_fault_prediction_M_5", "selected_database": "robot_fault_prediction", "query": "Hey, we fixed that 'SRVO-050' alarm on robot 'RB2073'. Can you clear that fault from the system? And since it's fixed, let's also reset the fault prediction score back to a really low number, like 0.05, and clear out what the system thought was going to fail.", "normal_query": "For robot 'RB2073', clear the active fault 'SRVO-050' as the underlying issue is resolved. 
Concurrently, reset the fault prediction metrics by setting the prediction score to a baseline low value of 0.05 and nullifying the fault type estimation.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "robot_fault_prediction_M_7", "selected_database": "robot_fault_prediction", "query": "To help us keep a constant eye on our riskiest robots, can you create a special saved list called `vw_high_risk_robots`? It should always show the bots that are considered a high safety risk, along with their ID, who made them, the model, how many times they've crashed, and their overall incident rate.", "normal_query": "As per our new safety directive, create a permanent database view named `vw_high_risk_robots`. This view must provide a live list of all robots classified as a 'High Safety-Risk Unit'. The view shall include the robot's ID, its manufacturer, model, total collision count, and its calculated Safety Incident Rate.", "preprocess_sql": [], "clean_up_sqls": ["DROP VIEW IF EXISTS vw_high_risk_robots;"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "robot_fault_prediction_M_8", "selected_database": "robot_fault_prediction", "query": "Let's start tracking how often our robots get overloaded. Can you add a new field called `overload_frequency_ph` to the main performance and safety table? Once you've added it, you'll need to go back and fill it in for all the robots by calculating their overload frequency from their existing data.", "normal_query": "To enhance our performance monitoring, we need to begin tracking the 'Overload Frequency' KPI directly on the performance records. First, modify the `performance_and_safety` table to add a new column named `overload_frequency_ph` with a `REAL` data type. After adding the column, immediately populate it for all existing records by calculating their historical Overload Frequency based on their total overload events and operating hours.", "preprocess_sql": ["ALTER TABLE performance_and_safety DROP COLUMN IF EXISTS overload_frequency_ph;"], "clean_up_sqls": ["ALTER TABLE performance_and_safety DROP COLUMN IF EXISTS overload_frequency_ph;"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "exchange_traded_funds_1", "selected_database": "exchange_traded_funds", "query": "Could you get me a ranked list of all income funds that are premier? For each one, I need the ticker symbol, its short name, its premier rank, and the calculated secure income efficiency score. Please sort the list by the efficiency score, from highest to lowest.", "normal_query": "Generate a ranked list of all premier income funds. For each fund, provide its ticker symbol, short label, its premier rank, and its calculated secure income efficiency score. 
This has to be ordered from the highest to the lowest score.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "exchange_traded_funds_2", "selected_database": "exchange_traded_funds", "query": "Hey, can you pull up the performance history for the 'AADR' fund? I want to see how it did against its category each year. For every year, also show me what the outperformance was in the year before, and what the year-over-year outperformance change was. Just list it all out by year for me.", "normal_query": "I need to analyze the performance trend for the fund with ticker 'AADR'. Please calculate the annual fund outperformance for each calendar year. Additionally, for each year, show the previous year's outperformance and the year-over-year outperformance change. The output should contain the calendar year, the current outperformance, the previous year's outperformance, and the year-over-year change, sorted by year.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "exchange_traded_funds_3", "selected_database": "exchange_traded_funds", "query": "I'm worried about interest rates going up and want to find bond funds that are safer than their peers. Can you identify funds that are much less sensitive to rate changes, at least 1.5 years less, than what's typical for their category? For each of these funds, please list its ticker symbol, name, and category. Also, show me its specific sensitivity value, the average for its category, and its advantage, meaning how much shorter its duration is than the average duration of its category. Sort the results by the advantage, from highest to lowest.", "normal_query": "I need to perform a peer analysis on fixed-income funds based on interest rate sensitivity. For each fund category, first calculate the category average duration. Then, identify all funds whose own duration is at least 1.5 years lower than this average. For this final list of funds, please display the ticker symbol, short label, product class, the specific fund's duration, the calculated category average duration, and the fund's duration advantage. Sort the results in descending order by the duration advantage.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "exchange_traded_funds_4", "selected_database": "exchange_traded_funds", "query": "I'm looking for resilient funds that do well no matter what the market is doing. Can you find funds that tend to beat their peers when the market is up, but also protect against losses better than their peers when the market is down? For each fund, show me its ticker symbol, its average outperformance in good years, and its average outperformance in bad years. Also, calculate the fund's difference in average outperformance for both scenarios. Only show me funds with at least three good and three bad years of data. Sort the list by the biggest difference first.", "normal_query": "I want to identify funds that perform well in different market cycles. 
Please calculate the average upside outperformance and the average downside outperformance for each fund. Also, compute the capture differential. Display the fund's ticker symbol, its average upside outperformance, its average downside outperformance, and the capture differential. Only include funds with at least three up years and three down years of history. Sort the results by the capture differential in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 6, "distinct": false, "order": true}} +{"instance_id": "exchange_traded_funds_5", "selected_database": "exchange_traded_funds", "query": "I'm worried about some of our funds being too big and hard to trade. For each fund, please tell me what is its liquidity pressure? For the top 100 funds, I want to see a list showing the fund's ticker symbol, its size, ratio of turnover, and its calculated days to trade turnover. Show me the riskiest ones at the top of the list.", "normal_query": "I need to assess the liquidity risk for our funds. Please calculate the portfolio liquidity Pressure for each fund, which should be expressed in days. For the top 100 funds with the highest pressure, please display the ticker symbol, net worth, turnover ratio, and the calculated liquidity pressure days. The results should be sorted by the liquidity pressure days in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "exchange_traded_funds_6", "selected_database": "exchange_traded_funds", "query": "Hey, can you help me check funds that changed its investment style? I'm looking for funds with long histories and want to compare their 3-year vs 10-year Beta and R-squared values. For each one, give me the ticker, how much the Beta and R-squared changed, and include a quick summary of their classifications too. Then just show the top 100 funds with the biggest Beta drift, sorted from highest to lowest based on the absolute value of that drift.", "normal_query": "I need to analyze the style drift for funds with long track records. Please compare the 3-year and 10-year Beta and R-squared values for each fund. The output should include the fund's ticker symbol, the calculated Beta Drift, the R-Squared Drift, and a summary of the classifications. Sort the top 100 results by the absolute value of the Beta drift in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "exchange_traded_funds_7", "selected_database": "exchange_traded_funds", "query": "I want to find the top-performing fund in each category. But only look at categories that have at least 10 funds in them, and for each category, find the fund that has the highest score for peer-group comparison. For each one, show me the category name, the ticker of the best fund, its short label, and its final score. Sort everything by the highest scores first, and just give me the top 100 results.", "normal_query": "I want to find the category dominator for each fund category. For each fund category with at least 10 funds, identify the fund with the highest composite score. 
The output should list the category, the ticker symbol of the dominating fund, its short label, and its final composite score. Please sort the results by the composite score in descending order and limit the output to the top 100.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 4, "distinct": false, "order": true}} +{"instance_id": "exchange_traded_funds_8", "selected_database": "exchange_traded_funds", "query": "I'd like to run a regression to see how portfolio turnover relates to manager skill. For each product class that has more than 25 funds, calculate the slope between alpha and turnover, and also show how good the fit is. I want to see the product class name, how many funds are in it, the slope value, and the R-squared for the regression. Sort the results by the slope from highest to lowest, and just show the top 100.", "normal_query": "I want to conduct a regression analysis to determine the relationship between portfolio turnover and manager skill. For each productclass with more than 25 funds, calculate the alpha-turnover slope and the fit quality. The output should include the productclass, the number of funds, the calculated slope, and the R-squared value for the fit. Please sort the top 100 results by the alpha-turnover slope in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 4, "distinct": false, "order": true}} +{"instance_id": "exchange_traded_funds_9", "selected_database": "exchange_traded_funds", "query": "Can you find funds that is a potential value investment? For the top 100 that qualify, show me their ticker, short label, and where their price stands right now. Sort them from the lowest to the highest price position.", "normal_query": "Please screen for funds that match the contrarian value play profile. For the top 100 qualifying funds, please display the ticker symbol, short label, and the calculated price position. Sort the results by the price position in ascending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 8, "distinct": false, "order": true}} +{"instance_id": "exchange_traded_funds_10", "selected_database": "exchange_traded_funds", "query": "I have a hunch that funds that tell us more about their stock valuations actually perform better. Can we test this? Let's split all the funds into two groups: those that share their valuation numbers and those that don't. For each of those two groups, tell me how many funds are in it and what the typical 1-year return was. I'm curious to see if there's a difference.", "normal_query": "I want to compare the performance of funds based on their data transparency. Please create two groups of funds: 'Transparent' and 'Opaque', based on their valuation data availability. For each group, calculate the fund_count and the Median 1-Year Return.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 4, "distinct": false, "order": false}} +{"instance_id": "exchange_traded_funds_11", "selected_database": "exchange_traded_funds", "query": "Can you find those rare funds that basically produce positive alpha but are passive? 
I'd like to see the top 10 of these, ranked by the alpha for 5-years in descending order. For each one, show me the ticker symbol, the company that runs it, and their 5-year alpha score. Also, at the end, just tell me the total number of these funds you found.", "normal_query": "I want to find all passive alpha generators. Please provide a list of the top 10, showing their ticker symbol, parent group, and their 5-year alpha. The list should be sorted by the 5-year alpha in descending order. Also, provide a separate summary count of the total number of passive alpha generators found.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "exchange_traded_funds_12", "selected_database": "exchange_traded_funds", "query": "I want to see which fund managers are actually worth their high fees. Can you figure out a score that shows if a manager's skill outweighs their cost? Show me the top 10 funds with the best scores, listing their ticker symbol and the score itself, sorted from highest to lowest. Also, can you tell me what percentage of all funds are actually providing a positive value for their cost?", "normal_query": "I need to rank funds based on their active manager value. Show me a list of the top 10 funds with the highest AMV, displaying their ticker symbol and the calculated AMV score, sorted from highest to lowest. Additionally, provide a scalar metric showing the percentage of all funds that have a positive AMV.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 4, "distinct": false, "order": true}} +{"instance_id": "exchange_traded_funds_13", "selected_database": "exchange_traded_funds", "query": "How many funds out there are both generating excess returns successfully and its managers confidently put a lot of money into their best ideas? Just give me the total number.", "normal_query": "I need to know the total number of focused alpha leaders.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "exchange_traded_funds_14", "selected_database": "exchange_traded_funds", "query": "I want to know the average Information Ratio after adjusting for how consistent it is. I just need that one final number.", "normal_query": "I want to calculate the average consistency-adjusted information ratio, and please provide it as a single scalar value.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "exchange_traded_funds_16", "selected_database": "exchange_traded_funds", "query": "What's the best score any fund has gotten for generating steady income efficiently? Just give me the highest number and I only need the top score across the board.", "normal_query": "I need to find the single highest secure income efficiency score across all funds. 
Please provide only the maximum SIES value as a single scalar result.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "exchange_traded_funds_17", "selected_database": "exchange_traded_funds", "query": "How many funds are truly standing out from the crowd with their active strategies? Just give me the total number, one single value.", "normal_query": "I want a total count of all true active differentiators. Please provide a single scalar value for the total count.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "exchange_traded_funds_18", "selected_database": "exchange_traded_funds", "query": "How many funds are really going against the grain and are a potential value investment? Just give me the final count, one number is all I need.", "normal_query": "I need the total number of funds that qualify as a contrarian value play. Please provide the final count as a single scalar value.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "exchange_traded_funds_19", "selected_database": "exchange_traded_funds", "query": "Which exchange moves the most money overall? Let's first figure out the average daily trading volume for each fund. Then, total those numbers for every exchange. Just tell me the one exchange where the most trading happens, the biggest player in terms of total value traded.", "normal_query": "I need to identify the most liquid exchange. To do this, first calculate the average daily value traded for every fund. Then, sum this value for all funds on each exchange. Finally, return the name of the single exchange with the highest total value traded.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "exchange_traded_funds_20", "selected_database": "exchange_traded_funds", "query": "How much money is being wasted on fees by funds that are basically just hugging the index? For every one of those closet indexers, add up the fees that aren't really earning their keep. In the end, just give me one total number: the grand total of all those wasted fees, rounded to 2 decimal places.", "normal_query": "I need to calculate the total wasted fee amount for all funds classified as a closet indexer. Finally, sum these amounts together and provide a single scalar value rounded to 2 decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "exchange_traded_funds_M_1", "selected_database": "exchange_traded_funds", "query": "It's time to refresh our list of funds with positive momentum. Can you wipe the old daily_golden_cross_leaders table clean (create it if it doesn't exist) and then fill it up again with all the funds whose short-term price average is above their long-term average? 
For each one, I need the ticker symbol, who runs it, the momentum score, and today's date.", "normal_query": "Please run the daily job to update the daily_golden_cross_leaders table. First, ensure the table exists. Then, clear out all existing data from it. Finally, populate it with all funds that are currently showing a golden cross signal. The table should contain the fund's ticker symbol, parent group, the calculated short-term momentum indicator value, and today's date as the report date.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "exchange_traded_funds_M_2", "selected_database": "exchange_traded_funds", "query": "I need a tool that can calculate the stock-picking skill for any fund. Can you build a function called get_appraisal_ratio that I can use on any ticker? It should figure out the fund's appraisal ratio over the last 3 years and be smart enough not to break if the data isn't perfect like nulls or invalid data.", "normal_query": "Please create a reusable SQL function named get_appraisal_ratio that takes a fund's ticker symbol as input. This function should calculate the fund's 3-year appraisal ratio. Inside the function, handle potential null or invalid data to avoid errors.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "exchange_traded_funds_M_3", "selected_database": "exchange_traded_funds", "query": "I want to create a pre-calculated list named vw_reliable_core_holdings. This list should contain all funds that are reliable as a core portfolio holding. For each qualifying fund, please include its ticker symbol, short label, parent group, product class, and launch date.", "normal_query": "I want to create a pre-calculated list named vw_reliable_core_holdings. This list should contain all funds classified as a reliable core holding. For each qualifying fund, please include its ticker symbol, short label, parent group, product class, and launch date.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "exchange_traded_funds_M_4", "selected_database": "exchange_traded_funds", "query": "Let's make a refreshable summary table called family_risk_summary to track the family risk profile for each fund company. For each company, store the family name, and can you calculate their average market risk (3-year beta), their median risk-adjusted return (3-year Sharpe Ratio), and the number of funds that generates more than 0 alpha in a 5 year period? When the query is run, it should just update the table with these fresh calculations and the current time.", "normal_query": "I need to perform an upsert operation on a summary table named family_risk_summary. This table should store a family risk profile for each fund family. 
For each family, calculate and store the family name, the average 3-year beta, the median 3-year sharpe ratio, a total count of their alpha generator funds, and a timestamp of when the record was last updated.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "exchange_traded_funds_M_6", "selected_database": "exchange_traded_funds", "query": "Let's rebuild our summary of funds that are basically just expensive index trackers. Can you wipe the closet_indexer_summary table clean and then fill it again (create it if the table doesn't exist)? For every fund that just copies the market but still charges more than its benchmark, I want to see how much money is being wasted in extra fees. Put the fund's ticker, the company name, and that wasted fee amount into the table.", "normal_query": "I need to refresh the closet_indexer_summary table. Please ensure the table exists, then clear all its existing data. Afterward, identify all closet indexer funds, calculate their total wasted fee amount, and insert the fund's ticker symbol, its family name, and the calculated amount into the table.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "exchange_traded_funds_M_7", "selected_database": "exchange_traded_funds", "query": "I need to clean up the annual_returns table by moving old data into a separate archive. Let's create a procedure called archive_old_returns that takes a number of years as input; this tells it how many years of data we want to keep. Any return records older than that should be moved to annual_returns_archive, and then deleted from the main table. This is important because we use the archived data to calculate long-term outperformance. Once the procedure is ready, run it to archive everything older than 10 years.", "normal_query": "I need to move historical data from annual_returns, which is an active table, to an archive table called annual_returns_archive. You should create a procedure named archive_old_returns, and it must be designed to take an integer for the number of years to retain. It should move records from annual_returns to annual_returns_archive if they are older than the specified retention period, and then delete the moved records. This is important because the archived data is the basis for calculating annual fund outperformance. Please create the procedure and then CALL it to archive all records older than 10 years.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "exchange_traded_funds_M_8", "selected_database": "exchange_traded_funds", "query": "I want to automatically get an alert when a fund's investment strategy seems to be changing. Can you set up a trigger function called log_style_drift that keeps a log for us? Whenever a fund's risk numbers get updated in the risk_metrics table, I want the function to check if its market risk or its correlation to the benchmark has shifted significantly. 
If it has, please add a new line to a style_drift_log table with the fund's ticker symbol, the old and new risk values, and when it happened. If the style_drift_log table does not exist, please create it first.", "normal_query": "I need you to implement a trigger to monitor for style drift in funds, and this function should be called log_style_drift. First, create a new table named style_drift_log to record these events. Then, create a trigger function that fires after any update on the risk_metrics table. If the change meets the definition of style drift, it should insert a new record into style_drift_log containing the fund's ticker symbol, the old and new beta/R-squared values, and the current timestamp.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "exchange_traded_funds_M_9", "selected_database": "exchange_traded_funds", "query": "Let's add a new performance stat to the funds table. First, make sure there's a column called rrei_score, and that's where we'll store each fund's index of risk-return efficiency. Then, set up a function called calculate_rrei that figures out this score based on the fund's ticker. Once that's ready, go ahead and fill in the rrei_score for every fund by calling the function for each one.", "normal_query": "I want to enrich the funds table with a new calculated metric. First, add a new rrei_score column if it's not already there. Next, create a function calculate_rrei that computes the risk-return efficiency index for a given fund ticker. Finally, update the funds table to populate the rrei_score for every fund by calling this new function.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "exchange_traded_funds_M_10", "selected_database": "exchange_traded_funds", "query": "Let's create and keep a summary table called family_sector_focus up-to-date. I want to see which single industry each fund company is most heavily invested in. For each company, give the company's name, find their top industry and the average investment percentage in it and then either add them to the table or update their existing entry with this new info and the time of the update.", "normal_query": "I want to insert a new summary record or update an existing one, ensuring data freshness without duplicates to track the family sector concentration profile. Ensure a table named family_sector_focus exists. Then, for each fund family, insert or update the table with the family name, the top sector name, the average weight in that sector, and the current timestamp.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "disaster_relief_1", "selected_database": "disaster_relief", "query": "We need to pinpoint which operations must be flagged as critical. Let's find any operation responding to a 'Catastrophic' level disaster. For these, show me their ID, the area they are in, and that final disaster severity score, rounded to two decimals.", "normal_query": "Generate a report of all operations that require escalation to 'Critical' priority. 
An operation qualifies if it is responding to a disaster classified as 'Catastrophic'. The report must include the operation's reference ID, the affected area, and the specific DSI value, rounded to two decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "disaster_relief_2", "selected_database": "disaster_relief", "query": "For planning purposes, I need to get a handle on our resource situation across all disasters. Could you pull a report for every event showing what it was and where it happened, and then calculate two key things for me? First, how many days will our current supplies last, considering food and water as separate constraints? Second, what is the current shelter shortfall? Please round any calculated numbers to two decimal places.", "normal_query": "Provide a logistical status report for all recorded disasters. The report must contain the disaster ID, hazard type, and affected area, along with two calculated metrics, rounded to two decimal places: the 'Supply Sufficiency (Days)' and the 'Shelter Gap'.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "disaster_relief_3", "selected_database": "disaster_relief", "query": "I need to flag any disaster that was a Mass Casualty Incident. Can you pull a list of them and show me the ID and location for each?", "normal_query": "Identify all disasters classified as a Mass Casualty Incident (MCI). For each qualifying disaster, return its ID and the affected area.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "disaster_relief_4", "selected_database": "disaster_relief", "query": "How are our distribution hubs holding up? Let's create a report for each hub. I want a strain score that considers both how busy the hub is internally and how severe the disasters are that it's serving. If that final score is over a million, flag it as 'Overwhelmed', otherwise it's 'Normal'. Show me the hub ID, the final score rounded to two decimals, and its status.", "normal_query": "Let's assess the status of our distribution hubs. For each hub, provide its ID and its calculated Hub Strain Index (HSI), rounded to two decimal places. Also include a 'status' column classifying the hub as 'Overwhelmed' if its HSI exceeds 1,000,000, and 'Normal' otherwise.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "disaster_relief_5", "selected_database": "disaster_relief", "query": "I'm worried about operations running out of cash. Can you identify which are in a Funding Crisis? For that list, show me their ID, funding status, and exactly how many days they have left, to one decimal place.", "normal_query": "Identify all ongoing operations that are in a Funding Crisis. 
The report should include the operation ID, its funding state, and the specific Budget Runway in days, rounded to one decimal place.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 1, "distinct": false, "order": false}} +{"instance_id": "disaster_relief_6", "selected_database": "disaster_relief", "query": "I want to do a post-mortem on missions that didn't go well. Can you pull up the full coordination and evaluation details for any completed operations we've flagged as a Failing Operation?", "normal_query": "For post-mission analysis, retrieve the complete coordination and evaluation records for all completed operations that are classified as a Failing Operation.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "disaster_relief_7", "selected_database": "disaster_relief", "query": "Let's find our top performers. Can you identify all missions that qualify as a Highly Effective Operation? Please list their ID, along with their RES and CQI scores. Sort the list to show the most effective ones at the top.", "normal_query": "Generate a report identifying all operations designated as a Highly Effective Operation. The report must include the operation ID, its Response Effectiveness Score (RES), and its Coordination Quality Index (CQI), sorted by RES in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "disaster_relief_8", "selected_database": "disaster_relief", "query": "Let's find any areas on the brink of a major health crisis. Can you run the numbers and find all operations where the public health risk score is over 70? This score should be based on things like disease risk, sanitation, and medical staff ratios. For any you find, just list the operation's ID and that final risk score, rounded to two decimal places.", "normal_query": "Produce a report identifying all operations that should be flagged for a Public Health Emergency. An operation is flagged if its calculated Public Health Risk Score (PHRS) exceeds 70. For each flagged operation, show the operation ID and its calculated PHRS, rounded to two decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "disaster_relief_9", "selected_database": "disaster_relief", "query": "A simple list of stuck operations isn't enough; we need to know which ones are the biggest emergencies. Can you rank all operations in Logistical Gridlock by how critical they are using the Gridlock Severity Index? Then, show me a list of the worst ones at the top, with their location and that severity score, rounded to two decimals.", "normal_query": "Generate a prioritized report of all operations in Logistical Gridlock. The report should rank operations by a calculated 'Gridlock Severity Index'. 
Return the operation ID, affected area, and the index, rounded to two decimal places, sorted with the most severe cases first.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "disaster_relief_10", "selected_database": "disaster_relief", "query": "Give me a bird's-eye view of how our regions are performing. Can you produce a summary showing each region (in lowercase), their average coordination quality score (rounded to two decimals), and a simple count of how many disasters they've handled?", "normal_query": "I need a regional performance summary. For each region tag (in lowercase), calculate and display the average Coordination Quality Index (CQI), rounded to two decimal places, and the total count of disasters that have occurred in that region.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "disaster_relief_11", "selected_database": "disaster_relief", "query": "Let's get a big picture metric on our efficiency. For all the missions we've officially wrapped up, how much did it cost us on average to help a single person? Just give me that final dollar amount, rounded to two decimal places.", "normal_query": "For all 'Completed' operations, calculate the overall 'Cost Per Beneficiary (CPB)'. The final value should be rounded to two decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "disaster_relief_12", "selected_database": "disaster_relief", "query": "I want to know how our vehicles are holding up in the absolute worst conditions. For all those disasters where we could barely get in—the ones marked with 'Minimal' transport access—what's our average vehicle breakdown rate? Just give me that single percentage, rounded to one decimal.", "normal_query": "Determine the average vehicle breakdown rate for all transportation units involved in disasters where transport access is rated as 'Minimal'. Present the result as a percentage rounded to one decimal place.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 1, "distinct": false, "order": false}} +{"instance_id": "disaster_relief_M_1", "selected_database": "disaster_relief", "query": "Let's add a supply status flag to each disaster record to make our dashboards easier to read. Can you update our records based on how many days their supplies will last? If it's less than two days, tag it 'Critical'. If it's under five, call it 'Low'. Anything else is 'Adequate'. Please also store the calculated number of days in there.", "normal_query": "Update the impact_summary JSONB field for all disaster events to include a new 'supply_status' object. This object should contain the calculated supply sufficiency in days and a status classification based on that value. 
Classify the status as 'Critical' for a runway under 2 days, 'Low' for under 5 days, and 'Adequate' otherwise.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "disaster_relief_M_2", "selected_database": "disaster_relief", "query": "We need a standard way to figure out our cost per ton for aid delivery. Let's create a reusable function, call it calculate_adc, that does this for us. It should take a transportation ID, find the associated costs and total tons delivered, and return the cost per ton. Also, build in a sanity check: if delivery tons are missing or zero, it should throw an error instead of dividing by zero.", "normal_query": "Create a PL/pgSQL function named calculate_adc that calculates the aid delivery cost per ton. The function should accept a single transportation ID as an input parameter. It must compute the total transport costs associated with that ID and divide it by the total tons delivered. The function must include validation to raise an exception if the total delivery tons are null or zero.", "preprocess_sql": [], "clean_up_sqls": ["DROP FUNCTION IF EXISTS calculate_adc(VARCHAR(20));"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "disaster_relief_M_3", "selected_database": "disaster_relief", "query": "I want to set up some automatic financial red flags. Can you build a function that checks all our active operations and alerts us based on two rules? First, if an active operation has already spent more than 20 percent of its total budget, raise a 'High Burn Rate' alert. Second, calculate how many days of funding they have left at their current spending rate; if it's less than a week, raise a 'Funding Crisis' alert.", "normal_query": "Create a function named get_financial_alerts that returns a table of alerts for active operations. It should generate a 'High Burn Rate' alert if an operation's total costs exceed 20% of its allocated budget. It should also generate a 'Funding Crisis' alert if the calculated budget runway, based on the daily burn rate, is less than 7 days.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "households_1", "selected_database": "households", "query": "I want to find the most 'plush' region. To do this, first figure out a 'comfort score' for each house by dividing its number of bathrooms by its number of residents—but only for houses where we know the bathroom count. Then, find the average comfort score for each region. Once you've identified the single region with the best average score, go back and add up the car counts for every household in that specific region and tell me the grand total.", "normal_query": "A 'Comfort Index' is calculated for each household by dividing its bathroom count by its resident count. Find the region (`locregion`) with the highest average 'Comfort Index', considering only households with a known bathroom count and at least one resident. 
For this top-ranking region, calculate the total number of cars ('Auto_Count') owned by all its households combined.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "households_2", "selected_database": "households", "query": "Let's find the biggest welfare fraud hotspot. A family is a red flag if they get aid but have a lot of new vehicles (more than 2, newest from 2010+). I want to know which region has the highest concentration of these red-flag families as a percentage of their total population. Just give me the name of that region.", "normal_query": "To identify potential fraud hotspots, first define a 'High-Risk Household' as one that is 'Supported' (socsupport is 'Yes') and 'High-Mobility' (total vehicles > 2, newest vehicle 2010 or later). Then, for each region, calculate the percentage of its total households that are 'High-Risk'. Finally, return the `locregion` with the highest percentage of 'High-Risk' households.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "households_3", "selected_database": "households", "query": "We're looking for welfare fraud. Flag any family that gets aid and also has a lot of newish vehicles. Specifically, a 'high-mobility' family has more than two vehicles in total, and their newest one is from 2010 or later. Give me a unique list of the flagged household IDs.", "normal_query": "The government is investigating potential welfare fraud. A household is flagged for review if it meets two conditions simultaneously: 1) its `socsupport` status is 'Yes', AND 2) it is a 'High-Mobility' household. A 'High-Mobility' household is defined as one where the sum of all vehicles is greater than 2, AND its 'Newest_Year' is from 2010 or later. Return a distinct list of the flagged household numbers.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": true, "order": false}} +{"instance_id": "households_4", "selected_database": "households", "query": "Let's find all the 'roomy' apartments. A place is 'roomy' if it has more than 20 square meters per person. To figure out the total square meters, assume every bathroom is 10 sq meters and every bedroom (use the `Room_Count` field) is 15 sq meters—if a count is missing, just treat it as zero. After you get the total area, divide it by the number of people in that house, making sure to handle cases with no residents. Finally, just tell me how many apartments in total are 'roomy'.", "normal_query": "An urban planning initiative gives a 'Space Bonus'. To qualify, an apartment must have more than 20 square meters per resident. The total square meters is calculated as (the number of bathrooms * 10) plus (the number of bedrooms * 15), using the `Bath_Count` and `Room_Count` fields from `dwelling_specs` respectively. Any missing counts should be treated as 0. This total area is then divided by the household's resident count, avoiding any division-by-zero errors. 
Return the total count of apartments that qualify for the bonus.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "households_5", "selected_database": "households", "query": "I need to find our longest-standing 'large, wealthy' family. A family qualifies if they own their home, are in the top two income brackets, and have more than 4 people. From that list, find the one with the lowest household ID (our oldest record) and tell me their region and zone.", "normal_query": "Identify 'affluent, large' families, defined as owner-occupied households in the top two income brackets with more than 4 residents. From this group, find the household that has been in the system the longest (i.e., has the lowest `housenum`). For this specific household, list its `locregion` and `loczone`.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "households_6", "selected_database": "households", "query": "In the Taguatinga area, find all the 'overcrowded' homes (more than 3 people per bedroom). Once you have that list, figure out the average number of vehicles (cars, bikes, motorcycles all counted) that this specific group owns. Give me the final number, rounded.", "normal_query": "For the 'Taguatinga' region, calculate the 'crowding score' for each household. Identify all households with a score over 3. For this specific group of 'overcrowded' households, determine the average number of total vehicles they own (sum of autos, bikes, and motors), rounded to an integer.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 0, "distinct": false, "order": false}} +{"instance_id": "households_7", "selected_database": "households", "query": "Let's compare crowded city homes to crowded country homes. How many more crowded city homes are there than crowded country homes? Give me the difference.", "normal_query": "We want to compare two groups of households: 'Urban Crowded' and 'Rural Crowded', based on our established definitions for 'Urban' and 'Crowded' households. Calculate the count for each group and return the difference (Urban Crowded count - Rural Crowded count).", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "households_8", "selected_database": "households", "query": "Let's score each house's infrastructure. First, find the average score for each region. Then, tell me which region has the best average score and which has the worst, in a single line like 'BestRegion | WorstRegion'.", "normal_query": "Calculate the 'Infrastructure Score' for each household. Then, for each region, find the average score. 
Finally, list the region with the highest average score and the region with the lowest average score as a single string: '[Highest Region] | [Lowest Region]'.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "households_9", "selected_database": "households", "query": "Let's score each house's infrastructure. First, find the average score for each region. Then, tell me which region has the best average score and which has the worst, in a single line like 'BestRegion | WorstRegion'.", "normal_query": "Calculate the 'Infrastructure Score' for each household. Then, for each region, find the average score. Finally, list the region with the highest average score and the region with the lowest average score as a single string: '[Highest Region] | [Lowest Region]'.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "households_10", "selected_database": "households", "query": "I'm looking for the hotspot of 'high-tech, high-mobility' families. These are people with lots of newish vehicles (more than 2, newest from 2005+) who also live in modern homes (house, apt, condo) with available TV service. Find all these families, then tell me which single region has the most of them.", "normal_query": "Identify households that are both 'High-Mobility' (more than 2 total vehicles, newest from 2005 or later) and 'High-Tech' (living in a modern dwelling like a house, apartment, or condo with available TV service). After finding this group, determine which `locregion` has the highest count of these specific households.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "households_11", "selected_database": "households", "query": "Let's find our top 10 families by their financial health score, which considers their income, spending habits, and homeownership status. After you get that list, tell me what percentage of them has a private garage.", "normal_query": "Using the 'Socioeconomic Index' (SEI), identify the top 10 households with the highest scores. Then, for this elite group, calculate the percentage of them that have access to a 'Private Garage'.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "households_12", "selected_database": "households", "query": "Find all the families that don't get social aid or domestic help and own more than one vehicle. From those, figure out which type of home has the highest average 'prosperity score'. Then, tell me the total number of vehicles owned by families in that home type.", "normal_query": "Identify all 'independent' households (defined as those receiving no social support or domestic help and owning more than one vehicle). Among them, find the dwelling class with the highest average 'Household Prosperity Score'. 
For that top-ranking dwelling class, what is the total number of vehicles owned by the 'independent' households living there?", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "households_13", "selected_database": "households", "query": "I want to find the region with the highest concentration of families on social support. For each region, figure out what percentage of its total families receive aid. Then, just show me the name of the region with the top percentage and the percentage itself, rounded to two decimal places.", "normal_query": "For each region, calculate the ratio of households receiving social support to the total number of households in that region. Return the name of the region with the highest percentage, along with the ratio expressed as a percentage rounded to two decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "households_14", "selected_database": "households", "query": "Let's find the biggest spender among our 'comfortable' families. A family is 'comfortable' if they have a high living score and enough bathrooms for their size. First, make a list of all these families. Then, from that list, find the one with the highest spending number and tell me their household ID.", "normal_query": "A 'Comfortable Household' is defined as one with a 'Living Condition Score' over 3 and a bathroom-to-resident ratio over 0.5. First, identify all such households based on these criteria. Then, from this group, find the household with the highest 'Expenditure Coefficient' and return its `housenum`.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "households_15", "selected_database": "households", "query": "I need to find the region with the biggest 'at-risk' population. A household is 'at-risk' if they get social aid AND they're very overcrowded (more than 4 people per bedroom). For every region, calculate what percentage of their families are 'at-risk'. Then, just tell me the name of the region with the highest percentage.", "normal_query": "Identify 'At-Risk' households, defined as those receiving social support and having a household density (residents per bedroom) over 4. Then, for each region, calculate the percentage of all its households that are 'At-Risk'. 
Finally, return the region with the highest percentage of 'At-Risk' households.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "households_16", "selected_database": "households", "query": "What's the income bracket for household number 3?", "normal_query": "What is the income classification of the household with number 3?", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "households_17", "selected_database": "households", "query": "How many wealthy families who own their own homes live in the Taguatinga area?", "normal_query": "How many households in the 'Taguatinga' region are owner-occupied and fall within the top two income brackets?", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "households_18", "selected_database": "households", "query": "Which modern-style homes (like brickwork houses or apartments) in the 'Guará' area also have TV service? List their household numbers in order.", "normal_query": "List the household numbers for modern dwellings in the 'Guará' region, sorted by household number. A modern dwelling is defined as a specific dwelling type with available TV service.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": true, "order": true}} +{"instance_id": "households_19", "selected_database": "households", "query": "Which household in an urban area owns the most cars?", "normal_query": "What is the household number with the most passenger vehicles among all households in urban areas (defined by infrastructure)?", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "households_20", "selected_database": "households", "query": "How many families in each region get government help? List the regions from the one with the most helped families to the one with the least.", "normal_query": "Count the number of households receiving social support in each region, sorted by count in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "households_M_1", "selected_database": "households", "query": "The city has a $10k fund to give cable to modern homes that don't have it. It costs $75 per house. First, check if we have enough money to cover everyone who's eligible. If we do, go ahead and update their status to 'Subscribed'. After you're done, tell me exactly how many homes got the upgrade.", "normal_query": "A municipal program with a budget of $10,000 aims to upgrade cable infrastructure for 'modern dwellings' that currently have 'No Service Available'. 
If the cost per household is $75, first determine if the total cost for all eligible households is within budget. If it is, perform the update to set their cable status to 'Subscribed'. Finally, return the total number of households that were successfully updated (which will be 0 if the budget was exceeded).", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "households_M_2", "selected_database": "households", "query": "I want to delete vehicle records for families with no income info, but only if it's a small cleanup. First, check what percentage of our total vehicle data this would remove. If it's less than 5%, go ahead and delete them, then tell me how many you deleted. If it's 5% or more, don't delete anything and just tell me '0'.", "normal_query": "As a data quality measure, we need to purge transportation assets for households with a null income bracket. However, to prevent accidental mass deletion, this operation is only permitted if the number of affected records is less than 5% of the total transportation assets. First, calculate the percentage of records that would be deleted. If this percentage is below 5%, proceed with the deletion and return the count of deleted records. Otherwise, return 0.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "households_M_4", "selected_database": "households", "query": "Add a new family: household 5000, Taguatinga, zone 315, 3 people, no social services, owns their home.", "normal_query": "Register a new household with number 5000 in 'Taguatinga', zone 315, with 3 residents, no service plan, and owned tenure.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "households_M_5", "selected_database": "households", "query": "Remove all vehicle records for families where we don't have their income information.", "normal_query": "Purge transportation assets for households with no income classification.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "households_M_6", "selected_database": "households", "query": "What type of building does household 1182 live in?", "normal_query": "What is the dwelling type of household number 1182?", "preprocess_sql": [], "clean_up_sqls": ["DROP VIEW IF EXISTS V_Dwelling_Type_1182;"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "households_M_7", "selected_database": "households", "query": "Count how many small living spaces (apartments or studios) house only one or two people.", "normal_query": "How many households are in compact dwellings (Apartment or Studio) with fewer than 3 residents?", "preprocess_sql": [], "clean_up_sqls": ["DROP VIEW IF EXISTS V_Compact_Household_Count;"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": 
"Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "households_M_8", "selected_database": "households", "query": "How many well-maintained houses (like brickwork houses and apartments) also have TV service available?", "normal_query": "What is the total number of dwellings considered well-maintained, based on their type and available TV services?", "preprocess_sql": [], "clean_up_sqls": ["DROP VIEW IF EXISTS V_Well_Maintained_Dwellings_Count;"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "households_M_9", "selected_database": "households", "query": "We have a $5000 monthly budget to survey premium city families. A family is 'premium' if they own a high-income home, have great infrastructure (city water, paved roads), and aren't overcrowded (no more than 2 people per bedroom). If each survey costs $150, what's the total cost, and are we over or under budget? Give me a summary like 'Cost: $XXXX, Budget Status: Within Budget'.", "normal_query": "A research institute has a total monthly budget of $5000 to identify and survey 'premium urban households'. A household qualifies as 'premium urban' if it meets three criteria: 1) It is owner-occupied with an income level of 'High Income' or 'Very High Income'. 2) It has piped water and resides on roads with asphalt or concrete surfaces. 3) The household's resident-to-bedroom ratio does not exceed 2. If the cost to survey each qualifying household is $150, calculate the total survey cost and determine if it is within the monthly budget. The final output should be a single string: 'Cost: [Total Cost], Budget Status: [Within Budget/Exceeds Budget]'.", "preprocess_sql": [], "clean_up_sqls": ["DROP VIEW IF EXISTS V_Survey_Budget_Analysis;"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "households_M_10", "selected_database": "households", "query": "I need to find the 5th most crowded house. First, figure out the 'people per bedroom' number for all crowded houses (more than 2 people/bedroom). Then, convert that number to a 'strain index' by multiplying it by 15. Finally, tell me the ID of the house that ranks 5th on this new strain index list.", "normal_query": "To assess housing strain, we define a 'density score' as residents per bedroom. For international comparison, this score needs to be converted to a 'strain index' where 1 unit of density equals 15 'strain points'. Generate a ranked list of households with a density score greater than 2, showing their household number and the calculated strain index (as an integer). From this list, identify the household number with the 5th highest strain index.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 0, "distinct": false, "order": true}} +{"instance_id": "planets_data_1", "selected_database": "planets_data", "query": "I'm curious about the gravity on bigger, rocky worlds. For all the confirmed super-earths found by watching their star's light dip (no matter how the method is written), what's their average surface gravity? 
Just give me the number, rounded to two decimal spots.", "normal_query": "What is the average planet surface gravity for all confirmed exoplanets that are larger than Earth but no more than ten times its mass, have a density greater than 3 g/cm³, and were discovered by observing the dimming of their host star? The check for the discovery method must be case-insensitive. Provide the result as a single scalar value rounded to 2 decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "planets_data_2", "selected_database": "planets_data", "query": "I'm looking for Jupiter-like planets that are both scorching hot and spinning backwards, but only those where we know the star's mass and the planet's orbital distance. Can you list them out for me? I want to see their names, how long their year is, their orbital tilt, and how fast they're zipping around their star in kilometers per second. Put the fastest ones at the top.", "normal_query": "Generate a table of all hot jupiter planets that are also in a retrograde orbit, and for which the host star's mass and the planet's semi-major axis are known. Please display the host star's name, its orbital period in days, its inclination in degrees, and its calculated orbital velocity in km/s. Sort the results in descending order of the orbital velocity.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "planets_data_4", "selected_database": "planets_data", "query": "Let's fact-check Kepler's law on star systems with multiple planets. For each of these systems, take the planet that's farthest out (based on a known semi-major axis) and use its orbit to calculate its star's mass, but only if its orbital period is also known and positive. Show me the star's name, its official mass, and the mass we just calculated. Then, sort them to show the ones where our calculation was closest to the real value first.", "normal_query": "For each star that hosts a multi-planetary system, calculate the kepler's third law verification value for its outermost planet (the one with the largest known semi-major axis). Only include planets with a known and positive orbital period for this calculation. Display the host star's name, the recorded mass from the database, and the calculated mass. Order the results by the absolute difference between the recorded and calculated mass, from smallest to largest.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "planets_data_5", "selected_database": "planets_data", "query": "Of all the stars that have a measured mass and a positive radius, which one is the most tightly packed? Give me its name. You'll need to convert solar mass to kg using 1.98847E30 and solar radius to meters using 6.957E8 to do the calculation.", "normal_query": "For all stars with a known mass and a known, positive radius, what is the name of the one with the highest calculated stellar density? Provide the name as a single text result. 
Note: To calculate density in SI units, use the conversion factors 1.98847E30 for solar mass to kg, and 6.957E8 for solar radius to meters.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "planets_data_6", "selected_database": "planets_data", "query": "Show me a list of planets that have highly eccentric orbits and are flagged for having a minimum mass measurement (look for 'msini', regardless of case). For each one that has a known mass and is orbiting a star with a known, positive mass, I'd like to see the planet's name, its star's name, its eccentricity value, and the planet-to-star mass ratio. To calculate the ratio, use 1.898E27 kg as the mass of Jupiter and 1.98847E30 kg as the mass of the Sun. Please show the ratio with 5 decimal places and sort the list from the smallest ratio to the largest.", "normal_query": "I want a report on all planets that have a high eccentricity orbit and also have their minimum mass status flagged (case-insensitive match for 'msini'). For each planet with a known mass, whose host star also has a known and positive mass, show its full name, its host star, its eccentricity, and calculate its planet-star mass ratio to 5 decimal places. Use 1.898E27 kg for Jupiter's mass and 1.98847E30 kg for the Sun's mass for the ratio calculation. Order the results by the mass ratio in ascending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 5, "distinct": false, "order": true}} +{"instance_id": "planets_data_7", "selected_database": "planets_data", "query": "For all the big gassy planets found by Kepler's second-chance mission (matching 'k2' however it's capitalized), what's their average surface temperature, basically? Only include cases where we know the star's temperature, the star's radius, and the planet's orbital distance, and all are positive numbers. Give me that in kelvin, rounded to a whole number. Note that you'll need to use 6.957E8 to convert solar radius to meters and 1.496E11 to convert AU to meters.", "normal_query": "Find the average planetary equilibrium temperature for all gas giant planets discovered by the successor to the original Kepler mission (case-insensitive match for 'k2'). Only include planets for which the host star's temperature and radius, and the planet's semi-major axis, are all known and positive. Express the result in kelvin and round to the nearest whole number. Note: for the temperature calculation, use conversion factors of 6.957E8 for solar radius to meters and 1.496E11 for AU to meters.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 0, "distinct": false, "order": false}} +{"instance_id": "planets_data_8", "selected_database": "planets_data", "query": "When planets found by the star wobble method (no matter how it's capitalized) pass in front of their star, what's the biggest dimming effect we could see? Only consider planets that have a known radius and orbit a star with a known, positive radius. Tell me that maximum dip in brightness as a percentage with four decimal places, and also name the planet and star responsible. 
You'll need to use the conversion that 1 solar radius is 109.2 Earth radii.", "normal_query": "What is the maximum transit depth, expressed as a percentage to 4 decimal places, for any planet discovered via the radial velocity method (case-insensitive match) where both the planet's radius and the host star's radius are known and positive? Also, provide the full name of the planet and its host star. Note: To compare radii, use the conversion factor 1 solar radius = 109.2 Earth radii.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 4, "distinct": false, "order": true}} +{"instance_id": "planets_data_9", "selected_database": "planets_data", "query": "Find me the rocky super-earth that has the strongest gravity pull of them all, considering only planets with known mass, radius, and density. Once you've pinpointed that planet, tell me what its mass is as a fraction of its star's mass, assuming the star's mass is also known and positive. I need that final number in scientific format, with 7-digit precision.", "normal_query": "Determine the planet-star mass ratio for the specific super-earth that exhibits the highest planet surface gravity. Only consider planets with known mass, radius, and density, orbiting stars with a known and positive mass. The final ratio should be a single value, expressed in scientific notation with 7 digits of precision.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 6, "distinct": false, "order": false}} +{"instance_id": "planets_data_10", "selected_database": "planets_data", "query": "On average, how far away are the stars that have those big, puffy gas planets? I only want to include stars where we have a solid distance number, not just a 'less than' value, and where the brightness measurement isn't messed up by other stars nearby. Show the result in light-years with two decimal points.", "normal_query": "What is the average distance in light-years to host stars of inflated gas giant planets? Only include host stars where the distance measurement is not an upper limit value and the photometric magnitude measurement is not affected by a blended measurement. Give the answer to 2 decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "planets_data_11", "selected_database": "planets_data", "query": "Find me the planetary systems that are really tightly packed, but only if their closest-in planet is also super fast. For these systems, I want to see the star's name and the average orbital period ratio calculated using the geometric mean, with three decimal points. Sort the list by that average ratio, from highest to lowest.", "normal_query": "Identify compact systems where the innermost planet is a short-period planet. For each such system, list the host star name and calculate the geometric mean of all orbital period ratios between adjacent planets, rounded to 3 decimal places. 
Order the result by this geometric mean descending.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 3, "distinct": false, "order": true}} +{"instance_id": "planets_data_12", "selected_database": "planets_data", "query": "How many different stars have planets that were discovered by the ttv method, where they look for wobbles in a planet's transit schedule? Make sure you find 'ttv' regardless of its case.", "normal_query": "What is the total number of distinct host stars for which a planet was found by analyzing timing deviations in an already known transit? The search for the facility name 'ttv' must be case-insensitive.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": true, "order": false}} +{"instance_id": "planets_data_13", "selected_database": "planets_data", "query": "Find the planet with the biggest mass-to-size ratio, only looking at planets where we have a measured mass and a measured, non-zero radius. Then tell me its escape velocity in kilometers per second. Just give me a whole number.", "normal_query": "Calculate the planet escape velocity in km/s for the planet with the highest confirmed mass-radius relationship value. Only consider planets with a known, non-zero mass and radius. Provide the result rounded to the nearest integer.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 0, "distinct": false, "order": true}} +{"instance_id": "planets_data_14", "selected_database": "planets_data", "query": "Can you count up how many stars were observed with each type of light filter? Make sure to lump all the different ways of writing 'v-band' together, and do the same for 'kepler-band', ignoring capitalization. Just ignore the 'k-band' ones for now. Then show me the cleaned-up filter name and how many stars for each, with the most-used filter at the top.", "normal_query": "For each photometric band in the 'stars' table, count the number of stars observed. Standardize the band names (case-insensitively): 'v (johnson)', 'johnson', 'v', 'johnson v', and 'v-band' should all be grouped as 'v-band'; 'kepler-band', 'kepler', 'kep-b', and 'kep' as 'kepler-band'. Ignore 'k-band' and any nulls. Show the standardized band name and the count, ordered by count descending.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "planets_data_15", "selected_database": "planets_data", "query": "If you look at stars that only have rocky planets and no gas giants, what is their average brightness compared to our sun? Only include planets where we know their density or mass, and only stars where we know their radius and temperature. I need the number with 4 decimal places.", "normal_query": "What is the average stellar luminosity of stars that host at least one rocky planet, but have no gas giant planets in the system? This analysis should only consider planets with a known density or mass and stars with a known radius and temperature. 
Calculate the result relative to the sun and provide it to 4 decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 4, "distinct": false, "order": false}} +{"instance_id": "planets_data_16", "selected_database": "planets_data", "query": "How many planets did kepler find where the star's temperature reading is wonky because of other nearby stars?", "normal_query": "Count the number of planets whose discovery is attributed to the kepler mission and are part of a system with a blended measurement for stellar temperature.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "planets_data_17", "selected_database": "planets_data", "query": "Can you give me the coordinates for the star '55 cnc' on an hr diagram, matching the name regardless of its case? I need its temperature and its luminosity relative to the sun, with the luminosity value having 3 decimal points.", "normal_query": "Provide a hertzsprung-russell (hr) diagram position for the star '55 cnc' (case-insensitive match). List its effective temperature and its calculated stellar luminosity. Round the luminosity to 3 decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 3, "distinct": false, "order": false}} +{"instance_id": "planets_data_18", "selected_database": "planets_data", "query": "I want to find the hottest planet that isn't a 'hot jupiter'. Only look at planets where we know the star's temperature and radius and the planet's orbital distance so you can do the calculation. Can you tell me its name, its star, and its temperature in kelvin? Please round the temperature to a whole number. You'll need to convert star radius from solar radii to meters using 6.957E8 and orbital distance from AU to meters using 1.496E11.", "normal_query": "Find the planet with the highest planetary equilibrium temperature that is not classified as a hot jupiter. Only include planets for which the host star's temperature and radius, and the planet's semi-major axis, are all known and valid for the calculation. Return the planet's letter, its host star name, and its calculated equilibrium temperature in kelvin, rounded to the nearest integer. Note that for unit consistency, you should use the conversion factors 6.957E8 for solar radius to meters and 1.496E11 for AU to meters.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 0, "distinct": false, "order": true}} +{"instance_id": "planets_data_19", "selected_database": "planets_data", "query": "For how many planets do we have a size measurement, but we know it's just a 'less-than-or-equal-to' kind of number because it's marked as an upper limit?", "normal_query": "How many planets have a value for planetary radius, but this value is not a confirmed measurement and is instead flagged as an upper boundary? 
", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "planets_data_20", "selected_database": "planets_data", "query": "For the star that looks brightest in our night sky, what's its standard gravitational parameter, μ? Give me the answer in scientific notation, with three digits of precision. You'll need to use G = 6.67430E-11 and convert the star's mass from solar masses to kg using the factor 1.98847E30.", "normal_query": "Calculate the gravitational parameter (μ) for the star that appears brightest from Earth. Provide the result in scientific notation with 3 digits of precision. Note that the Gravitational constant 'G' is 6.67430E-11 and the conversion factor for solar mass to kilograms is 1.98847E30.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 3, "distinct": false, "order": true}} +{"instance_id": "planets_data_M_1", "selected_database": "planets_data", "query": "Let's clean up the discovery methods because they're a mess. Can you make a new view called `v_discovery_method_summary`? It should list every planet's id, its original discovery method from the table, and a new column with a neat, standardized category like 'radial velocity' or 'transit' that works no matter how the original method is capitalized.", "normal_query": "Create a view named `v_discovery_method_summary`. This view should contain the planet's reference id, its original discovery method string, and a new `discovery_category` column. The new column should perform a case-insensitive standardization of the various discovery method names into unified categories such as 'radial velocity', 'transit', and 'imaging', based on the known variations.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "planets_data_M_2", "selected_database": "planets_data", "query": "I need a new table called `planet_properties_earth_units` to keep track of planet sizes in a way that's easier to compare to home. It should have a reference to the planet, its mass in earths, and its radius in earths. Once you've made the table, go ahead and fill it up with all the planets we have the right data for.", "normal_query": "Create a table named `planet_properties_earth_units`. The table should store the planet's reference id (as a foreign key to the `planets` table), the planet mass in earth units, and the planet radius in earth units. After creating the table, populate it with data for all planets that have known jupiter-mass and jupiter-radius values.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "planets_data_M_3", "selected_database": "planets_data", "query": "Let's create a summary view for all the star systems; call it `v_system_overview`. 
For each star, I want to see its name, how big it is, how hot it is, and then two numbers: how many of its planets were found by the 'wobble' method and how many were found by the 'dimming' method (ignoring capitalization for both).", "normal_query": "I need a new view called `v_system_overview`. This view should list each host star and include its name, its stellar radius, its temperature, and two separate counts of its planets: one for discoveries via radial velocity and one for discoveries via the transit method (both matched case-insensitively).", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "planets_data_M_4", "selected_database": "planets_data", "query": "Make me a table called `high_precision_params` that flags planets with super-accurate measurements. It needs to link to the planet and then have three true/false columns: one for a high-precision mass, one for radius, and one for period. Then, fill the table with every planet for which we can calculate at least one of these uncertainty values, even if all flags end up being false.", "normal_query": "Create a table `high_precision_params`. The table should contain a reference to the planet and boolean flags indicating if its mass, radius, and period are high-precision. Populate this table for all planets that have at least one valid, non-null uncertainty measurement for either mass, radius, or period, regardless of whether it qualifies as high-precision.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "planets_data_M_5", "selected_database": "planets_data", "query": "I want to categorize all the stars with a known mass by how big they are. Can you make a new table called `star_classifications` for this? It needs to link to the star and give it a label. Call them 'massive' if they're huge (over 3 solar masses), 'intermediate' if they're a fair bit bigger than the sun, 'sun-like' if they're in the same ballpark as ours (down to about 0.8 solar masses), and 'low-mass' for all the little ones. Then fill the table with these labels.", "normal_query": "Create a new table `star_classifications` with columns for `stellarref` and `class_name`. The `stellarref` should be a foreign key to the `stars` table. Then, for all stars with a known stellar mass value, populate this table by assigning a `class_name` based on that mass: 'massive' for stars more than three times the sun's mass, 'intermediate' for those between that and one-and-a-half solar masses, 'sun-like' for those down to eighty percent of the sun's mass, and 'low-mass' for anything smaller.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "planets_data_M_6", "selected_database": "planets_data", "query": "Can you make me a pre-compiled list of all the super puffy gas planets where we can actually calculate their temperature? Let's call it `v_inflated_giants_report`. The list should only include planets where the star's temperature and radius and the planet's orbital distance are known. 
For those planets, show their name, star, mass and radius in jupiter units, density, and estimated temperature, with the temperature as a whole number.", "normal_query": "Please create a materialized view called `v_inflated_giants_report`. This view should contain all planets classified as inflated gas giants for which a planetary equilibrium temperature can be calculated (i.e., the host star's temperature and radius, and the planet's semi-major axis are all known). For each such planet, include its name, its host star, its mass in jupiter units, its radius in jupiter units, its density, and its planetary equilibrium temperature, rounded to the nearest integer.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": 0, "distinct": false, "order": false}} +{"instance_id": "planets_data_M_7", "selected_database": "planets_data", "query": "Let's make a new table called `planet_types` to label every planet. It should have the planet's id and its type. Use our standard rulebook to classify them: check first for the special types like 'hot jupiter' and 'super-earth', then for the basic 'rocky' or 'gas giant' types. If a planet doesn't fit any of those buckets, just label it 'unknown'. Go ahead and fill the table after you create it.", "normal_query": "Create a new table `planet_types` that contains the planet's reference id and a string representing its type. Then, populate this table by classifying each planet according to the established hierarchical definitions for 'hot jupiter', 'super-earth', 'rocky planet', and 'gas giant', assigning 'unknown' to any that do not fit a category.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "planets_data_M_8", "selected_database": "planets_data", "query": "I want a quick way to see which planets have dodgy star measurements. Make a view called `v_uncertain_measurements`. It should list any planet where the star's mass, size, or temperature reading might be mixed with another star's light. Show me the planet's name, its star, and then three flags telling me 'yes' or 'no' for whether the mass is blended, the radius is blended, and the temp is blended.", "normal_query": "Create a view called `v_uncertain_measurements`. This view should list all planets that have a blended measurement for their stellar mass, radius, or temperature. Include the planet's letter, host star name, and boolean flags indicating which of the three measurements (mass, radius, temperature) are blended.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "planets_data_M_9", "selected_database": "planets_data", "query": "Let's make a table for studying tightly-packed solar systems, call it `system_period_ratios`. It should have the star's id, the inner planet's orbital period, the outer planet's orbital period, and the ratio between them. Go ahead and fill it up with this info for every neighboring pair of planets that have known periods, in any system that has more than one planet.", "normal_query": "Create a table `system_period_ratios` to analyze compact systems. 
It should store `hostlink`, the `inner_planet_period`, `outer_planet_period`, and the calculated orbital period ratio. Populate this table for all adjacent planet pairs with known orbital periods in systems where the star has more than one planet.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "planets_data_M_10", "selected_database": "planets_data", "query": "I need a pre-calculated summary of how we're finding planets. Please create a materialized view called `v_discovery_stats`. It should list every discovery technique after you've cleaned them up by trimming spaces and ignoring case. For each one, show how many planets we found with it, the average distance to those planets in light-years (calculated only from planets with known distances and shown with two decimal points), and the very last date any record for that method was updated.", "normal_query": "Create a materialized view `v_discovery_stats`. The view should list each distinct discovery method, after cleaning the text by trimming whitespace and converting to lowercase. It should also provide the total count of planets discovered with that method, the average stellar distance in light-years for those discoveries (only for planets with a known distance) rounded to two decimal places, and the most recent update timestamp associated with any planet of that discovery method.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "museum_artifact_1", "selected_database": "museum_artifact", "query": "I'm worried about our items that are most at risk. Can you pull a list of artifacts that are both considered high-value (let's say historical significance over 8 and cultural score over 20) and are also officially listed with a 'High' or 'Medium' risk level? For each one, show its ID, title, its actual risk level, and both of those scores.", "normal_query": "Generate a report of artifacts that are both high-risk (level 'High' or 'Medium') and high-value (historical significance > 8 and cultural score > 20). The report should include the artifact's ID, title, risk level, historical significance, and cultural score.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "museum_artifact_2", "selected_database": "museum_artifact", "query": "I'm concerned about environmental damage. Can we find the artifacts most at risk by calculating a score based on their average sensitivity to the environment? Only show me the ones where the score is over 4. For those, I need the artifact's ID, name, its exact score, and a list of all specific sensitivities rated 'High'. Please sort the list to show the highest-risk items first.", "normal_query": "Identify artifacts with dangerously high environmental risks by calculating their Environmental Risk Factor (ERF). The report should include the artifact's ID, its name, the calculated ERF score, and a JSON summary of all its 'High' sensitivity ratings. 
Only include artifacts where the ERF score exceeds 4, and sort the results from highest to lowest risk.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "museum_artifact_3", "selected_database": "museum_artifact", "query": "To help us plan conservation, I need to focus on artifacts from the 'Ming' and 'Qing' dynasties. Please calculate a priority score for each one, and then assign a priority level. I'd like a report showing the artifact's ID, title, dynasty, the calculated score, and the final priority level. Please sort it to show the highest scores at the top.", "normal_query": "Generate a conservation priority report for artifacts from the 'Ming' and 'Qing' dynasties. For each artifact, calculate its Conservation Priority Index (CPI) and determine its Priority Level. The report should include the Artifact ID, Title, Dynasty, CPI Score, and Priority Level, sorted by CPI Score in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "museum_artifact_4", "selected_database": "museum_artifact", "query": "I need a report on how well we're funding artifact conservation across different dynasties. For each artifact, show its dynasty, its priority score, the specific budget we've assigned, and whether that funding is 'Sufficient' or 'Insufficient'.", "normal_query": "For each artifact with a known dynasty, create a report showing its dynasty, its CPI score, its specific budget allocation, and its generalized budget status.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "museum_artifact_5", "selected_database": "museum_artifact", "query": "Can you whip up a fast list for me? I want to see if any artifacts are deteriorating too quickly. For each one, give me the ID, name, current temp and humidity, a count of its high sensitivities, and a 'Yes' or 'No' on whether it's in this danger zone. Don't skip any artifacts, even if data's missing. Sort it all by artifact ID.", "normal_query": "Check if artifacts are in an Accelerated Deterioration Scenario. The report should show each artifact's ID, its name, the current temperature and humidity in its display case, how many high sensitivities it has, and a 'Yes' or 'No' for the scenario. Include all artifacts and sort by artifact ID.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "museum_artifact_6", "selected_database": "museum_artifact", "query": "Can you whip up a fast rundown for me? I need all the showcase IDs that have unstable environmental conditions. Get every unique ID and line them up in alphabetical order.", "normal_query": "Generate a list of all unique showcase IDs that are experiencing an Environmental Instability Event. 
The list should be sorted alphabetically by ID.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": true, "order": true}} +{"instance_id": "museum_artifact_7", "selected_database": "museum_artifact", "query": "Can you sniff out all the showcase IDs that could be heading toward a problem? We'll say a showcase is a problem if its environmental stability score drops below 5, OR if it has at least three major maintenance issues like a poor seal, overdue status, or a filter/silica needing replacement. Show me just the showcase IDs for these problem cases, lined up alphabetically.", "normal_query": "Identify all showcase IDs that are at risk of failure. A showcase is considered 'At Risk' if its calculated environmental stability score is less than 5, OR if it has at least three major maintenance issues ('Poor' seal state, 'Overdue' maintenance status, 'Replace Now' filter, or 'Replace Now' silica). The report should list the IDs of the at-risk showcases, sorted alphabetically.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": true, "order": true}} +{"instance_id": "museum_artifact_8", "selected_database": "museum_artifact", "query": "Can you pull together a rundown of artifacts with 'Medium' or 'High' humidity sensitivity? I need the registry number, name, material type, and that sensitivity level for each. Plus, check if they're 'Over Exposure' or 'Within Limits' using a more cautious humidity threshold, and line them up by registry number.", "normal_query": "List all artifacts with 'Medium' or 'High' Humidity Sensitivity. For each, show the registry number, title, material type, and sensitivity level. Also, determine if they are 'Over Exposure' or 'Within Limits' based on a secondary, more cautious humidity threshold, and sort by registry number.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "museum_artifact_9", "selected_database": "museum_artifact", "query": "We need a priority list based on comprehensive risk. First, calculate a total environmental threat score for all artifacts. I'm only interested in those officially at the second-highest risk level. From that group, find the top 10 with the highest threat scores. Give me their registration IDs and their scores, sorted from highest to lowest.", "normal_query": "Identify the top 10 artifacts in greatest danger by calculating their Total Environmental Threat Level (TETL). The report should only consider artifacts at the second-highest risk level and list their registration IDs and TETL scores, sorted from highest to lowest.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "museum_artifact_10", "selected_database": "museum_artifact", "query": "Let's figure out our next rotation plan. For all items currently in a resting state, I need to see their ID, name, material, and how many months it's been on display. Then, calculate its maximum safe display time. Using our standard method, compute its final rotation priority score. 
To make things clear, add a final column that flags it for 'Immediate Rotation' if needed, otherwise just label it as 'Monitor'.", "normal_query": "Generate a rotation schedule based on the Exhibition Rotation Priority Score (ERPS). The report should only include artifacts in 'Resting' state and show their ID, name, material, current display duration, and Display Safety Duration (DSD) limit. It must also include the ERPS value and a final recommendation ('Immediate Rotation' or 'Monitor').", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "museum_artifact_11", "selected_database": "museum_artifact", "query": "I'm trying to figure out our data storage plan. Can you analyze the environmental readings for 2023, 2024, and 2025? First, for each year, find the average temperature. Then, for each of those years, count how many days the temperature was off by a specific amount—a deviation that's more than zero but no more than 4 degrees from that year's average. Give me a table with the year, its average temperature, and the total count of these anomaly days.", "normal_query": "To assist with data partitioning strategy, generate a report showing the annual environmental anomaly count for the years 2023-2025. An anomaly is defined as a daily reading where the temperature deviation from that year's annual average is greater than 0°C and less than or equal to 4°C. The report should show the year, the calculated average temperature for that year, and the total count of anomaly days.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": true, "order": true}} +{"instance_id": "museum_artifact_12", "selected_database": "museum_artifact", "query": "I'm trying to find our most valuable artifacts (let's define that as a cultural score over 80) that might be at risk due to a weird environment. Can you find any of them that are in a showcase where the average temperature is more than 1 degree different from the average temperature of all other showcases in that same hall? For any you find, list the artifact's ID, its title, material, its showcase's temperature, what the hall's average temp is, and the exact deviation.", "normal_query": "Identify artifacts that are both a 'High-Value Artifact' (cultural score > 80) and an 'Environmental Outlier'. An outlier is defined as an artifact whose showcase has an average temperature that deviates by more than 1°C from the average temperature of all showcases in its hall. For these artifacts, show their ID, title, material, their showcase's temperature, the hall's average temperature, and the temperature deviation.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "museum_artifact_13", "selected_database": "museum_artifact", "query": "Figure out the conservation priority score for every artifact, showing their ID, name, and score, ordered from highest to lowest priority.", "normal_query": "Calculate the Conservation Priority Index (CPI) for each artifact. 
The report should include the artifact ID, title, and the CPI score, sorted in descending order by CPI.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "order": true, "distinct": false}} +{"instance_id": "museum_artifact_14", "selected_database": "museum_artifact", "query": "Find all really valuable artifacts, group them by their historical period, and show the period, how many valuable artifacts there are, and their average cultural score, ordered from highest to lowest count.", "normal_query": "Identify all High-Value Artifacts and group them by dynasty. The report should show the dynasty, a count of high-value artifacts, and their average cultural score, sorted by the count in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "order": true, "distinct": false}} +{"instance_id": "museum_artifact_15", "selected_database": "museum_artifact", "query": "I want to find artifacts that we've put in the wrong place. Can you make a list of any artifact that has a 'Medium' sensitivity to something (like light, temp, or humidity) and is in a showcase that we've classified as having a 'Medium' level of that same thing? Show me the artifact's name, material, the showcase it's in, which specific sensitivity is the problem, what the sensitivity level is, and what the showcase's environment profile is.", "normal_query": "Identify environmental mismatches by finding artifacts whose specific environmental sensitivities are incompatible with the typical environment of their showcase. A 'mismatch' occurs if an artifact with 'Medium' sensitivity to an environmental factor (e.g., humidity) is in a showcase classified with a 'Medium' level of that same factor. The report should list the artifact's title, its material, the showcase ID, the mismatched sensitivity type, the sensitivity level, and the environment profile.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "museum_artifact_16", "selected_database": "museum_artifact", "query": "Group artifacts by how vulnerable their organic material is and their environmental risk score. Show me what they're made of, their vulnerability status, their average environmental risk, and a count for each group, ordered by the highest average risk.", "normal_query": "Cluster artifacts by their Organic Material Vulnerability and Environmental Risk Factor (ERF). The report should show material type, vulnerability status, average ERF, and artifact count, sorted by average ERF descending.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "order": true, "distinct": false}} +{"instance_id": "museum_artifact_17", "selected_database": "museum_artifact", "query": "I'm looking for ticking time bombs in our collection. Can you find all the artifacts that are currently listed as 'Good' or 'Excellent', but are at a high risk of light damage? Let's say 'high risk' means they have 'High' light sensitivity and they've already soaked up more than 70,000 lux-hours of light. 
For each one you find, show me its name, dynasty, its exact light sensitivity, the current lux level, and its total light exposure. Please list the ones with the most total exposure first.", "normal_query": "Generate a report of artifacts with a 'Good' or 'Excellent' conservation status that are at high risk of light damage. A high-risk artifact is defined as having 'High' light sensitivity AND has a cumulative light exposure exceeding 70,000 lxh. The report should include the artifact's title, dynasty, light sensitivity level, current lux, and cumulative exposure (visLxh), sorted by cumulative exposure in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "museum_artifact_18", "selected_database": "museum_artifact", "query": "What's the single highest risk score for any artifact, considering both its conservation priority and environmental sensitivity?", "normal_query": "What is the maximum Artifact Vulnerability Score (AVS) found among all artifacts in the collection?", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "museum_artifact_19", "selected_database": "museum_artifact", "query": "How well do our showcases generally suit the artifacts from the Ming dynasty? I'm looking for a single number that tells me the average compatibility.", "normal_query": "Calculate the average Artifact Exhibition Compatibility (AEC) for all artifacts from the 'Ming' dynasty.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "museum_artifact_20", "selected_database": "museum_artifact", "query": "Can you give me a total count of display cases that are at risk of failing? I'm talking about any case with a very unstable environment or at least three major maintenance problems.", "normal_query": "Calculate the total number of showcases that are currently considered a Showcase Failure Risk.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 0, "distinct": true, "order": false}} +{"instance_id": "museum_artifact_M_1", "selected_database": "museum_artifact", "query": "I need to add a maintenance alert. 
Find every artifact that has a condition report on file, and for each one, append a timestamped alert saying 'Alert (Conservation Emergency): Immediate action recommended' to its maintenance log.", "normal_query": "For all artifacts that have an existing condition assessment record, append a timestamped alert with the text 'Alert (Conservation Emergency): Immediate action recommended' to their maintenance log.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": 0, "order": false, "distinct": false}} +{"instance_id": "museum_artifact_M_2", "selected_database": "museum_artifact", "query": "Write a function called 'calculate_cpi' that figures out how important an artifact is for conservation, using its historical, research, and cultural value, plus its current condition, and gives back a final score.", "normal_query": "Create a function named 'calculate_cpi' to compute a conservation priority score. This score should be based on historical significance, research value, cultural importance, and conservation condition, returning a numeric value.", "preprocess_sql": [], "clean_up_sqls": ["DROP FUNCTION IF EXISTS calculate_cpi(SMALLINT, INT, SMALLINT, VARCHAR);"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "order": false, "distinct": false}} +{"instance_id": "museum_artifact_M_3", "selected_database": "museum_artifact", "query": "Put a rule on the artifact ratings table to make sure the historical importance score stays between 1 and 10.", "normal_query": "Add a check constraint named 'hist_sign_rating_check' to the 'ArtifactRatings' table. This constraint should ensure the historical significance score is between 1 and 10, inclusive.", "preprocess_sql": ["UPDATE \"ArtifactRatings\" SET \"HIST_sign\" = 11 WHERE \"ART_link\" = 'ART54317';"], "clean_up_sqls": ["ALTER TABLE \"ArtifactRatings\" DROP CONSTRAINT IF EXISTS hist_sign_rating_check;", "UPDATE \"ArtifactRatings\" SET \"HIST_sign\" = 7 WHERE \"ART_link\" = 'ART54317';"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": 0, "order": false, "distinct": false}} +{"instance_id": "museum_artifact_M_4", "selected_database": "museum_artifact", "query": "We need to decide which artifacts to put into storage next. Figure out the five most urgent candidates for rotation based on a priority score, but with a twist: if an artifact is made of organic material like textile, wood, or paper, increase its final score by multiplying it by 1.2 because it's more fragile. I just need the artifact titles and their final adjusted scores, with the most urgent one (the one with the lowest score) first.", "normal_query": "Create a database view named 'V_Top5_Rotation_Priority' that provides a priority list of the top 5 artifacts for exhibition rotation. The standard Exhibition Rotation Priority Score (ERPS) calculation needs to be adjusted: for artifacts made of 'Organic' materials ('Textile', 'Wood', 'Paper'), their final ERPS score should be multiplied by 1.2 (a 20% vulnerability factor). 
The view should include the artifact's title and its final, adjusted ERPS score, sorted by the adjusted ERPS in ascending order (lowest score is highest priority).", "preprocess_sql": [], "clean_up_sqls": ["DROP VIEW IF EXISTS V_Top5_Rotation_Priority;", "DROP VIEW IF EXISTS V_Rotation_Priority_Analysis;"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "museum_artifact_M_5", "selected_database": "museum_artifact", "query": "Can you find all the artifacts made of materials like textile or paper that are both extremely valuable and highly sensitive to their environment? For each one, show its name and its exact sensitivity levels for light, temperature, and humidity.", "normal_query": "Create a database view named 'V_Precious_Vulnerable_Organic_Items' to identify all artifacts that are both a High-Value Artifact and meet the Organic Material Vulnerability criteria. The view should display each artifact's title and its specific sensitivity ratings for light, temperature, and humidity.", "preprocess_sql": [], "clean_up_sqls": ["DROP VIEW IF EXISTS V_Precious_Vulnerable_Organic_Items;", "DROP VIEW IF EXISTS V_High_Light_Risk_Artifact_Status;"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": 0, "distinct": false, "order": false}} +{"instance_id": "museum_artifact_M_6", "selected_database": "museum_artifact", "query": "I need to know which of our halls are a security nightmare. Find any hall that has a high visitor impact score (say, over 15) but at the same time has a low security score (less than 8). For each of those problem halls, show me the hall ID and both of those scores so I can see what's going on.", "normal_query": "Create a view named 'V_High_Threat_Halls' to identify 'High Threat' exhibition halls, defined as those with a Visitor Impact Risk (VIR) score greater than 15 and a calculated Security Score below 8. The view should show the hall's ID, its VIR score, and its Security Score.", "preprocess_sql": [], "clean_up_sqls": ["DROP VIEW IF EXISTS V_High_Threat_Halls;", "DROP VIEW IF EXISTS V_Critical_Artifacts_In_High_Threat_Halls;"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "museum_artifact_M_7", "selected_database": "museum_artifact", "query": "Out of all our textiles, how many are in a 'Poor' state of environmental compliance? Assume the ideal is 20 degrees Celsius and 50 percent humidity to make the call.", "normal_query": "Create a function named 'get_poor_compliance_count_by_material' that accepts a material type (e.g., 'textile') and calculates the number of artifacts of that material with a 'Poor' Compliance Level. 
The calculation should be based on the Environmental Compliance Index (ECI) with an ideal temperature of 20°C and ideal humidity of 50%.", "preprocess_sql": [], "clean_up_sqls": ["DROP FUNCTION IF EXISTS get_poor_compliance_count_by_material(TEXT);", "DROP VIEW IF EXISTS V_Compliance_Report_By_Material;"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": 0, "distinct": false, "order": false}} +{"instance_id": "museum_artifact_M_8", "selected_database": "museum_artifact", "query": "Can you tell me the exact number of our important dynasty artifacts (from Ming, Han, or Tang) that are aging too quickly and are considered at risk?", "normal_query": "Create a function named 'get_dynasty_artifacts_at_risk_count' to calculate the total number of artifacts classified as a Dynasty Artifact at Risk.", "preprocess_sql": [], "clean_up_sqls": ["DROP FUNCTION IF EXISTS get_dynasty_artifacts_at_risk_count();", "DROP VIEW IF EXISTS V_Dynasty_Risk_Report;"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": 0, "distinct": false, "order": false}} +{"instance_id": "museum_artifact_M_9", "selected_database": "museum_artifact", "query": "I need a list of our most neglected showcases. Find every single one that has all three of these problems at once: the filter needs replacing now, the silica needs replacing now, and its general maintenance is overdue. For that list of problem showcases, tell me their ID, which hall they're in, and a count of how many high-sensitivity artifacts are stuck inside them.", "normal_query": "Create a view named 'V_Chronic_Maintenance_Backlog_Showcases' to identify showcases with chronic maintenance backlogs. A 'chronic backlog' is defined as a showcase where the filter is overdue ('Replace Now'), the silica is exhausted ('Replace Now'), and the general maintenance status is 'Overdue'. For these showcases, the view should list their ID, hall ID, and the number of high-sensitivity artifacts they contain.", "preprocess_sql": [], "clean_up_sqls": ["DROP VIEW IF EXISTS V_Chronic_Maintenance_Backlog_Showcases;", "DROP FUNCTION IF EXISTS get_worst_backlog_showcase_env();"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "museum_artifact_M_10", "selected_database": "museum_artifact", "query": "We've got a $25,000 budget to catch up on overdue cleanings. I need a priority list. Figure out the risk score for every late artifact, and also estimate a treatment cost based on its material and complexity. Then, tell me which artifacts we can afford to fix, starting with the highest risk ones, without going over our budget. List the artifact name, its risk score, and its estimated cost.", "normal_query": "Create a materialized view named 'MV_Prioritized_Maintenance_Plan' to generate a prioritized maintenance plan for overdue cleanings within a simulated budget of $25,000. Calculate the 'cost of treatment' for each overdue artifact based on its material and treatment complexity. 
The view should list the artifacts that can be treated within the budget, ordered by their Conservation Backlog Risk (CBR), and show their title, CBR score, and calculated cost.", "preprocess_sql": [], "clean_up_sqls": ["DROP MATERIALIZED VIEW IF EXISTS MV_Prioritized_Maintenance_Plan;", "DROP FUNCTION IF EXISTS get_remaining_maintenance_budget();"], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "fake_account_1", "selected_database": "fake_account", "query": "Which accounts are growing their networks the fastest? Give me that blended growth score rounded to three decimal places and sort from fastest to slowest.", "normal_query": "Compute the Network Growth Velocity (NGV) for every account using its follower and following growth rates, rounded to three decimal places, and list accounts with the highest NGV first.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 3, "distinct": false, "order": true}} +{"instance_id": "fake_account_2", "selected_database": "fake_account", "query": "Show me the top 10 accounts by their logins-per-day score. For each account, take the biggest lifecycle login total you've ever seen for it, divide by its age in days (skip age missing or ≤ 0), round the score to 3 decimals, and show just the account ID plus that score.", "normal_query": "Compute each account's Account Activity Frequency (AAF) per the domain definition. For each account, use the highest recorded lifecycle session total across all session snapshots, exclude anyone with age in days missing or ≤ 0, then return the ten accounts with the greatest AAF in descending order, showing the account identifier and AAF rounded to three decimals.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 3, "distinct": false, "order": true}} +{"instance_id": "fake_account_3", "selected_database": "fake_account", "query": "List each account together with their attempts to evade detection. 
Calculate that sneakiness score, rounded to 3 decimal places, and rank them from most to least sneaky.", "normal_query": "List each account along with its Technical Evasion Index (TEI), rounded to three decimal places, and sort them descending.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 3, "distinct": false, "order": true}} +{"instance_id": "fake_account_4", "selected_database": "fake_account", "query": "Bucket every user into four sneakiness tiers based on how much they rely on tricks like VPNs, and list each account with the tier number.", "normal_query": "Divide all accounts into quartiles based on their TEI values and return each account id with its TEI quartile.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "fake_account_5", "selected_database": "fake_account", "query": "How many users land in each risk level based on the number of an account's attempts to evade detection?", "normal_query": "Count how many accounts fall into each TEI Risk Category (low, moderate, high, very high).", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "fake_account_6", "selected_database": "fake_account", "query": "List the top twenty accounts by the highest overall exposure metric based on multiple signals. Only include users whose required inputs are all present; sort high to low; show the user ID and the metric rounded to three decimals.", "normal_query": "Show the twenty accounts with the highest Security Risk Score (SRS). Compute the score only for accounts where all three inputs (risk value, trust value, impact value) are present; sort by the unrounded score descending; display the account identifier and the score rounded to three decimals.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 3, "distinct": false, "order": true}} +{"instance_id": "fake_account_7", "selected_database": "fake_account", "query": "Show every user that needs immediate attention: they must have the top severity label and an ongoing detection. Rank them by their overall exposure metric (rounded to 3 decimals) from high to low, include the severity label, and only show those with a score above 0.7.", "normal_query": "List all High-Risk Accounts: compute SRS per its definition, keep only those with SRS > 0.7, whose overall severity is Critical, and that have at least one ongoing detection. 
Return the account ID, the score rounded to three decimals, and the severity, sorted by the unrounded score descending.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 3, "distinct": false, "order": true}} +{"instance_id": "fake_account_8", "selected_database": "fake_account", "query": "For each account, combine multiple bot-detection metrics into a single score, rounded to three decimal places, and display the highest value.", "normal_query": "Calculate the Bot Behavior Index (BBI) for each account, rounded to three decimal places, and show the top one.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 3, "distinct": false, "order": true}} +{"instance_id": "fake_account_9", "selected_database": "fake_account", "query": "Identify all accounts exhibiting systematic VPN usage and report the total count of distinct countries from which they have authenticated.", "normal_query": "Identify all VPN Abuser accounts and show how many different login countries they have used.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "fake_account_10", "selected_database": "fake_account", "query": "Estimate the average authenticity level for content on each platform, rounded to three decimal places and sorted in descending order.", "normal_query": "Compute the average Content Authenticity Score (CAS) for each platform, rounded to three decimal places and sorted from highest to lowest.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 3, "distinct": false, "order": true}} +{"instance_id": "fake_account_M_1", "selected_database": "fake_account", "query": "Update the StateFlag to 'Suspended' for every high-risk account, according to the High-Risk Account rule, excluding those already in a suspended state.", "normal_query": "Suspend every High-Risk Account by setting its StateFlag to \"Suspended\", according to the High-Risk Account rule. Make sure accounts already suspended are not updated twice.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "fake_account_11", "selected_database": "fake_account", "query": "Show the top 10 accounts that have the most content manipulation patterns. 
Evaluate their scores, rounded to 3 decimal places, and sort in descending order.", "normal_query": "Retrieve the ten accounts with the highest Content Manipulation Score (CMS), sorted in a descending order and rounded to three decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 3, "distinct": false, "order": true}} +{"instance_id": "fake_account_12", "selected_database": "fake_account", "query": "Find all accounts pumping out loads of near-duplicate posts, list their posts-per-day, sorted from most to least.", "normal_query": "List all accounts classified as Content Farms along with their daily post frequency, ordered by frequency from highest to lowest.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "fake_account_M_2", "selected_database": "fake_account", "query": "Create active, low-priority watch items for accounts that heavily rely on traffic-masking tools and have logged in from at least three different countries, skipping anyone who already has an active watch item. Tag the new items as coming from an automated scan and timestamp them with the current time.", "normal_query": "Insert a low-priority monitoring entry for every account that qualifies under the VPN-abuse rule (TEI > 0.8 and login countries ≥ 3), skipping any account that already has an active monitoring entry. Use a random unique ID, the current timestamp, mark the source as Algorithm, and set the entry state to Active.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "fake_account_13", "selected_database": "fake_account", "query": "For each account, build one overall risk score by combining an automation-likeness signal, a coordination signal, and the group size; round to three decimals, list those above 0.8, and sort high to low.", "normal_query": "Compute Coordinated Bot Risk (CBR) for each account using the defined BBI and CAS formulas; round to three decimals, return those with CBR greater than 0.8, and sort in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 3, "distinct": false, "order": true}} +{"instance_id": "fake_account_14", "selected_database": "fake_account", "query": "What's the overall average trust score of everyone's connections, rounded to three decimals?", "normal_query": "Determine the overall average Network Trust Score (NTS) across all accounts, rounded to three decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 3, "distinct": false, "order": false}} +{"instance_id": "fake_account_15", "selected_database": "fake_account", "query": "Generate a report of every cluster whose role is “SocialGroup”. For each cluster, show its identifier, the number of unique member accounts, the cluster's maximum coordination score. 
Quantify each account's position and influence in the interaction network if the data are available, otherwise NULL, and identify whether it is a sophisticated influence campaign", "normal_query": "Generate a report of every cluster whose role is “SocialGroup”. For each cluster, show its identifier, the number of unique member accounts, the average Network Influence Centrality (NIC) of those members if NIC data are available, otherwise NULL, the cluster's maximum coordination score, and a “Yes/No” flag that is “Yes” when the cluster satisfies the Coordinated Influence Operation definition.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "fake_account_16", "selected_database": "fake_account", "query": "Estimate each account's authentication-related risk, rounded to 3 decimal places. List accounts with a score above 0.7, sorted from highest to lowest.", "normal_query": "Identify accounts with an Authentication Risk Score (ARS) greater than 0.7, round the score to 3 decimal places and order from highest to lowest.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 3, "distinct": false, "order": true}} +{"instance_id": "fake_account_17", "selected_database": "fake_account", "query": "For each account, show the most recent system estimate of automation likelihood using the latest detection event.", "normal_query": "Return each account's Latest Bot Likelihood Score (LBS).", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "fake_account_18", "selected_database": "fake_account", "query": "For each account, measure how much its hourly activity over a day diverges from its usual behavior, round to 3 decimals and sort descending. Show those above 0.7.", "normal_query": "Identify accounts whose Temporal Pattern Deviation Score (TPDS) exceeds 0.7, rounded to 3 decimal places and sorted in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 3, "distinct": false, "order": true}} +{"instance_id": "fake_account_19", "selected_database": "fake_account", "query": "List all accounts that exert strong influence while posting at a high daily rate, acting as key amplifiers in coordinated networks. 
List their influence score and daily post frequency, sorted by influence score in a descending order.", "normal_query": "List all High-Impact Amplifier accounts together with their influence score and daily post frequency, sorted by influence score in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "fake_account_20", "selected_database": "fake_account", "query": "Measure account-reputation stability, round to 3 decimals, and show whoever has the highest score.", "normal_query": "Show the account with the highest Reputation Volatility Index (RVI), rounded to 3 decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 3, "distinct": false, "order": true}} +{"instance_id": "fake_account_21", "selected_database": "fake_account", "query": "Retrieve accounts with elevated engagement levels based on the number of sessions or total posting frequency. Show their account ID and the daily post number.", "normal_query": "Retrieve accounts classified as High-Activity Accounts, showing their account ID and the daily post number.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "fake_account_22", "selected_database": "fake_account", "query": "Group results by platform type and show the average of the 0-1 score indicating how real the interactions feel, keep three decimals, and list them from high to low.", "normal_query": "Compute the average engagement authenticity score for each platform type, rounded to 3 decimal places and sort in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 3, "distinct": false, "order": true}} +{"instance_id": "fake_account_23", "selected_database": "fake_account", "query": "How many accounts are currently inactive and also classified as automated?", "normal_query": "Count the number of accounts that are both in the inactive status and belong to the automated category.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "fake_account_M_3", "selected_database": "fake_account", "query": "If an account meets our trust checks, shows no recent detections for 180 days, and has been quiet for at least 90 days based on the latest activity proxy, mark its monitoring priority as \"Review_Inactive_Trusted\".", "normal_query": "For accounts that pass the trust threshold, have had no detections in the last 180 days, and whose most recent activity proxy is older than 90 days, set their monitoring priority to \"Review_Inactive_Trusted\".", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "fake_account_24", "selected_database": "fake_account", "query": "By platform kind, average a score built as: how 
manipulated the content looks, times how urgently it should be reviewed, times how central it is in the network.", "normal_query": "For each platform kind, compute the average Content Impact Score, where the score equals manipulation intensity multiplied by moderation priority and by network influence centrality.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 3, "distinct": false, "order": true}} +{"instance_id": "fake_account_M_4", "selected_database": "fake_account", "query": "Make a materialized view that shows all accounts with a credibility value above 0.9.", "normal_query": "Create a materialized view listing all accounts whose built-in credibility value is greater than 0.9.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "fake_account_M_5", "selected_database": "fake_account", "query": "Analyze the table that keeps monitoring snapshots so the database updates its size and stats.", "normal_query": "Run ANALYZE on the table that stores monitoring snapshots to refresh its statistics.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "fake_account_M_6", "selected_database": "fake_account", "query": "Write a helper called pct_to_numeric(text) that turns strings like '85%' into decimals like 0.85 and returns a numeric result.", "normal_query": "Create a utility function named pct_to_numeric(text) that converts a percentage string (e.g., '85%') into a numeric value (e.g., 0.85), returning a numeric type.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cold_chain_pharma_compliance_1", "selected_database": "cold_chain_pharma_compliance", "query": "Can you check our cold chain data and tell me how long temperature problems typically last on our riskiest shipping routes? I'm looking for the average time in minutes that temperatures went outside acceptable ranges, but only for the shipments marked as high risk. Just round to two decimal places for me.", "normal_query": "I want to find the average Temperature Excursion Duration for shipments on High Risk routes only. Please show me the route type label and the average excursion duration in minutes, rounded to two decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "cold_chain_pharma_compliance_2", "selected_database": "cold_chain_pharma_compliance", "query": "Would you check what percentage of our shipments actually stayed in the right temperature range the whole time? I need a simple number showing how many shipments had zero temperature problems compared to our total shipments. Just round it to two decimal places so it's easy to report.", "normal_query": "What is our overall Cold Chain Compliance Rate for all shipments? 
Please calculate the percentage of compliant shipments out of the total shipments monitored, and round the result to two decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "cold_chain_pharma_compliance_3", "selected_database": "cold_chain_pharma_compliance", "query": "Will you help me figure out how our shipments are performing in terms of timing? I want to see how many shipments are arriving early, on-time, or running late. Can you count up our shipments by these delivery timing categories? Just give me each category and its count, with the biggest numbers at the top.", "normal_query": "I plan to analyze our cold chain delivery performance using Delivery Performance Classification. Show me the performance category and the number of shipments in each category, sorted by shipment count in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "cold_chain_pharma_compliance_4", "selected_database": "cold_chain_pharma_compliance", "query": "I want to figure out if our shipping performance is better when our GPS tracking is working actively versus when it's in the intermittent location tracking state. Can you compare the average on-time performance between these two categories? Just give me the two tracking categories and their average performance scores rounded to two decimal places.", "normal_query": "I am working on comparing the On-Time Delivery Performance between shipments with different Location Tracking States. Specifically, analyze the average OTDP for shipments that have either 'Active' or 'Intermittent' tracking states. Show me each tracking state and its corresponding average OTDP value rounded to two decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "cold_chain_pharma_compliance_5", "selected_database": "cold_chain_pharma_compliance", "query": "I am trying to figure out if our quality agreements are working properly. Can you check how many shipments failed compliance checks for each type of agreement we have? Just show me each agreement status and how many shipments were flagged as non-compliant under that status.", "normal_query": "I hope to analyze how different Quality Agreement Status types relate to non-compliance issues. Could you count the number of shipments that were classified as 'Non-compliant' for each quality agreement status category? Please show each agreement status with its corresponding count of non-compliant shipments.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cold_chain_pharma_compliance_6", "selected_database": "cold_chain_pharma_compliance", "query": "Our quality team needs to flag any high-risk shipments for immediate review. Could you pull a list of all shipments falling into red quality risk zone? 
Just show me the shipment ID, what percentage of time it stayed in the acceptable range, and how many total minutes it was out of range. Round the percentage value to 2 decimal places.", "normal_query": "I am going to identify shipments in the Red Zone of our Quality Risk Zones for further investigation. For each shipment in the Red Zone, show me the shipment ID, calculated TIRP percentage (rounded to 2 decimal places), and the total excursion duration in minutes.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "cold_chain_pharma_compliance_7", "selected_database": "cold_chain_pharma_compliance", "query": "Can you figure out what the average temperature impact was for all our shipments where the temperature went outside the acceptable range? We need that special calculation that accounts for how temperature fluctuations affect products over time. Just give me one number that summarizes this across all problematic shipments.", "normal_query": "I would like to calculate the Mean Kinetic Temperature for all shipments that have experienced temperature excursions. Please provide me with the average MKT value across these shipments.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "cold_chain_pharma_compliance_8", "selected_database": "cold_chain_pharma_compliance", "query": "Please help me find shipment routes where our risk labels don't match what's actually happening. I want to see where we've mislabeled routes compared to what the temperature excursion data shows. Make sure you check all our routes, even if some might be missing monitoring data. Just show me the top 3 most problematic routes according to average excursion count.", "normal_query": "We need to identify where our shipping route risk classifications don't match reality. Using the Route Risk Classification and High-Risk Shipping Origin-Destination Pairs knowledge, compare our documented risk notes against calculated risk levels, even those without environmental monitoring data. Show me only routes where the documented risk level differs from the calculated risk level, ordered by average excursion count from highest to lowest. Limit results to the top 3 discrepancies.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 0, "distinct": false, "order": true}} +{"instance_id": "cold_chain_pharma_compliance_9", "selected_database": "cold_chain_pharma_compliance", "query": "Just give me a rough idea of how our shipments did in terms of temperature control. You can group them into risk categories like green/yellow/red using time-in-range and total out-of-range time. It’s fine to use a simplified method—doesn’t have to be perfect—as long as we get a general sense.", "normal_query": "Please provide an approximate analysis of cold chain shipment quality by grouping them into Quality Risk Zones. Use Time In Range Percentage (TIRP) and total temperature excursion duration as approximate indicators for classification. A proxy-based zoning approach is acceptable where exact excursion details are unavailable. 
Return the number and percentage of shipments in each zone (Green, Yellow, Red), sorted from lowest to highest risk.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "cold_chain_pharma_compliance_10", "selected_database": "cold_chain_pharma_compliance", "query": "Can you help me figure out how strong our supply chain is overall? Let’s turn the inputs into scores like this: route risk gets 8 if it’s low, 5 for medium, 2 for high; carrier certification gets 10 for both types, 8 if it’s just 'gdp' or 'ceiv pharma', 4 otherwise; vehicles score 9 if validated, 7 if qualified, and 5 otherwise; and compliance gets 9 for full, 6 for partial, 3 otherwise. Use the 0.4/0.3/0.2/0.1 weighting and round the final number to two decimal places.", "normal_query": "I want to calculate the overall Supply Chain Resilience Score using a weighted average of several proxy indicators. For this, please map: 'low' route risk to 8, 'medium' to 5, and 'high' to 2; for carrier certification, use 10 for 'both', 8 for 'gdp' or 'ceiv pharma', and 4 otherwise; for vehicle qualification, use 9 for 'validated', 7 for 'qualified', and 5 otherwise; and for GDP compliance, use 9 for 'full', 6 for 'partial', and 3 otherwise. Then apply weights: 0.4 for route risk, 0.3 for carrier certification, 0.2 for vehicle, and 0.1 for compliance. Round to two decimals.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "cold_chain_pharma_compliance_11", "selected_database": "cold_chain_pharma_compliance", "query": "I wonder that, for each type of GDP certification, how many different shipping companies have that certification, and out of those, how many actually have at least one fully validated vehicle. Please show the certification type, the total number of unique certified companies, and how many of those companies have validated vehicles. Put the ones with the most companies using validated vehicles at the top.", "normal_query": "For each GDP Certification Status, I want to know how many distinct carriers hold that certification, and among them, how many distinct carriers also have at least one vehicle with Validated Cold Chain Vehicle Qualification Status. Please display the GDP certification level, the total number of distinct GDP-certified carriers, and the number of those distinct carriers with validated vehicles. Sort the results by the number of carriers with validated vehicles in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": true, "conditions": {"decimal": 0, "distinct": true, "order": true}} +{"instance_id": "cold_chain_pharma_compliance_14", "selected_database": "cold_chain_pharma_compliance", "query": "I need to know, on average, how much of their allowed stability budget temperature-sensitive shipments are using up. Only count shipments where there actually was a temperature excursion. Give me the average percentage used, rounded to two decimal places.", "normal_query": "Calculate the average Stability Budget Consumption Rate for all shipments of temperature-sensitive products. 
Return the average SBCR as a percentage, rounded to two decimal places, including only shipments where a temperature excursion actually occurred.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "cold_chain_pharma_compliance_15", "selected_database": "cold_chain_pharma_compliance", "query": "Out of all the biologics product batches, what percent need to be kept at ultra-low temperatures? Round it to two decimal places.", "normal_query": "Please calculate the percentage of biologics products that require ultra-low temperature storage. The result should be rounded to two decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "cold_chain_pharma_compliance_16", "selected_database": "cold_chain_pharma_compliance", "query": "I want to know, on average, how much carbon was produced by shipments that were way behind schedule—specifically, those whose delivery performance category counted as severely delayed. Return this value rounded to two decimal places.", "normal_query": "Calculate the average carbon footprint for all shipments that are classified as 'Severely Delayed' based on the Delivery Performance Classification standard. Please return a value rounded to two decimal places.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "cold_chain_pharma_compliance_17", "selected_database": "cold_chain_pharma_compliance", "query": "Could you figure out the overall reliability score for all our temperature data loggers? Treat any logger with a missing recording interval as having a reading failure, and count a calibration failure if the calibration date is before June 26, 2024.", "normal_query": "Estimate the overall Data Logger Reliability Score for all monitoring devices in the fleet. Assume a reading failure if the recording interval is missing, and a calibration failure if the calibration date is before 2024-06-26.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cold_chain_pharma_compliance_18", "selected_database": "cold_chain_pharma_compliance", "query": "Which three people have had the most shipments rejected when making product release decisions? I just want a list of their names and how many times each had a rejection, sorted so the person with the most rejections is at the top.", "normal_query": "Using the Product Release Decision Framework, identify the top 3 responsible persons with the highest number of 'Rejected' product releases. 
Please list each responsible person and their count of rejected shipments, ordered from highest to lowest count.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "cold_chain_pharma_compliance_19", "selected_database": "cold_chain_pharma_compliance", "query": "Can you show me, for each type of regulatory compliance status, what the average number of temperature and humidity excursions is? And sort the results so the highest average temperature excursions come first.", "normal_query": "For each regulatory compliance status, calculate the average number of temperature excursions and average number of humidity excursions. Display a table with compliance status, average temperature excursions, and average humidity excursions, sorted by temperature excursions in descending order.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "cold_chain_pharma_compliance_20", "selected_database": "cold_chain_pharma_compliance", "query": "Can you show me the three riskiest shipping routes based on temperature excursions and shipping delays? Just give me the route, how many shipments went that way, and the risk score. Only count routes with more than one shipment.", "normal_query": "I want to identify the top 3 riskiest shipping lanes by calculating the Lane Risk Potential for each route. Only include temperature excursions and shipping delays in the risk score. Please provide a report listing the route string, total number of shipments, and the calculated lane risk potential, sorted by lane risk potential in descending order. Only include lanes with more than one shipment.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Query", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": true}} +{"instance_id": "cold_chain_pharma_compliance_M_1", "selected_database": "cold_chain_pharma_compliance", "query": "Can you make a function calculate_ted that, when I give you a shipment's ID, tells me how many minutes it spent outside the right temperature range?", "normal_query": "For a given shipment, try to create a function calculate_ted that calculates the Temperature Excursion Duration. The input is a shipment's ID and the output is the TED value as an integer.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": 0, "distinct": false, "order": false}} +{"instance_id": "cold_chain_pharma_compliance_M_2", "selected_database": "cold_chain_pharma_compliance", "query": "For each shipment, I want you to include two things in the summary. First, tell me if it managed to stay in the right temperature the whole way. Second, show what percent of all shipments got that right. Even if some shipments don’t have temperature info, still include both pieces of info in their summary.", "normal_query": "Please update every shipment so that its summary includes two clear insights. First, show whether that shipment stayed within the correct temperature range the entire time. 
Second, include the percentage of all shipments that successfully stayed within the proper temperature range from start to finish. This information should be added for every shipment, even for those where no temperature data is available.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "cold_chain_pharma_compliance_M_3", "selected_database": "cold_chain_pharma_compliance", "query": "Can you make a view called v_product_temperature_analysis that breaks down our products by how sensitive they are to temperature changes and by their storage type? For each kind of product and storage group, show how many batches there are, what the average temperature range is, a list of the different sensitivity levels, and the lowest and highest storage temps. Please sort the results by product type and storage method.", "normal_query": "Create a view named v_product_temperature_analysis that analyzes pharmaceutical products by both Temperature Sensitivity Tiers and Product Storage Classifications. For each product category and storage classification, display the batch count, average temperature range, all unique sensitivity descriptions, and the minimum and maximum storage temperatures. Order the results by product category and storage classification.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "cold_chain_pharma_compliance_M_4", "selected_database": "cold_chain_pharma_compliance", "query": "Can you show, for every delivery, how well it matched the expected timing — like how close it was to being on time? And also include something simple that tells whether it was early, late, or arrived just right. Keep everything else as it is.", "normal_query": "Please enhance each delivery record by adding two insights. The first is the On-Time Delivery Performance, which shows how closely the actual delivery matched the planned schedule, expressed as a percentage. The second is the Delivery Performance Classification, which gives a simple label describing the delivery’s overall timeliness. These additions should be included along with the original delivery details.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": false, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "cold_chain_pharma_compliance_M_5", "selected_database": "cold_chain_pharma_compliance", "query": "Whenever we add or change a shipment’s temperature data, I want the system to automatically figure out how much of the time the temperature stayed where it should, and also give it a simple color rating based on that. These two things should be added back into the same record.", "normal_query": "Whenever a new or updated environmental monitoring entry is recorded, the system should automatically assess the Quality Risk Zones for that shipment. It should use the temperature data to calculate Time In Range Percentage based on a standard 72-hour journey, then assign the appropriate risk level. 
Both the quality risk zone and time in range percentage should be added back into the same record.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "cold_chain_pharma_compliance_M_6", "selected_database": "cold_chain_pharma_compliance", "query": "I need to find all the product batches that count as top-level cold chain products. For each one, list its batch tag, product code, name, and value, and make sure it’s clearly marked as tier 1 in the records. Also, let me know how many you found, and if anything goes wrong, just keep going and let me know about the problem.", "normal_query": "I want to batch identify all Tier 1 Cold Chain Products in our database. For each product batch, check if it meets the Tier 1 Cold Chain Products criteria. For every qualifying batch, record its batch tag, product code, product label, and value in USD, and flag it as Tier 1. Also, update the product batch records to append a '[TIER1]' flag to the value field for all identified Tier 1 Cold Chain Products. Please ensure the process logs the number of Tier 1 products found and handles any errors gracefully.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cold_chain_pharma_compliance_M_7", "selected_database": "cold_chain_pharma_compliance", "query": "I need to refresh our monitoring device reliability records using the data logger value for reliability assessment. For every device, figure out its reliability value. Show all the details and scores in a temp table first. Then, save a backup of the current device list, wipe it clean, and fill it back up with the updated info from the results. Make sure to include all the device details from the original record, the made-up failure rates, the reliability score, and when you did the analysis.", "normal_query": "I plan to recalculate and rebuild the reliability tracking for all monitoring devices in our system using the Data Logger Reliability Score. For each device, calculate DLRS and store the results in a staging table. Then, back up the current monitoringdevices table and repopulate it with the original device columns from the staging results. Please include the device reference, calibration timestamp, device accuracy, recording interval, temperature points, all simulated failure rates, the calculated DLRS, and the analysis timestamp in the staging output.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": 2, "distinct": false, "order": false}} +{"instance_id": "cold_chain_pharma_compliance_M_8", "selected_database": "cold_chain_pharma_compliance", "query": "I'm looking to see, for every shipment, what's the biggest number of shock events it went through, basically the highest shock count for each shipment, even if some shipments didn't have any shock data. Just show me the shipment code and that top shock number for each one.", "normal_query": "For each shipment in the cold chain database, I want to determine the maximum shock event count observed, using the concept of Shock Event Significance Levels. 
Please provide the shipment identifier and its corresponding maximum shock event count, ensuring that all shipments are included in the results even if no shock data is present, using 0 to represent missing data. The output should display the shipment code alongside the highest shock event count recorded for that shipment.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": true}} +{"instance_id": "cold_chain_pharma_compliance_M_9", "selected_database": "cold_chain_pharma_compliance", "query": "If I give you a shipment, would you tell me what its regulatory compliance status is, and also just let me know with a true or false if it’s considered compliant?", "normal_query": "For a specified shipment, I require a summary of its regulatory compliance status according to the concept of Regulatory Compliance Status Definitions. The output should include both the compliance status and an indicator of whether the shipment is compliant, with the indicator expressed as a boolean value.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}} +{"instance_id": "cold_chain_pharma_compliance_M_10", "selected_database": "cold_chain_pharma_compliance", "query": "Whenever a shipment’s product release status is set to rejected, go ahead and mark its insurance claim as rejected too, and don’t bother updating claims that are already marked as rejected.", "normal_query": "For all shipments, please update the insurance claim status to 'Rejected' in the insuranceclaims table for every case where the product release status is 'Rejected', strictly following the Product Release Decision Framework. Ensure that only claims not already marked as 'Rejected' are updated.", "preprocess_sql": [], "clean_up_sqls": [], "sol_sql": [], "external_knowledge": [], "test_cases": [], "category": "Management", "high_level": true, "conditions": {"decimal": -1, "distinct": false, "order": false}}