Reja1 committed · Commit 1ab1a75 · 1 Parent(s): b30cb09

Integer type question evaluation fixed

data/metadata.jsonl CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
- oid sha256:da12c6171e65130835ea5f3985bc1d144548c2c0b114614e472c542b3199d1ce
- size 130971
+ oid sha256:8f388b32c0d64c519aebdd23349f51f9bd9ae96977d26a2cb8a9d00192224cec
+ size 130978
jee-neet-benchmark.py CHANGED
@@ -63,7 +63,7 @@ class JeeNeetBenchmark(datasets.GeneratorBasedBuilder):
                "exam_code": datasets.Value("string"), # Will provide default if missing in source
                "subject": datasets.Value("string"),
                "question_type": datasets.Value("string"),
-               "correct_answer": datasets.Sequence(datasets.Value("string")), # List of strings
+               "correct_answer": datasets.Value("string"), # Store as JSON string
            }
        )
        return datasets.DatasetInfo(
@@ -163,5 +163,5 @@ class JeeNeetBenchmark(datasets.GeneratorBasedBuilder):
                "exam_code": row.get("exam_code", "N/A"), # Provide "N/A" if exam_code is missing
                "subject": row.get("subject", ""),
                "question_type": row.get("question_type", ""),
-               "correct_answer": row.get("correct_answer", []),
+               "correct_answer": json.dumps(row.get("correct_answer", [])),
            }
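
Switching `correct_answer` from `datasets.Sequence(datasets.Value("string"))` to a plain string feature lets a single column hold both flat answers such as `["14"]` and nested ranges such as `[["0.7", "0.8"]]`, which a fixed sequence-of-strings feature cannot represent; the dataset script serializes with `json.dumps` and `src/benchmark_runner.py` below decodes with `json.loads`. A minimal standalone sketch of that round trip (illustrative only, not code from the repository):

```python
import json

# Two ground-truth shapes that now share the single string column:
exact_answer = ["14"]            # plain integer answer, kept as strings
range_answer = [["0.7", "0.8"]]  # inclusive numeric range for JEE Advanced integer questions

# Writing side (dataset script): serialize whatever list structure the row carries.
encoded = [json.dumps(exact_answer), json.dumps(range_answer)]

# Reading side (benchmark runner): decode back to the original structure before scoring.
decoded = [json.loads(s) for s in encoded]
assert decoded == [exact_answer, range_answer]
```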
src/benchmark_runner.py CHANGED
@@ -295,7 +295,7 @@ def run_benchmark(
        # Explicitly specify data_files and data_dir for local loading.
        # data_dir should be the project root ('.') when loading a local script,
        # as the script is copied to a cache and needs to know where the actual data is.
-       dataset = load_dataset(dataset_path, split='test', data_files={'test': 'data/metadata.jsonl'}, data_dir=os.getcwd(), trust_remote_code=True)
+       dataset = load_dataset(dataset_path, split='test', data_files={'test': 'data/metadata.jsonl'}, data_dir=os.getcwd(), trust_remote_code=True, download_mode="force_redownload")
        dataset = dataset.cast_column("image", HFImage(decode=True)) # Ensure images are loaded as PIL
        logging.info(f"Dataset loaded successfully from path: {dataset_path}. Original number of questions: {len(dataset)}")
    except Exception as e:
@@ -390,7 +390,7 @@ def run_benchmark(
        exam_name_from_data = example.get("exam_name", "UNKNOWN_EXAM") # Get exam_name from data
        question_type_from_data = example.get("question_type", "MCQ_SINGLE_CORRECT") # Get question_type
        image: PILImage.Image = example["image"]
-       truth = example["correct_answer"]
+       truth = json.loads(example["correct_answer"]) # Parse the JSON string back to a list/list of lists

        result_data = {
            "question_id": question_id,
src/evaluation.py CHANGED
@@ -69,6 +69,21 @@ def get_subject_as_section(subject: str, question_num_for_log: int) -> Optional[
    logging.warning(f"Invalid or missing subject ('{subject}') for question_num '{question_num_for_log}'. Cannot determine section.")
    return None

+ def is_within_range(predicted_value_str: str, lower_bound_str: str, upper_bound_str: str) -> bool:
+     """
+     Checks if a predicted numerical value (as a string) falls within a specified range.
+     The comparison is inclusive.
+     """
+     try:
+         predicted_value = float(predicted_value_str)
+         lower_bound = float(lower_bound_str)
+         upper_bound = float(upper_bound_str)
+     except ValueError:
+         logging.debug(f"Could not convert predicted value '{predicted_value_str}' or bounds ('{lower_bound_str}', '{upper_bound_str}') to numbers.")
+         return False
+
+     return lower_bound <= predicted_value <= upper_bound
+

def calculate_single_question_score_details(result_item: Dict[str, Any]) -> Dict[str, Any]:
    """
@@ -99,25 +114,23 @@ def calculate_single_question_score_details(result_item: Dict[str, Any]) -> Dict
    # Ensure truth is a set of uppercase strings for consistent processing.
    # Ground truth from metadata.jsonl is expected to be a list of strings.
    # e.g., ["1"], ["A"], ["12.75"], ["A", "C"]
-   truth_set: set
-   if isinstance(truth, str):
-       # This case might occur if metadata had a single string instead of list for some reason,
-       # or if an old format slips through. Convert to a set of one uppercase string.
-       logging.warning(f"Ground truth for {question_id} is a single string: '{truth}'. Converting to set.")
-       truth_set = {truth.upper()}
-   elif isinstance(truth, list) and all(isinstance(t, str) for t in truth):
-       truth_set = {s.upper() for s in truth}
-   # Deprecated int/List[int] handling, as metadata should now be List[str]
-   elif isinstance(truth, list) and any(isinstance(t, int) for t in truth):
-       logging.warning(f"Ground truth for {question_id} contains integers: {truth}. Converting all to strings.")
-       truth_set = {str(s).upper() for s in truth}
-   elif isinstance(truth, int):
-       logging.warning(f"Ground truth for {question_id} is int: {truth}. Converting to string set.")
-       truth_set = {str(truth).upper()}
+   # For integer ranges, it will be like [["0.7", "0.8"]]
+   truth_processed: List[Union[str, List[str]]] = []
+   if isinstance(truth, list):
+       for t_item in truth:
+           if isinstance(t_item, str):
+               truth_processed.append(t_item.upper())
+           elif isinstance(t_item, list) and len(t_item) == 2 and all(isinstance(x, str) for x in t_item):
+               truth_processed.append([x.upper() for x in t_item]) # Store range as list of uppercase strings
+           else:
+               logging.error(f"Invalid item in ground_truth list for {question_id}: {t_item}. Skipping.")
+   elif isinstance(truth, str):
+       truth_processed.append(truth.upper())
    else:
        logging.error(f"Invalid ground_truth format for {question_id}: {truth} (type: {type(truth)}). Assigning 0 marks.")
        return {"marks_awarded": 0, "evaluation_status": "error_bad_ground_truth"}

+
    if not api_success or pred is None: # pred is None means our internal parsing failed
        evaluation_status = "failure_api_or_parse"
        current_score_change = -1
@@ -140,7 +153,8 @@ def calculate_single_question_score_details(result_item: Dict[str, Any]) -> Dict
        is_correct = False
        if len(pred_set) == 1: # Ensure prediction is indeed a single option
            single_pred_answer = list(pred_set)[0] # Get the single predicted option
-           if single_pred_answer in truth_set: # Check if this predicted option is in the set of true answers
+           # Check against all processed truths (which are single strings for MCQ)
+           if single_pred_answer in truth_processed:
                is_correct = True

        if is_correct:
@@ -157,23 +171,49 @@ def calculate_single_question_score_details(result_item: Dict[str, Any]) -> Dict
        else: current_score_change = 0 # Default no penalty

    elif exam_name == "JEE_MAIN" and question_type == "INTEGER": # Integer answers are now strings in a list e.g. ["14"]
-       if len(pred_set) == 1 and list(pred_set)[0] in truth_set: # Compare the single string prediction
+       # For JEE_MAIN INTEGER, we expect truth_processed to contain single strings
+       is_correct = False
+       if len(pred_set) == 1:
+           predicted_answer_str = list(pred_set)[0]
+           if predicted_answer_str in truth_processed: # Check against single string truths
+               is_correct = True
+
+       if is_correct:
            current_score_change = 4; evaluation_status = "correct"
        else:
            current_score_change = 0; evaluation_status = "incorrect"

    elif exam_name == "JEE_ADVANCED":
        # Note: MCQ_SINGLE_CORRECT for JEE_ADVANCED is handled by the common block above
-       if question_type == "INTEGER": # Integer answers are now strings in a list e.g. ["12"]
-           if len(pred_set) == 1 and list(pred_set)[0] in truth_set: # Compare the single string prediction
+       if question_type == "INTEGER":
+           is_correct = False
+           if len(pred_set) == 1:
+               predicted_answer_str = list(pred_set)[0] # Get the single predicted string
+
+               # Iterate through each ground truth entry in the 'truth_processed' list
+               for gt_entry in truth_processed:
+                   if isinstance(gt_entry, list) and len(gt_entry) == 2: # This is a range [lower, upper]
+                       lower_bound_str, upper_bound_str = gt_entry[0], gt_entry[1]
+                       if is_within_range(predicted_answer_str, lower_bound_str, upper_bound_str):
+                           is_correct = True
+                           break # Found a matching range, no need to check others
+                   elif isinstance(gt_entry, str): # This is an exact integer match
+                       if predicted_answer_str == gt_entry: # gt_entry is already uppercase
+                           is_correct = True
+                           break # Found an exact match, no need to check others
+
+           if is_correct:
                current_score_change = 4; evaluation_status = "correct"
            else:
                current_score_change = 0; evaluation_status = "incorrect"
        elif question_type == "MCQ_MULTIPLE_CORRECT":
-           num_correct_options_in_truth = len(truth_set)
+           # For MCQ_MULTIPLE_CORRECT, truth_processed contains single strings
+           truth_set_mcq = set(truth_processed) # Convert to set for intersection operations
+
+           num_correct_options_in_truth = len(truth_set_mcq)
            num_chosen_options = len(pred_set)
-           correct_chosen_options = pred_set.intersection(truth_set)
-           incorrect_chosen_options = pred_set.difference(truth_set)
+           correct_chosen_options = pred_set.intersection(truth_set_mcq)
+           incorrect_chosen_options = pred_set.difference(truth_set_mcq)
            num_correct_chosen = len(correct_chosen_options)
            num_incorrect_chosen = len(incorrect_chosen_options)
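
The JEE_ADVANCED INTEGER branch now accepts either an exact string match or an inclusive numeric range via `is_within_range`. A small self-contained check of that helper (the function is restated here with a shortened log message so the snippet runs on its own; the sample values are illustrative, not from the dataset):

```python
import logging

# Restated from the is_within_range helper added in src/evaluation.py above.
def is_within_range(predicted_value_str: str, lower_bound_str: str, upper_bound_str: str) -> bool:
    try:
        predicted_value = float(predicted_value_str)
        lower_bound = float(lower_bound_str)
        upper_bound = float(upper_bound_str)
    except ValueError:
        logging.debug("Could not convert prediction or bounds to numbers.")
        return False
    return lower_bound <= predicted_value <= upper_bound

# Bounds are inclusive on both ends; non-numeric predictions simply fail the check.
assert is_within_range("0.75", "0.7", "0.8")
assert is_within_range("0.8", "0.7", "0.8")
assert not is_within_range("0.81", "0.7", "0.8")
assert not is_within_range("abc", "0.7", "0.8")
```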