Reja1 committed on
Commit a2cb2dd · 1 Parent(s): ebee8dc

Separation of response and score in predictions.jsonl and summary.jsonl

Files changed (2)
  1. src/benchmark_runner.py +101 -41
  2. src/evaluation.py +69 -14
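
The gist of the change, sketched below (not part of the commit; field names such as `raw_response` and all values are hypothetical): each per-question record is split so that predictions.jsonl keeps only the model's response while summary.jsonl carries the evaluation fields, mirroring `append_prediction` and `summary_detail_data` in the diff that follows.

```python
# Hypothetical record, for illustration only (values are made up).
result = {
    "question_id": "Q001",
    "raw_response": "...",   # assumed name for the model's raw output field
    "attempt": 1,
    "predicted_answer": "B",
    "ground_truth": "B",
    "marks_awarded": 4,
    "evaluation_status": "correct",
}

# predictions.jsonl line: evaluation fields stripped (mirrors append_prediction)
prediction_record = {
    k: v for k, v in result.items()
    if k not in ("marks_awarded", "evaluation_status", "predicted_answer", "ground_truth")
}

# summary.jsonl line: per-question score details (mirrors summary_detail_data)
summary_record = {k: result[k] for k in (
    "question_id", "marks_awarded", "evaluation_status",
    "predicted_answer", "ground_truth", "attempt",
)}
```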
src/benchmark_runner.py CHANGED
@@ -93,27 +93,35 @@ def load_config(config_path: str) -> dict:
 
 def append_prediction(result: Dict[str, Any], filepath: str):
     """Appends a single prediction result to a JSONL file."""
+    # Create a copy to avoid modifying the original dict that might be used elsewhere
+    # and remove evaluation-specific fields before saving to predictions.jsonl
+    prediction_data = result.copy()
+    prediction_data.pop('marks_awarded', None)
+    prediction_data.pop('evaluation_status', None)
+    prediction_data.pop('predicted_answer', None)  # Remove predicted_answer
+    prediction_data.pop('ground_truth', None)  # Remove ground_truth
     try:
         with open(filepath, 'a') as f:
-            json.dump(result, f)
+            json.dump(prediction_data, f)
             f.write('\n')
     except IOError as e:
         logging.error(f"Failed to append prediction to {filepath}: {e}")
     except Exception as e:
         logging.error(f"Unexpected error appending prediction to {filepath}: {e}")
 
-
-def save_summary(summary: Dict[str, Any], filepath: str):
-    """Saves the final summary dictionary to a JSON file."""
+def append_summary_detail(result_detail: Dict[str, Any], filepath: str):
+    """Appends a single question's summary details (evaluation status, marks, predicted, truth) to a JSONL file."""
     try:
-        with open(filepath, 'w') as f:
-            json.dump(summary, f, indent=4)
-        logging.info(f"Summary results saved to {filepath}")
+        with open(filepath, 'a') as f:
+            json.dump(result_detail, f)
+            f.write('\n')
     except IOError as e:
-        logging.error(f"Failed to save summary results to {filepath}: {e}")
+        logging.error(f"Failed to append summary detail to {filepath}: {e}")
     except Exception as e:
-        logging.error(f"Unexpected error saving summary to {filepath}: {e}")
+        logging.error(f"Unexpected error appending summary detail to {filepath}: {e}")
+
 
+# Removed save_summary function as summary.json is no longer needed.
 
 def generate_markdown_summary(summary: Dict[str, Any], filepath: str):
     """Generates a human-readable Markdown summary from the results dictionary."""
@@ -125,7 +133,6 @@ def generate_markdown_summary(summary: Dict[str, Any], filepath: str):
     timestamp = summary.get("timestamp", "N/A")
     total_questions_in_dataset = summary.get("total_questions_in_dataset", 0)
     total_questions_processed_in_run = summary.get("total_questions_processed_in_run", 0)
-    # total_api_cost = summary.get("total_api_cost", 0.0) # Removed
 
     filtered_questions_count = 0
     if total_questions_in_dataset > 0 and total_questions_processed_in_run > 0:
@@ -149,14 +156,8 @@ def generate_markdown_summary(summary: Dict[str, Any], filepath: str):
     if "overall_score" in summary and "section_breakdown" in summary:  # Generic check for score-based summary
         total_processed = summary.get("total_questions_processed", 0)
 
-        # Max score calculation is complex due to varied scoring per question.
-        # For now, we'll omit max_score from the overall display or acknowledge its variability.
-        # A more accurate max_score would require iterating through the dataset items used in the run
-        # and summing their individual max possible scores based on exam_name and question_type.
-        # This is out of scope for the current summary generation simplicity.
-        max_score_display = "N/A (variable per question)"  # Placeholder
-
         overall_score = summary.get('overall_score', 'N/A')
+        total_possible_score = summary.get('total_possible_score_for_processed_questions', 'N/A')
         correct_full_count = summary.get('overall_correct_full', 'N/A')
         partial_correct_count = summary.get('overall_partial_correct', 'N/A')
         incorrect_choice_count = summary.get('overall_incorrect_choice', 'N/A')
@@ -164,10 +165,10 @@ def generate_markdown_summary(summary: Dict[str, Any], filepath: str):
         failures_count = summary.get('overall_api_parse_failures', 'N/A')
         unmapped_count = summary.get('unmapped_section_questions', 'N/A')
 
-        md_content.append("## Exam Scoring Results")  # Changed from NEET
-        md_content.append(f"**Overall Score:** **{overall_score}** (Max score varies based on question types)")
+        md_content.append("## Exam Scoring Results")
+        md_content.append(f"**Overall Score:** **{overall_score}** / **{total_possible_score}**")
         md_content.append(f"- **Fully Correct Answers:** {correct_full_count}")
-        if partial_correct_count != 'N/A' and partial_correct_count > 0:  # Only show if applicable
+        if partial_correct_count != 'N/A' and partial_correct_count > 0:
             md_content.append(f"- **Partially Correct Answers:** {partial_correct_count}")
         md_content.append(f"- **Incorrectly Answered (Choice Made):** {incorrect_choice_count}")
         md_content.append(f"- **Skipped Questions:** {skipped_count}")
@@ -176,6 +177,53 @@ def generate_markdown_summary(summary: Dict[str, Any], filepath: str):
         if unmapped_count > 0:
             md_content.append(f"- **Unmapped Section Questions:** {unmapped_count} *(Not included in section breakdown)*")
 
+        md_content.append("\n### Detailed Score Calculation by Question Type")
+        question_type_breakdown = summary.get("question_type_breakdown", {})
+        if question_type_breakdown:
+            sorted_q_types = sorted(question_type_breakdown.keys())
+            for q_type in sorted_q_types:
+                stats = question_type_breakdown[q_type]
+                q_type_display = q_type.replace('_', ' ').title()
+                max_score_per_q = stats.get('max_score_per_question', 0)
+
+                correct_count_q = stats.get('correct_full', 0)
+                partial_count_q = stats.get('partial_correct', 0)
+                incorrect_count_q = stats.get('incorrect_choice', 0)
+                skipped_count_q = stats.get('skipped', 0)
+                api_fail_count_q = stats.get('api_parse_failures', 0)
+                score_q = stats.get('score', 0)
+
+                calculation_parts = []
+                if correct_count_q > 0:
+                    calculation_parts.append(f"{correct_count_q} Correct (+{max_score_per_q})")
+                if partial_count_q > 0:
+                    # For partial, we can't easily show the exact score per question without more detail
+                    # For now, just indicate partials.
+                    calculation_parts.append(f"{partial_count_q} Partial")
+                if incorrect_count_q > 0:
+                    # Need to know penalty for incorrect. Assuming -1 for MCQ_SINGLE_CORRECT, -2 for MCQ_MULTIPLE_CORRECT
+                    # For INTEGER, penalty is 0. This needs to be more robust if penalties vary.
+                    penalty_per_incorrect = 0
+                    if q_type == "MCQ_SINGLE_CORRECT": penalty_per_incorrect = -1
+                    elif q_type == "MCQ_MULTIPLE_CORRECT": penalty_per_incorrect = -2
+                    calculation_parts.append(f"{incorrect_count_q} Incorrect ({penalty_per_incorrect})")
+                if skipped_count_q > 0:
+                    calculation_parts.append(f"{skipped_count_q} Skipped (0)")
+                if api_fail_count_q > 0:
+                    # Assuming -1 for API/Parse failures for non-integer types, 0 for integer
+                    penalty_per_api_fail = -1
+                    if q_type == "INTEGER": penalty_per_api_fail = 0
+                    calculation_parts.append(f"{api_fail_count_q} API/Parse Fail ({penalty_per_api_fail})")
+
+                calculation_str = " + ".join(part for part in calculation_parts if part)
+                if not calculation_str:
+                    calculation_str = "No questions of this type processed or all had 0 score change."
+
+                md_content.append(f"**{q_type_display} ({stats.get('count', 0)} questions):** {score_q} marks")
+                md_content.append(f" *Calculation:* {calculation_str} = {score_q}")
+        else:
+            md_content.append("No question type breakdown available.")
+
         md_content.append("\n### Section Breakdown")
         md_content.append("| Section | Score | Fully Correct | Partially Correct | Incorrect Choice | Skipped | API/Parse Failures |")
         md_content.append("|---------------|-------|---------------|-------------------|------------------|---------|--------------------|")
@@ -189,9 +237,9 @@ def generate_markdown_summary(summary: Dict[str, Any], filepath: str):
             for section_name in sorted_section_names:
                 stats = section_breakdown.get(section_name, {})
                 score = stats.get('score', 'N/A')
-                s_correct = stats.get('correct', 'N/A')  # This is full correct from new structure
+                s_correct = stats.get('correct', 'N/A')
                 s_partial = stats.get('partial_correct', 'N/A')
-                s_incorrect = stats.get('incorrect', 'N/A')  # This is incorrect choice from new structure
+                s_incorrect = stats.get('incorrect', 'N/A')
                 s_skipped = stats.get('skipped', 'N/A')
                 s_failures = stats.get('api_parse_failures', 'N/A')
                 display_section_name = section_name.replace('_', ' ')
@@ -200,7 +248,7 @@ def generate_markdown_summary(summary: Dict[str, Any], filepath: str):
             md_content.append("| No section data available | N/A | N/A | N/A | N/A | N/A | N/A |")
 
     # Fallback for simple accuracy (if exam scoring wasn't applicable or failed)
-    elif "accuracy_on_parsed" in summary:  # This branch might be less used if all datasets now have exam_name/type
+    elif "accuracy_on_parsed" in summary:
        md_content.append("## Simple Accuracy Results (Fallback)")
        md_content.append(f"- **Accuracy (on successfully parsed non-skipped):** {summary.get('accuracy_on_parsed', 'N/A'):.4f}")
        md_content.append(f"- **Total Processed Attempts:** {summary.get('total_processed_attempts', 'N/A')}")
@@ -320,7 +368,7 @@ def run_benchmark(
     model_output_dir = os.path.join(base_output_dir, model_output_dir_name)
     os.makedirs(model_output_dir, exist_ok=True)
     predictions_path = os.path.join(model_output_dir, "predictions.jsonl")
-    summary_path = os.path.join(model_output_dir, "summary.json")
+    summary_details_path = os.path.join(model_output_dir, "summary.jsonl")  # New file for per-question summary details
     markdown_summary_path = os.path.join(model_output_dir, "summary.md")  # Define path for MD summary
     logging.info(f"Results for {model_id} will be saved to: {model_output_dir}")
 
@@ -447,8 +495,19 @@ def run_benchmark(
         result_data['marks_awarded'] = score_details.get('marks_awarded')
         result_data['evaluation_status'] = score_details.get('evaluation_status')
 
+        # Append evaluation details to summary.jsonl
+        summary_detail_data = {
+            "question_id": question_id,
+            "marks_awarded": result_data['marks_awarded'],
+            "evaluation_status": result_data['evaluation_status'],
+            "predicted_answer": result_data['predicted_answer'],  # Add predicted_answer
+            "ground_truth": result_data['ground_truth'],  # Add ground_truth
+            "attempt": result_data['attempt']
+        }
+        append_summary_detail(summary_detail_data, summary_details_path)
+
         model_results.append(result_data)
-        append_prediction(result_data, predictions_path)
+        append_prediction(result_data, predictions_path)  # append_prediction now handles removing evaluation fields
 
         final_parsed_answer = result_data["predicted_answer"]
         log_message_prefix = f"Question {question_id}:"
@@ -626,8 +685,19 @@ def run_benchmark(
         result_data_retry['marks_awarded'] = score_details_retry.get('marks_awarded')
         result_data_retry['evaluation_status'] = score_details_retry.get('evaluation_status')
 
+        # Append evaluation details to summary.jsonl for retry pass
+        summary_detail_data_retry = {
+            "question_id": question_id_retry,
+            "marks_awarded": result_data_retry['marks_awarded'],
+            "evaluation_status": result_data_retry['evaluation_status'],
+            "predicted_answer": result_data_retry['predicted_answer'],  # Add predicted_answer
+            "ground_truth": result_data_retry['ground_truth'],  # Add ground_truth
+            "attempt": result_data_retry['attempt']
+        }
+        append_summary_detail(summary_detail_data_retry, summary_details_path)
+
         model_results.append(result_data_retry)
-        append_prediction(result_data_retry, predictions_path)
+        append_prediction(result_data_retry, predictions_path)  # append_prediction now handles removing evaluation fields
 
         # Logging for retry pass
         log_message_prefix_retry = f"Question {question_id_retry} (Retry):"
@@ -709,24 +779,14 @@ def run_benchmark(
     logging.info(json.dumps(summary, indent=2, sort_keys=True))
     logging.info("-------------------------------------")
 
-    # --- Overwrite predictions file with final evaluated results ---
-    # The model_results list was modified in-place by calculate_neet_scores (if applicable)
+    # The model_results list was modified in-place by calculate_exam_scores
     # to include evaluation_status and marks_awarded.
-    logging.info(f"Overwriting {predictions_path} with final evaluated results...")
-    try:
-        with open(predictions_path, 'w') as f:
-            for final_result in model_results:
-                json.dump(final_result, f)
-                f.write('\n')
-        logging.info(f"Successfully updated {predictions_path} with evaluation details.")
-    except IOError as e:
-        logging.error(f"Failed to overwrite predictions file {predictions_path}: {e}")
-    except Exception as e:
-        logging.error(f"Unexpected error overwriting predictions file {predictions_path}: {e}")
+    # predictions.jsonl is now written incrementally without evaluation details.
+    # No need to overwrite predictions.jsonl here.
 
 
-    # Save final summary (JSON and Markdown) for the current model
-    save_summary(summary, summary_path)
+    # Save final summary (Markdown) for the current model
+    # The summary.json file is no longer generated as per user request.
     generate_markdown_summary(summary, markdown_summary_path)  # Call the new function
 
     logging.info("Benchmark run completed.")
src/evaluation.py CHANGED
@@ -201,9 +201,34 @@ def calculate_single_question_score_details(result_item: Dict[str, Any]) -> Dict
     return {"marks_awarded": current_score_change, "evaluation_status": evaluation_status}
 
 
+def calculate_max_score_for_question(exam_name: str, question_type: str) -> int:
+    """
+    Returns the maximum possible score for a given exam and question type.
+    """
+    exam_name = exam_name.upper()
+    question_type = question_type.upper()
+
+    if exam_name == "NEET" and question_type == "MCQ_SINGLE_CORRECT":
+        return 4
+    elif exam_name == "JEE_MAIN":
+        if question_type == "MCQ_SINGLE_CORRECT":
+            return 4
+        elif question_type == "INTEGER":
+            return 4
+    elif exam_name == "JEE_ADVANCED":
+        if question_type == "MCQ_SINGLE_CORRECT":
+            return 3
+        elif question_type == "INTEGER":
+            return 4
+        elif question_type == "MCQ_MULTIPLE_CORRECT":
+            return 4  # Max score for multiple correct is 4
+    return 0  # Default for unknown types
+
+
 def calculate_exam_scores(results: List[Dict[str, Any]]) -> Dict[str, Any]:
     """
-    Calculates exam scores based on exam_name and question_type, providing section-wise breakdown.
+    Calculates exam scores based on exam_name and question_type, providing section-wise breakdown
+    and detailed question type statistics.
 
     Args:
         results (List[Dict[str, Any]]): A list of result dictionaries. Each dict must contain:
@@ -217,14 +242,25 @@ def calculate_exam_scores(results: List[Dict[str, Any]]) -> Dict[str, Any]:
            This list will be modified in-place to add 'evaluation_status' and 'marks_awarded' by calling
            calculate_single_question_score_details for each item.
     Returns:
-        Dict[str, Any]: A dictionary containing overall and section-wise scores and counts.
+        Dict[str, Any]: A dictionary containing overall and section-wise scores and counts,
+        plus question type breakdowns and total possible score.
     """
     if not results:
         return {"error": "No results provided."}
 
-    overall_stats = {"score": 0, "correct": 0, "incorrect": 0, "skipped": 0, "api_parse_failures": 0, "partial_correct": 0}
-    # total_api_cost = 0.0 # Removed
+    overall_stats = {
+        "score": 0,
+        "correct": 0,
+        "incorrect": 0,
+        "skipped": 0,
+        "api_parse_failures": 0,
+        "partial_correct": 0,
+        "total_possible_score": 0  # New field
+    }
 
+    # Initialize question type breakdown
+    question_type_breakdown: Dict[str, Dict[str, Any]] = {}
+
     valid_subjects_from_data = [r.get("subject") for r in results if r.get("subject") and isinstance(r.get("subject"), str) and r.get("subject").strip()]
     if not valid_subjects_from_data and results:
         logging.warning("No valid subjects found in results data to initialize section_stats.")
@@ -242,6 +278,8 @@ def calculate_exam_scores(results: List[Dict[str, Any]]) -> Dict[str, Any]:
     for result in results:
         question_id = result.get("question_id")  # For logging within loop
         subject = result.get("subject")  # For section mapping
+        exam_name = result.get("exam_name", "").upper()
+        question_type = result.get("question_type", "").upper()
 
         # Calculate score details for the single question
         score_details = calculate_single_question_score_details(result)
@@ -252,18 +290,12 @@ def calculate_exam_scores(results: List[Dict[str, Any]]) -> Dict[str, Any]:
         result['evaluation_status'] = evaluation_status
         result['marks_awarded'] = current_score_change
 
-        # Accumulate API cost # Removed
-        # current_api_cost = result.get("api_cost") # Removed
-        # if isinstance(current_api_cost, (int, float)): # Removed
-        #     total_api_cost += current_api_cost # Removed
-        # elif current_api_cost is not None: # Removed
-        #     logging.warning(f"Invalid api_cost type for QID {question_id}: {current_api_cost} (type: {type(current_api_cost)}). Skipping cost accumulation for this item.") # Removed
+        # Accumulate total possible score
+        overall_stats["total_possible_score"] += calculate_max_score_for_question(exam_name, question_type)
 
         # Determine boolean flags based on evaluation_status for aggregation
         is_correct_full = evaluation_status in ["correct", "correct_full"]
         is_partial_correct = evaluation_status.startswith("partial_")
-        # is_incorrect_choice needs to be inferred carefully if not directly returned by single calc
-        # For simplicity, we'll rely on the status. More robust would be for single_calc to return these bools too.
         is_incorrect_choice = evaluation_status in ["incorrect", "incorrect_negative"]
         is_skipped = evaluation_status == "skipped"
         is_api_parse_failure = evaluation_status in ["failure_api_or_parse", "failure_unexpected_type", "error_bad_ground_truth"]
@@ -276,6 +308,28 @@ def calculate_exam_scores(results: List[Dict[str, Any]]) -> Dict[str, Any]:
         if is_api_parse_failure: overall_stats["api_parse_failures"] += 1
         if is_partial_correct: overall_stats["partial_correct"] +=1
 
+        # Aggregate by question type
+        if question_type not in question_type_breakdown:
+            question_type_breakdown[question_type] = {
+                "count": 0,
+                "score": 0,
+                "correct_full": 0,
+                "partial_correct": 0,
+                "incorrect_choice": 0,
+                "skipped": 0,
+                "api_parse_failures": 0,
+                "max_score_per_question": calculate_max_score_for_question(exam_name, question_type)
+            }
+
+        q_type_stats = question_type_breakdown[question_type]
+        q_type_stats["count"] += 1
+        q_type_stats["score"] += current_score_change
+        if is_correct_full: q_type_stats["correct_full"] += 1
+        if is_incorrect_choice: q_type_stats["incorrect_choice"] += 1
+        if is_skipped: q_type_stats["skipped"] += 1
+        if is_api_parse_failure: q_type_stats["api_parse_failures"] += 1
+        if is_partial_correct: q_type_stats["partial_correct"] += 1
+
         section = None
         if subject:
             question_num_for_log = -1  # Placeholder, as QID might not have number
@@ -309,9 +363,10 @@ def calculate_exam_scores(results: List[Dict[str, Any]]) -> Dict[str, Any]:
         "overall_skipped": overall_stats["skipped"],
         "overall_api_parse_failures": overall_stats["api_parse_failures"],
         "total_questions_processed": len(results),
-        # "total_api_cost": total_api_cost, # Removed
+        "total_possible_score_for_processed_questions": overall_stats["total_possible_score"],  # New field
         "unmapped_section_questions": unmapped_section_questions,
-        "section_breakdown": section_stats
+        "section_breakdown": section_stats,
+        "question_type_breakdown": question_type_breakdown  # New field
     }
 
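
For reference, a minimal sketch (not part of the commit) of how the new per-question maxima roll up into `total_possible_score_for_processed_questions`. The marking values restate the table encoded in `calculate_max_score_for_question` above; the results list is hypothetical.

```python
# Condensed restatement of the marking table from calculate_max_score_for_question,
# used here only to show how the total possible score accumulates.
MAX_MARKS = {
    ("NEET", "MCQ_SINGLE_CORRECT"): 4,
    ("JEE_MAIN", "MCQ_SINGLE_CORRECT"): 4,
    ("JEE_MAIN", "INTEGER"): 4,
    ("JEE_ADVANCED", "MCQ_SINGLE_CORRECT"): 3,
    ("JEE_ADVANCED", "INTEGER"): 4,
    ("JEE_ADVANCED", "MCQ_MULTIPLE_CORRECT"): 4,
}

results = [  # hypothetical processed questions
    {"exam_name": "JEE_ADVANCED", "question_type": "MCQ_SINGLE_CORRECT"},
    {"exam_name": "JEE_ADVANCED", "question_type": "INTEGER"},
    {"exam_name": "NEET", "question_type": "MCQ_SINGLE_CORRECT"},
]

# Sum the per-question maxima the same way calculate_exam_scores accumulates them.
total_possible = sum(
    MAX_MARKS.get((r["exam_name"].upper(), r["question_type"].upper()), 0)
    for r in results
)
print(total_possible)  # 3 + 4 + 4 = 11
```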