With results of JEE Advanced 2024
- README.md +5 -5
- data/metadata.jsonl +2 -2
- results/google_gemini-2.5-pro-preview-03-25_JEE_ADVANCED_2024_20250527_100743/predictions.jsonl +0 -0
- results/google_gemini-2.5-pro-preview-03-25_JEE_ADVANCED_2024_20250527_100743/summary.json +43 -0
- results/google_gemini-2.5-pro-preview-03-25_JEE_ADVANCED_2024_20250527_100743/summary.md +24 -0
- src/benchmark_runner.py +36 -8
- src/evaluation.py +9 -0
README.md
CHANGED
@@ -44,7 +44,7 @@ configs:
   splits:
     test: # Name of the split
       # num_bytes: # Size of the split in bytes (you might need to calculate this)
-      num_examples:
+      num_examples: 482 # Number of examples in the split (from your script output)
   # You can add dataset_tags, dataset_summary, etc. for each split.
 
 # Column Naming
@@ -97,7 +97,7 @@ source_datasets: # If your dataset is derived from other datasets
   - original # If it's original data
   # - extended # If it extends another dataset
 size_categories: # Approximate size of the dataset
-  - n<1K # (
+  - n<1K # (482 examples)
 dataset_curation_process: |
   Questions are sourced from official JEE and NEET examination papers.
   They are provided as images to maintain original formatting and diagrams.
@@ -230,7 +230,7 @@ This repository contains scripts to run the benchmark evaluation directly:
 **Available filtering options:**
 - `--exam_name`: Choose from `NEET`, `JEE_MAIN`, `JEE_ADVANCED`, or `all` (default)
 - `--exam_year`: Choose from available years (`2024`, `2025`, etc.) or `all` (default)
-- `--question_ids`: Comma-separated list of specific question IDs to evaluate (e.g., "
+- `--question_ids`: Comma-separated list of specific question IDs to evaluate (e.g., "N24T3001,JA24P1M01")
 
 6. **Check Results:**
    * Results for each model run will be saved in timestamped subdirectories within the `results/` folder.
@@ -289,7 +289,7 @@ The benchmark implements authentic scoring systems for each exam type:
 
 * **`data/metadata.jsonl`**: Contains metadata for each question image with fields:
   - `image_path`: Path to the question image
-  - `question_id`: Unique identifier (e.g., "
+  - `question_id`: Unique identifier (e.g., "N24T3001")
   - `exam_name`: Exam type ("NEET", "JEE_MAIN", "JEE_ADVANCED")
   - `exam_year`: Year of the exam (integer)
   - `exam_code`: Paper/session code (e.g., "T3", "P1")
@@ -299,7 +299,7 @@ The benchmark implements authentic scoring systems for each exam type:
 
 * **`images/`**: Contains subdirectories for each exam set:
   - `images/NEET_2024_T3/`: NEET 2024 question images
-  - `images/NEET_2025_45/`: NEET 2025 question images
+  - `images/NEET_2025_45/`: NEET 2025 question images
   - `images/JEE_ADVANCE_2024/`: JEE Advanced 2024 question images
 
 * **`src/`**: Python source code for the benchmark system:
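The filtering options documented in the README hunk above map directly onto the fields of `data/metadata.jsonl`. The snippet below is a minimal, hypothetical sketch of that filtering logic; the field names and flag values come from the README, but the function itself is illustrative and is not the repository's actual loader.

```python
# Illustrative sketch only: applies --exam_name / --exam_year / --question_ids style
# filters to data/metadata.jsonl records. Field names come from the README; the
# function is hypothetical, not the repository's loader.
import json
from typing import List, Optional


def filter_metadata(
    metadata_path: str,
    exam_name: str = "all",
    exam_year: str = "all",
    question_ids: Optional[List[str]] = None,
) -> List[dict]:
    """Return metadata records matching the requested exam/year/question-id filters."""
    selected = []
    with open(metadata_path, encoding="utf-8") as f:
        for line in f:
            record = json.loads(line)
            if exam_name != "all" and record.get("exam_name") != exam_name:
                continue
            if exam_year != "all" and str(record.get("exam_year")) != str(exam_year):
                continue
            if question_ids and record.get("question_id") not in question_ids:
                continue
            selected.append(record)
    return selected


# Example: the JEE Advanced 2024 subset evaluated in this commit (102 of 482 questions).
# records = filter_metadata("data/metadata.jsonl", exam_name="JEE_ADVANCED", exam_year="2024")
```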
data/metadata.jsonl
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:43efc7048a18bde98412fa3b7527e423dfc39699fd736c566c68ee482e0a6a17
+size 106457
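`data/metadata.jsonl` is tracked with Git LFS, so the diff above only shows the updated pointer. For orientation, a single record plausibly looks like the following; the field names come from the README, while the image filename and any further fields (answer key, question type, subject, etc.) are assumptions.

```python
# Hypothetical metadata.jsonl record, assembled from the field list in the README.
# The image filename is assumed; real records likely carry additional fields.
example_record = {
    "image_path": "images/NEET_2024_T3/N24T3001.png",  # assumed path/filename
    "question_id": "N24T3001",
    "exam_name": "NEET",
    "exam_year": 2024,
    "exam_code": "T3",
}
```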
results/google_gemini-2.5-pro-preview-03-25_JEE_ADVANCED_2024_20250527_100743/predictions.jsonl
ADDED
The diff for this file is too large to render.
results/google_gemini-2.5-pro-preview-03-25_JEE_ADVANCED_2024_20250527_100743/summary.json
ADDED
@@ -0,0 +1,43 @@
+{
+  "model_name": "google/gemini-2.5-pro-preview-03-25",
+  "exam_name": "JEE_ADVANCED",
+  "exam_year": "2024",
+  "question_ids_filter": "None",
+  "timestamp": "20250527_100743",
+  "total_questions_in_dataset": 482,
+  "total_questions_processed_in_run": 102,
+  "overall_score": 322,
+  "overall_correct_full": 91,
+  "overall_partial_correct": 0,
+  "overall_incorrect_choice": 7,
+  "overall_skipped": 2,
+  "overall_api_parse_failures": 2,
+  "total_questions_processed": 102,
+  "unmapped_section_questions": 0,
+  "section_breakdown": {
+    "Chemistry": {
+      "score": 102,
+      "correct": 29,
+      "incorrect": 4,
+      "skipped": 1,
+      "api_parse_failures": 0,
+      "partial_correct": 0
+    },
+    "Math": {
+      "score": 113,
+      "correct": 32,
+      "incorrect": 1,
+      "skipped": 1,
+      "api_parse_failures": 0,
+      "partial_correct": 0
+    },
+    "Physics": {
+      "score": 107,
+      "correct": 31,
+      "incorrect": 3,
+      "skipped": 0,
+      "api_parse_failures": 0,
+      "partial_correct": 0
+    }
+  }
+}
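The same figures can be pulled out programmatically. Below is a small standalone helper (not part of the repository's `src/` code) that derives per-section accuracy on attempted questions from the fields shown above.

```python
# Standalone helper (not part of src/): turn section_breakdown in summary.json
# into per-section accuracy figures on attempted questions.
import json

summary_path = (
    "results/google_gemini-2.5-pro-preview-03-25_JEE_ADVANCED_2024_20250527_100743/summary.json"
)
with open(summary_path, encoding="utf-8") as f:
    summary = json.load(f)

print(f"Overall score: {summary['overall_score']} "
      f"on {summary['total_questions_processed_in_run']} questions")
for section, stats in summary["section_breakdown"].items():
    attempted = stats["correct"] + stats["partial_correct"] + stats["incorrect"]
    accuracy = stats["correct"] / attempted if attempted else 0.0
    print(f"{section}: score={stats['score']}, "
          f"accuracy on attempted={accuracy:.1%}, skipped={stats['skipped']}")
```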
results/google_gemini-2.5-pro-preview-03-25_JEE_ADVANCED_2024_20250527_100743/summary.md
ADDED
@@ -0,0 +1,24 @@
+# Benchmark Results: google/gemini-2.5-pro-preview-03-25
+**Exam Name:** JEE_ADVANCED
+**Exam Year:** 2024
+**Timestamp:** 20250527_100743
+**Total Questions in Dataset:** 482
+**Questions Filtered Out:** 380
+**Total Questions Processed in this Run:** 102
+
+---
+
+## Exam Scoring Results
+**Overall Score:** **322** (Max score is 360)
+- **Fully Correct Answers:** 92
+- **Incorrectly Answered (Choice Made):** 8
+- **Skipped Questions:** 2
+- **API/Parse Failures:** 0
+- **Total Questions Processed:** 102
+
+### Section Breakdown
+| Section | Score | Fully Correct | Partially Correct | Incorrect Choice | Skipped | API/Parse Failures |
+|---------------|-------|---------------|-------------------|------------------|---------|--------------------|
+| Chemistry | 102 | 29 | 0 | 4 | 1 | 0 |
+| Math | 113 | 32 | 0 | 1 | 1 | 0 |
+| Physics | 107 | 31 | 0 | 3 | 0 | 0 |
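As a consistency check on the table above, the per-section scores sum to the reported overall score: 102 + 113 + 107 = 322.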
src/benchmark_runner.py
CHANGED
@@ -123,7 +123,14 @@ def generate_markdown_summary(summary: Dict[str, Any], filepath: str):
     exam_name = summary.get("exam_name", "N/A")
     exam_year = summary.get("exam_year", "N/A")
     timestamp = summary.get("timestamp", "N/A")
-    total_questions_in_dataset = summary.get("total_questions_in_dataset", 0)
+    total_questions_in_dataset = summary.get("total_questions_in_dataset", 0)
+    total_questions_processed_in_run = summary.get("total_questions_processed_in_run", 0)
+    # total_api_cost = summary.get("total_api_cost", 0.0) # Removed
+
+    filtered_questions_count = 0
+    if total_questions_in_dataset > 0 and total_questions_processed_in_run > 0:
+        filtered_questions_count = total_questions_in_dataset - total_questions_processed_in_run
+
 
     md_content.append(f"# Benchmark Results: {model_name}")
     if exam_name and exam_name not in ["N/A", "All_Exams"]: # Only display if a specific exam was targeted
@@ -132,6 +139,10 @@ def generate_markdown_summary(summary: Dict[str, Any], filepath: str):
     md_content.append(f"**Exam Year:** {exam_year}")
     md_content.append(f"**Timestamp:** {timestamp}")
     md_content.append(f"**Total Questions in Dataset:** {total_questions_in_dataset if total_questions_in_dataset > 0 else 'N/A'}")
+    if filtered_questions_count > 0:
+        md_content.append(f"**Questions Filtered Out:** {filtered_questions_count}")
+    md_content.append(f"**Total Questions Processed in this Run:** {total_questions_processed_in_run}")
+    # md_content.append(f"**Estimated Total API Cost:** ${total_api_cost:.6f}") # Removed
     md_content.append("\n---\n")
 
     # Check if NEET results are present (or any dataset with overall_score and section_breakdown)
@@ -344,13 +355,16 @@ def run_benchmark(
             "parse_successful": False,
             "api_call_successful": False,
             "error": None,
-            "attempt": 1
+            "attempt": 1,
+            # "api_cost": None, # Removed
+            "previous_raw_response_on_reprompt": None # For task 1
         }
 
         try:
             # --- Initial API Call ---
+            logging.info(f"Attempting API call for question: {question_id} with model: {model_id}")
             # Pass exam_name_from_data and question_type_from_data to get_openrouter_prediction
-            parsed_answer, raw_response = get_openrouter_prediction(
+            parsed_answer, raw_response = get_openrouter_prediction( # No longer expect api_cost
                 model_identifier=model_id,
                 api_key=api_key,
                 image=image,
@@ -364,12 +378,15 @@ def run_benchmark(
             api_success_attempt1 = True # If no exception, API call itself was successful
             parse_success_attempt1 = parsed_answer is not None
             raw_response_attempt1 = raw_response
+            # result_data["api_cost"] = api_cost # Removed
 
             # --- Re-prompt Logic ---
             if api_success_attempt1 and not parse_success_attempt1 and raw_response_attempt1 is not None:
                 logging.warning(f"Question {question_id}: Initial parse failed. Attempting re-prompt.")
+                result_data["previous_raw_response_on_reprompt"] = raw_response_attempt1 # Store previous response
                 try:
-
+                    # Assuming re-prompt might also have a cost
+                    parsed_answer_rp, raw_response_rp = get_openrouter_prediction( # No longer expect api_cost
                         model_identifier=model_id,
                         api_key=api_key,
                         previous_raw_response=raw_response_attempt1,
@@ -387,9 +404,12 @@ def run_benchmark(
                         "predicted_answer": processed_answer_rp,
                         "raw_response": raw_response_rp,
                         "parse_successful": processed_answer_rp is not None,
-                        "api_call_successful": True,
+                        "api_call_successful": True,
                         "attempt": 2
+                        # Assuming api_cost_rp would be added to existing api_cost or handled separately
                     })
+                    # if api_cost_rp is not None: # Add re-prompt cost if available # Removed
+                    # result_data["api_cost"] = (result_data.get("api_cost") or 0.0) + api_cost_rp # Removed
                     logging.info(f"Question {question_id}: Re-prompt {'succeeded' if result_data['parse_successful'] else 'failed to parse'}.")
                 except Exception as e_rp:
                     logging.error(f"Re-prompt API call failed for question {question_id}: {e_rp}")
@@ -523,11 +543,14 @@ def run_benchmark(
             "parse_successful": False,
             "api_call_successful": False,
             "error": "Initial API call failed.", # Pre-fill error
-            "attempt": 2
+            "attempt": 2,
+            # "api_cost": None, # Removed
+            "previous_raw_response_on_reprompt_after_api_retry": None # For task 1
         }
 
         try:
-
+            logging.info(f"Attempting API call for question: {question_id_retry} (API Retry Pass) with model: {model_id}")
+            parsed_answer_retry, raw_response_retry = get_openrouter_prediction( # No longer expect api_cost
                 model_identifier=model_id,
                 api_key=api_key,
                 image=image_retry,
@@ -540,11 +563,14 @@ def run_benchmark(
             api_success_attempt2 = True
             parse_success_attempt2 = parsed_answer_retry is not None
             raw_response_attempt2 = raw_response_retry
+            # result_data_retry["api_cost"] = api_cost_retry # Removed
 
             if api_success_attempt2 and not parse_success_attempt2 and raw_response_attempt2 is not None:
                 logging.warning(f"Question {question_id_retry}: API Retry succeeded, but parse failed. Attempting re-prompt.")
+                result_data_retry["previous_raw_response_on_reprompt_after_api_retry"] = raw_response_attempt2 # Store previous response
                 try:
-
+                    # Assuming re-prompt might also have a cost
+                    parsed_answer_rp2, raw_response_rp2 = get_openrouter_prediction( # No longer expect api_cost
                         model_identifier=model_id,
                         api_key=api_key,
                         previous_raw_response=raw_response_attempt2,
@@ -564,6 +590,8 @@ def run_benchmark(
                         "error": None if processed_answer_rp2 is not None else "Re-prompt after API retry failed to parse.",
                         "attempt": 3
                     })
+                    # if api_cost_rp2 is not None: # Add re-prompt cost if available # Removed
+                    # result_data_retry["api_cost"] = (result_data_retry.get("api_cost") or 0.0) + api_cost_rp2 # Removed
                     logging.info(f"Question {question_id_retry}: API Retry + Re-prompt {'succeeded' if result_data_retry['parse_successful'] else 'failed to parse'}.")
                 except Exception as e_rp2:
                     logging.error(f"Re-prompt API call failed for question {question_id_retry} after API retry: {e_rp2}")
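For readers skimming the diff, the per-question control flow these hunks touch is roughly the following. This is a condensed, illustrative sketch, not the actual `run_benchmark` body: `get_openrouter_prediction` is assumed to accept the keyword arguments visible in the diff and to return a `(parsed_answer, raw_response)` pair, and the real code passes additional exam and question-type context plus a later API-retry pass.

```python
# Condensed sketch of the attempt-1 / re-prompt flow shown in the diff above.
# Not the repository's actual implementation; argument lists are simplified.
import logging


def evaluate_question(model_id, api_key, image, question_id, get_openrouter_prediction):
    result_data = {
        "predicted_answer": None,
        "raw_response": None,
        "parse_successful": False,
        "api_call_successful": False,
        "error": None,
        "attempt": 1,
        "previous_raw_response_on_reprompt": None,
    }
    try:
        logging.info(f"Attempting API call for question: {question_id} with model: {model_id}")
        parsed_answer, raw_response = get_openrouter_prediction(
            model_identifier=model_id, api_key=api_key, image=image,
        )
        result_data.update({
            "api_call_successful": True,
            "parse_successful": parsed_answer is not None,
            "predicted_answer": parsed_answer,
            "raw_response": raw_response,
        })
        # Re-prompt once if the call succeeded but no answer could be parsed.
        if not result_data["parse_successful"] and raw_response is not None:
            result_data["previous_raw_response_on_reprompt"] = raw_response
            parsed_rp, raw_rp = get_openrouter_prediction(
                model_identifier=model_id, api_key=api_key,
                previous_raw_response=raw_response,  # other kwargs elided in the diff
            )
            result_data.update({
                "predicted_answer": parsed_rp,
                "raw_response": raw_rp,
                "parse_successful": parsed_rp is not None,
                "attempt": 2,
            })
    except Exception as exc:
        result_data["error"] = str(exc)
    return result_data
```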
src/evaluation.py
CHANGED
@@ -223,6 +223,7 @@ def calculate_exam_scores(results: List[Dict[str, Any]]) -> Dict[str, Any]:
         return {"error": "No results provided."}
 
     overall_stats = {"score": 0, "correct": 0, "incorrect": 0, "skipped": 0, "api_parse_failures": 0, "partial_correct": 0}
+    # total_api_cost = 0.0 # Removed
 
     valid_subjects_from_data = [r.get("subject") for r in results if r.get("subject") and isinstance(r.get("subject"), str) and r.get("subject").strip()]
     if not valid_subjects_from_data and results:
@@ -251,6 +252,13 @@ def calculate_exam_scores(results: List[Dict[str, Any]]) -> Dict[str, Any]:
         result['evaluation_status'] = evaluation_status
         result['marks_awarded'] = current_score_change
 
+        # Accumulate API cost # Removed
+        # current_api_cost = result.get("api_cost") # Removed
+        # if isinstance(current_api_cost, (int, float)): # Removed
+        #     total_api_cost += current_api_cost # Removed
+        # elif current_api_cost is not None: # Removed
+        #     logging.warning(f"Invalid api_cost type for QID {question_id}: {current_api_cost} (type: {type(current_api_cost)}). Skipping cost accumulation for this item.") # Removed
+
         # Determine boolean flags based on evaluation_status for aggregation
         is_correct_full = evaluation_status in ["correct", "correct_full"]
         is_partial_correct = evaluation_status.startswith("partial_")
@@ -301,6 +309,7 @@ def calculate_exam_scores(results: List[Dict[str, Any]]) -> Dict[str, Any]:
         "overall_skipped": overall_stats["skipped"],
         "overall_api_parse_failures": overall_stats["api_parse_failures"],
         "total_questions_processed": len(results),
+        # "total_api_cost": total_api_cost, # Removed
         "unmapped_section_questions": unmapped_section_questions,
         "section_breakdown": section_stats
     }
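To make the aggregation these hunks touch easier to follow, here is a simplified, self-contained sketch of how a `calculate_exam_scores`-style roll-up works: each result carries an `evaluation_status` and `marks_awarded`, which are summed into overall and per-section counters. Only `correct`, `correct_full`, and the `partial_` prefix appear verbatim in the diff; the other status strings below are assumptions, and the real function additionally tracks unmapped sections and applies exam-specific scoring rules before this step.

```python
# Simplified sketch of the score roll-up in calculate_exam_scores. The "skipped" and
# "api_parse_failure" status strings are assumed, not taken from the source.
from collections import defaultdict
from typing import Any, Dict, List


def _new_stats() -> Dict[str, int]:
    return {"score": 0, "correct": 0, "incorrect": 0, "skipped": 0,
            "api_parse_failures": 0, "partial_correct": 0}


def aggregate_scores(results: List[Dict[str, Any]]) -> Dict[str, Any]:
    overall = _new_stats()
    sections: Dict[str, Dict[str, int]] = defaultdict(_new_stats)
    for result in results:
        status = result.get("evaluation_status", "")
        marks = result.get("marks_awarded", 0)
        subject = result.get("subject") or "Unknown"
        if status in ("correct", "correct_full"):
            key = "correct"
        elif status.startswith("partial_"):
            key = "partial_correct"
        elif status == "skipped":            # assumed status name
            key = "skipped"
        elif status == "api_parse_failure":  # assumed status name
            key = "api_parse_failures"
        else:
            key = "incorrect"
        for bucket in (overall, sections[subject]):
            bucket[key] += 1
            bucket["score"] += marks
    return {
        "overall_score": overall["score"],
        "overall_correct_full": overall["correct"],
        "overall_partial_correct": overall["partial_correct"],
        "overall_incorrect_choice": overall["incorrect"],
        "overall_skipped": overall["skipped"],
        "overall_api_parse_failures": overall["api_parse_failures"],
        "total_questions_processed": len(results),
        "section_breakdown": dict(sections),
    }
```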