slz1 committed
Commit fcdd8d2 · verified · 1 Parent(s): 60d2674

Upload evaluate.py

Files changed (1)
  1. evaluate.py +141 -0
evaluate.py ADDED
@@ -0,0 +1,141 @@
import os
# os.environ['CUDA_VISIBLE_DEVICES'] = '3'

import pathlib
from CoT.task import CoT_Task
from ToT.task import ToT_Task
from MCTS.task import MCTS_Task
import argparse
from utils.visualize import visualize
from utils.json_operator import *
from utils.verify_answer import *
from utils.self_consistency import get_consistency_output_scibench
import copy


def run(arguments):
    print('-' * 30, 'Begin testing', '-' * 30, '\n')
    # file = f'data/{arguments.task_name}/{arguments.file}.json'
    # file = "/cpfs/29f69eb5e2e60f26/code/sft_intern/lh/slz/ReST-MCTS/data/math/math_500.json"
    file = arguments.load_file_path
    print('** file_path: ', file)
    try:
        data_list = load_file(file)
        data_len = len(data_list)
    except Exception as e:
        print(f'File must be standardized JSON!\nError type: {e}\n')
        return
    assert data_len > 0, "Data list is empty!\n"
    # assert 'content' in data_list[0].keys() and 'answer' in data_list[0].keys(), "Key error, make sure the json objects contain the correct keys!\n"

    output_list = []
    correct_count = 0
    for i in range(data_len):
        # for i in range(0, 1):
        print(f'Begin to solve the problem {i+1}...\n')
        # data = data_list[i]['question']
        # answer = data_list[i]['answer'][0]
        data = data_list[i]['question']
        answer = data_list[i]['solution']
        if arguments.mode == 'cot':
            Task = CoT_Task(data, arguments.propose_method, arguments.value_method, arguments.temperature, evaluate=arguments.evaluate)
            if arguments.consistency:
                outputs = []
                for cnt in range(3):
                    output = Task.run()
                    outputs.append(output)
                output = get_consistency_output_scibench(outputs)
            else:
                output = Task.run()

        elif arguments.mode == 'tot':
            Task = ToT_Task(data, arguments.propose_method, arguments.value_method, arguments.algorithm,
                            arguments.branch, arguments.select_branch, arguments.max_depth, arguments.end_gate,
                            arguments.select_method, arguments.temperature, use_case_prompt=arguments.use_case_prompt,
                            low=arguments.low, high=arguments.high, evaluate=arguments.evaluate)
            output, root = Task.run()
            if arguments.visualize:
                visualize(root, Task, arguments.task_name, arguments.file, i + 1)
        else:
            Task = MCTS_Task(data, arguments.propose_method, arguments.value_method, arguments.branch, arguments.end_gate,
                             arguments.roll_policy, arguments.roll_branch, arguments.roll_forward_steps, arguments.time_limit,
                             arguments.iteration_limit, arguments.exploration_constant, arguments.alpha, arguments.inf,
                             arguments.temperature, use_case_prompt=arguments.use_case_prompt, use_reflection=arguments.use_reflection,
                             low=arguments.low, high=arguments.high, evaluate=arguments.evaluate, answer=answer, lang='en')
            output, root = Task.run()

            if arguments.visualize:
                visualize(root, Task, arguments.task_name, arguments.file, i + 1)

        # evaluate metrics
        if arguments.evaluate:
            print('** output: ', output)
            result = verify_float(answer, output['summary'])
            output.update({'answer': answer, 'accurate': result})
            if result:
                print(f'The answer to problem {i+1} is correct.\n')
                correct_count += 1
            else:
                print(f'The answer to problem {i+1} is wrong.\n')
        print(f'The solution to problem {i+1} is complete.\n')

        # output
        base_dir = os.getcwd()
        output_dir = pathlib.Path(f'{base_dir}/outputs/{arguments.task_name}/{arguments.file}/{Task.mode}')
        output_file = f'{output_dir}/{Task.propose_method}_{Task.value_method}_{arguments.save_name}.json'

        data_item = copy.deepcopy(data_list[i])  # deep copy so the saved record does not alias the original data item
        data_item['mcts_output'] = output

        output_list.append(data_item)
        output_dir.mkdir(exist_ok=True, parents=True)
        dump_json(output_file, output_list)
        print('** output_file: ', output_file)

    print('_' * 60)
    # accuracy
    if arguments.evaluate:
        print(f'Test accuracy: {correct_count / data_len}\n')
        print(f'Correct number of problems: {correct_count}\nTotal number of questions: {data_len}\n')
    print('_' * 60)


def parse_args():
    base_args = argparse.ArgumentParser()
    base_args.add_argument('--load_file_path', type=str, default='scibench')
    base_args.add_argument('--task_name', type=str, default='scibench')
    base_args.add_argument('--file', type=str, default='thermo_standardized')  # json
    base_args.add_argument('--save_name', type=str, default='test')  # json
    base_args.add_argument('--propose_method', type=str, choices=['gpt', 'glm', 'llama', 'local'], default='glm')
    base_args.add_argument('--value_method', type=str, choices=['gpt', 'glm', 'local'], default='local')
    base_args.add_argument('--mode', type=str, choices=['cot', 'tot', 'mcts'], default='tot')
    base_args.add_argument('--temperature', type=float, default=0.7)
    base_args.add_argument('--time_limit', type=int, default=None)
    base_args.add_argument('--iteration_limit', type=int, default=100)
    base_args.add_argument('--roll_policy', type=str, choices=['random', 'greedy'], default='greedy')
    base_args.add_argument('--exploration_constant', type=float, default=0.4)
    base_args.add_argument('--roll_forward_steps', type=int, default=2)
    base_args.add_argument('--end_gate', type=float, default=0.9)  # end threshold
    base_args.add_argument('--branch', type=int, default=3)
    base_args.add_argument('--roll_branch', type=int, default=1)
    base_args.add_argument('--inf', type=float, default=0.8)
    base_args.add_argument('--evaluate', type=str, default='scibench')  # whether to evaluate (empty means no evaluation)
    base_args.add_argument('--alpha', type=float, default=0.5)
    # note: argparse's type=bool treats any non-empty string (even 'False') as True
    base_args.add_argument('--visualize', type=bool, default=False)  # visualization
    base_args.add_argument('--use_case_prompt', type=bool, default=False)  # use sample prompts
    base_args.add_argument('--use_reflection', type=str, choices=['simple', 'common'], default='simple')  # use reflective mode
    base_args.add_argument('--low', type=float, default=0)
    base_args.add_argument('--high', type=float, default=1)
    base_args.add_argument('--algorithm', type=str, choices=['dfs', 'bfs'], default='dfs')
    base_args.add_argument('--select_branch', type=int, default=2)
    base_args.add_argument('--max_depth', type=int, default=8)
    base_args.add_argument('--select_method', type=str, choices=['greedy', 'sample'], default='greedy')
    base_args.add_argument('--consistency', type=bool, default=True)

    arguments = base_args.parse_args()
    return arguments


if __name__ == '__main__':
    args = parse_args()
    run(args)
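
For reference, one possible way to invoke the uploaded script, assuming the data layout implied by the commented-out path pattern data/{task_name}/{file}.json; the exact data file and model backends below are illustrative assumptions, not part of the commit:

python evaluate.py --load_file_path data/scibench/thermo_standardized.json --task_name scibench --file thermo_standardized --mode mcts --propose_method local --value_method local --evaluate scibench --save_name test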