Update errors/custom_prefix_token_id.txt
errors/custom_prefix_token_id.txt
CHANGED
@@ -0,0 +1,51 @@
+AttributeError                            Traceback (most recent call last)
+<ipython-input-15-cf6a974cddb7> in <cell line: 124>()
+    330 print(f'Running evaluation on {eval_tasks} with {num_fewshot}-shot examples')
+    331 
+--> 332 results = adapter.run_eval(
+    333     eval_tasks=eval_tasks,
+    334     num_fewshot=num_fewshot,
+
+/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py in decorate_context(*args, **kwargs)
+    114     def decorate_context(*args, **kwargs):
+    115         with ctx_factory():
+--> 116             return func(*args, **kwargs)
+    117 
+    118     return decorate_context
+
+<ipython-input-15-cf6a974cddb7> in run_eval(self, eval_tasks, num_fewshot, limit, bootstrap_iters, fewshot_random_seed)
+    312             task_obj.set_config(key="num_fewshot", value=num_fewshot)
+    313 
+--> 314         results = evaluator.evaluate(
+    315             lm=self,
+    316             task_dict=task_dict,
+
+/usr/local/lib/python3.10/dist-packages/lm_eval/utils.py in _wrapper(*args, **kwargs)
+    393                 "lm-evaluation-harness!"
+    394             )
+--> 395         return fn(*args, **kwargs)
+    396 
+    397     return _wrapper
+
+/usr/local/lib/python3.10/dist-packages/lm_eval/evaluator.py in evaluate(lm, task_dict, limit, cache_requests, rewrite_requests_cache, bootstrap_iters, write_out, log_samples, system_instruction, apply_chat_template, fewshot_as_multiturn, verbosity)
+    447 
+    448     # run requests through model
+--> 449     resps = getattr(lm, reqtype)(cloned_reqs)
+    450 
+    451     # put responses from model into a list of length K for each request.
+
+/usr/local/lib/python3.10/dist-packages/lm_eval/api/model.py in loglikelihood(self, requests, disable_tqdm)
+    361                 # BOS or EOS as context
+    362                 context_enc, continuation_enc = (
+--> 363                     [self.prefix_token_id],
+    364                     self.tok_encode(continuation),
+    365                 )
+
+/usr/local/lib/python3.10/dist-packages/lm_eval/models/huggingface.py in prefix_token_id(self)
+    360     def prefix_token_id(self):
+    361         # it is used as prefix for loglikelihood
+--> 362         if self.custom_prefix_token_id is not None:
+    363             return self.custom_prefix_token_id
+    364         if self.tokenizer.bos_token_id is not None:
+
+AttributeError: 'EvalHarnessAdapter' object has no attribute 'custom_prefix_token_id'
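
Note: the failing attribute is the one read by the prefix_token_id property in lm_eval/models/huggingface.py, which the notebook's custom EvalHarnessAdapter inherits (or copies) without ever defining custom_prefix_token_id. A minimal workaround sketch, assuming `adapter`, `eval_tasks`, and `num_fewshot` are the notebook variables shown in the traceback and that no custom prefix token is wanted:

# Hypothetical workaround, not part of the committed log: define the attribute
# that the prefix_token_id property reads before re-running the evaluation.
# With None, the property falls through to tokenizer.bos_token_id /
# eos_token_id, as shown in the lm_eval/models/huggingface.py frame above.
adapter.custom_prefix_token_id = None

results = adapter.run_eval(
    eval_tasks=eval_tasks,
    num_fewshot=num_fewshot,
)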