Update configs.py
configs.py CHANGED (+2 -2)
@@ -20,8 +20,8 @@ model_info = {
     'LLAMA2-7B': dict(model_path='meta-llama/Llama-2-7b-chat-hf', token=os.environ['hf_token'],
                       original_prompt_template='<s>{prompt}',
                       interpretation_prompt_template='<s>[INST] [X] [/INST] {prompt}',
-                      load_in_8bit=True,
-                      dont_cuda=True,
+                      # load_in_8bit=True,
+                      # dont_cuda=True,
                       layers_format=llama_layers_format),
     'LLAMA2-13B': dict(model_path='meta-llama/Llama-2-13b-chat-hf',
                        token=os.environ['hf_token'], torch_dtype=torch.float16,
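For context, here is a minimal sketch of how an entry like `'LLAMA2-7B'` might be consumed and what the change affects. The loader below is an assumption, not this repo's actual code: the split between Hugging Face kwargs and repo-only keys, and the `load_model` helper itself, are illustrative. Only `token`, `torch_dtype`, and `load_in_8bit` are real `from_pretrained` arguments; `dont_cuda` appears to be a repo-specific flag.

```python
import torch
from transformers import AutoModelForCausalLM

def load_model(name, model_info):
    """Hypothetical loader: splits a model_info entry into
    from_pretrained kwargs vs. repo-only keys."""
    cfg = dict(model_info[name])
    path = cfg.pop('model_path')
    # Prompting/introspection keys that from_pretrained would reject:
    for key in ('original_prompt_template',
                'interpretation_prompt_template',
                'layers_format'):
        cfg.pop(key, None)
    dont_cuda = cfg.pop('dont_cuda', False)  # assumed repo flag, not an HF kwarg
    # Remaining keys (token, torch_dtype, load_in_8bit, ...) are
    # genuine transformers from_pretrained kwargs.
    model = AutoModelForCausalLM.from_pretrained(path, **cfg)
    # With load_in_8bit commented out in the diff, the model is loaded
    # unquantized, so an explicit move to the GPU is valid again:
    if not dont_cuda and torch.cuda.is_available():
        model = model.cuda()
    return model
```

Note that a model loaded with `load_in_8bit=True` is already dispatched to the GPU by bitsandbytes/accelerate, and transformers rejects a subsequent `.cuda()` call on it; that is likely why `dont_cuda=True` accompanied the 8-bit flag and why both are commented out together here.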