{
  "_name_or_path": "EleutherAI/gpt-j-6B",
  "architectures": ["GPTJForCausalLM"],
  "model_type": "gptj",
  "torch_dtype": "float16",
  "quantization_config": {
    "load_in_4bit": true,
    "bnb_4bit_quant_type": "nf4",
    "bnb_4bit_compute_dtype": "float16",
    "bnb_4bit_use_double_quant": true
  }
}