# config.json
{
  "_name_or_path": "EleutherAI/gpt-j-6B",
  "architectures": ["GPTJForCausalLM"],
  "model_type": "gptj",
  "torch_dtype": "float16",
  "quantization_config": {
    "load_in_4bit": true,
    "bnb_4bit_quant_type": "nf4",
    "bnb_4bit_compute_dtype": "float16",
    "bnb_4bit_use_double_quant": true
  }
}
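
For reference, here is a minimal sketch of how this quantization_config maps onto a BitsAndBytesConfig when loading the model with transformers. It assumes transformers (with bitsandbytes support) is installed and a CUDA-capable GPU is available; the filename is hypothetical.

# load_gptj_4bit.py (hypothetical filename)
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Mirror the quantization_config block from config.json:
# 4-bit NF4 weights, float16 compute dtype, double quantization enabled.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_use_double_quant=True,
)

model = AutoModelForCausalLM.from_pretrained(
    "EleutherAI/gpt-j-6B",
    quantization_config=bnb_config,
    device_map="auto",  # place layers automatically on available devices
)
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")

Here "nf4" selects the NormalFloat4 data type, and double quantization additionally quantizes the quantization constants themselves, shaving a small amount of memory per parameter; compute still happens in float16, matching the torch_dtype above.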