kas1 committed on
Commit
a5a84c2
·
1 Parent(s): 52bc6ae

Remove quantization_config entirely to avoid bitsandbytes dependency

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -10,7 +10,7 @@ from transformers import BitsAndBytesConfig
10
  original_model = AutoModelForCausalLM.from_pretrained(
11
  "unsloth/DeepSeek-R1-Distill-Llama-8B-unsloth-bnb-4bit",
12
  load_in_4bit=False, # Disable 4-bit quantization
13
- quantization_config=BitsAndBytesConfig() # Use an empty quantization config
14
  )
15
  original_tokenizer = AutoTokenizer.from_pretrained("unsloth/DeepSeek-R1-Distill-Llama-8B-unsloth-bnb-4bit")
16
 
@@ -18,7 +18,7 @@ original_tokenizer = AutoTokenizer.from_pretrained("unsloth/DeepSeek-R1-Distill-
18
  fine_tuned_model = AutoModelForCausalLM.from_pretrained(
19
  "kas1/DeepSeek-R1-Distill-Llama-8B-unsloth-bnb-4bit-John1",
20
  load_in_4bit=False, # Disable 4-bit quantization
21
- quantization_config=BitsAndBytesConfig() # Use an empty quantization config
22
  )
23
  fine_tuned_tokenizer = AutoTokenizer.from_pretrained("kas1/DeepSeek-R1-Distill-Llama-8B-unsloth-bnb-4bit-John1")
24
 
 
10
  original_model = AutoModelForCausalLM.from_pretrained(
11
  "unsloth/DeepSeek-R1-Distill-Llama-8B-unsloth-bnb-4bit",
12
  load_in_4bit=False, # Disable 4-bit quantization
13
+ # Remove quantization_config entirely
14
  )
15
  original_tokenizer = AutoTokenizer.from_pretrained("unsloth/DeepSeek-R1-Distill-Llama-8B-unsloth-bnb-4bit")
16
 
 
18
  fine_tuned_model = AutoModelForCausalLM.from_pretrained(
19
  "kas1/DeepSeek-R1-Distill-Llama-8B-unsloth-bnb-4bit-John1",
20
  load_in_4bit=False, # Disable 4-bit quantization
21
+ # Remove quantization_config entirely
22
  )
23
  fine_tuned_tokenizer = AutoTokenizer.from_pretrained("kas1/DeepSeek-R1-Distill-Llama-8B-unsloth-bnb-4bit-John1")
24