Update README.md
Browse files
README.md
CHANGED
@@ -53,7 +53,7 @@ Llama 3 8 billion model was finetuned using **unsloth** package on a **cleaned B
|
|
53 |
|
54 |
from unsloth import FastLanguageModel
|
55 |
model, tokenizer = FastLanguageModel.from_pretrained(
|
56 |
-
model_name = "KillerShoaib/llama-3-8b-bangla-4bit",
|
57 |
max_seq_length = 2048,
|
58 |
dtype = None,
|
59 |
load_in_4bit = True,
|
@@ -93,7 +93,7 @@ tokenizer.batch_decode(outputs)
|
|
93 |
from peft import AutoPeftModelForCausalLM
|
94 |
from transformers import AutoTokenizer
|
95 |
model = AutoPeftModelForCausalLM.from_pretrained(
|
96 |
-
"KillerShoaib/llama-3-8b-bangla-4bit",
|
97 |
load_in_4bit = True,
|
98 |
)
|
99 |
tokenizer = AutoTokenizer.from_pretrained("KillerShoaib/llama3-8b-4bit-bangla")
|
|
|
53 |
|
54 |
from unsloth import FastLanguageModel
|
55 |
model, tokenizer = FastLanguageModel.from_pretrained(
|
56 |
+
model_name = "KillerShoaib/llama-3-8b-bangla-lora",
|
57 |
max_seq_length = 2048,
|
58 |
dtype = None,
|
59 |
load_in_4bit = True,
|
|
|
93 |
from peft import AutoPeftModelForCausalLM
|
94 |
from transformers import AutoTokenizer
|
95 |
model = AutoPeftModelForCausalLM.from_pretrained(
|
96 |
+
"KillerShoaib/llama-3-8b-bangla-lora",
|
97 |
load_in_4bit = True,
|
98 |
)
|
99 |
tokenizer = AutoTokenizer.from_pretrained("KillerShoaib/llama3-8b-4bit-bangla")
|