Update README.md
README.md
@@ -50,7 +50,7 @@ In case you want to perform inference on the BitNet checkpoint run:
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-model_id = "tiiuae/Falcon-E-
+model_id = "tiiuae/Falcon-E-3B-Instruct"
 
 model = AutoModelForCausalLM.from_pretrained(
     model_id,
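For context, here is the snippet this hunk lands in, reconstructed as a complete, runnable sketch. Everything past `model_id` in the `from_pretrained` call is truncated in the diff, so the `torch_dtype` argument and the generation code below are illustrative assumptions, not part of the change:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "tiiuae/Falcon-E-3B-Instruct"

# Load the BitNet checkpoint from the repo's default revision.
# torch_dtype is an assumption; the diff truncates the call after model_id.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Illustrative usage: generate a short completion.
inputs = tokenizer("Explain 1-bit LLMs in one sentence.", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```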
@@ -66,7 +66,7 @@ If you would rather use the classic `bfloat16` version, you can run:
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-model_id = "tiiuae/Falcon-E-
+model_id = "tiiuae/Falcon-E-3B-Instruct"
 revision = "bfloat16"
 
 model = AutoModelForCausalLM.from_pretrained(
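Similarly for the `bfloat16` path: the hunk shows the `revision` variable being defined but truncates the call that consumes it, so passing `revision=revision` to both loaders below is an assumption about how it is wired up:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "tiiuae/Falcon-E-3B-Instruct"
revision = "bfloat16"  # selects the unquantized bfloat16 branch of the repo

# Assumption: revision is forwarded to from_pretrained; the diff truncates
# the call after the opening parenthesis.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    revision=revision,
    torch_dtype=torch.bfloat16,
)
tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision)
```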
@@ -84,8 +84,8 @@ model = AutoModelForCausalLM.from_pretrained(
 ```
 git clone https://github.com/microsoft/BitNet && cd BitNet
 pip install -r requirements.txt
-python setup_env.py --hf-repo tiiuae/Falcon-E-
-python run_inference.py -m models/Falcon-E-
+python setup_env.py --hf-repo tiiuae/Falcon-E-3B-Instruct -q i2_s
+python run_inference.py -m models/Falcon-E-3B-Instruct/ggml-model-i2_s.gguf -p "You are a helpful assistant" -cnv
 ```
 
 ### Fine-tuning
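A hedged reading of the updated bitnet.cpp commands, based only on the flags visible in the diff: `setup_env.py --hf-repo tiiuae/Falcon-E-3B-Instruct -q i2_s` fetches the checkpoint and prepares it in the `i2_s` GGUF quantization layout under `models/`, and `run_inference.py -m models/Falcon-E-3B-Instruct/ggml-model-i2_s.gguf -p "You are a helpful assistant" -cnv` runs that GGUF file in conversational mode (`-cnv`), with `-p` supplying the system prompt.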
@@ -99,7 +99,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 from trl import SFTTrainer
+from onebitllms import replace_linear_with_bitnet_linear, quantize_to_1bit
 
-model_id = "tiiuae/Falcon-E-
+model_id = "tiiuae/Falcon-E-3B-Instruct"
 
 tokenizer = AutoTokenizer.from_pretrained(model_id, revision="prequantized")
 model = AutoModelForCausalLM.from_pretrained(
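Putting the fine-tuning hunk together, a minimal sketch assuming the `onebitllms` API shown in the new import; the dataset, trainer settings, and the layer-swap call below are illustrative, not part of the diff:

```python
import torch
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import SFTTrainer
from onebitllms import replace_linear_with_bitnet_linear, quantize_to_1bit

model_id = "tiiuae/Falcon-E-3B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_id, revision="prequantized")
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    revision="prequantized",     # assumption: fine-tune from the prequantized branch
    torch_dtype=torch.bfloat16,  # assumption: the diff truncates this call
)

# Swap nn.Linear layers for BitNet linear layers before training
# (assumed to return the patched model).
model = replace_linear_with_bitnet_linear(model)

trainer = SFTTrainer(
    model=model,
    train_dataset=load_dataset("trl-lib/Capybara", split="train"),  # illustrative dataset
)
trainer.train()

# quantize_to_1bit (imported above) would then produce the final 1-bit
# checkpoint from the trained weights; its exact signature is not shown
# in the diff, so it is not called here.
```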