hangzou committed on
Commit
0ad0765
·
verified ·
1 Parent(s): cda8ce5

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +5 -5
README.md CHANGED
@@ -50,7 +50,7 @@ In case you want to perform inference on the BitNet checkpoint run:
50
  import torch
51
  from transformers import AutoModelForCausalLM, AutoTokenizer
52
 
53
- model_id = "tiiuae/Falcon-E-1B-Base"
54
 
55
  model = AutoModelForCausalLM.from_pretrained(
56
  model_id,
@@ -66,7 +66,7 @@ If you want to rather use the classic `bfloat16` version, you can run:
66
  import torch
67
  from transformers import AutoModelForCausalLM, AutoTokenizer
68
 
69
- model_id = "tiiuae/Falcon-E-1B-Base"
70
  revision = "bfloat16"
71
 
72
  model = AutoModelForCausalLM.from_pretrained(
@@ -84,8 +84,8 @@ model = AutoModelForCausalLM.from_pretrained(
84
  ```
85
  git clone https://github.com/microsoft/BitNet && cd BitNet
86
  pip install -r requirements.txt
87
- python setup_env.py --hf-repo tiiuae/Falcon-E-1B-Base -q i2_s
88
- python run_inference.py -m models/Falcon-E-1B-Base/ggml-model-i2_s.gguf -p "You are a helpful assistant" -cnv
89
  ```
90
 
91
  ### Fine-tuning
@@ -99,7 +99,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
99
  from trl import SFTTrainer
100
  + from onebitllms import replace_linear_with_bitnet_linear, quantize_to_1bit
101
 
102
- model_id = "tiiuae/Falcon-E-1B-Base"
103
 
104
  tokenizer = AutoTokenizer.from_pretrained(model_id, revision="prequantized")
105
  model = AutoModelForCausalLM.from_pretrained(
 
50
  import torch
51
  from transformers import AutoModelForCausalLM, AutoTokenizer
52
 
53
+ model_id = "tiiuae/Falcon-E-3B-Instruct"
54
 
55
  model = AutoModelForCausalLM.from_pretrained(
56
  model_id,
 
66
  import torch
67
  from transformers import AutoModelForCausalLM, AutoTokenizer
68
 
69
+ model_id = "tiiuae/Falcon-E-3B-Instruct"
70
  revision = "bfloat16"
71
 
72
  model = AutoModelForCausalLM.from_pretrained(
 
84
  ```
85
  git clone https://github.com/microsoft/BitNet && cd BitNet
86
  pip install -r requirements.txt
87
+ python setup_env.py --hf-repo tiiuae/Falcon-E-3B-Instruct -q i2_s
88
+ python run_inference.py -m models/Falcon-E-3B-Instruct/ggml-model-i2_s.gguf -p "You are a helpful assistant" -cnv
89
  ```
90
 
91
  ### Fine-tuning
 
99
  from trl import SFTTrainer
100
  + from onebitllms import replace_linear_with_bitnet_linear, quantize_to_1bit
101
 
102
+ model_id = "tiiuae/Falcon-E-3B-Instruct"
103
 
104
  tokenizer = AutoTokenizer.from_pretrained(model_id, revision="prequantized")
105
  model = AutoModelForCausalLM.from_pretrained(