ryomac committed
Commit f47a934 · verified · 1 parent: 8bfc8f5

Update README.md

Files changed (1): README.md (+14 −7)
README.md CHANGED

@@ -51,25 +51,32 @@ from trl import SFTTrainer
 # Obtain your own Hugging Face token
 HF_TOKEN = "your-token"
 
-model_name = "ryomac/llm-jp-3-13b-ry-ft1"
+model_id = "llm-jp/llm-jp-3-13b"
+adapter_id = "ryomac/llm-jp-3-13b-ry-ft1"
 
-# QLoRA config
+
+# QLoRA settings
 bnb_config = BitsAndBytesConfig(
     load_in_4bit=True,
     bnb_4bit_quant_type="nf4",
     bnb_4bit_compute_dtype=torch.bfloat16,
 )
 
-# Load model
+
+# Load the model
 model = AutoModelForCausalLM.from_pretrained(
-    model_name,
+    model_id,
     quantization_config=bnb_config,
     device_map="auto",
-    token = HF_TOKEN
+    token=HF_TOKEN
 )
 
-# Load tokenizer
-tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True, token = HF_TOKEN)
+tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, token=HF_TOKEN)
+
+# Apply the PEFT adapter
+model = PeftModel.from_pretrained(model, adapter_id, token=HF_TOKEN)
+
+
 # Load the dataset
 datasets = []
 with open("./elyza-tasks-100-TV_0.jsonl", "r") as f:
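
For readers who want to run the updated loading code on its own, here is a minimal, self-contained sketch. The import lines are an assumption: the hunk header only shows that `from trl import SFTTrainer` appears nearby, so the README's actual import block may differ; everything below the imports follows the post-commit snippet.

```python
# Minimal sketch of the updated loading path.
# Assumption: the imports below are not shown in this hunk and are inferred
# from the names used in the snippet (torch, transformers, peft).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

HF_TOKEN = "your-token"  # your own Hugging Face access token

model_id = "llm-jp/llm-jp-3-13b"           # base model
adapter_id = "ryomac/llm-jp-3-13b-ry-ft1"  # fine-tuned LoRA adapter

# 4-bit (QLoRA-style) quantization settings
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# Load the quantized base model and its tokenizer
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
    token=HF_TOKEN,
)
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, token=HF_TOKEN)

# Attach the fine-tuned adapter on top of the quantized base model
model = PeftModel.from_pretrained(model, adapter_id, token=HF_TOKEN)
```

The net effect of the commit: instead of loading ryomac/llm-jp-3-13b-ry-ft1 directly as a standalone model, the README now loads the llm-jp/llm-jp-3-13b base in 4-bit and layers the fine-tuned adapter on top via PeftModel.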