finetune first pass
https://wandb.ai/wing-lian/huggingface/runs/vuhppjj5/overview
- axolotl.yml +45 -0
- config.json +22 -0
- generation_config.json +7 -0
- pytorch_model.bin +3 -0
axolotl.yml
ADDED
@@ -0,0 +1,45 @@
base_model: anon8231489123/vicuna-13b-GPTQ-4bit-128g
base_model_config: anon8231489123/vicuna-13b-GPTQ-4bit-128g
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer
load_in_8bit: false
load_4bit: true
gptq_groupsize: 128
gptq_model_v1: false
datasets:
  - path: data/alpaca_reflect_pruned.jsonl
    type: reflection
dataset_prepared_path: data/last_run_prepared
val_set_size: 0.04
adapter:
lora_model_dir:
sequence_len: 2048
max_packed_sequence_len: 2048
lora_r: 8
lora_alpha: 16
lora_dropout: 0.05
lora_target_modules:
  - q_proj
  - v_proj
  # - k_proj
  # - o_proj
lora_fan_in_fan_out: false
wandb_project:
wandb_watch:
wandb_run_id:
wandb_log_model: checkpoint
output_dir: ./vicuna-reflect
batch_size: 8
micro_batch_size: 2
num_epochs: 3
learning_rate: 0.00003
train_on_inputs: false
group_by_length: false
bf16: true
tf32: true
gradient_checkpointing: false
early_stopping_patience: 3
resume_from_checkpoint:
local_rank:
flash_attention: true
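The lora_* block above maps directly onto a peft LoraConfig. As a minimal sketch (assuming the peft library; axolotl builds its own objects from the YAML, so this is illustrative, not the trainer's actual code):

# Sketch: the lora_* settings from axolotl.yml as a peft LoraConfig.
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,                                   # lora_r
    lora_alpha=16,                         # lora_alpha
    lora_dropout=0.05,                     # lora_dropout
    target_modules=["q_proj", "v_proj"],   # k_proj/o_proj stay commented out
    fan_in_fan_out=False,                  # lora_fan_in_fan_out
    bias="none",                           # assumed default, not in the YAML
    task_type="CAUSAL_LM",                 # assumed for a causal-LM finetune
)

Note that batch_size: 8 with micro_batch_size: 2 implies four gradient-accumulation steps per optimizer update (assuming axolotl's usual batch_size = micro_batch_size x accumulation convention), and that adapter: is left blank in this first pass even though the lora_* settings are filled in.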
config.json
ADDED
@@ -0,0 +1,22 @@
{
  "architectures": [
    "LlamaForCausalLM"
  ],
  "bos_token_id": 1,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 5120,
  "initializer_range": 0.02,
  "intermediate_size": 13824,
  "max_position_embeddings": 2048,
  "model_type": "llama",
  "num_attention_heads": 40,
  "num_hidden_layers": 40,
  "pad_token_id": 0,
  "rms_norm_eps": 1e-06,
  "tie_word_embeddings": false,
  "torch_dtype": "float32",
  "transformers_version": "4.29.0.dev0",
  "use_cache": false,
  "vocab_size": 32001
}
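These shape fields (hidden_size 5120, 40 layers, 40 heads, intermediate_size 13824) are the standard 13B LLaMA dimensions; vocab_size is 32001 rather than the stock 32000, most likely because the Vicuna conversion added an extra pad token. A quick sanity check over the file, as a sketch (plain json, no assumptions beyond the fields shown above):

# Sketch: confirm config.json describes a 13B-class LLaMA checkpoint.
import json

with open("config.json") as f:
    cfg = json.load(f)

assert cfg["model_type"] == "llama"
assert cfg["hidden_size"] == 5120 and cfg["num_hidden_layers"] == 40
# 32001 = 32000 base vocab + 1 extra token, likely the added pad token.
print(cfg["torch_dtype"], cfg["vocab_size"])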
generation_config.json
ADDED
@@ -0,0 +1,7 @@
{
  "_from_model_config": true,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "pad_token_id": 0,
  "transformers_version": "4.29.0.dev0"
}
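transformers picks this file up automatically at generation time; as a sketch, it can also be loaded directly (GenerationConfig exists by the 4.29.0.dev0 version pinned above; "." is assumed to be the directory holding the JSON):

# Sketch: load generation_config.json via transformers.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained(".")
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id, gen_cfg.pad_token_id)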
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9b7430b4355cf755ffdd3f454106970a8365ba24fec9e5c333c667d9f361ba4b
size 7916030660
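Only the git-lfs pointer is committed here; the ~7.9 GB weight blob itself lives in LFS storage (a size plausible for 13B weights in 4-bit GPTQ form, though that is an inference from the config above, not something the pointer records). A pointer file is just three key/value lines, so it parses trivially, as in this sketch:

# Sketch: parse a git-lfs pointer file into its three fields.
def parse_lfs_pointer(path):
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = parse_lfs_pointer("pytorch_model.bin")
print(ptr["oid"], int(ptr["size"]))  # sha256:9b74..., 7916030660 bytes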