mtasic85 committed
Commit c65045f · 1 Parent(s): b0c036d

pretrain core 2

Files changed (3):
  1. README.md +43 -0
  2. config-2.json +29 -0
  3. scripts/pretrain_core_model_2.yaml +149 -0
README.md CHANGED
@@ -236,4 +236,47 @@ Epoch 1 | iter 1024 step 8 | loss train: 3.287, val: 3.457 | iter time: 396.93 m
  Epoch 1 | iter 1152 step 9 | loss train: 3.236, val: 3.457 | iter time: 398.67 ms (step) remaining time: 8:06:49
  Epoch 1 | iter 1280 step 10 | loss train: 3.274, val: 3.457 | iter time: 399.49 ms (step) remaining time: 8:05:09
  # ...
+ Epoch 1 | iter 76928 step 601 | loss train: 3.177, val: 3.304 | iter time: 400.61 ms (step) remaining time: 0:03:35
+ Epoch 1 | iter 77056 step 602 | loss train: 3.191, val: 3.304 | iter time: 396.14 ms (step) remaining time: 0:02:46
+ Epoch 1 | iter 77184 step 603 | loss train: 3.173, val: 3.304 | iter time: 399.39 ms (step) remaining time: 0:01:58
+ Epoch 1 | iter 77312 step 604 | loss train: 3.211, val: 3.304 | iter time: 398.61 ms (step) remaining time: 0:01:09
+ Epoch 1 | iter 77440 step 605 | loss train: 3.203, val: 3.304 | iter time: 399.31 ms (step) remaining time: 0:00:21
+ Validating ...
+ Final evaluation | val loss: 3.304 | val ppl: 27.217
+ Saving checkpoint to '../out/pretrain-core-1/final/lit_model.pth'
+ ----------------------------------------
+ | Performance
+ | - Total tokens : 634,855,424
+ | - Training Time : 29361.39 s
+ | - Tok/sec : 524.18 tok/s
+ | ----------------------------------------
+ | Memory Usage
+ | - Memory Used : 22.33 GB
+ ----------------------------------------
+ ```
+
+ Backup `wandb`:
+
+ ```bash
+ mv wandb wandb-pretrain-core-1
+ ```
+
+ Copy config:
+
+ ```bash
+ cp ../config-1.json ../out/pretrain-core-1/final/config.json
+ ```
+
+ Chat with model:
+
+ ```bash
+ CUDA_VISIBLE_DEVICES=0 CUDA_LAUNCH_BLOCKING=0 PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True litgpt chat ../out/pretrain-core-1/final
+ ```
+
+ ```bash
+ litgpt convert_pretrained_checkpoint ../out/pretrain-core-1/final ../out/pretrain-core-1/checkpoint
+ ```
+
+ ```bash
+ CUDA_VISIBLE_DEVICES=0 CUDA_LAUNCH_BLOCKING=0 PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True litgpt pretrain --config pretrain_core_model_2.yaml
  ```
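
The last three `bash` blocks in this diff chain the stages together: `litgpt chat` smoke-tests the stage-1 model, `litgpt convert_pretrained_checkpoint` writes `../out/pretrain-core-1/checkpoint`, and that directory is what `scripts/pretrain_core_model_2.yaml` (added below) picks up via `initial_checkpoint_dir`.

A quick cross-check of the final evaluation above, as a minimal sketch: the reported validation perplexity should simply be the exponential of the validation loss, so 27.217 ≈ exp(3.304) up to rounding of the printed loss.

```python
import math

# Final-evaluation numbers from the log above.
val_loss = 3.304

# Perplexity = exp(mean cross-entropy loss).
print(f"val ppl ~ exp({val_loss}) = {math.exp(val_loss):.3f}")  # 27.221
```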
config-2.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "eos_token_id": 1,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 512,
+   "initializer_range": 0.02,
+   "intermediate_size": 2048,
+   "max_position_embeddings": 131072,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 8,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 27000.0,
+   "tie_word_embeddings": true,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.45.0.dev0",
+   "use_cache": true,
+   "vocab_size": 131072
+ }
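
A rough size check for this config (my own arithmetic, not a figure from the repo), assuming the standard `transformers` Llama layout with no biases and the LM head tied to the embeddings:

```python
import json

# Hypothetical local path; point it at wherever config-2.json lives.
with open("config-2.json") as f:
    cfg = json.load(f)

h = cfg["hidden_size"]              # 512
layers = cfg["num_hidden_layers"]   # 32
heads = cfg["num_attention_heads"]  # 8
kv_heads = cfg["num_key_value_heads"]
head_dim = cfg["head_dim"]          # 128 (note: heads * head_dim != hidden_size here)
inter = cfg["intermediate_size"]    # 2048
vocab = cfg["vocab_size"]           # 131072

# Per-layer parameters (attention_bias and mlp_bias are both false).
attn = h * heads * head_dim * 2 + h * kv_heads * head_dim * 2  # q/o and k/v projections
mlp = 3 * h * inter                                            # gate, up, down
norms = 2 * h                                                  # two RMSNorm weight vectors
per_layer = attn + mlp + norms

# Embedding matrix is shared with the LM head (tie_word_embeddings: true);
# the trailing + h is the final RMSNorm.
total = layers * per_layer + vocab * h + h
print(f"~{total / 1e6:.0f}M parameters")  # roughly 235M
```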
scripts/pretrain_core_model_2.yaml ADDED
@@ -0,0 +1,149 @@
+ # The name of the model to pretrain. Choose from names in ``litgpt.config``. Mutually exclusive with
+ # ``model_config``. (type: Optional[str], default: null)
+ model_name: 'tangled-alpha-0.9-core'
+
+ # A ``litgpt.Config`` object to define the model architecture. Mutually exclusive with
+ # ``model_name``. (type: Optional[Config], default: null)
+ model_config:
+   name: 'tangled-alpha-0.9-core'
+   block_size: 131072
+   vocab_size: 131072
+   padded_vocab_size: 131072
+   n_layer: 32
+   n_head: 8
+   n_embd: 512
+   n_query_groups: 8
+   rotary_percentage: 1.0
+   parallel_residual: False
+   bias: False
+   norm_class_name: "RMSNorm"
+   mlp_class_name: "LLaMAMLP"
+   intermediate_size: 2048 # n_embd * 4
+   norm_eps: 1e-5
+   rope_base: 27000 # https://arxiv.org/pdf/2405.14591
+   head_size: 128 # set explicitly (n_embd / n_head would give 64)
+
+ # Directory in which to save checkpoints and logs. If running in a Lightning Studio Job, look for it in
+ # /teamspace/jobs/<job-name>/share. (type: <class 'Path'>, default: out/pretrain)
+ out_dir: "../out/pretrain-core-1/"
+
+ # The precision to use for pretraining. Possible choices: "bf16-true", "bf16-mixed", "32-true". (type: Optional[str], default: null)
+ # precision: bf16-mixed
+ precision: bf16-true
+
+ # Optional path to a checkpoint directory to initialize the model from.
+ # Useful for continued pretraining. Mutually exclusive with ``resume``. (type: Optional[Path], default: null)
+ initial_checkpoint_dir: "../out/pretrain-core-1/checkpoint"
+
+ # Path to a checkpoint directory to resume from in case training was interrupted, or ``True`` to resume
+ # from the latest checkpoint in ``out_dir``. An error will be raised if no checkpoint is found. Passing
+ # ``'auto'`` will resume from the latest checkpoint but not error if no checkpoint exists.
+ # (type: Union[bool, Literal["auto"], Path], default: False)
+ resume:
+
+ # Data-related arguments. If not provided, the default is ``litgpt.data.TinyLlama``.
+ data:
+   class_path: LitData
+
+   init_args:
+     data_path: "../core-data-2-2049-4097-4097-4000/"
+     num_workers: 32
+
+ # Training-related arguments. See ``litgpt.args.TrainArgs`` for details
+ train:
+   # Number of optimizer steps between saving checkpoints (type: Optional[int], default: 1000)
+   save_interval: 50
+
+   # Number of iterations between logging calls (type: int, default: 1)
+   log_interval: 1
+
+   # Number of samples between optimizer steps across data-parallel ranks (type: int, default: 512)
+   global_batch_size: 512
+
+   # Number of samples per data-parallel rank (type: int, default: 4)
+   micro_batch_size: 2
+
+   # Number of iterations with learning rate warmup active (type: int, default: 2000)
+   lr_warmup_steps: 0
+
+   # Number of epochs to train on (type: Optional[int], default: null)
+   epochs:
+
+   # Total number of tokens to train on (type: Optional[int], default: 3000000000000)
+   max_tokens: 466414771
+
+   # Limits the number of optimizer steps to run. (type: Optional[int], default: null)
+   max_steps:
+
+   # Limits the length of samples. Off by default (type: Optional[int], default: null)
+   max_seq_length: 4096
+
+   # Whether to tie the embedding weights with the language modeling head weights. (type: Optional[bool], default: False)
+   tie_embeddings: true
+
+   # (type: Optional[float], default: 1.0)
+   max_norm: 1.0
+
+   # (type: float, default: 4e-05)
+   min_lr: 5e-5
+
+ # Evaluation-related arguments. See ``litgpt.args.EvalArgs`` for details
+ eval:
+   # Number of optimizer steps between evaluation calls (type: int, default: 1000)
+   interval: 50
+
+   # Number of tokens to generate (type: Optional[int], default: null)
+   max_new_tokens:
+
+   # Number of iterations (type: int, default: 100)
+   max_iters: 100
+
+   # Whether to evaluate on the validation set at the beginning of the training
+   initial_validation: true
+
+   # Whether to evaluate on the validation set at the end of the training
+   final_validation: true
+
+ # Optimizer-related arguments
+
+ # optimizer:
+ #   class_path: torch.optim.AdamW
+ #   # class_path: torchao.prototype.low_bit_optim.AdamW8bit
+ #   # class_path: torchao.prototype.low_bit_optim.AdamW4bit
+ #   # class_path: bitsandbytes.optim.AdamW8bit
+ #   # class_path: bitsandbytes.optim.PagedAdamW8bit
+ #   init_args:
+ #     # (type: float, default: 0.001)
+ #     lr: 3e-4
+ #     # (type: float, default: 0.01)
+ #     weight_decay: 0.01
+ #     # (type: tuple, default: (0.9,0.999))
+ #     betas:
+ #       - 0.9
+ #       - 0.999
+
+ optimizer:
+   class_path: sophia_opt.SophiaG
+   init_args:
+     lr: 1e-4
+     betas:
+       - 0.9
+       - 0.95
+     rho: 0.05
+     weight_decay: 0.1
+
+ # How many devices/GPUs to use. Uses all GPUs by default. (type: Union[int, str], default: auto)
+ devices: auto
+
+ # How many nodes to use. (type: int, default: 1)
+ num_nodes: 1
+
+ # Optional path to the tokenizer dir that was used for preprocessing the dataset. Only some data
+ # modules require this. (type: Optional[Path], default: null)
+ tokenizer_dir: "../tokenizer"
+
+ # The name of the logger to send metrics to. (type: Literal['wandb', 'tensorboard', 'csv'], default: tensorboard)
+ logger_name: "wandb"
+
+ # The random seed to use for reproducibility. (type: int, default: 42)
+ seed: 23
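
With these values, the implied training schedule for stage 2 can be estimated. The sketch below is my own arithmetic under stated assumptions (fully packed 4096-token samples, a single device even though the config uses `devices: auto`), not output from litgpt, whose own accounting may differ slightly.

```python
# Back-of-the-envelope schedule from the YAML values above.
max_tokens = 466_414_771
global_batch_size = 512
micro_batch_size = 2
max_seq_length = 4096
devices = 1  # assumption; the config leaves this as `auto`

tokens_per_step = global_batch_size * max_seq_length            # 2,097,152 tokens per optimizer step
grad_accum = global_batch_size // (micro_batch_size * devices)  # 256 micro-batches per step

steps = max_tokens / tokens_per_step
iters = steps * grad_accum
print(f"~{steps:.0f} optimizer steps, ~{iters:.0f} iterations")  # ~222 steps, ~56935 iterations
```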