mtasic85 committed
Commit 18f918f · Parent(s): 8116748

pretrain 1
README.md CHANGED
@@ -210,3 +210,7 @@ CUDA_VISIBLE_DEVICES=0 CUDA_LAUNCH_BLOCKING=0 PYTORCH_CUDA_ALLOC_CONF=expandable
  | - leaderboard_musr_object_placements | 1|none | 0|acc_norm |↑ |0.2344|± |0.0265|
  | - leaderboard_musr_team_allocation | 1|none | 0|acc_norm |↑ |0.3200|± |0.0296|
  ```
+
+ ```bash
+ litgpt convert_pretrained_checkpoint ../out/pretrain-core-0/final ../out/pretrain-core-0/checkpoint
+ ```
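
The converted checkpoint produced by this command is what the new stage-1 config consumes through `initial_checkpoint_dir` (see `scripts/pretrain_core_model_1.yaml` below). A minimal sanity-check sketch, assuming litgpt's usual layout where the converted directory holds the weights as a plain state dict in `lit_model.pth` (adjust the filename if your layout differs):

```python
# Sketch: peek at the converted stage-0 checkpoint before continued pretraining.
# Assumption: the converted directory contains a plain state dict saved as
# lit_model.pth (litgpt's usual layout).
import torch

state = torch.load(
    "../out/pretrain-core-0/checkpoint/lit_model.pth",
    map_location="cpu",
    weights_only=True,
)
tensors = {k: v for k, v in state.items() if torch.is_tensor(v)}
total = sum(t.numel() for t in tensors.values())
print(f"{len(tensors)} tensors, {total / 1e6:.1f}M parameters")
for name, t in list(tensors.items())[:5]:
    print(name, tuple(t.shape), t.dtype)
```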
config-1.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "eos_token_id": 1,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 512,
+   "initializer_range": 0.02,
+   "intermediate_size": 2048,
+   "max_position_embeddings": 131072,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 8,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 16000.0,
+   "tie_word_embeddings": true,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.45.0.dev0",
+   "use_cache": true,
+   "vocab_size": 131072
+ }
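
This Hugging Face-style config mirrors the litgpt `model_config` used for pretraining: hidden size 512, 32 layers, 8 heads with an explicit head dim of 128, tied embeddings, and a 131072-token vocabulary. A rough back-of-the-envelope parameter count derived only from these fields, assuming the standard Llama block layout (gated SiLU MLP, RMSNorm, tied embeddings):

```python
# Sketch: approximate parameter count implied by config-1.json,
# assuming the standard Llama block layout and tied embeddings.
import json

with open("config-1.json") as f:
    cfg = json.load(f)

h, layers = cfg["hidden_size"], cfg["num_hidden_layers"]
heads, kv_heads, hd = cfg["num_attention_heads"], cfg["num_key_value_heads"], cfg["head_dim"]
inter, vocab = cfg["intermediate_size"], cfg["vocab_size"]

embed = vocab * h                                               # shared with the LM head (tied)
attn = h * heads * hd + 2 * h * kv_heads * hd + heads * hd * h  # q, k, v, o projections
mlp = 3 * h * inter                                             # gate, up, down
norms = 2 * h                                                   # two RMSNorms per block
total = embed + layers * (attn + mlp + norms) + h               # + final norm

print(f"~{total / 1e6:.0f}M parameters")  # ~235M with these values
```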
scripts/pretrain_core_model_0.yaml CHANGED
@@ -61,7 +61,6 @@ train:
  global_batch_size: 512

  # Number of samples per data-parallel rank (type: int, default: 4)
- # micro_batch_size: 2
  micro_batch_size: 8

  # Number of iterations with learning rate warmup active (type: int, default: 2000)
@@ -77,7 +76,6 @@ train:
  max_steps:

  # Limits the length of samples. Off by default (type: Optional[int], default: null)
- # max_seq_length: 4096
  max_seq_length: 1024

  # Whether to tie the embedding weights with the language modeling head weights. (type: Optional[bool], default: False)
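
For reference, the stage-0 batch settings above interact through gradient accumulation: litgpt derives it (roughly) as `global_batch_size / (micro_batch_size × devices)`. A small sketch of that arithmetic, assuming a single-node run and using the values from this config:

```python
# Sketch: how the stage-0 batch settings relate. litgpt (roughly) derives
# gradient accumulation as global_batch_size / (micro_batch_size * devices);
# single-node run assumed, values copied from pretrain_core_model_0.yaml.
global_batch_size = 512
micro_batch_size = 8
max_seq_length = 1024

for devices in (1, 2, 4, 8):
    accum = global_batch_size // (micro_batch_size * devices)
    tokens_per_step = global_batch_size * max_seq_length
    print(f"{devices} GPU(s): {accum:3d} accumulation steps, "
          f"{tokens_per_step:,} tokens per optimizer step")
```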
scripts/pretrain_core_model_1.yaml ADDED
@@ -0,0 +1,149 @@
+ # The name of the model to pretrain. Choose from names in ``litgpt.config``. Mutually exclusive with
+ # ``model_config``. (type: Optional[str], default: null)
+ model_name: 'tangled-alpha-0.9-core'
+
+ # A ``litgpt.Config`` object to define the model architecture. Mutually exclusive with
+ # ``model_name``. (type: Optional[Config], default: null)
+ model_config:
+   name: 'tangled-alpha-0.9-core'
+   block_size: 131072
+   vocab_size: 131072
+   padded_vocab_size: 131072
+   n_layer: 32
+   n_head: 8
+   n_embd: 512
+   n_query_groups: 8
+   rotary_percentage: 1.0
+   parallel_residual: False
+   bias: False
+   norm_class_name: "RMSNorm"
+   mlp_class_name: "LLaMAMLP"
+   intermediate_size: 2048 # n_embd * 4
+   norm_eps: 1e-5
+   rope_base: 16000 # https://arxiv.org/pdf/2405.14591
+   head_size: 128 # set explicitly; n_embd / n_head would give 64
+
+ # Directory in which to save checkpoints and logs. If running in a Lightning Studio Job, look for it in
+ # /teamspace/jobs/<job-name>/share. (type: <class 'Path'>, default: out/pretrain)
+ out_dir: "../out/pretrain-core-1/"
+
+ # The precision to use for pretraining. Possible choices: "bf16-true", "bf16-mixed", "32-true". (type: Optional[str], default: null)
+ # precision: bf16-mixed
+ precision: bf16-true
+
+ # Optional path to a checkpoint directory to initialize the model from.
+ # Useful for continued pretraining. Mutually exclusive with ``resume``. (type: Optional[Path], default: null)
+ initial_checkpoint_dir: "../out/pretrain-core-0/checkpoint"
+
+ # Path to a checkpoint directory to resume from in case training was interrupted, or ``True`` to resume
+ # from the latest checkpoint in ``out_dir``. An error will be raised if no checkpoint is found. Passing
+ # ``'auto'`` will resume from the latest checkpoint but not error if no checkpoint exists.
+ # (type: Union[bool, Literal["auto"], Path], default: False)
+ resume: "auto"
+
+ # Data-related arguments. If not provided, the default is ``litgpt.data.TinyLlama``.
+ data:
+   class_path: LitData
+
+   init_args:
+     data_path: "../core-data-1-1025-2049-2049-8000/"
+     num_workers: 32
+
+ # Training-related arguments. See ``litgpt.args.TrainArgs`` for details
+ train:
+   # Number of optimizer steps between saving checkpoints (type: Optional[int], default: 1000)
+   save_interval: 50
+
+   # Number of iterations between logging calls (type: int, default: 1)
+   log_interval: 1
+
+   # Number of samples between optimizer steps across data-parallel ranks (type: int, default: 512)
+   global_batch_size: 512
+
+   # Number of samples per data-parallel rank (type: int, default: 4)
+   micro_batch_size: 4
+
+   # Number of iterations with learning rate warmup active (type: int, default: 2000)
+   lr_warmup_steps: 0
+
+   # Number of epochs to train on (type: Optional[int], default: null)
+   epochs:
+
+   # Total number of tokens to train on (type: Optional[int], default: 3000000000000)
+   max_tokens: 634858062
+
+   # Limits the number of optimizer steps to run. (type: Optional[int], default: null)
+   max_steps:
+
+   # Limits the length of samples. Off by default (type: Optional[int], default: null)
+   max_seq_length: 2048
+
+   # Whether to tie the embedding weights with the language modeling head weights. (type: Optional[bool], default: False)
+   tie_embeddings: true
+
+   # (type: Optional[float], default: 1.0)
+   max_norm: 1.0
+
+   # (type: float, default: 4e-05)
+   min_lr: 3e-6
+
+ # Evaluation-related arguments. See ``litgpt.args.EvalArgs`` for details
+ eval:
+   # Number of optimizer steps between evaluation calls (type: int, default: 1000)
+   interval: 50
+
+   # Number of tokens to generate (type: Optional[int], default: null)
+   max_new_tokens:
+
+   # Number of iterations (type: int, default: 100)
+   max_iters: 100
+
+   # Whether to evaluate on the validation set at the beginning of the training
+   initial_validation: true
+
+   # Whether to evaluate on the validation set at the end of the training
+   final_validation: true
+
+ # Optimizer-related arguments
+
+ # optimizer:
+ #   class_path: torch.optim.AdamW
+ #   # class_path: torchao.prototype.low_bit_optim.AdamW8bit
+ #   # class_path: torchao.prototype.low_bit_optim.AdamW4bit
+ #   # class_path: bitsandbytes.optim.AdamW8bit
+ #   # class_path: bitsandbytes.optim.PagedAdamW8bit
+ #   init_args:
+ #     # (type: float, default: 0.001)
+ #     lr: 3e-4
+ #     # (type: float, default: 0.01)
+ #     weight_decay: 0.01
+ #     # (type: tuple, default: (0.9,0.999))
+ #     betas:
+ #       - 0.9
+ #       - 0.999
+
+ optimizer:
+   class_path: sophia_opt.SophiaG
+   init_args:
+     lr: 3e-5
+     betas:
+       - 0.9
+       - 0.95
+     rho: 0.05
+     weight_decay: 0.1
+
+ # How many devices/GPUs to use. Uses all GPUs by default. (type: Union[int, str], default: auto)
+ devices: auto
+
+ # How many nodes to use. (type: int, default: 1)
+ num_nodes: 1
+
+ # Optional path to the tokenizer dir that was used for preprocessing the dataset. Only some data
+ # modules require this. (type: Optional[Path], default: null)
+ tokenizer_dir: "../tokenizer"
+
+ # The name of the logger to send metrics to. (type: Literal['wandb', 'tensorboard', 'csv'], default: tensorboard)
+ logger_name: "wandb"
+
+ # The random seed to use for reproducibility. (type: int, default: 42)
+ seed: 23
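
Taken together, this stage-1 config starts from the converted stage-0 checkpoint, switches the optimizer from the commented-out AdamW block to `sophia_opt.SophiaG` at a lower learning rate with no warmup, and trains on a fixed token budget: each optimizer step consumes `global_batch_size × max_seq_length` tokens, and `max_tokens` caps the run at a few hundred steps, which also bounds how many checkpoints and evaluations the 50-step intervals can trigger. A quick sketch of that arithmetic (values copied from the YAML above; a rough estimate, not litgpt's exact step accounting):

```python
# Sketch: what the stage-1 token budget works out to.
# Values copied from pretrain_core_model_1.yaml; rough estimate only.
global_batch_size = 512
max_seq_length = 2048
max_tokens = 634_858_062
save_interval = eval_interval = 50

tokens_per_step = global_batch_size * max_seq_length   # 1,048,576 tokens
steps = max_tokens / tokens_per_step                    # ~605 optimizer steps
print(f"{tokens_per_step:,} tokens per optimizer step")
print(f"~{steps:.0f} optimizer steps to reach max_tokens")
print(f"~{int(steps // save_interval)} checkpoints/evals at interval {save_interval}")
```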