Safetensors · English · gpt2

codebyzeb committed (verified)
Commit 6acac6c · 1 Parent(s): 8c3eac9

Training in progress, step 20000

babyslm/lexical.txt CHANGED
The diff for this file is too large to render. See raw diff
 
babyslm/syntactic.txt CHANGED
The diff for this file is too large to render. See raw diff
 
config.json CHANGED
@@ -3,9 +3,9 @@
   "architectures": [
     "GPT2LMHeadModel"
   ],
- "attn_pdrop": 0.5,
+ "attn_pdrop": 0.3,
  "bos_token_id": 3,
- "embd_pdrop": 0.5,
+ "embd_pdrop": 0.3,
  "eos_token_id": 3,
  "initializer_range": 0.02,
  "layer_norm_epsilon": 1e-05,
@@ -13,10 +13,10 @@
  "n_embd": 128,
  "n_head": 4,
  "n_inner": 512,
- "n_layer": 2,
+ "n_layer": 3,
  "n_positions": 256,
  "reorder_and_upcast_attn": false,
- "resid_pdrop": 0.5,
+ "resid_pdrop": 0.3,
  "scale_attn_by_inverse_layer_idx": false,
  "scale_attn_weights": true,
  "summary_activation": null,
hydra_config_1736991679.9411452.yaml ADDED
@@ -0,0 +1,45 @@
+ experiment:
+   seed: 42
+   name: gpt2_600k-full-03
+   group: childes-size-english
+   dry_run: false
+   offline_run: false
+   evaluate_segmentation: true
+   evaluate_babyslm: true
+   blimp_tasks: null
+   resume_checkpoint_path: null
+   resume_run_id: null
+ dataset:
+   name: phonemetransformers/CHILDES
+   subconfig: English
+   text_column: phonemized_utterance
+   is_phonemes: true
+   max_age: 120
+   remove_child_utterances: true
+ tokenizer:
+   name: phonemetransformers/CHILDES-English-phoneme-tokenizer
+ data_preprocessing:
+   max_input_length: 128
+   join_utts: static
+   remove_word_boundaries: true
+   subsample: null
+   subsample_type: tokens
+ model:
+   name: gpt2_lm
+   model_kwargs:
+     n_layer: 3
+     n_head: 4
+     n_embd: 128
+     n_positions: 256
+     n_inner: 512
+     resid_pdrop: 0.3
+     embd_pdrop: 0.3
+     attn_pdrop: 0.3
+ trainer:
+   batch_size: 32
+   lr: 0.001
+   num_warmup_steps: 60000
+   max_training_steps: 200000
+   logging_steps: 2000
+   save_steps: 20000
+   eval_steps: 20000
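Hydra configs are plain YAML, so the run settings above can be inspected outside the training pipeline. A minimal sketch, assuming OmegaConf (the config library Hydra builds on); the filename is the one added in this commit:

from omegaconf import OmegaConf

cfg = OmegaConf.load("hydra_config_1736991679.9411452.yaml")

# model_kwargs mirrors the config.json change above: a 3-layer,
# 128-dim GPT-2 with dropout 0.3 throughout.
print(cfg.model.model_kwargs.n_layer)  # 3

# save_steps: 20000 is why this checkpoint lands at "step 20000".
print(cfg.trainer.save_steps)          # 20000

# Plain dict, e.g. to pass into GPT2Config(**kwargs):
kwargs = OmegaConf.to_container(cfg.model.model_kwargs)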
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d9164369d9a7fdea8127b129d3c6109d951f164b2f49ceccb07cfe521bcff59b
- size 1745032
+ oid sha256:690bd1ecb5896e5b64a2032c72960b6745f5b53cc14cfe3e334207fd01c00e4c
+ size 2539288
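The pointer-size change is consistent with the added layer. Assuming fp32 weights (4 bytes per parameter), the size delta works out to almost exactly one GPT-2 block at this width; the few hundred leftover bytes are plausibly the safetensors JSON header growing with the extra tensors:

old_size, new_size = 1_745_032, 2_539_288  # bytes, from the LFS pointers above
print((new_size - old_size) // 4)          # 198_564 added fp32 parameters

# One GPT-2 block at n_embd=128, n_inner=512 (weights + biases):
#   layer norms:  2 * (128 + 128)                   =     512
#   attention:    (128*384 + 384) + (128*128 + 128) =  66_048
#   MLP:          (128*512 + 512) + (512*128 + 128) = 131_712
#   total                                           = 198_272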
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e39e5f79376736909aff47ed0035fa12fc36e51fb24724077ea552997f4276cb
- size 5432
+ oid sha256:82ae76f4757a61676b98de487ca62d4fd33f207f408709e8e1ffbe6551050475
+ size 5368