codebyzeb committed (verified)
Commit 04d042d · 1 Parent(s): 02d3441

Training in progress, step 200000

babyslm/lexical.txt CHANGED
The diff for this file is too large to render. See raw diff
 
babyslm/syntactic.txt CHANGED
The diff for this file is too large to render. See raw diff
 
config.json CHANGED
@@ -3,9 +3,9 @@
   "architectures": [
     "GPT2LMHeadModel"
   ],
-  "attn_pdrop": 0.3,
+  "attn_pdrop": 0.1,
   "bos_token_id": 3,
-  "embd_pdrop": 0.3,
+  "embd_pdrop": 0.1,
   "eos_token_id": 3,
   "initializer_range": 0.02,
   "layer_norm_epsilon": 1e-05,
@@ -16,7 +16,7 @@
   "n_layer": 3,
   "n_positions": 256,
   "reorder_and_upcast_attn": false,
-  "resid_pdrop": 0.3,
+  "resid_pdrop": 0.1,
   "scale_attn_by_inverse_layer_idx": false,
   "scale_attn_weights": true,
   "summary_activation": null,
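
Note: the hunks above lower all three GPT-2 dropout probabilities (attention, embedding, residual) from 0.3 to 0.1; the architecture itself (3 layers, 256 positions) is unchanged. A minimal sketch for checking the updated values with transformers, assuming the new config.json has been downloaded to the current directory:

from transformers import GPT2Config

# Sketch: confirm the dropout fields changed in this commit.
# Assumes the updated config.json sits in the working directory.
config = GPT2Config.from_json_file("config.json")
print(config.attn_pdrop, config.embd_pdrop, config.resid_pdrop)  # expected: 0.1 0.1 0.1
print(config.n_layer, config.n_positions)                        # 3 256 (unchanged)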
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c74657fc47ce12d4a4c4c8f2d2fc85b78eefe353f17ea8c69a01c542e2a2fd76
+oid sha256:ea7fa508facf7b2114f33e8a6ed03e6b8774da3be3a8f217358467806c473d1b
 size 2539288
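
Note: model.safetensors is stored via Git LFS, so the commit only swaps the pointer; the oid is the SHA-256 of the weights file itself. A small sketch (local file path assumed) for verifying a downloaded copy against the new pointer:

import hashlib

# Sketch: hash a downloaded model.safetensors and compare it to the LFS oid above.
EXPECTED_OID = "ea7fa508facf7b2114f33e8a6ed03e6b8774da3be3a8f217358467806c473d1b"

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

print(h.hexdigest() == EXPECTED_OID)  # True if the local file matches the new pointer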
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b0faa326a9ad465fa691ab59fb9d1c440af93bba3fdf11a33e9c1f31003f5c8f
+oid sha256:c72b82046da94d266f2f0c44d71d64597a24ff2e96d5ee1fc92e5fad4813e7ae
 size 5432
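
Note: training_args.bin is the TrainingArguments object that the Hugging Face Trainer serializes with torch.save alongside each checkpoint. A hedged sketch for inspecting it locally (the printed fields are standard TrainingArguments attributes; recent PyTorch releases default to weights_only=True, which must be disabled to unpickle the object):

import torch

# Sketch: load the serialized TrainingArguments from this checkpoint.
# Assumes training_args.bin has been downloaded to the current directory.
args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)  # typically "TrainingArguments"
print(args.max_steps, args.per_device_train_batch_size)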