fpadovani committed (verified)
Commit 9574a0d · Parent(s): 4c6c11a

Training in progress, step 4000

config.json CHANGED
@@ -1,32 +1,28 @@
  {
-   "activation_function": "gelu_new",
    "architectures": [
-     "GPT2LMHeadModel"
+     "RobertaForMaskedLM"
    ],
-   "attn_pdrop": 0.1,
+   "attention_probs_dropout_prob": 0.1,
    "bos_token_id": 0,
-   "embd_pdrop": 0.1,
+   "classifier_dropout": null,
    "eos_token_id": 1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 256,
    "initializer_range": 0.02,
-   "layer_norm_epsilon": 1e-05,
-   "model_type": "gpt2",
-   "n_ctx": 512,
-   "n_embd": 256,
-   "n_head": 8,
-   "n_inner": 2048,
-   "n_layer": 8,
-   "n_positions": 512,
-   "reorder_and_upcast_attn": false,
-   "resid_pdrop": 0.1,
-   "scale_attn_by_inverse_layer_idx": false,
-   "scale_attn_weights": true,
-   "summary_activation": null,
-   "summary_first_dropout": 0.1,
-   "summary_proj_to_labels": true,
-   "summary_type": "cls_index",
-   "summary_use_proj": true,
+   "intermediate_size": 2048,
+   "layer_norm_eps": 1e-05,
+   "mask_token_id": 4,
+   "max_position_embeddings": 512,
+   "model_type": "roberta",
+   "num_attention_heads": 8,
+   "num_hidden_layers": 8,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "tie_word_embeddings": false,
    "torch_dtype": "float32",
    "transformers_version": "4.45.2",
+   "type_vocab_size": 2,
    "use_cache": true,
    "vocab_size": 8192
  }
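
The hunk above swaps a GPT-2 causal-LM config for a RoBERTa masked-LM config of matching size: 8 layers, 8 attention heads, hidden size 256 (n_embd -> hidden_size), FFN size 2048 (n_inner -> intermediate_size), 512 positions (n_positions -> max_position_embeddings), and an 8192-token vocabulary. A minimal Python sketch, assuming only the public transformers API (this is not the repo's training script), that rebuilds the new config and sanity-checks the parameter count:

from transformers import RobertaConfig, RobertaForMaskedLM

# Mirror the committed config.json by hand; values copied from the hunk above.
config = RobertaConfig(
    vocab_size=8192,
    hidden_size=256,
    num_hidden_layers=8,
    num_attention_heads=8,
    intermediate_size=2048,
    max_position_embeddings=512,
    type_vocab_size=2,
    bos_token_id=0,
    eos_token_id=1,
    pad_token_id=1,
    mask_token_id=4,
    tie_word_embeddings=False,
)
model = RobertaForMaskedLM(config)

n = model.num_parameters()
print(f"{n:,} parameters, ~{n * 4 / 1e6:.1f} MB in float32")  # ~14.9M, ~59.7 MB

Because tie_word_embeddings is false, the LM head keeps its own 8192 x 256 decoder matrix, which accounts for most of the checkpoint growth visible in model.safetensors below.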
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a04169962d5652fb4e8716bf7a0495b7bd84c205ff87d870d3b150ad39350a54
- size 51007160
+ oid sha256:0a7fa8d26b896267fbddd9e6df991a40c98f86be34da007a6022f12416a6fd93
+ size 59702184
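
The checkpoint grows from 51,007,160 to 59,702,184 bytes, consistent with roughly 14.9M float32 parameters after the switch (versus roughly 12.7M for the tied-embedding GPT-2 variant). A sketch using the safetensors API to confirm this from the file header alone, without loading any weights; the local file name is assumed:

from math import prod
from safetensors import safe_open

total = 0
with safe_open("model.safetensors", framework="pt") as f:
    # Shapes come from the safetensors header, so no tensor data is read.
    for name in f.keys():
        total += prod(f.get_slice(name).get_shape())

print(f"{total:,} values, ~{total * 4 / 1e6:.1f} MB as float32")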
special_tokens_map.json CHANGED
@@ -23,7 +23,13 @@
      "rstrip": false,
      "single_word": false
    },
-   "pad_token": "</s>",
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
    "unk_token": {
      "content": "<unk>",
      "lstrip": false,
tokenizer_config.json CHANGED
@@ -57,7 +57,7 @@
    "eos_token": "</s>",
    "mask_token": "<mask>",
    "model_max_length": 128,
-   "pad_token": "</s>",
+   "pad_token": "<pad>",
    "tokenizer_class": "PreTrainedTokenizerFast",
    "unk_token": "<unk>"
  }
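
This file and special_tokens_map.json make the same fix: padding previously reused the </s> EOS token and now uses a dedicated <pad> token, so loss and attention masking no longer conflate padding with end-of-sequence. A sketch of the visible effect; the repo id is a hypothetical placeholder for this model's actual Hub path:

from transformers import AutoTokenizer

# Hypothetical repo id; substitute the real one.
tok = AutoTokenizer.from_pretrained("fpadovani/model-in-progress")

print(tok.pad_token, tok.pad_token_id)  # now "<pad>" rather than "</s>"

# Padded positions in a batch now carry the <pad> id.
batch = tok(["short", "a somewhat longer input"], padding=True, return_tensors="pt")
print(batch["input_ids"])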
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:eb433b87752cc469b20ee625a833c904bc80363d908ad5bc61687e872bf3a42a
+ oid sha256:b7e62d267b0e2dac82b0654ee993033d8fc2441a7f6a20ed53b7f7b3af048d8d
  size 5368
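
training_args.bin is the TrainingArguments object that the Trainer pickles alongside each checkpoint; here the hash changes while the size stays at 5368 bytes. A sketch for inspecting it locally (only unpickle files from sources you trust):

import torch
from transformers import TrainingArguments

# weights_only=False is required on recent PyTorch, since this is a pickled
# Python object rather than a tensor file; safe only for trusted sources.
args: TrainingArguments = torch.load("training_args.bin", weights_only=False)
print(args.per_device_train_batch_size, args.learning_rate, args.save_steps)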
validation_batches.log CHANGED
The diff for this file is too large to render.