MHGanainy committed
Commit 9e3d82c · verified · 1 Parent(s): dae7b79

MHGanainy/gpt2-xl-lora-multi-shared-512-top
README.md CHANGED
@@ -1,7 +1,7 @@
 ---
-library_name: peft
+library_name: transformers
 license: mit
-base_model: openai-community/gpt2-xl
+base_model: openai-community/gpt2
 tags:
 - generated_from_trainer
 model-index:
@@ -14,9 +14,14 @@ should probably proofread and complete it, then remove this comment. -->
 
 # gpt2-xl-lora-multi-shared-512-top
 
-This model is a fine-tuned version of [openai-community/gpt2-xl](https://huggingface.co/openai-community/gpt2-xl) on an unknown dataset.
+This model is a fine-tuned version of [openai-community/gpt2](https://huggingface.co/openai-community/gpt2) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 2.1270
+- eval_loss: 2.9468
+- eval_model_preparation_time: 0.0024
+- eval_runtime: 1258.8862
+- eval_samples_per_second: 150.453
+- eval_steps_per_second: 75.227
+- step: 0
 
 ## Model description
 
@@ -39,25 +44,15 @@ The following hyperparameters were used during training:
 - train_batch_size: 2
 - eval_batch_size: 2
 - seed: 42
-- distributed_type: multi-GPU
-- num_devices: 8
-- total_train_batch_size: 16
-- total_eval_batch_size: 16
-- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
 - lr_scheduler_type: cosine
 - lr_scheduler_warmup_ratio: 0.1
-- lr_scheduler_warmup_steps: 69884
 - num_epochs: 1
 - mixed_precision_training: Native AMP
 
-### Training results
-
-
-
 ### Framework versions
 
-- PEFT 0.13.2
-- Transformers 4.45.2
+- Transformers 4.47.1
 - Pytorch 2.1.0a0+32f93b1
-- Datasets 3.0.1
-- Tokenizers 0.20.1
+- Datasets 3.2.0
+- Tokenizers 0.21.0
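The updated card registers the checkpoint as a plain transformers model (library_name: transformers) rather than a PEFT adapter, so it loads directly with AutoModelForCausalLM. A minimal sketch, assuming the repo id from the commit header above and an illustrative prompt:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "MHGanainy/gpt2-xl-lora-multi-shared-512-top"  # repo id from the commit header

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)  # reads config.json + model.safetensors below

inputs = tokenizer("Hello, my name is", return_tensors="pt")
outputs = model.generate(**inputs, do_sample=True, max_length=50)  # sampling defaults from config.json
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```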
all_results.json CHANGED
@@ -1,13 +1,8 @@
 {
-    "epoch": 1.0,
-    "eval_loss": 2.127037763595581,
-    "eval_runtime": 2671.6931,
-    "eval_samples_per_second": 70.892,
-    "eval_steps_per_second": 4.431,
-    "perplexity": 8.389976869779035,
-    "total_flos": 1.2729340395184456e+19,
-    "train_loss": 2.3255439768606525,
-    "train_runtime": 52634.826,
-    "train_samples_per_second": 26.555,
-    "train_steps_per_second": 1.66
+    "eval_loss": 2.9467811584472656,
+    "eval_model_preparation_time": 0.0024,
+    "eval_runtime": 1258.8862,
+    "eval_samples_per_second": 150.453,
+    "eval_steps_per_second": 75.227,
+    "perplexity": 19.044553562183122
 }
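The perplexity field is derived from the evaluation loss as exp(eval_loss); a quick check with the values above:

```python
import math

eval_loss = 2.9467811584472656
print(math.exp(eval_loss))  # 19.044553562183... — matches the "perplexity" field
```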
config.json ADDED
@@ -0,0 +1,39 @@
+{
+  "_name_or_path": "openai-community/gpt2",
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "torch_dtype": "float16",
+  "transformers_version": "4.47.1",
+  "use_cache": true,
+  "vocab_size": 50257
+}
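The added config describes the 12-layer, 768-dimensional GPT-2 base architecture (about 124M parameters), not GPT-2 XL, consistent with the base_model change in the README above. A sketch instantiating the architecture from a local copy of this file; the path is illustrative:

```python
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("config.json")  # illustrative local path
model = AutoModelForCausalLM.from_config(config)    # architecture only, randomly initialized

print(config.n_layer, config.n_head, config.n_embd)  # 12 12 768 — GPT-2 base, not XL
```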
eval_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-    "epoch": 1.0,
-    "eval_loss": 2.127037763595581,
-    "eval_runtime": 2671.6931,
-    "eval_samples_per_second": 70.892,
-    "eval_steps_per_second": 4.431,
-    "perplexity": 8.389976869779035
+    "eval_loss": 2.9467811584472656,
+    "eval_model_preparation_time": 0.0024,
+    "eval_runtime": 1258.8862,
+    "eval_samples_per_second": 150.453,
+    "eval_steps_per_second": 75.227,
+    "perplexity": 19.044553562183122
 }
generation_config.json ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 50256,
+  "eos_token_id": 50256,
+  "transformers_version": "4.47.1"
+}
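The generation config only pins the GPT-2 special tokens; the sampling settings live under task_specific_params in config.json. A minimal sketch of reading it standalone, assuming the repo id from the commit header:

```python
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("MHGanainy/gpt2-xl-lora-multi-shared-512-top")
print(gen_config.bos_token_id, gen_config.eos_token_id)  # 50256 50256
```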
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a9affa1d9f5548ccc1e92cbe5f38b42cd237e1c2b1cdc6fd5db60813bfe1ab9
+size 248894512
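The weights are stored as a Git LFS pointer: the repo itself holds only the oid and size, and the 248,894,512-byte payload (consistent with ~124M float16 parameters at 2 bytes each) is fetched on checkout. A sketch verifying a downloaded copy against the pointer's oid:

```python
import hashlib

def lfs_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file and return the sha256 hex digest that Git LFS records as the oid."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Should print 9a9affa1... for an intact download.
print(lfs_sha256("model.safetensors"))
```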
runs/Dec20_15-21-58_0a5d2fff92ba/events.out.tfevents.1734709378.0a5d2fff92ba.5181.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d1e9c2b3c85a504e4dc847a2d0d8aba119e0d808d510fb925b4a54d9114b23c0
+size 312
tokenizer_config.json CHANGED
@@ -13,6 +13,7 @@
   "bos_token": "<|endoftext|>",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|endoftext|>",
+  "extra_special_tokens": {},
   "model_max_length": 1024,
   "pad_token": "<|endoftext|>",
   "tokenizer_class": "GPT2Tokenizer",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6376f21d9ddc0e35ba852326e2c1f4c51a3b967fd83385e601e5286452290c9c
-size 5304
+oid sha256:045e2803cf84fbb192b66f7ef78d1756619f42c4e3f6ae66f870656e7c4f94d1
+size 5432
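training_args.bin is a pickled TrainingArguments object, so the full training configuration behind the README's hyperparameter list can be inspected offline. A sketch, assuming a trusted local copy (unpickling executes code, and transformers must be importable):

```python
import torch

# Unpickles a transformers.TrainingArguments instance; only load files you trust.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.lr_scheduler_type, args.per_device_train_batch_size)
```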