Bakugo123 committed
Commit 7d4242a · verified · 1 Parent(s): 12c8998

Model save

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,59 +1,58 @@
 ---
-base_model: meta-llama/Meta-Llama-3-8B
-library_name: peft
-license: llama3
+base_model: meta-llama/Meta-Llama-3-8B-Instruct
+library_name: transformers
+model_name: sft-llama-cloud
 tags:
-- trl
-- sft
 - generated_from_trainer
-model-index:
-- name: sft-llama-cloud
-  results: []
+- sft
+- trl
+licence: license
 ---
 
-<!-- This model card has been generated automatically according to the information the Trainer had access to. You
-should probably proofread and complete it, then remove this comment. -->
-
-# sft-llama-cloud
+# Model Card for sft-llama-cloud
 
-This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) on the None dataset.
-It achieves the following results on the evaluation set:
-- Loss: 1.5621
+This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct).
+It has been trained using [TRL](https://github.com/huggingface/trl).
 
-## Model description
+## Quick start
 
-More information needed
+```python
+from transformers import pipeline
 
-## Intended uses & limitations
-
-More information needed
-
-## Training and evaluation data
-
-More information needed
+question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+generator = pipeline("text-generation", model="Bakugo123/sft-llama-cloud", device="cuda")
+output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+print(output["generated_text"])
+```
 
 ## Training procedure
 
-### Training hyperparameters
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/axiler/sft_llama3-8b-cloud-fine-tune/runs/d6fizy5q)
 
-The following hyperparameters were used during training:
-- learning_rate: 2e-05
-- train_batch_size: 1
-- eval_batch_size: 8
-- seed: 42
-- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
-- lr_scheduler_type: linear
-- num_epochs: 5
-- mixed_precision_training: Native AMP
-
-### Training results
-
+This model was trained with SFT.
 
 ### Framework versions
 
-- PEFT 0.12.0
-- Transformers 4.43.4
-- Pytorch 2.4.0a0+07cecf4168.nv24.05
-- Datasets 2.20.0
-- Tokenizers 0.19.1
+- TRL: 0.19.1
+- Transformers: 4.53.1
+- Pytorch: 2.7.1
+- Datasets: 2.16.0
+- Tokenizers: 0.21.2
+
+## Citations
+
+Cite TRL as:
+
+```bibtex
+@misc{vonwerra2022trl,
+    title = {{TRL: Transformer Reinforcement Learning}},
+    author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec},
+    year = 2020,
+    journal = {GitHub repository},
+    publisher = {GitHub},
+    howpublished = {\url{https://github.com/huggingface/trl}}
+}
+```
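For context, a run like the one this card describes would typically be launched with TRL's `SFTTrainer` plus the PEFT config recorded in `adapter_config.json` below. This is a minimal sketch, not the author's actual script: the dataset file (`train.jsonl`) is a hypothetical placeholder, and the hyperparameters are borrowed from the previous card for illustration; the real settings for this commit live in the updated `training_args.bin`.

```python
from datasets import load_dataset
from peft import LoraConfig
from trl import SFTConfig, SFTTrainer

# Hypothetical dataset; the card does not say what this model was trained on.
dataset = load_dataset("json", data_files="train.jsonl", split="train")

# LoRA settings copied from adapter_config.json in this commit.
peft_config = LoraConfig(
    r=64,
    lora_alpha=16,
    lora_dropout=0.1,
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)

trainer = SFTTrainer(
    model="meta-llama/Meta-Llama-3-8B-Instruct",
    args=SFTConfig(
        output_dir="sft-llama-cloud",
        learning_rate=2e-5,            # values from the previous card, for illustration
        per_device_train_batch_size=1,
        num_train_epochs=5,
    ),
    train_dataset=dataset,
    peft_config=peft_config,
)
trainer.train()
```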
adapter_config.json CHANGED
@@ -3,6 +3,9 @@
   "auto_mapping": null,
   "base_model_name_or_path": "meta-llama/Meta-Llama-3-8B-Instruct",
   "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
   "fan_in_fan_out": false,
   "inference_mode": true,
   "init_lora_weights": true,
@@ -11,19 +14,23 @@
   "layers_to_transform": null,
   "loftq_config": {},
   "lora_alpha": 16,
+  "lora_bias": false,
   "lora_dropout": 0.1,
   "megatron_config": null,
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
+  "qalora_group_size": 16,
   "r": 64,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "v_proj",
-    "q_proj"
+    "q_proj",
+    "v_proj"
   ],
   "task_type": "CAUSAL_LM",
+  "trainable_token_indices": null,
   "use_dora": false,
+  "use_qalora": false,
   "use_rslora": false
 }
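Because this repository ships only the LoRA adapter (q_proj/v_proj at r=64), inference requires stacking it on the gated base model. A minimal sketch with PEFT, assuming you have access to meta-llama/Meta-Llama-3-8B-Instruct:

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Base model named in adapter_config.json; requires accepting the Llama 3 license.
base = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Meta-Llama-3-8B-Instruct",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
# Apply the LoRA adapter from this repo on top of the base weights.
model = PeftModel.from_pretrained(base, "Bakugo123/sft-llama-cloud")
tokenizer = AutoTokenizer.from_pretrained("Bakugo123/sft-llama-cloud")
```

Calling `model.merge_and_unload()` afterwards folds the adapter into the base weights if you want a standalone checkpoint.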
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:461efed3c23271fd56410c7ca7b80996fbd31b52647b85f8638c42a6f040b596
+oid sha256:85df9188028a207e0e8a012c9eeff4eea55795f46fdf9214629db4544e05bac1
 size 109069176
chat_template.jinja ADDED
@@ -0,0 +1,5 @@
+{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>
+
+'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>
+
+' }}{% endif %}
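The template above is the stock Llama 3 chat format, now shipped as a standalone chat_template.jinja rather than embedded in tokenizer_config.json (see the removal below). A quick sketch of what it renders for a single user turn; the commented output follows from the template and the special tokens in this repo:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Bakugo123/sft-llama-cloud")
messages = [{"role": "user", "content": "Hello!"}]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
# <|begin_of_text|><|start_header_id|>user<|end_header_id|>
#
# Hello!<|eot_id|><|start_header_id|>assistant<|end_header_id|>
#
```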
special_tokens_map.json CHANGED
@@ -12,6 +12,5 @@
     "normalized": false,
     "rstrip": false,
     "single_word": false
-  },
-  "pad_token": "<|eot_id|>"
+  }
 }
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -2050,14 +2050,13 @@
     }
   },
   "bos_token": "<|begin_of_text|>",
-  "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}",
   "clean_up_tokenization_spaces": true,
   "eos_token": "<|eot_id|>",
+  "extra_special_tokens": {},
   "model_input_names": [
     "input_ids",
     "attention_mask"
   ],
   "model_max_length": 1000000000000000019884624838656,
-  "pad_token": "<|eot_id|>",
   "tokenizer_class": "PreTrainedTokenizerFast"
 }
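Note that this commit removes pad_token (previously <|eot_id|>) from both special_tokens_map.json and tokenizer_config.json, so batched tokenization or generation now needs a pad token set explicitly. A small sketch of the usual workaround, assuming reusing eos for padding is acceptable as the old config did:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Bakugo123/sft-llama-cloud")
if tokenizer.pad_token is None:
    # Reuse <|eot_id|> (the eos token) for padding, matching the removed setting.
    tokenizer.pad_token = tokenizer.eos_token
```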
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a6286358a7fc536d696b60de46cef40fc148ae6d8dce5a985a9af09700228159
-size 5496
+oid sha256:1c637cb65aafc76e8d7b3ffd129a81b98216f29c4265d8c6ee3f95985ce2fc31
+size 6161