yuchiz committed
Commit 4e73f42 · verified · Parent: a2a2875

Push model using huggingface_hub.

README.md ADDED
@@ -0,0 +1,43 @@
+ ---
+ license: apache-2.0
+ tags:
+ - trl
+ - ppo
+ - transformers
+ - reinforcement-learning
+ ---
+
+ # TRL Model
+
+ This is a [TRL language model](https://github.com/huggingface/trl) that has been fine-tuned with reinforcement learning to
+ guide the model outputs according to a value function or human feedback. The model can be used for text generation.
+
+ ## Usage
+
+ To use this model for inference, first install the TRL library:
+
+ ```bash
+ python -m pip install trl
+ ```
+
+ You can then generate text as follows:
+
+ ```python
+ from transformers import pipeline
+
+ generator = pipeline("text-generation", model="yuchiz//tmp/tmpbswj31mg/yuchiz/models")
+ outputs = generator("Hello, my llama is cute")
+ ```
+
+ If you want to use the model for training or to obtain the outputs from the value head, load the model as follows:
+
+ ```python
+ from transformers import AutoTokenizer
+ from trl import AutoModelForCausalLMWithValueHead
+
+ tokenizer = AutoTokenizer.from_pretrained("yuchiz//tmp/tmpbswj31mg/yuchiz/models")
+ model = AutoModelForCausalLMWithValueHead.from_pretrained("yuchiz//tmp/tmpbswj31mg/yuchiz/models")
+
+ inputs = tokenizer("Hello, my llama is cute", return_tensors="pt")
+ outputs = model(**inputs, labels=inputs["input_ids"])
+ ```
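Note that the model id in the README snippets above looks like a temporary local path from the push, and the adapter_config.json added below indicates this repo holds a LoRA adapter for meta-llama/Llama-2-7b-chat-hf rather than a full merged model. A minimal sketch of loading such an adapter on top of the base model with peft, assuming a placeholder repo id ("your-username/your-adapter-repo" stands in for wherever this commit actually lives):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Placeholder ids: the base model name comes from adapter_config.json below;
# the adapter repo id is hypothetical and must be replaced with the real one.
base_id = "meta-llama/Llama-2-7b-chat-hf"
adapter_id = "your-username/your-adapter-repo"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id)

# Attach the LoRA weights (adapter_model.safetensors) to the base model.
model = PeftModel.from_pretrained(base_model, adapter_id)
model.eval()
```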
adapter_config.json ADDED
@@ -0,0 +1,42 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-7b-chat-hf",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 16,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "layers.27.self_attn.q_proj",
+ "layers.31.self_attn.q_proj",
+ "layers.27.self_attn.v_proj",
+ "layers.25.self_attn.v_proj",
+ "layers.24.self_attn.q_proj",
+ "layers.26.self_attn.q_proj",
+ "layers.25.self_attn.q_proj",
+ "layers.28.self_attn.q_proj",
+ "layers.29.self_attn.v_proj",
+ "layers.30.self_attn.q_proj",
+ "layers.24.self_attn.v_proj",
+ "layers.30.self_attn.v_proj",
+ "layers.26.self_attn.v_proj",
+ "layers.29.self_attn.q_proj",
+ "layers.31.self_attn.v_proj",
+ "layers.28.self_attn.v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+ }
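For reference, the PEFT settings above (rank 16, lora_alpha 32, dropout 0.05, LoRA applied to q_proj and v_proj of layers 24-31) correspond roughly to the LoraConfig sketched below. This is a reconstruction for illustration, not the exact code used to train this adapter:

```python
from peft import LoraConfig

# Reconstructed from adapter_config.json above; the target_modules list is
# generated with a comprehension over layers 24-31, q_proj and v_proj.
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        f"layers.{i}.self_attn.{proj}"
        for i in range(24, 32)
        for proj in ("q_proj", "v_proj")
    ],
)
```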
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73b03b0030ff0ca5d523e95c87c70ea936c2bd171420dedb66a688a078ab902b
+ size 8392896
config.json ADDED
@@ -0,0 +1,49 @@
+ {
+ "exp_name": "hotpot_rl",
+ "seed": 0,
+ "log_with": null,
+ "task_name": null,
+ "model_name": "gpt2",
+ "query_dataset": "imdb",
+ "reward_model": "sentiment-analysis:lvwerra/distilbert-imdb",
+ "remove_unused_columns": true,
+ "tracker_kwargs": {},
+ "accelerator_kwargs": {},
+ "project_kwargs": {},
+ "tracker_project_name": "trl",
+ "push_to_hub_if_best_kwargs": {},
+ "steps": 20000,
+ "learning_rate": 1e-05,
+ "adap_kl_ctrl": true,
+ "init_kl_coef": 0.2,
+ "kl_penalty": "kl",
+ "target": 6,
+ "horizon": 10000,
+ "gamma": 1,
+ "lam": 0.95,
+ "cliprange": 0.2,
+ "cliprange_value": 0.2,
+ "vf_coef": 0.1,
+ "batch_size": 32,
+ "forward_batch_size": null,
+ "mini_batch_size": 1,
+ "gradient_accumulation_steps": 8,
+ "world_size": 4,
+ "ppo_epochs": 4,
+ "max_grad_norm": null,
+ "optimize_cuda_cache": true,
+ "optimize_device_cache": false,
+ "early_stopping": false,
+ "target_kl": 1,
+ "compare_steps": 1,
+ "ratio_threshold": 10.0,
+ "use_score_scaling": false,
+ "use_score_norm": false,
+ "score_clip": null,
+ "whiten_rewards": false,
+ "is_encoder_decoder": false,
+ "is_peft_model": true,
+ "backward_batch_size": 8,
+ "global_backward_batch_size": 32,
+ "global_batch_size": 128
+ }
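The config.json above records the PPO training configuration (model_name, query_dataset, and reward_model appear to retain trl's default placeholder values rather than describing this run). A minimal sketch of rebuilding the key hyperparameters as a trl PPOConfig, assuming an older trl release (~0.7/0.8) whose PPOConfig exposes these field names; newer versions rename or drop some of them:

```python
from trl import PPOConfig

# Sketch only: values copied from config.json above; field availability
# depends on the installed trl version.
ppo_config = PPOConfig(
    exp_name="hotpot_rl",
    seed=0,
    learning_rate=1e-5,
    steps=20000,
    batch_size=32,
    mini_batch_size=1,
    gradient_accumulation_steps=8,
    ppo_epochs=4,
    adap_kl_ctrl=True,
    init_kl_coef=0.2,
    kl_penalty="kl",
    target=6,
    horizon=10000,
    gamma=1,
    lam=0.95,
    cliprange=0.2,
    cliprange_value=0.2,
    vf_coef=0.1,
)
```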
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e14b562dfc3ecf628b48f74543de869ec9b23975d220d68b331d59227373e10b
+ size 17916