Upload RLHF checkpoint at epoch 6
- README.md +45 -0
- config.json +54 -0
- generation_config.json +6 -0
- merges.txt +0 -0
- model.safetensors +3 -0
- special_tokens_map.json +24 -0
- tokenizer.json +0 -0
- tokenizer_config.json +23 -0
- training_config.yaml +65 -0
- vocab.json +0 -0
README.md
ADDED
@@ -0,0 +1,45 @@
---
language: en
tags:
- rlhf
- checkpoint
- irl
- gpt-neo-125m
library_name: transformers
pipeline_tag: text-generation
---

# f9bs90mv-rlhf-checkpoint-gpt-neo-125m-irl-epoch-6

This is an RLHF model checkpoint saved at epoch 6.

## Model Information

- **Base Model**: EleutherAI/gpt-neo-125M
- **Reward Type**: irl
- **Dataset**: allenai/real-toxicity-prompts
- **Training Epoch**: 6

## IRL Configuration

- **Likelihood Type**: bradley_terry
- **Normalization Strategy**: none
- **IRL Artifact**: matthieubou-imperial-college-london/bayes_irl_vi/posterior_bradley_terry_rkiq5pd8:v0
- **Use Raw Score**: True

## Usage

This checkpoint can be loaded using the Hugging Face Transformers and TRL libraries:

```python
from trl import AutoModelForCausalLMWithValueHead

# Load the checkpoint
model = AutoModelForCausalLMWithValueHead.from_pretrained(
    "MattBou00/f9bs90mv-rlhf-checkpoint-gpt-neo-125m-irl-epoch-6"
)
```
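Once loaded, the wrapper exposes the standard `generate` API. A minimal generation sketch, with sampling settings borrowed from the `generation` section of `training_config.yaml` (the prompt is arbitrary):

```python
from transformers import AutoTokenizer
from trl import AutoModelForCausalLMWithValueHead

repo = "MattBou00/f9bs90mv-rlhf-checkpoint-gpt-neo-125m-irl-epoch-6"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLMWithValueHead.from_pretrained(repo)

# Sampling settings taken from training_config.yaml (top_p=0.8, temperature=0.7)
inputs = tokenizer("The weather today is", return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_new_tokens=30,
    do_sample=True,
    top_p=0.8,
    temperature=0.7,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```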
## Training Configuration

The training configuration is saved in `training_config.yaml`.
config.json
ADDED
@@ -0,0 +1,54 @@
{
  "activation_function": "gelu_new",
  "architectures": [
    "GPTNeoForCausalLM"
  ],
  "attention_dropout": 0,
  "attention_layers": [
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local"
  ],
  "attention_types": [
    [
      [
        "global",
        "local"
      ],
      6
    ]
  ],
  "bos_token_id": 50256,
  "classifier_dropout": 0.1,
  "embed_dropout": 0,
  "eos_token_id": 50256,
  "gradient_checkpointing": false,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": null,
  "layer_norm_epsilon": 1e-05,
  "max_position_embeddings": 2048,
  "model_type": "gpt_neo",
  "num_heads": 12,
  "num_layers": 12,
  "resid_dropout": 0,
  "summary_activation": null,
  "summary_first_dropout": 0.1,
  "summary_proj_to_labels": true,
  "summary_type": "cls_index",
  "summary_use_proj": true,
  "torch_dtype": "float32",
  "transformers_version": "4.54.0",
  "use_cache": true,
  "vocab_size": 50257,
  "window_size": 256
}
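In GPT-Neo configs, `attention_layers` is simply the `attention_types` pattern unrolled: `[["global", "local"], 6]` repeats a global/local pair six times across the twelve layers. A minimal sketch of that expansion (a standalone illustration; `GPTNeoConfig` in `transformers` performs the equivalent step internally):

```python
# Expand GPT-Neo "attention_types" into a per-layer attention list.
# [[["global", "local"], 6]] -> alternating global/local for 12 layers
def expand_attention_types(attention_types):
    layers = []
    for pattern, repeats in attention_types:
        for _ in range(repeats):
            layers.extend(pattern)
    return layers

print(expand_attention_types([[["global", "local"], 6]]))
# ['global', 'local', 'global', 'local', ..., 'global', 'local']
```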
generation_config.json
ADDED
@@ -0,0 +1,6 @@
{
  "_from_model_config": true,
  "bos_token_id": 50256,
  "eos_token_id": 50256,
  "transformers_version": "4.54.0"
}
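These defaults can be inspected through the standard `GenerationConfig` API; a quick illustrative check:

```python
from transformers import GenerationConfig

# Reads generation_config.json from the repo
gen = GenerationConfig.from_pretrained(
    "MattBou00/f9bs90mv-rlhf-checkpoint-gpt-neo-125m-irl-epoch-6"
)
print(gen.bos_token_id, gen.eos_token_id)  # 50256 50256
```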
merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0c2c1eb3959bb8c134a90ffc7cff96a0ddb73e7182d7132cb07108c565b187a3
size 500814588
special_tokens_map.json
ADDED
@@ -0,0 +1,24 @@
{
  "bos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "<|endoftext|>",
  "unk_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1,23 @@
{
  "add_bos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "50256": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<|endoftext|>",
  "clean_up_tokenization_spaces": true,
  "eos_token": "<|endoftext|>",
  "errors": "replace",
  "extra_special_tokens": {},
  "model_max_length": 2048,
  "pad_token": "<|endoftext|>",
  "tokenizer_class": "GPT2Tokenizer",
  "unk_token": "<|endoftext|>"
}
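As in GPT-2, a single token, `<|endoftext|>`, doubles as bos, eos, unk, and pad. A short illustrative check that the loaded tokenizer reflects this:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(
    "MattBou00/f9bs90mv-rlhf-checkpoint-gpt-neo-125m-irl-epoch-6"
)
# bos, eos, and pad all resolve to <|endoftext|> (token id 50256)
assert tok.bos_token == tok.eos_token == tok.pad_token == "<|endoftext|>"
print(tok.eos_token_id, tok.model_max_length)  # 50256 2048
```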
training_config.yaml
ADDED
@@ -0,0 +1,65 @@
model:
  name: EleutherAI/gpt-neo-125M
  reward_type: irl
  reward_model: matthieubou-imperial-college-london/bayes_irl_vi/posterior_bradley_terry_rkiq5pd8:v0
  base_model_name: EleutherAI/pythia-410m
  likelihood_type: bradley_terry
  use_raw_irl_score: true
  irl_normalization_strategy: none
  irl_reward_scale: 1.0
  n_posterior_samples: 100
  uncertainty_threshold: 0.45
  use_distance_sampling: false
  learning_rate: 2.0e-06
  batch_size: 128
  mini_batch_size: 8
  gradient_accumulation_steps: 16
  max_sequence_length: 512
generation:
  min_length: -1
  top_k: 0
  top_p: 0.8
  temperature: 0.7
  do_sample: true
  output_min_length: 20
  output_max_length: 30
rlhf:
  model:
    ppo_epochs: 3
    init_kl_coef: 0.2
    target: 1.0
    cliprange: 0.1
    cliprange_value: 0.2
    vf_coef: 0.1
    adap_kl_ctrl: true
    use_score_norm: true
    ratio_threshold: 10.0
training:
  num_train_epochs: 80
  save_freq: 2
  eval_freq: 2
  seed: 0
dataset:
  name: allenai/real-toxicity-prompts
  toxicity_metric: profanity
  toxicity_threshold: 0.7
  input_min_text_length: 10
  input_max_text_length: 100
  test_size: 0.1
  num_samples: 20000
output:
  push_to_hub: true
  push_checkpoints_to_hub: true
  push_final_model_to_hub: true
  hub_org: MattBou00
  repo_name_prefix: rlhf-checkpoint
  private: false
wandb:
  project: irl-rlhf-detox
  entity: matthieubou-imperial-college-london
  name: null
now: 2025-08-03_00-18-13
logging:
  use_wandb: true
  project_name: irl-rlhf-detox
  wandb_mode: online
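The batch settings are internally consistent: `mini_batch_size * gradient_accumulation_steps = 8 * 16 = 128`, which matches `batch_size`. A minimal sketch for loading and sanity-checking the file, assuming a local copy of `training_config.yaml` and the nesting shown above:

```python
import yaml

# Assumes training_config.yaml has been downloaded from the repo
with open("training_config.yaml") as f:
    cfg = yaml.safe_load(f)

m = cfg["model"]
# Effective PPO batch: 8 mini-batches accumulated over 16 steps = 128
assert m["mini_batch_size"] * m["gradient_accumulation_steps"] == m["batch_size"]
print(cfg["dataset"]["name"], cfg["training"]["num_train_epochs"])
```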
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff