hdong0 committed
Commit fdc91be · verified · 1 parent: adb398e

Model save

README.md CHANGED
@@ -1,11 +1,9 @@
 ---
 base_model: Qwen/Qwen2.5-1.5B-Instruct
- datasets: open-r1/OpenR1-Math-220k
 library_name: transformers
 model_name: Qwen2.5-1.5B-Open-R1-Distill_4epoch
 tags:
 - generated_from_trainer
- - open-r1
 - trl
 - sft
 licence: license
@@ -13,7 +11,7 @@ licence: license
 
 # Model Card for Qwen2.5-1.5B-Open-R1-Distill_4epoch
 
- This model is a fine-tuned version of [Qwen/Qwen2.5-1.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct) on the [open-r1/OpenR1-Math-220k](https://huggingface.co/datasets/open-r1/OpenR1-Math-220k) dataset.
+ This model is a fine-tuned version of [Qwen/Qwen2.5-1.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct).
 It has been trained using [TRL](https://github.com/huggingface/trl).
 
 ## Quick start
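
The updated card still ends at the "## Quick start" heading. For context, a minimal inference sketch of the kind that section typically contains, assuming the model is published under the committer's namespace as hdong0/Qwen2.5-1.5B-Open-R1-Distill_4epoch (the hub path is an assumption, not stated in this diff):

```python
# Minimal sketch, not copied from the card itself; the repo id below is assumed.
from transformers import pipeline

generator = pipeline(
    "text-generation",
    model="hdong0/Qwen2.5-1.5B-Open-R1-Distill_4epoch",  # assumed hub path
)

messages = [{"role": "user", "content": "Solve: what is 12 * 13?"}]
result = generator(messages, max_new_tokens=128)
# With chat-format input, generated_text is the conversation including the reply.
print(result[0]["generated_text"][-1]["content"])
```

The chat-style message list is passed straight to the pipeline, which applies the tokenizer's chat template before generation.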
all_results.json CHANGED
@@ -1,8 +1,8 @@
 {
 "total_flos": 4583117197672448.0,
- "train_loss": 0.4550957629332601,
- "train_runtime": 23037.0949,
+ "train_loss": 0.0,
+ "train_runtime": 0.0145,
 "train_samples": 93733,
- "train_samples_per_second": 16.275,
- "train_steps_per_second": 0.127
+ "train_samples_per_second": 25827825.108,
+ "train_steps_per_second": 201975.78
 }
config.json CHANGED
@@ -22,7 +22,7 @@
 "tie_word_embeddings": true,
 "torch_dtype": "bfloat16",
 "transformers_version": "4.52.0.dev0",
- "use_cache": true,
+ "use_cache": false,
 "use_sliding_window": false,
 "vocab_size": 151936
 }
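
The only functional change to config.json flips `use_cache` from true to false; this flag controls whether the model returns and reuses past key/value states. It is commonly disabled during training and does not block KV caching at inference, since it can be overridden per call. A minimal sketch, reusing the assumed hub path from above:

```python
# Sketch only: re-enable KV caching at generation time even though the saved
# config has "use_cache": false. The repo id is assumed, as above.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "hdong0/Qwen2.5-1.5B-Open-R1-Distill_4epoch"  # assumed
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype="auto")

inputs = tokenizer("1 + 1 =", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=16, use_cache=True)  # overrides the config flag
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```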
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
 "total_flos": 4583117197672448.0,
- "train_loss": 0.4550957629332601,
- "train_runtime": 23037.0949,
+ "train_loss": 0.0,
+ "train_runtime": 0.0145,
 "train_samples": 93733,
- "train_samples_per_second": 16.275,
- "train_steps_per_second": 0.127
+ "train_samples_per_second": 25827825.108,
+ "train_steps_per_second": 201975.78
 }
trainer_state.json CHANGED
@@ -4699,13 +4699,12 @@
 },
 {
 "epoch": 4.0,
- "num_tokens": 2241244589.0,
 "step": 2932,
 "total_flos": 4583117197672448.0,
- "train_loss": 0.4550957629332601,
- "train_runtime": 23037.0949,
- "train_samples_per_second": 16.275,
- "train_steps_per_second": 0.127
+ "train_loss": 0.0,
+ "train_runtime": 0.0145,
+ "train_samples_per_second": 25827825.108,
+ "train_steps_per_second": 201975.78
 }
 ],
 "logging_steps": 5,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:0de765071a809ee5cf343ea403ecfc91523fd9a4605db70373cd4526c718b27d
+ oid sha256:87afaa4fd28fa61c5949f420310fac1ef54a15b689acd20dbc9c3667b0e56e36
 size 7416
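
training_args.bin is a pickled training-arguments object tracked through Git LFS, so only the pointer's sha256 changes here. It can be downloaded and inspected if needed; a sketch, again with the assumed hub path:

```python
# Sketch: download and unpickle training_args.bin to inspect the run's hyperparameters.
# The repo id is assumed; weights_only=False is needed because this is a pickled
# Python object, so only load it from a source you trust.
import torch
from huggingface_hub import hf_hub_download

path = hf_hub_download("hdong0/Qwen2.5-1.5B-Open-R1-Distill_4epoch", "training_args.bin")  # assumed repo id
args = torch.load(path, weights_only=False)
print(args.num_train_epochs, args.learning_rate, args.per_device_train_batch_size)
```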