Fine-tuned model of my own for general question answering on STEM MCQAs, trained with AQuA-RAT and SciQ
- README.md +4 -4
- config.json +1 -0
- model.safetensors +1 -1
- tokenizer_config.json +4 -0
- training_args.bin +2 -2
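The commit message names AQuA-RAT and SciQ as the fine-tuning corpora but records nothing else about them. Below is a minimal sketch of pulling both from the Hub; the dataset IDs and field names are assumptions based on the commonly used copies, not anything stated in this commit.

from datasets import load_dataset

# Assumed Hub IDs for the two MCQA corpora named in the commit message.
aqua_rat = load_dataset("deepmind/aqua_rat", split="train")   # algebra word problems, 5 lettered options
sciq = load_dataset("allenai/sciq", split="train")            # science MCQs with 3 distractors each

# Peek at one example from each to see the raw MCQA fields.
print(aqua_rat[0]["question"], aqua_rat[0]["options"], aqua_rat[0]["correct"])
print(sciq[0]["question"], sciq[0]["correct_answer"])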
README.md
CHANGED
@@ -1,7 +1,7 @@
 ---
 library_name: transformers
 license: apache-2.0
-base_model: Jukess/
+base_model: Jukess/mcqa_initial_ft
 tags:
 - generated_from_trainer
 model-index:
@@ -14,7 +14,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # qwen_mcqa
 
-This model is a fine-tuned version of [Jukess/
+This model is a fine-tuned version of [Jukess/mcqa_initial_ft](https://huggingface.co/Jukess/mcqa_initial_ft) on the None dataset.
 
 ## Model description
 
@@ -50,6 +50,6 @@ The following hyperparameters were used during training:
 ### Framework versions
 
 - Transformers 4.51.3
-- Pytorch 2.
-- Datasets 3.
+- Pytorch 2.7.0+cu126
+- Datasets 3.6.0
 - Tokenizers 0.21.0
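The updated card identifies the checkpoint as a further fine-tune of Jukess/mcqa_initial_ft named qwen_mcqa. A minimal inference sketch follows; the repo ID "Jukess/qwen_mcqa" and the letter-answer prompt format are assumptions, since neither is documented in the card.

from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Jukess/qwen_mcqa"  # hypothetical repo ID; substitute the actual repo for this commit
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Assumed MCQA prompt style: question, lettered options, then "Answer:".
prompt = (
    "Question: Which gas makes up most of Earth's atmosphere?\n"
    "A. Oxygen\nB. Nitrogen\nC. Carbon dioxide\nD. Argon\n"
    "Answer:"
)
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=5)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))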
config.json
CHANGED
@@ -17,6 +17,7 @@
   "num_attention_heads": 16,
   "num_hidden_layers": 28,
   "num_key_value_heads": 8,
+  "pad_token_id": 151643,
   "rms_norm_eps": 1e-06,
   "rope_scaling": null,
   "rope_theta": 1000000,
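The only config change is the new "pad_token_id": 151643 entry, which is the ID of "<|endoftext|>", the same token tokenizer_config.json uses for padding. A small consistency check, again using the hypothetical repo ID:

from transformers import AutoConfig, AutoTokenizer

repo = "Jukess/qwen_mcqa"  # hypothetical repo ID
config = AutoConfig.from_pretrained(repo)
tokenizer = AutoTokenizer.from_pretrained(repo)

# The model-level pad ID should line up with the tokenizer's pad token.
assert config.pad_token_id == tokenizer.pad_token_id == 151643
assert tokenizer.convert_ids_to_tokens(151643) == "<|endoftext|>"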
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:8ad4ebace401168598e41528314eb80b61cb9ee5ac336e7ab7060f391aeaa323
 size 2384234968
tokenizer_config.json
CHANGED
@@ -232,9 +232,13 @@
   "eos_token": "<|endoftext|>",
   "errors": "replace",
   "extra_special_tokens": {},
+  "max_length": 512,
   "model_max_length": 131072,
   "pad_token": "<|endoftext|>",
   "split_special_tokens": false,
+  "stride": 0,
   "tokenizer_class": "Qwen2Tokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
   "unk_token": null
 }
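The new tokenizer keys (max_length 512, stride 0, right-side truncation, longest_first strategy) suggest training sequences were capped at 512 tokens. A sketch of the equivalent call-time behavior, with the same hypothetical repo ID:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Jukess/qwen_mcqa")  # hypothetical repo ID

# Over-long inputs are cut down to 512 tokens, dropping tokens from the right.
long_prompt = "Question: " + "a very long supporting passage " * 200 + "\nAnswer:"
encoded = tokenizer(long_prompt, truncation=True, max_length=512)
print(len(encoded["input_ids"]))  # <= 512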
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:577deaea493431b64c3ff3e87a29de273a0fa13d10e1f0fd6519e61dfbf3b9c8
+size 5649