Upload 12 files
- .gitattributes +1 -0
- README.md +443 -0
- added_tokens.json +28 -0
- config.json +30 -0
- config_sentence_transformers.json +10 -0
- merges.txt +0 -0
- model.safetensors +3 -0
- modules.json +14 -0
- sentence_bert_config.json +4 -0
- special_tokens_map.json +31 -0
- tokenizer.json +3 -0
- tokenizer_config.json +240 -0
- vocab.json +0 -0
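
Together, these files are everything sentence-transformers needs to load the model. As a rough sketch (the repository id below is a placeholder, since this commit page does not name one), the whole snapshot can be pulled with `huggingface_hub`:

```python
from huggingface_hub import snapshot_download

# Placeholder repo id; substitute the repository this commit belongs to.
local_dir = snapshot_download(repo_id="your-username/your-model-id")
print(local_dir)  # folder containing config.json, model.safetensors, tokenizer files, ...
```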
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,443 @@
---
tags:
- sentence-transformers
- sentence-similarity
- feature-extraction
- generated_from_trainer
- dataset_size:17048032
- loss:MultipleNegativesRankingLoss
base_model: Qwen/Qwen3-0.6B
widget:
- source_sentence: is messages for android by google
  sentences:
  - Creating and Opening Files. The CreateFile function can create a new file or open an existing file. You must specify the file name, creation instructions, and other attributes. When an application creates a new file, the operating system adds it to the specified directory. The operating system assigns a unique identifier, called a handle, to each file that is opened or created using CreateFile. An application can use this handle with functions that read from, write to, and describe the file.
  - Google Voice gives you a free phone number for calling, text messaging, and voicemail. It works on smartphones and computers, and syncs across your devices so you can use the app while on the go or at home. You're in control. Forward calls, text messages, and voicemail to any of your devices, and get spam filtered automatically.
  - "Google’s Messenger app gets renamed “Android Messages”. Google’s default SMS/MMS app, Messenger, has been renamed Android Messages. The name change has just rolled out in the Play Store and arrives as Google prepares for wider adoption of the new Rich Communications Services (RCS) messaging standard."
- source_sentence: how much does brad pitt make?
  sentences:
  - 'Brad Pitt net worth: Brad Pitt is an award-winning film actor and producer who has net worth of $240 million. Brad Pitt was raised in Springfield Brad Pitt net worth: Brad Pitt is an award-winning film actor and producer who has net worth of $240 million.'
  - "An actor, Bradley Cooper is most widely recognized for … celebritynetworth.com Brad Pitt Net Worth-TheRichest-TheRichest-The … About Brad Pitt. American actor and film producer, William Bradley “Brad” Pitt has an estimated net worth of $240 million.With looks that have … therichest.com Brad Pitt Net Worth | Celebrities Net Worth 2014 Brad Pitt Net Worth. View Brad Pitt Net Worth and other Interesting and Rare Brad Pitt Facts You Won't Find Anywhere Else.radley Cooper Net Worth: Bradley Cooper is an American actor who has a net worth of $60 million dollars."
  - Lime slurry is a suspension of calcium hydroxide in water. The product is a user-friendly, cost-effective alkali. Lime slurry is a free-flowing product that is used for a variety of industrial, municipal and environmental applications. The product is also used extensively for the stabilization of expansive clay soils.
- source_sentence: hereditary paraganglioma-pheochromocytoma syndrome
  sentences:
  - Hereditary paraganglioma-pheochromocytoma syndrome is a condition in which tumors develop in structures called paraganglia. Paraganglia are bundles of cells of the peripheral nervous system (the nerves outside the brain and spinal cord). A tumor that develops in the paraganglia is called a paraganglioma.
  - Friendly political wager A friendly political wager is a largely symbolic wager made between politicians representing two cities or areas on the outcome of an important sports contest between teams representing those same two cities or areas.
  - Hereditary paraganglioma-pheochromocytoma syndrome Most cases of familial paraganglioma are caused by mutations in the succinate dehydrogenase (SDH; succinate:ubiquinone oxidoreductase) subunit genes (SDHD, SDHAF2, SDHC, SDHB).
- source_sentence: what does option deposit mean in real estate
  sentences:
  - '1 Option money is credited towards purchase: When you sign a Lease 2 Purchase contract, you will pay the seller an option deposit. 2 This money is your vested interest in the home and will be fully (100%) credited to you when you buy the home. Possible sale for a profit: If you are allowed to sell (assign) your option (it will be in your agreement), you may sell it to a third party for a profit. 2 Increased buying power: When you buy a Lease 2 Purchase home, you can put down as little as first month''s rent and a $1 option deposit.'
  - H2 blockers reduce the amount of acid made by your stomach. They are used in conditions where it is helpful to reduce stomach acid. For example, for acid reflux which causes heartburn. Most people who take H2 blockers do not develop any side-effects.H2 blockers are a group of medicines that reduce the amount of acid produced by the cells in the lining of the stomach. They are also called 'histamine H2-receptor antagonists' but are commonly called H2 blockers.They include cimetidine, famotidine, nizatidine and ranitidine, and have various different brand names. Your stomach normally produces acid to help with the digestion of food and to kill germs (bacteria).or example, for acid reflux which causes heartburn. Most people who take H2 blockers do not develop any side-effects. H2 blockers are a group of medicines that reduce the amount of acid produced by the cells in the lining of the stomach.
  - Option land means the person granting an option is called the optionor (or grantor) and the person who gets benefits of using an option is called optionee (or the beneficiary). An option agreement is where one person grants another person the exclusive right for a set time to buy a property normally at a set price.A non-refundable fee is normally charged for this option. During the term of option, no one else can buy or sell the property.The property may then be purchased by exercising option and entering into a pre-agreed form of contract or option can be left to expire.f you feel your property has potential for future development and you want to find out more, please feel free to contact us for free initial impartial advice without obligation. Option land for development as landowner grants a property developer an option for a fixed term of years to purchase their land.
- source_sentence: what county is neptune city nj
  sentences:
  - Neptune City, NJ. Neptune City is a borough in Monmouth County, New Jersey, United States. As of the 2010 United States Census, the borough population was 4,869. The Borough of Neptune City was incorporated on October 4, 1881, based on a referendum held on March 19, 1881.
  - Neptune City, NJ. Sponsored Topics. Neptune City is a borough in Monmouth County, New Jersey, United States. As of the 2010 United States Census, the borough population was 4,869. The Borough of Neptune City was incorporated on October 4, 1881, based on a referendum held on March 19, 1881.
  - 'There are three main types of blood cancers: Leukemia, a type of cancer found in your blood and bone marrow, is caused by the rapid production of abnormal white blood cells.yeloma is a cancer of the plasma cells. Plasma cells are white blood cells that produce disease-and infection-fighting antibodies in your body. Myeloma cells prevent the normal production of antibodies, leaving your body''s immune system weakened and susceptible to infection.'
pipeline_tag: sentence-similarity
library_name: sentence-transformers
---

# SentenceTransformer based on Qwen/Qwen3-0.6B

This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [Qwen/Qwen3-0.6B](https://huggingface.co/Qwen/Qwen3-0.6B). It maps sentences & paragraphs to a 1024-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.

## Model Details

### Model Description
- **Model Type:** Sentence Transformer
- **Base model:** [Qwen/Qwen3-0.6B](https://huggingface.co/Qwen/Qwen3-0.6B) <!-- at revision 6130ef31402718485ca4d80a6234f70d9a4cf362 -->
- **Maximum Sequence Length:** 512 tokens
- **Output Dimensionality:** 1024 dimensions
- **Similarity Function:** Cosine Similarity
<!-- - **Training Dataset:** Unknown -->
<!-- - **Language:** Unknown -->
<!-- - **License:** Unknown -->

### Model Sources

- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)

### Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: Qwen3Model
  (1): Pooling({'word_embedding_dimension': 1024, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
)
```
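
The two modules above mean an embedding is simply the masked mean of the backbone's last hidden states. As a minimal sketch of that computation with plain `transformers` (mean pooling re-implemented by hand; the repository id is the same placeholder used in the usage snippet below):

```python
import torch
from transformers import AutoModel, AutoTokenizer

# Placeholder id; point this at the actual repository.
model_id = "sentence_transformers_model_id"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModel.from_pretrained(model_id)

batch = tokenizer(["what county is neptune city nj"], padding=True,
                  truncation=True, max_length=512, return_tensors="pt")
with torch.no_grad():
    token_embeddings = model(**batch).last_hidden_state  # (1, seq_len, 1024)

# Mean pooling over non-padding tokens, matching the Pooling module above.
mask = batch["attention_mask"].unsqueeze(-1).float()
embedding = (token_embeddings * mask).sum(dim=1) / mask.sum(dim=1)
print(embedding.shape)  # torch.Size([1, 1024])
```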

## Usage

### Direct Usage (Sentence Transformers)

First install the Sentence Transformers library:

```bash
pip install -U sentence-transformers
```

Then you can load this model and run inference.
```python
from sentence_transformers import SentenceTransformer

# Download from the 🤗 Hub
model = SentenceTransformer("sentence_transformers_model_id")
# Run inference
sentences = [
    'what county is neptune city nj',
    'Neptune City, NJ. Neptune City is a borough in Monmouth County, New Jersey, United States. As of the 2010 United States Census, the borough population was 4,869. The Borough of Neptune City was incorporated on October 4, 1881, based on a referendum held on March 19, 1881.',
    'Neptune City, NJ. Sponsored Topics. Neptune City is a borough in Monmouth County, New Jersey, United States. As of the 2010 United States Census, the borough population was 4,869. The Borough of Neptune City was incorporated on October 4, 1881, based on a referendum held on March 19, 1881.',
]
embeddings = model.encode(sentences)
print(embeddings.shape)
# [3, 1024]

# Get the similarity scores for the embeddings
similarities = model.similarity(embeddings, embeddings)
print(similarities.shape)
# [3, 3]
```

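The same `encode`/`similarity` pair covers the semantic-search use case mentioned above: embed a query and a set of passages, then rank the passages by cosine similarity. A short sketch continuing the idea (passages adapted from the widget examples; the model id is again a placeholder):

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("sentence_transformers_model_id")  # same placeholder as above

query = "what county is neptune city nj"
passages = [
    "Neptune City, NJ. Neptune City is a borough in Monmouth County, New Jersey, United States.",
    "Lime slurry is a suspension of calcium hydroxide in water. The product is a user-friendly, cost-effective alkali.",
]

query_embedding = model.encode([query])
passage_embeddings = model.encode(passages)

# similarity_fn_name is "cosine" (see config_sentence_transformers.json), so larger scores rank higher.
scores = model.similarity(query_embedding, passage_embeddings)[0]
for passage, score in sorted(zip(passages, scores.tolist()), key=lambda pair: pair[1], reverse=True):
    print(f"{score:.3f}  {passage}")
```
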
<!--
### Direct Usage (Transformers)

<details><summary>Click to see the direct usage in Transformers</summary>

</details>
-->

<!--
### Downstream Usage (Sentence Transformers)

You can finetune this model on your own dataset.

<details><summary>Click to expand</summary>

</details>
-->

<!--
### Out-of-Scope Use

*List how the model may foreseeably be misused and address what users ought not to do with the model.*
-->

<!--
## Bias, Risks and Limitations

*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
-->

<!--
### Recommendations

*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
-->

## Training Details

### Training Dataset

#### Unnamed Dataset

* Size: 17,048,032 training samples
* Columns: <code>sentence_0</code>, <code>sentence_1</code>, and <code>sentence_2</code>
* Approximate statistics based on the first 1000 samples:
  |         | sentence_0 | sentence_1 | sentence_2 |
  |:--------|:-----------|:-----------|:-----------|
  | type    | string | string | string |
  | details | <ul><li>min: 2 tokens</li><li>mean: 7.16 tokens</li><li>max: 33 tokens</li></ul> | <ul><li>min: 25 tokens</li><li>mean: 84.34 tokens</li><li>max: 243 tokens</li></ul> | <ul><li>min: 16 tokens</li><li>mean: 81.56 tokens</li><li>max: 300 tokens</li></ul> |
* Samples:
  | sentence_0 | sentence_1 | sentence_2 |
  |:-----------|:-----------|:-----------|
  | <code>what county is nettles island fl</code> | <code>Nettles Island. Nettles Island in Hutchinson Island Florida. A development of close to 1300 lots with anything from trailer pads to updated concrete block homes on a mostly man made island that juts out into the Indian River on Hutchinson Island in Saint Lucie County FL. Though, the official address for Nettles Island is in Jensen Beach.</code> | <code>Fleming Island is an unincorporated community and census-designated place in Clay County, Florida, United States. It is located 21 miles southwest of downtown Jacksonville, on the western side of the St. Johns River, off US 17. As of the 2010 census the Fleming Island CDP had a population of 27,126. Fleming Island's ZIP code became 32003 in 2004, giving it a different code from Orange Park, the incorporated town to the north.</code> |
  | <code>what time of day to take estrogen</code> | <code>Time of day to take Estrogen. Hi. I think we all may find different times of day are better for each of our needs. I actually feel much better using my estrogen twice a day. I use half in the morning and half in the evening. I am using a different estrogen than you and am able to split my dose. I'm glad to hear that you have been feeling very good on your current hormone therapy Hopefully just a small adjustment may be needed as our estrogen needs can change overtime.</code> | <code>Eating fresh carrots or drinking a cup of fresh carrot juice 2-3 times a day is a wonderful way to bring on your period sooner than expected. Carrots contain high amounts of carotene, which encourages the production of estrogen. The more estrogen you have in your body, the more your period desires to arrive.</code> |
  | <code>what effects does nicotine have on your body</code> | <code>Nicotine also activates areas of the brain that are involved in producing feelings of pleasure and reward. Recently, scientists discovered that nicotine raises the levels of a neurotransmitter called dopamine in the parts of the brain that produce feelings of pleasure and reward.</code> | <code>The action of nicotine in the body is very complicated. It is a mild stimulant which has an effect upon the heart and brain. It stimulates the central nervous system causing irregular heartbeat and blood pressure, induces vomiting and diarrhea, and first stimulates, then inhibits glandular secretions.icotine seems to provide both a stimulant and a depressant effect, and it is likely that the effect it has at any time is determined by the mood of the user, the environment and the circumstances of use.</code> |
* Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters:
  ```json
  {
      "scale": 20.0,
      "similarity_fct": "cos_sim"
  }
  ```

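With this three-column layout, MultipleNegativesRankingLoss scores each `sentence_0` against its own `sentence_1` (positive), its `sentence_2` (hard negative), and every other passage in the batch (in-batch negatives). A hedged sketch of instantiating the loss with the parameters listed above; this mirrors the JSON block but is not a copy of the original training script:

```python
from sentence_transformers import SentenceTransformer, losses, util

# Training would start from the base checkpoint named in this card.
model = SentenceTransformer("Qwen/Qwen3-0.6B")

loss = losses.MultipleNegativesRankingLoss(
    model=model,
    scale=20.0,                    # "scale" from the JSON block above
    similarity_fct=util.cos_sim,   # "cos_sim" from the JSON block above
)
```
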
### Training Hyperparameters
#### Non-Default Hyperparameters

- `num_train_epochs`: 1
- `max_steps`: 10000
- `fp16`: True
- `multi_dataset_batch_sampler`: round_robin

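For reference, a sketch of how these non-default values map onto a `SentenceTransformerTrainer` run. This is an assumption about how the values were passed, not the original training script; the toy dataset and output directory are illustrative stand-ins:

```python
from datasets import Dataset
from sentence_transformers import (
    SentenceTransformer,
    SentenceTransformerTrainer,
    SentenceTransformerTrainingArguments,
    losses,
)

model = SentenceTransformer("Qwen/Qwen3-0.6B")
loss = losses.MultipleNegativesRankingLoss(model, scale=20.0)

# Toy stand-in for the real 17M-triplet dataset; column names match the card.
train_dataset = Dataset.from_dict({
    "sentence_0": ["what county is neptune city nj"],
    "sentence_1": ["Neptune City is a borough in Monmouth County, New Jersey."],
    "sentence_2": ["Lime slurry is a suspension of calcium hydroxide in water."],
})

args = SentenceTransformerTrainingArguments(
    output_dir="output",                       # illustrative; not taken from the card
    num_train_epochs=1,
    max_steps=10_000,
    per_device_train_batch_size=8,
    fp16=True,
    multi_dataset_batch_sampler="round_robin",
)

trainer = SentenceTransformerTrainer(model=model, args=args, train_dataset=train_dataset, loss=loss)
trainer.train()
```
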
#### All Hyperparameters
<details><summary>Click to expand</summary>

- `overwrite_output_dir`: False
- `do_predict`: False
- `eval_strategy`: no
- `prediction_loss_only`: True
- `per_device_train_batch_size`: 8
- `per_device_eval_batch_size`: 8
- `per_gpu_train_batch_size`: None
- `per_gpu_eval_batch_size`: None
- `gradient_accumulation_steps`: 1
- `eval_accumulation_steps`: None
- `torch_empty_cache_steps`: None
- `learning_rate`: 5e-05
- `weight_decay`: 0.0
- `adam_beta1`: 0.9
- `adam_beta2`: 0.999
- `adam_epsilon`: 1e-08
- `max_grad_norm`: 1
- `num_train_epochs`: 1
- `max_steps`: 10000
- `lr_scheduler_type`: linear
- `lr_scheduler_kwargs`: {}
- `warmup_ratio`: 0.0
- `warmup_steps`: 0
- `log_level`: passive
- `log_level_replica`: warning
- `log_on_each_node`: True
- `logging_nan_inf_filter`: True
- `save_safetensors`: True
- `save_on_each_node`: False
- `save_only_model`: False
- `restore_callback_states_from_checkpoint`: False
- `no_cuda`: False
- `use_cpu`: False
- `use_mps_device`: False
- `seed`: 42
- `data_seed`: None
- `jit_mode_eval`: False
- `use_ipex`: False
- `bf16`: False
- `fp16`: True
- `fp16_opt_level`: O1
- `half_precision_backend`: auto
- `bf16_full_eval`: False
- `fp16_full_eval`: False
- `tf32`: None
- `local_rank`: 0
- `ddp_backend`: None
- `tpu_num_cores`: None
- `tpu_metrics_debug`: False
- `debug`: []
- `dataloader_drop_last`: False
- `dataloader_num_workers`: 0
- `dataloader_prefetch_factor`: None
- `past_index`: -1
- `disable_tqdm`: False
- `remove_unused_columns`: True
- `label_names`: None
- `load_best_model_at_end`: False
- `ignore_data_skip`: False
- `fsdp`: []
- `fsdp_min_num_params`: 0
- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
- `tp_size`: 0
- `fsdp_transformer_layer_cls_to_wrap`: None
- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
- `deepspeed`: None
- `label_smoothing_factor`: 0.0
- `optim`: adamw_torch
- `optim_args`: None
- `adafactor`: False
- `group_by_length`: False
- `length_column_name`: length
- `ddp_find_unused_parameters`: None
- `ddp_bucket_cap_mb`: None
- `ddp_broadcast_buffers`: False
- `dataloader_pin_memory`: True
- `dataloader_persistent_workers`: False
- `skip_memory_metrics`: True
- `use_legacy_prediction_loop`: False
- `push_to_hub`: False
- `resume_from_checkpoint`: None
- `hub_model_id`: None
- `hub_strategy`: every_save
- `hub_private_repo`: None
- `hub_always_push`: False
- `gradient_checkpointing`: False
- `gradient_checkpointing_kwargs`: None
- `include_inputs_for_metrics`: False
- `include_for_metrics`: []
- `eval_do_concat_batches`: True
- `fp16_backend`: auto
- `push_to_hub_model_id`: None
- `push_to_hub_organization`: None
- `mp_parameters`: 
- `auto_find_batch_size`: False
- `full_determinism`: False
- `torchdynamo`: None
- `ray_scope`: last
- `ddp_timeout`: 1800
- `torch_compile`: False
- `torch_compile_backend`: None
- `torch_compile_mode`: None
- `include_tokens_per_second`: False
- `include_num_input_tokens_seen`: False
- `neftune_noise_alpha`: None
- `optim_target_modules`: None
- `batch_eval_metrics`: False
- `eval_on_start`: False
- `use_liger_kernel`: False
- `eval_use_gather_object`: False
- `average_tokens_across_devices`: False
- `prompts`: None
- `batch_sampler`: batch_sampler
- `multi_dataset_batch_sampler`: round_robin

</details>

### Training Logs
| Epoch  | Step  | Training Loss |
|:------:|:-----:|:-------------:|
| 0.0002 | 500   | 1.7342        |
| 0.0005 | 1000  | 1.7194        |
| 0.0007 | 1500  | 1.6713        |
| 0.0009 | 2000  | 1.5885        |
| 0.0012 | 2500  | 1.4152        |
| 0.0014 | 3000  | 1.3052        |
| 0.0016 | 3500  | 1.1763        |
| 0.0019 | 4000  | 1.0714        |
| 0.0021 | 4500  | 1.0235        |
| 0.0023 | 5000  | 0.9484        |
| 0.0026 | 5500  | 0.9207        |
| 0.0028 | 6000  | 0.9076        |
| 0.0031 | 6500  | 0.8736        |
| 0.0033 | 7000  | 0.8671        |
| 0.0035 | 7500  | 0.8621        |
| 0.0038 | 8000  | 0.8414        |
| 0.0040 | 8500  | 0.8228        |
| 0.0042 | 9000  | 0.8101        |
| 0.0045 | 9500  | 0.8339        |
| 0.0047 | 10000 | 0.7968        |

### Framework Versions
- Python: 3.10.14
- Sentence Transformers: 4.0.1
- Transformers: 4.51.3
- PyTorch: 2.6.0+cu124
- Accelerate: 1.6.0
- Datasets: 3.2.0
- Tokenizers: 0.21.1

## Citation

### BibTeX

#### Sentence Transformers
```bibtex
@inproceedings{reimers-2019-sentence-bert,
    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
    author = "Reimers, Nils and Gurevych, Iryna",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
    month = "11",
    year = "2019",
    publisher = "Association for Computational Linguistics",
    url = "https://arxiv.org/abs/1908.10084",
}
```

#### MultipleNegativesRankingLoss
```bibtex
@misc{henderson2017efficient,
    title={Efficient Natural Language Response Suggestion for Smart Reply},
    author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil},
    year={2017},
    eprint={1705.00652},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
```

<!--
## Glossary

*Clearly define terms in order to be accessible across audiences.*
-->

<!--
## Model Card Authors

*Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
-->

<!--
## Model Card Contact

*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
-->
added_tokens.json
ADDED
@@ -0,0 +1,28 @@
{
  "</think>": 151668,
  "</tool_call>": 151658,
  "</tool_response>": 151666,
  "<think>": 151667,
  "<tool_call>": 151657,
  "<tool_response>": 151665,
  "<|box_end|>": 151649,
  "<|box_start|>": 151648,
  "<|endoftext|>": 151643,
  "<|file_sep|>": 151664,
  "<|fim_middle|>": 151660,
  "<|fim_pad|>": 151662,
  "<|fim_prefix|>": 151659,
  "<|fim_suffix|>": 151661,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644,
  "<|image_pad|>": 151655,
  "<|object_ref_end|>": 151647,
  "<|object_ref_start|>": 151646,
  "<|quad_end|>": 151651,
  "<|quad_start|>": 151650,
  "<|repo_name|>": 151663,
  "<|video_pad|>": 151656,
  "<|vision_end|>": 151653,
  "<|vision_pad|>": 151654,
  "<|vision_start|>": 151652
}
config.json
ADDED
@@ -0,0 +1,30 @@
{
  "architectures": [
    "Qwen3Model"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "eos_token_id": 151645,
  "head_dim": 128,
  "hidden_act": "silu",
  "hidden_size": 1024,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "max_position_embeddings": 40960,
  "max_window_layers": 28,
  "model_type": "qwen3",
  "num_attention_heads": 16,
  "num_hidden_layers": 28,
  "num_key_value_heads": 8,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 1000000,
  "sliding_window": null,
  "tie_word_embeddings": true,
  "torch_dtype": "float32",
  "transformers_version": "4.51.3",
  "use_cache": true,
  "use_sliding_window": false,
  "vocab_size": 151936
}
config_sentence_transformers.json
ADDED
@@ -0,0 +1,10 @@
{
  "__version__": {
    "sentence_transformers": "4.0.1",
    "transformers": "4.51.3",
    "pytorch": "2.6.0+cu124"
  },
  "prompts": {},
  "default_prompt_name": null,
  "similarity_fn_name": "cosine"
}
merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7525e6287d2697f3a6a53258b6aec2f36f6b38e59eb640b03a5c53792649570b
size 2384233112
modules.json
ADDED
@@ -0,0 +1,14 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  }
]
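
modules.json is what tells sentence-transformers to chain the Transformer module (the Qwen3 backbone at the repository root) with the Pooling module stored under `1_Pooling/`. A hedged sketch of building the same two-module pipeline by hand from the base checkpoint:

```python
from sentence_transformers import SentenceTransformer, models

word_embedding_model = models.Transformer("Qwen/Qwen3-0.6B", max_seq_length=512)
pooling_model = models.Pooling(
    word_embedding_dimension=word_embedding_model.get_word_embedding_dimension(),  # 1024
    pooling_mode="mean",
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
print(model)  # mirrors the "Full Model Architecture" block in the README
```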
sentence_bert_config.json
ADDED
@@ -0,0 +1,4 @@
{
  "max_seq_length": 512,
  "do_lower_case": false
}
special_tokens_map.json
ADDED
@@ -0,0 +1,31 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "eos_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1574cf58b63a2a56db9bc28f6ddcac4ece87690840939153189077692486f4ee
size 11422920
tokenizer_config.json
ADDED
@@ -0,0 +1,240 @@
{
  "add_bos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151644": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151645": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151646": {
      "content": "<|object_ref_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151647": {
      "content": "<|object_ref_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151648": {
      "content": "<|box_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151649": {
      "content": "<|box_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151650": {
      "content": "<|quad_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151651": {
      "content": "<|quad_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151652": {
      "content": "<|vision_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151653": {
      "content": "<|vision_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151654": {
      "content": "<|vision_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151655": {
      "content": "<|image_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151656": {
      "content": "<|video_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151657": {
      "content": "<tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151658": {
      "content": "</tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151659": {
      "content": "<|fim_prefix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151660": {
      "content": "<|fim_middle|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151661": {
      "content": "<|fim_suffix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151662": {
      "content": "<|fim_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151663": {
      "content": "<|repo_name|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151664": {
      "content": "<|file_sep|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151665": {
      "content": "<tool_response>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151666": {
      "content": "</tool_response>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151667": {
      "content": "<think>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151668": {
      "content": "</think>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    }
  },
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "bos_token": null,
"chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0].role == 'system' %}\n {{- messages[0].content + '\\n\\n' }}\n {%- endif %}\n {{- \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0].role == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0].content + '<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}\n{%- for message in messages[::-1] %}\n {%- set index = (messages|length - 1) - loop.index0 %}\n {%- if ns.multi_step_tool and message.role == \"user\" and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}\n {%- set ns.multi_step_tool = false %}\n {%- set ns.last_query_index = index %}\n {%- endif %}\n{%- endfor %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {%- set content = message.content %}\n {%- set reasoning_content = '' %}\n {%- if message.reasoning_content is defined and message.reasoning_content is not none %}\n {%- set reasoning_content = message.reasoning_content %}\n {%- else %}\n {%- if '</think>' in message.content %}\n {%- set content = message.content.split('</think>')[-1].lstrip('\\n') %}\n {%- set reasoning_content = message.content.split('</think>')[0].rstrip('\\n').split('<think>')[-1].lstrip('\\n') %}\n {%- endif %}\n {%- endif %}\n {%- if loop.index0 > ns.last_query_index %}\n {%- if loop.last or (not loop.last and reasoning_content) %}\n {{- '<|im_start|>' + message.role + '\\n<think>\\n' + reasoning_content.strip('\\n') + '\\n</think>\\n\\n' + content.lstrip('\\n') }}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- if message.tool_calls %}\n {%- for tool_call in message.tool_calls %}\n {%- if (loop.first and content) or (not loop.first) %}\n {{- '\\n' }}\n {%- endif %}\n {%- if tool_call.function %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {%- if tool_call.arguments is string %}\n {{- tool_call.arguments }}\n {%- else %}\n {{- tool_call.arguments | tojson }}\n {%- endif %}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {%- endif %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if loop.first or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n {%- if enable_thinking is defined and 
enable_thinking is false %}\n {{- '<think>\\n\\n</think>\\n\\n' }}\n {%- endif %}\n{%- endif %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "errors": "replace",
  "extra_special_tokens": {},
  "model_max_length": 512,
  "pad_token": "<|endoftext|>",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff