zyznull committed (verified)
Commit f718036 · 1 Parent(s): b22da49

add use case for vllm & modify Citation

Files changed (1): README.md (+39 -6)
README.md CHANGED

@@ -62,6 +62,7 @@ KeyError: 'qwen3'

```python
# Requires transformers>=4.51.0
+ # Requires sentence-transformers>=2.7.0

from sentence_transformers import SentenceTransformer

@@ -165,6 +166,39 @@ scores = (embeddings[:2] @ embeddings[2:].T)
print(scores.tolist())
# [[0.7645568251609802, 0.14142508804798126], [0.13549736142158508, 0.5999549627304077]]
```
+
+ ```python
+ # Requires vllm>=0.8.5
+ import torch
+ import vllm
+ from vllm import LLM
+
+ def get_detailed_instruct(task_description: str, query: str) -> str:
+     return f'Instruct: {task_description}\nQuery:{query}'
+
+ # Each query must come with a one-sentence instruction that describes the task
+ task = 'Given a web search query, retrieve relevant passages that answer the query'
+
+ queries = [
+     get_detailed_instruct(task, 'What is the capital of China?'),
+     get_detailed_instruct(task, 'Explain gravity')
+ ]
+ # No need to add instruction for retrieval documents
+ documents = [
+     "The capital of China is Beijing.",
+     "Gravity is a force that attracts two bodies towards each other. It gives weight to physical objects and is responsible for the movement of planets around the sun."
+ ]
+ input_texts = queries + documents
+
+ model = LLM(model="Qwen/Qwen3-Embedding-0.6B", task="embed")
+
+ outputs = model.embed(input_texts)
+ embeddings = torch.tensor([o.outputs.embedding for o in outputs])
+ scores = (embeddings[:2] @ embeddings[2:].T)
+ print(scores.tolist())
+ # [[0.7620252966880798, 0.14078938961029053], [0.1358368694782257, 0.6013815999031067]]
+ ```
+
📌 **Tip**: We recommend that developers customize the `instruct` according to their specific scenarios, tasks, and languages. Our tests have shown that in most retrieval scenarios, not using an `instruct` on the query side can lead to a drop in retrieval performance by approximately 1% to 5%.

## Evaluation

@@ -222,11 +256,10 @@ print(scores.tolist())
If you find our work helpful, feel free to give us a cite.

```
- @misc{qwen3-embedding,
-     title = {Qwen3-Embedding},
-     url = {https://qwenlm.github.io/blog/qwen3/},
-     author = {Qwen Team},
-     month = {May},
-     year = {2025}
+ @article{qwen3embedding,
+     title={Qwen3 Embedding: Advancing Text Embedding and Reranking Through Foundation Models},
+     author={Zhang, Yanzhao and Li, Mingxin and Long, Dingkun and Zhang, Xin and Lin, Huan and Yang, Baosong and Xie, Pengjun and Yang, An and Liu, Dayiheng and Lin, Junyang and Huang, Fei and Zhou, Jingren},
+     journal={arXiv preprint arXiv:2506.05176},
+     year={2025}
}
```
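The tip in the diff recommends tailoring the `instruct` to the target scenario and language. As a minimal sketch of what that can look like, the snippet below reuses the `get_detailed_instruct` helper from the added vLLM example but swaps in a hypothetical multilingual FAQ-retrieval task; the task wording, queries, and documents are illustrative assumptions, not part of the model card, and the resulting strings can be passed to either the sentence-transformers or vLLM snippets unchanged.

```python
# Illustrative only: a task-specific instruction for a hypothetical
# multilingual FAQ-retrieval scenario (the wording is an assumption,
# not from the model card). Reuses the helper defined in the diff above.
def get_detailed_instruct(task_description: str, query: str) -> str:
    return f'Instruct: {task_description}\nQuery:{query}'

task = 'Given a customer question in any language, retrieve FAQ entries that answer the question'

queries = [
    get_detailed_instruct(task, 'How do I reset my password?'),
    get_detailed_instruct(task, '¿Cómo cambio mi dirección de envío?'),
]
# Documents are still encoded without an instruction
documents = [
    "To reset your password, open Settings > Account > Security and choose 'Reset password'.",
    "You can change your shipping address from the Orders page before the order ships.",
]

# `queries` and `documents` can now be embedded with model.encode(...) or
# model.embed(...) exactly as in the snippets above.
```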