Update README.md
README.md
@@ -221,4 +221,15 @@ vllm serve inclusionAI/Ring-mini-linear-2.0 \
     --api-key your-api-key
 ```
 
-
+#### Citation
+```shell
+@misc{lingteam2025attentionmattersefficienthybrid,
+      title={Every Attention Matters: An Efficient Hybrid Architecture for Long-Context Reasoning},
+      author={Ling Team and Bin Han and Caizhi Tang and Chen Liang and Donghao Zhang and Fan Yuan and Feng Zhu and Jie Gao and Jingyu Hu and Longfei Li and Meng Li and Mingyang Zhang and Peijie Jiang and Peng Jiao and Qian Zhao and Qingyuan Yang and Wenbo Shen and Xinxing Yang and Yalin Zhang and Yankun Ren and Yao Zhao and Yibo Cao and Yixuan Sun and Yue Zhang and Yuchen Fang and Zibin Lin and Zixuan Cheng and Jun Zhou},
+      year={2025},
+      eprint={2510.19338},
+      archivePrefix={arXiv},
+      primaryClass={cs.LG},
+      url={https://arxiv.org/abs/2510.19338},
+}
+```
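The unchanged context above is the tail of the README's `vllm serve` command for `inclusionAI/Ring-mini-linear-2.0`. For reference, a served instance can be exercised against vLLM's OpenAI-compatible endpoint as in the minimal sketch below; this is not part of the commit, and it assumes vLLM's default port 8000 and the placeholder `your-api-key` shown in the context line.

```shell
# Minimal sanity check against the vLLM OpenAI-compatible server started above.
# Assumes the default port 8000; replace your-api-key with the value passed to --api-key.
curl http://localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer your-api-key" \
  -d '{
        "model": "inclusionAI/Ring-mini-linear-2.0",
        "messages": [{"role": "user", "content": "Hello"}]
      }'
```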