Update README.md
Browse files
README.md
CHANGED
@@ -55,13 +55,24 @@ The three configurations (**full-hints, no-hints, one-try**) in our paper differ
 
 ## 📖 Citation
 ```
-@
-
-
-
-
-
-
-
+@inproceedings{loredo-lopez-etal-2025-nyt,
+    title = "{NYT}-Connections: A Deceptively Simple Text Classification Task that Stumps System-1 Thinkers",
+    author = "Loredo Lopez, Angel Yahir and
+      McDonald, Tyler and
+      Emami, Ali",
+    editor = "Rambow, Owen and
+      Wanner, Leo and
+      Apidianaki, Marianna and
+      Al-Khalifa, Hend and
+      Eugenio, Barbara Di and
+      Schockaert, Steven",
+    booktitle = "Proceedings of the 31st International Conference on Computational Linguistics",
+    month = jan,
+    year = "2025",
+    address = "Abu Dhabi, UAE",
+    publisher = "Association for Computational Linguistics",
+    url = "https://aclanthology.org/2025.coling-main.134/",
+    pages = "1952--1963",
+    abstract = "Large Language Models (LLMs) have shown impressive performance on various benchmarks, yet their ability to engage in deliberate reasoning remains questionable. We present NYT-Connections, a collection of 358 simple word classification puzzles derived from the New York Times Connections game. This benchmark is designed to penalize quick, intuitive {\textquotedblleft}System 1{\textquotedblright} thinking, isolating fundamental reasoning skills. We evaluated six recent LLMs, a simple machine learning heuristic, and humans across three configurations: single-attempt, multiple attempts without hints, and multiple attempts with contextual hints. Our findings reveal a significant performance gap: even top-performing LLMs like GPT-4 fall short of human performance by nearly 30{\%}. Notably, advanced prompting techniques such as Chain-of-Thought and Self-Consistency show diminishing returns as task difficulty increases. NYT-Connections uniquely combines linguistic isolation, resistance to intuitive shortcuts, and regular updates to mitigate data leakage, offering a novel tool for assessing LLM reasoning capabilities."
 }
 ```