LquenS committed on
Commit
4daa396
·
verified ·
1 Parent(s): 8813230

Update config.json

Browse files

llm_attn_implementation changed from flash_attention_2 to eager

Files changed (1) hide show
  1. config.json +1 -1
config.json CHANGED
@@ -9,7 +9,7 @@
9
  "conversation_formatter_class": "QwenConversationFormatter",
10
  "disable_tie_weight": false,
11
  "hidden_size": 2048,
12
- "llm_attn_implementation": "flash_attention_2",
13
  "llm_config": {
14
  "_attn_implementation_autoset": true,
15
  "_name_or_path": "Qwen/Qwen2.5-3B-Instruct",
 
9
  "conversation_formatter_class": "QwenConversationFormatter",
10
  "disable_tie_weight": false,
11
  "hidden_size": 2048,
12
+ "llm_attn_implementation": "eager",
13
  "llm_config": {
14
  "_attn_implementation_autoset": true,
15
  "_name_or_path": "Qwen/Qwen2.5-3B-Instruct",