apepkuss79 committed on
Commit
b310b94
·
verified ·
1 Parent(s): 8194ca6

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +9 -7
README.md CHANGED
@@ -22,18 +22,20 @@ library_name: transformers
22
 
23
  ## Run with LlamaEdge
24
 
25
- - LlamaEdge version: coming soon
26
-
27
- <!-- - LlamaEdge version: [v0.14.3](https://github.com/LlamaEdge/LlamaEdge/releases/tag/0.14.3) -->
28
 
29
  - Prompt template
30
 
31
- - Prompt type: `deepseek-chat-25`
32
 
33
  - Prompt string
34
 
35
  ```text
36
- <|begin_of_sentence|>{system_message}<|User|>{user_message_1}<|Assistant|>{assistant_message_1}<|end_of_sentence|><|User|>{user_message_2}<|Assistant|>
 
 
 
 
37
  ```
38
 
39
  - Context size: `128000`
@@ -44,7 +46,7 @@ library_name: transformers
44
  wasmedge --dir .:. --nn-preload default:GGML:AUTO:DeepSeek-R1-Distill-Qwen-14B-Q5_K_M.gguf \
45
  llama-api-server.wasm \
46
  --model-name DeepSeek-R1-Distill-Qwen-14B \
47
- --prompt-template deepseek-chat-25 \
48
  --ctx-size 128000
49
  ```
50
 
@@ -53,7 +55,7 @@ library_name: transformers
53
  ```bash
54
  wasmedge --dir .:. --nn-preload default:GGML:AUTO:DeepSeek-R1-Distill-Qwen-14B-Q5_K_M.gguf \
55
  llama-chat.wasm \
56
- --prompt-template deepseek-chat-25 \
57
  --ctx-size 128000
58
  ```
59
 
 
22
 
23
  ## Run with LlamaEdge
24
 
25
+ - LlamaEdge version: [v0.16.1](https://github.com/LlamaEdge/LlamaEdge/releases/tag/0.16.1)
 
 
26
 
27
  - Prompt template
28
 
29
+ - Prompt type: `chatml`
30
 
31
  - Prompt string
32
 
33
  ```text
34
+ <|im_start|>system
35
+ {system_message}<|im_end|>
36
+ <|im_start|>user
37
+ {prompt}<|im_end|>
38
+ <|im_start|>assistant
39
  ```
40
 
41
  - Context size: `128000`
 
46
  wasmedge --dir .:. --nn-preload default:GGML:AUTO:DeepSeek-R1-Distill-Qwen-14B-Q5_K_M.gguf \
47
  llama-api-server.wasm \
48
  --model-name DeepSeek-R1-Distill-Qwen-14B \
49
+ --prompt-template chatml \
50
  --ctx-size 128000
51
  ```
52
 
 
55
  ```bash
56
  wasmedge --dir .:. --nn-preload default:GGML:AUTO:DeepSeek-R1-Distill-Qwen-14B-Q5_K_M.gguf \
57
  llama-chat.wasm \
58
+ --prompt-template chatml \
59
  --ctx-size 128000
60
  ```
61