JustinLin610 committed on
Commit 6b297fc · verified · 1 Parent(s): 8d7d06f

Update README.md

Files changed (1)
  README.md (+4 −4)
README.md CHANGED

```diff
@@ -7,7 +7,7 @@ base_model:
 - Qwen/Qwen3-0.6B-Base
 ---
 
-# Qwen3-0.6B
+# Qwen3-0.6B-MLX-4bit
 <a href="https://chat.qwen.ai/" target="_blank" style="margin: 2px;">
     <img alt="Chat" src="https://img.shields.io/badge/%F0%9F%92%9C%EF%B8%8F%20Qwen%20Chat%20-536af5" style="display: inline-block; vertical-align: middle;"/>
 </a>
@@ -50,7 +50,7 @@ The following contains a code snippet illustrating how to use the model generate
 from mlx_lm import load, generate
 
 model, tokenizer = load("Qwen/Qwen3-0.6B-MLX-4bit")
-prompt = "hello, Introduce yourself, and what can you do?"
+prompt = "Hello, please introduce yourself and tell me what you can do."
 
 if tokenizer.chat_template is not None:
     messages = [{"role": "user", "content": prompt}]
@@ -155,14 +155,14 @@ if __name__ == "__main__":
     chatbot = QwenChatbot()
 
     # First input (without /think or /no_think tags, thinking mode is enabled by default)
-    user_input_1 = "How many r's in strawberries?"
+    user_input_1 = "How many 'r's are in strawberries?"
     print(f"User: {user_input_1}")
     response_1 = chatbot.generate_response(user_input_1)
     print(f"Bot: {response_1}")
     print("----------------------")
 
     # Second input with /no_think
-    user_input_2 = "Then, how many r's in blueberries? /no_think"
+    user_input_2 = "Then, how many 'r's are in blueberries? /no_think"
     print(f"User: {user_input_2}")
     response_2 = chatbot.generate_response(user_input_2)
     print(f"Bot: {response_2}")
```