---
license: apache-2.0
datasets:
- fka/awesome-chatgpt-prompts
language:
- en
metrics:
- accuracy
base_model:
- moonshotai/Kimi-K2-Instruct
new_version: moonshotai/Kimi-K2-Instruct
pipeline_tag: summarization
library_name: adapter-transformers
tags:
- finance
---
## Accuracy and quick response balance

Download the Qwen3-4B weights from https://huggingface.co/Qwen/Qwen3-4B/tree/main into the working directory; the script below loads the model from `./`. Thinking is disabled so the model favors fast replies over step-by-step reasoning.
```python
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

model_name = "./"

# Load tokenizer and model from the local directory
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",     # Use float16 or bfloat16 depending on GPU
    device_map="auto",      # Automatically maps to GPU/CPU
    trust_remote_code=True
)
model.eval()

# Inference function
def ask_qwen(prompt: str, max_new_tokens=128):
    # The "/no_think" suffix and enable_thinking=False both disable thinking mode
    messages = [{"role": "user", "content": prompt + " /no_think"}]
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
        enable_thinking=False  # Fast replies, no step-by-step thinking
    )
    inputs = tokenizer([text], return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            temperature=0.7,
            top_p=0.8,
            top_k=20,
            min_p=0.0,
            do_sample=True
        )
    # Drop the prompt tokens and decode only the newly generated text
    generated = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(generated, skip_special_tokens=True).strip()

# Continuous loop for user prompts
if __name__ == "__main__":
    print("🔁 Qwen3-4B Chat Running... Type 'exit' to quit.")
    while True:
        prompt = input("\nYou: ")
        if prompt.lower().strip() in ['exit', 'quit']:
            print("👋 Exiting Qwen chat.")
            break
        try:
            response = ask_qwen(prompt)
            print(f"Qwen: {response}")
        except Exception as e:
            print(f"⚠️ Error: {e}")
```
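When accuracy matters more than latency, the same tokenizer and model can be called with thinking enabled. Below is a minimal sketch, assuming Qwen3's `enable_thinking` switch in `apply_chat_template` and the sampling values commonly suggested for thinking mode (temperature 0.6, top_p 0.95); the helper name `ask_qwen_thinking` is hypothetical and reuses the `tokenizer` and `model` loaded above.

```python
# Hypothetical slower variant: lets the model reason step by step before answering
def ask_qwen_thinking(prompt: str, max_new_tokens=1024):
    messages = [{"role": "user", "content": prompt}]  # no "/no_think" suffix
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
        enable_thinking=True  # allow internal reasoning before the final answer
    )
    inputs = tokenizer([text], return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,  # thinking output needs a larger budget
            temperature=0.6,                # assumed thinking-mode sampling values
            top_p=0.95,
            top_k=20,
            do_sample=True
        )
    generated = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(generated, skip_special_tokens=True).strip()
```

Swapping `ask_qwen` for `ask_qwen_thinking` in the loop trades response time for more deliberate answers, which is the balance this section refers to.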