# Qwen3-0.6B Awesome Prompts Fine-tune
A fine-tuned version of [Qwen/Qwen3-0.6B](https://huggingface.co/Qwen/Qwen3-0.6B), trained on the [awesome-chatgpt-prompts](https://huggingface.co/datasets/fka/awesome-chatgpt-prompts) dataset for prompt-generation tasks: given a role name (e.g. "Data Scientist"), the model writes a ready-to-use prompt for that role.
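## Training Data Format

The dataset pairs a role name (`act`) with a full prompt (`prompt`). Below is a minimal sketch of how each row could be converted into a chat-format training example, mirroring the instruction phrasing used in the Usage section below; the exact template used for this fine-tune is an assumption.

```python
from datasets import load_dataset

def to_chat_example(row):
    # Assumed mapping: the user asks for a prompt by role name,
    # and the assistant replies with the dataset's full prompt text.
    return {
        "messages": [
            {"role": "user", "content": f"Generate a prompt for: {row['act']}"},
            {"role": "assistant", "content": row["prompt"]},
        ]
    }

dataset = load_dataset("fka/awesome-chatgpt-prompts", split="train")
dataset = dataset.map(to_chat_example, remove_columns=dataset.column_names)
```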
## Usage
```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("Ontario/qwen3-0.6b-awesome-prompts")
tokenizer = AutoTokenizer.from_pretrained("Ontario/qwen3-0.6b-awesome-prompts")

def generate_prompt(role):
    # Build a single-turn chat and apply the model's chat template.
    messages = [{"role": "user", "content": f"Generate a prompt for: {role}"}]
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
    inputs = tokenizer(text, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=200)
    # Decode only the newly generated tokens, skipping the echoed input.
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

print(generate_prompt("Data Scientist"))
```
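Unless the saved generation config enables sampling, `generate` decodes greedily and returns the same prompt for a given role every time. For more varied outputs, standard `transformers` sampling parameters can be enabled; the helper below is a hypothetical variant, and the parameter values are illustrative rather than tuned for this model.

```python
def generate_prompt_sampled(role, temperature=0.7, top_p=0.9):
    # Hypothetical variant of generate_prompt with stochastic sampling.
    messages = [{"role": "user", "content": f"Generate a prompt for: {role}"}]
    text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = tokenizer(text, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=200,
        do_sample=True,           # sample instead of greedy decoding
        temperature=temperature,  # soften the next-token distribution
        top_p=top_p,              # nucleus sampling: keep top 90% probability mass
    )
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

# Each call can now produce a different prompt for the same role.
print(generate_prompt_sampled("Data Scientist"))
```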