wraith-coder-7b / model_info.json
Tyler Williams
Initial commit: Wraith Coder 7B - Concise code assistant via iterative fine-tuning
cc49567
{
  "model_name": "wraith-coder-7b",
  "base_model": "Qwen/Qwen2.5-Coder-7B-Instruct",
  "version": "1.0.0",
  "release_date": "2025-11-19",
  "architecture": {
    "type": "CausalLM",
    "parameters": "7.6B",
    "layers": 28,
    "hidden_size": 3584,
    "attention_heads": 28,
    "kv_heads": 4,
    "context_length": 32768,
    "vocab_size": 152064
  },
  "training": {
    "method": "LoRA Fine-tuning",
    "iterations": 3,
    "total_examples": 14244,
    "lora_rank": 16,
    "lora_alpha": 32,
    "learning_rate": 5e-5,
    "epochs_per_iteration": 2,
    "optimizer": "adamw_8bit"
  },
  "performance": {
    "conciseness_improvement": "62.6%",
    "complexity_analysis_coverage": "60%",
    "base_model_complexity_coverage": "40%",
    "evaluation_questions": 20,
    "correctness_rate": "100%"
  },
  "recommended_parameters": {
    "temperature": 0.7,
    "top_p": 0.9,
    "top_k": 40,
    "repeat_penalty": 1.1,
    "max_tokens": 2048
  },
  "quantization": {
    "supported_formats": ["fp16", "q8_0", "q4_k_m", "q4_0"],
    "recommended": "q4_k_m",
    "model_size_q4_k_m": "4.4GB"
  },
  "license": "Apache-2.0",
  "languages": ["en"],
  "tags": [
    "code-generation",
    "algorithms",
    "systems-programming",
    "complexity-analysis",
    "qwen2.5",
    "fine-tuned"
  ]
}
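
For reference, a minimal sketch of applying the "recommended_parameters" above at inference time, assuming the Hugging Face transformers library. The checkpoint path "wraith-coder-7b" is a placeholder for wherever the fp16 weights live, not a confirmed hub ID, and the prompt is illustrative only.

# usage_example.py -- sketch, not part of model_info.json
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "wraith-coder-7b"  # placeholder: substitute the actual checkpoint location
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path, device_map="auto")

# Qwen2.5 checkpoints ship a chat template, so build a chat-style prompt.
messages = [{"role": "user", "content": "Implement binary search and state its complexity."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# Sampling settings taken directly from "recommended_parameters";
# transformers names repeat_penalty/max_tokens as repetition_penalty/max_new_tokens.
outputs = model.generate(
    inputs,
    do_sample=True,
    temperature=0.7,
    top_p=0.9,
    top_k=40,
    repetition_penalty=1.1,
    max_new_tokens=2048,
)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))

The same four sampling values map onto llama.cpp-style runtimes for the q4_k_m GGUF quantization listed under "quantization".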