models:
  - model: deepseek-ai/DeepSeek-R1-Distill-Llama-70B
    parameters:
      weight: 0.25
      density: 0.5
  - model: watt-ai/watt-tool-70B
    parameters:
      weight: 0.25
      density: 0.5
  - model: deepcogito/cogito-v1-preview-llama-70B
    parameters:
      weight: 0.25
      density: 0.5
  - model: nvidia/Llama-3.1-Nemotron-70B-Instruct-HF
    parameters:
      weight: 0.25
      density: 0.5
merge_method: dare_ties
base_model: meta-llama/Llama-3.3-70B-Instruct
parameters:
  normalize: false
out_dtype: bfloat16
chat_template: llama3
tokenizer:
  source: base
  pad_to_multiple_of: 8
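
To reproduce the merge, the configuration above can be saved to a file and passed to mergekit's `mergekit-yaml` command. The snippet below is a minimal sketch, assuming mergekit is installed (`pip install mergekit`); the config filename and output directory are placeholders, not part of the original configuration.

```python
# Minimal sketch: run the DARE-TIES merge defined above via mergekit's CLI.
# Assumes `pip install mergekit` provides the `mergekit-yaml` entry point;
# "merge-config.yaml" and "./merged-model" are placeholder paths.
import subprocess

subprocess.run(
    [
        "mergekit-yaml",
        "merge-config.yaml",   # the configuration shown above, saved to disk
        "./merged-model",      # output directory for the merged weights
        "--cuda",              # perform the merge on GPU if one is available
    ],
    check=True,
)
```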