models:
  - model: "D:\\mergekit\\_My_YAMLS\\Merge_base_70B_v17"
    parameters:
      weight:
        - filter: self_attn.q_proj
          value: [1,1,1,1,1,1,1,1,1,0.9,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.9,1,1,1,1,1,1]
        - filter: self_attn.k_proj
          value: [1,1,1,1,1,1,1,1,1,0.9,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.9,1,1,1,1,1,1]
        - filter: self_attn.v_proj
          value: 1
        - filter: self_attn.o_proj
          value: 1
        - filter: mlp.*_proj
          value: [1,1,1,1,1,1,1,1,1,1,1,1,0.9,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.9,1,1,1]
        - filter: input_layernorm
          value: 0.9
        - filter: post_attention_layernorm
          value: 0.9
        - filter: embed_tokens
          value: 0.9
        - filter: model.norm
          value: 0.9
        - filter: lm_head
          value: 0.9
        - value: 1
      density: 0.9
  - model: ReadyArt/The-Omega-Directive-L-70B-v1.0
    parameters:
      weight:
        - filter: self_attn.q_proj
          value: [0,0,0,0,0,0,0,0,0,0.1,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.1,0,0,0,0,0,0]
        - filter: self_attn.k_proj
          value: [0,0,0,0,0,0,0,0,0,0.1,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.1,0,0,0,0,0,0]
        - filter: self_attn.v_proj
          value: 0
        - filter: self_attn.o_proj
          value: 0
        - filter: mlp.*_proj
          value: [0,0,0,0,0,0,0,0,0,0,0,0,0.1,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.1,0,0,0]
        - filter: input_layernorm
          value: 0.1
        - filter: post_attention_layernorm
          value: 0.1
        - filter: embed_tokens
          value: 0.1
        - filter: model.norm
          value: 0.1
        - filter: lm_head
          value: 0.1
        - value: 0
      density: 0.2
merge_method: dare_ties
base_model: "D:\\mergekit\\_My_YAMLS\\Merge_base_70B_v17"
parameters:
  normalize: false
dtype: float32
out_dtype: bfloat16
chat_template: llama3
tokenizer:
  source: "D:\\mergekit\\_My_YAMLS\\Merge_base_70B_v17"
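For reference, a minimal sketch of running this dare_ties merge through mergekit's Python API rather than the mergekit-yaml CLI. It assumes mergekit is installed; the config filename and output directory are placeholders, not part of the recipe above.

```python
# Sketch: apply the merge config above via mergekit's Python API.
# Assumptions: mergekit is installed, the YAML above is saved as
# "merge_v17_omega.yaml" (placeholder name), and the output directory
# "./Merge_70B_v17_Omega" is likewise a placeholder.
import torch
import yaml

from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

CONFIG_YML = "merge_v17_omega.yaml"     # placeholder path to the YAML above
OUTPUT_PATH = "./Merge_70B_v17_Omega"   # placeholder output directory

# Parse and validate the merge configuration.
with open(CONFIG_YML, "r", encoding="utf-8") as fp:
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(fp))

# Execute the merge; weights are loaded, merged per the filters/densities
# in the config, and written to OUTPUT_PATH.
run_merge(
    merge_config,
    out_path=OUTPUT_PATH,
    options=MergeOptions(
        cuda=torch.cuda.is_available(),  # use GPU if one is available
        copy_tokenizer=True,
        lazy_unpickle=False,
        low_cpu_memory=False,
    ),
)
```

The equivalent CLI invocation would be `mergekit-yaml merge_v17_omega.yaml ./Merge_70B_v17_Omega --cuda`, with the same placeholder paths.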