slices:
  - sources:
      - model: openchat/openchat-3.6-8b-20240522
        layer_range: [0, 32]
        parameters:
          filter_wise: false # calculate with per-tensor. Not recommended
      - model: MLP-KTLim/llama-3-Korean-Bllossom-8B
        layer_range: [0, 32]
        parameters:
          filter_wise: false # calculate with per-tensor. Not recommended
      - model: lcw99/llama-3-8b-it-ko-chang
        layer_range: [0, 32]
        parameters:
          filter_wise: false # calculate with per-tensor. Not recommended
      - model: beomi/Llama-3-Open-Ko-8B-Instruct-preview
        layer_range: [0, 32]
        parameters:
          filter_wise: false # calculate with per-tensor. Not recommended
      - model: maywell/Llama-3-Ko-8B-Instruct
        layer_range: [0, 32]
        parameters:
          filter_wise: false # calculate with per-tensor. Not recommended
      - model: tesser-ai/Tesser-Llama-3-Ko-8B
        layer_range: [0, 32]
        parameters:
          filter_wise: false # calculate with per-tensor. Not recommended
      - model: maum-ai/Llama-3-MAAL-8B-Instruct-v0.1
        layer_range: [0, 32]
        parameters:
          filter_wise: false # calculate with per-tensor. Not recommended
      - model: meta-llama/Meta-Llama-3.1-8B
        layer_range: [0, 32]
        parameters:
          filter_wise: false # calculate with per-tensor. Not recommended
      - model: NousResearch/Hermes-3-Llama-3.1-8B
        layer_range: [0, 32]
        parameters:
          filter_wise: false # calculate with per-tensor. Not recommended
merge_method: model_stock
base_model: NousResearch/Hermes-3-Llama-3.1-8B
dtype: bfloat16