# llama-scout-fp8 / recipe.yaml
# Duplicated from RedHatAI/Llama-4-Scout-17B-16E-Instruct-FP8-dynamic
default_stage:
  default_modifiers:
    QuantizationModifier:
      config_groups:
        group_0:
          targets: [Linear]
          # Weights: static per-channel FP8; scales are fitted by an MSE observer.
          weights: {num_bits: 8, type: float, symmetric: true, strategy: channel, observer: mse}
          # Input activations: dynamic per-token FP8, so no observer or calibration pass is needed.
          input_activations: {num_bits: 8, type: float, symmetric: true, strategy: token, dynamic: true, observer: null}
          output_activations: null
      # Keep the LM head, attention layers, MoE router, and vision tower at original precision.
      ignore: ['re:.*lm_head', 're:.*self_attn', 're:.*router', 're:.*vision_model', 're:.*multi_modal_projector']
      targets: [Linear]
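
# A minimal sketch (kept as comments so the file stays valid YAML) of how a
# recipe like this is typically applied with llm-compressor's oneshot API.
# The model id, model class, and save path are assumptions for illustration,
# not taken from this repository:
#
#   from transformers import Llama4ForConditionalGeneration
#   from llmcompressor import oneshot
#
#   model = Llama4ForConditionalGeneration.from_pretrained(
#       "meta-llama/Llama-4-Scout-17B-16E-Instruct", torch_dtype="auto")
#
#   # FP8-dynamic needs no calibration dataset: weight scales come from the
#   # MSE observer, and activation scales are computed per token at inference.
#   oneshot(model=model, recipe="recipe.yaml")
#
#   model.save_pretrained("Llama-4-Scout-17B-16E-Instruct-FP8-dynamic")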