# AWQ W4A16 quantization recipe in llm-compressor's serialized format.
default_stage:
  default_modifiers:
    AWQModifier:
      # Quantize every Linear layer to 4-bit weights with 16-bit activations;
      # skip the LM head, embeddings, and normalization layers.
      targets: [Linear]
      ignore: [lm_head, model.embed_tokens, 're:.*input_layernorm$',
        're:.*post_attention_layernorm$', model.norm]
      scheme: W4A16
      # Each mapping names a layer whose output activations are smoothed and
      # the downstream layers that absorb the inverse scales.
      mappings:
      - smooth_layer: re:.*input_layernorm$
        balance_layers: ['re:.*self_attn.q_proj$', 're:.*self_attn.k_proj$',
          're:.*self_attn.v_proj$', 're:.*mlp.gate_proj$', 're:.*mlp.up_proj$']
      - smooth_layer: re:.*v_proj$
        balance_layers: ['re:.*o_proj$']
      - smooth_layer: re:.*up_proj$
        balance_layers: ['re:.*down_proj$']
      # Offload cached calibration tensors to CPU between passes.
      offload_device: !!python/object/apply:torch.device [cpu]
      # Search for scales using both weight and activation magnitudes.
      duo_scaling: true
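
# ---------------------------------------------------------------------------
# Usage sketch (assumption, not part of the recipe): a file like this is
# typically consumed by llm-compressor's `oneshot` entry point. The model ID,
# calibration dataset, sample counts, and output paths below are illustrative
# placeholders, not taken from this repo.
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   from llmcompressor import oneshot
#
#   MODEL_ID = "meta-llama/Llama-3.1-8B-Instruct"  # hypothetical base model
#   model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype="auto")
#   tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
#
#   # Run AWQ calibration and W4A16 quantization as specified above.
#   oneshot(
#       model=model,
#       recipe="recipe.yaml",      # this file, saved locally
#       dataset="open_platypus",   # example built-in calibration dataset
#       max_seq_length=2048,
#       num_calibration_samples=256,
#   )
#
#   # Save in compressed-tensors format for loading with vLLM/transformers.
#   model.save_pretrained("Llama-3.1-8B-awq-w4a16", save_compressed=True)
#   tokenizer.save_pretrained("Llama-3.1-8B-awq-w4a16")
# ---------------------------------------------------------------------------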