#!/bin/bash

# Convert the Tinytron model checkpoints to the MLC format and generate their
# MLC chat configs under ./bundle. Every model below uses the same quantization
# scheme and the same runtime limits (512-token context window, 16-token
# prefill chunks, batch size 1).

# Shared settings: output directory and q0f16, i.e. weights kept in float16
# with no further quantization.
MODEL_OUTPUT_PATH="./bundle"
QUANTIZATION="q0f16"
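# Optional safety checks: stop on the first failed command and make sure the
# mlc_llm CLI is available before starting the conversions.
set -euo pipefail
command -v mlc_llm >/dev/null 2>&1 || { echo "error: mlc_llm CLI not found in PATH" >&2; exit 1; }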
# Llama3.1-8B-Instruct-Tinytron
MODEL_PATH="Llama3.1-8B-Instruct-Tinytron"
MODEL_NAME="Llama3.1-8B-Instruct-Tinytron-MLC"

# Convert the checkpoint weights to the MLC weight format (conversion runs on
# the CPU).
mlc_llm convert_weight --device cpu ${MODEL_PATH} --quantization ${QUANTIZATION} -o ${MODEL_OUTPUT_PATH}/${MODEL_NAME}/

# Generate mlc-chat-config.json and the tokenizer files, using the llama-3_1
# conversation template.
mlc_llm gen_config ${MODEL_PATH} --conv-template llama-3_1 --quantization ${QUANTIZATION} --context-window-size 512 --prefill-chunk-size=16 --max-batch-size=1 -o ${MODEL_OUTPUT_PATH}/${MODEL_NAME}/
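# Optional sanity check for the bundle above, commented out to keep this script
# non-interactive (assumes mlc_llm can JIT-compile the model library locally):
# mlc_llm chat ${MODEL_OUTPUT_PATH}/${MODEL_NAME}/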
# Qwen2-7B-Instruct-Tinytron
MODEL_PATH="Qwen2-7B-Instruct-Tinytron"
MODEL_NAME="Qwen2-7B-Instruct-Tinytron-MLC"

# Same two steps, with the qwen2 conversation template.
mlc_llm convert_weight --device cpu ${MODEL_PATH} --quantization ${QUANTIZATION} -o ${MODEL_OUTPUT_PATH}/${MODEL_NAME}/

mlc_llm gen_config ${MODEL_PATH} --conv-template qwen2 --quantization ${QUANTIZATION} --context-window-size 512 --prefill-chunk-size=16 --max-batch-size=1 -o ${MODEL_OUTPUT_PATH}/${MODEL_NAME}/
# Phi-2-Tinytron-preview
MODEL_PATH="Phi-2-Tinytron-preview"
MODEL_NAME="Phi-2-Tinytron-preview-MLC"

# Phi-2-Tinytron also uses the qwen2 conversation template.
mlc_llm convert_weight --device cpu ${MODEL_PATH} --quantization ${QUANTIZATION} -o ${MODEL_OUTPUT_PATH}/${MODEL_NAME}/

mlc_llm gen_config ${MODEL_PATH} --conv-template qwen2 --quantization ${QUANTIZATION} --context-window-size 512 --prefill-chunk-size=16 --max-batch-size=1 -o ${MODEL_OUTPUT_PATH}/${MODEL_NAME}/
# Cauchy-3B-preview
MODEL_PATH="Cauchy-3B-preview"
MODEL_NAME="Cauchy-3B-preview-MLC"

# Cauchy-3B is converted with an explicit --model-type for both steps and uses
# the qwen2 conversation template.
mlc_llm convert_weight --model-type cauchy --device cpu ${MODEL_PATH} --quantization ${QUANTIZATION} -o ${MODEL_OUTPUT_PATH}/${MODEL_NAME}/

mlc_llm gen_config ${MODEL_PATH} --model-type cauchy --conv-template qwen2 --quantization ${QUANTIZATION} --context-window-size 512 --prefill-chunk-size=16 --max-batch-size=1 -o ${MODEL_OUTPUT_PATH}/${MODEL_NAME}/
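# After all four conversions, each model should have its own directory under
# ${MODEL_OUTPUT_PATH} (e.g. ./bundle/Cauchy-3B-preview-MLC/) holding the
# converted weight shards plus the generated mlc-chat-config.json and
# tokenizer files.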