<?xml version="1.0"?>
<net name="Model6" version="11">
	<layers>
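		<!-- Token-embedding subgraph: a [151655, 896] embedding table stored as int8 with per-row
		     f16 scales (NNCF int8_sym weight compression, see rt_info below) is dequantized on the
		     fly and indexed by the i64 token IDs, producing f32 embeddings of shape [-1, -1, 896]. -->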
<layer id="0" name="input" type="Parameter" version="opset1">
|
|
<data shape="?,?" element_type="i64" />
|
|
<output>
|
|
<port id="0" precision="I64" names="input">
|
|
<dim>-1</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
		<layer id="1" name="self.weight" type="Const" version="opset1">
			<data element_type="i8" shape="151655, 896" offset="0" size="135882880" />
			<output>
				<port id="0" precision="I8">
					<dim>151655</dim>
					<dim>896</dim>
				</port>
			</output>
		</layer>
		<layer id="2" name="Convert_154026" type="Convert" version="opset1">
			<data destination_type="f16" />
			<input>
				<port id="0" precision="I8">
					<dim>151655</dim>
					<dim>896</dim>
				</port>
			</input>
			<output>
				<port id="1" precision="FP16">
					<dim>151655</dim>
					<dim>896</dim>
				</port>
			</output>
		</layer>
		<layer id="3" name="self.weight/scale" type="Const" version="opset1">
			<data element_type="f16" shape="151655, 1" offset="135882880" size="303310" />
			<output>
				<port id="0" precision="FP16">
					<dim>151655</dim>
					<dim>1</dim>
				</port>
			</output>
		</layer>
		<layer id="4" name="self.weight/fq_weights_0" type="Multiply" version="opset1">
			<data auto_broadcast="numpy" />
			<input>
				<port id="0" precision="FP16">
					<dim>151655</dim>
					<dim>896</dim>
				</port>
				<port id="1" precision="FP16">
					<dim>151655</dim>
					<dim>1</dim>
				</port>
			</input>
			<output>
				<port id="2" precision="FP16">
					<dim>151655</dim>
					<dim>896</dim>
				</port>
			</output>
		</layer>
		<layer id="5" name="self.weight/fq_weights_0/convert" type="Convert" version="opset1">
			<data destination_type="f32" />
			<input>
				<port id="0" precision="FP16">
					<dim>151655</dim>
					<dim>896</dim>
				</port>
			</input>
			<output>
				<port id="1" precision="FP32">
					<dim>151655</dim>
					<dim>896</dim>
				</port>
			</output>
		</layer>
		<layer id="6" name="aten::embedding/Convert" type="Convert" version="opset1">
			<data destination_type="i32" />
			<input>
				<port id="0" precision="I64">
					<dim>-1</dim>
					<dim>-1</dim>
				</port>
			</input>
			<output>
				<port id="1" precision="I32">
					<dim>-1</dim>
					<dim>-1</dim>
				</port>
			</output>
		</layer>
		<layer id="7" name="aten::embedding/Constant" type="Const" version="opset1">
			<data element_type="i32" shape="" offset="136186190" size="4" />
			<output>
				<port id="0" precision="I32" />
			</output>
		</layer>
		<layer id="8" name="aten::embedding/Gather" type="Gather" version="opset8">
			<data batch_dims="0" />
			<input>
				<port id="0" precision="FP32">
					<dim>151655</dim>
					<dim>896</dim>
				</port>
				<port id="1" precision="I32">
					<dim>-1</dim>
					<dim>-1</dim>
				</port>
				<port id="2" precision="I32" />
			</input>
			<output>
				<port id="3" precision="FP32" names="inputs_embeds">
					<dim>-1</dim>
					<dim>-1</dim>
					<dim>896</dim>
				</port>
			</output>
		</layer>
		<layer id="9" name="Result_75015" type="Result" version="opset1">
			<input>
				<port id="0" precision="FP32">
					<dim>-1</dim>
					<dim>-1</dim>
					<dim>896</dim>
				</port>
			</input>
		</layer>
	</layers>
	<edges>
		<edge from-layer="0" from-port="0" to-layer="6" to-port="0" />
		<edge from-layer="1" from-port="0" to-layer="2" to-port="0" />
		<edge from-layer="2" from-port="1" to-layer="4" to-port="0" />
		<edge from-layer="3" from-port="0" to-layer="4" to-port="1" />
		<edge from-layer="4" from-port="2" to-layer="5" to-port="0" />
		<edge from-layer="5" from-port="1" to-layer="8" to-port="0" />
		<edge from-layer="6" from-port="1" to-layer="8" to-port="1" />
		<edge from-layer="7" from-port="0" to-layer="8" to-port="2" />
		<edge from-layer="8" from-port="3" to-layer="9" to-port="0" />
	</edges>
	<rt_info>
		<Runtime_version value="2024.6.0-17404-4c0f47d2335-releases/2024/6" />
		<conversion_parameters>
			<framework value="pytorch" />
			<is_python_object value="True" />
		</conversion_parameters>
		<nncf>
			<friendly_names_were_updated value="True" />
			<weight_compression>
				<advanced_parameters value="{'statistics_path': None, 'awq_params': {'subset_size': 32, 'percent_to_apply': 0.002, 'alpha_min': 0.0, 'alpha_max': 1.0, 'steps': 100}, 'scale_estimation_params': {'subset_size': 64, 'initial_steps': 5, 'scale_steps': 5, 'weight_penalty': -1.0}, 'gptq_params': {'damp_percent': 0.1, 'block_size': 128, 'subset_size': 128}, 'lora_correction_params': {'adapter_rank': 8, 'num_iterations': 3, 'apply_regularization': True, 'subset_size': 128, 'use_int8_adapters': True}}" />
				<all_layers value="False" />
				<awq value="False" />
				<backup_mode value="int8_asym" />
				<gptq value="False" />
				<group_size value="-1" />
				<ignored_scope value="[]" />
				<lora_correction value="False" />
				<mode value="int8_sym" />
				<ratio value="1.0" />
				<scale_estimation value="False" />
				<sensitivity_metric value="weight_quantization_error" />
			</weight_compression>
		</nncf>
		<optimum>
			<optimum_intel_version value="1.22.0.dev0+b49fcbb" />
			<optimum_version value="1.24.0.dev0" />
			<pytorch_version value="2.5.1" />
			<transformers_version value="4.46.3" />
		</optimum>
	</rt_info>
</net>
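<!--
  Usage sketch (not part of the IR): a minimal example of loading this IR and fetching the
  embeddings with the OpenVINO Python API. The file name "openvino_text_embeddings_model.xml"
  is an assumption; the companion .bin file holding the compressed weights must sit next to
  the XML.

  import numpy as np
  import openvino as ov

  core = ov.Core()
  # Assumed path; replace with the actual location of this IR and its .bin file.
  model = core.read_model("openvino_text_embeddings_model.xml")
  compiled = core.compile_model(model, "CPU")

  # The single input "input" takes i64 token IDs of shape [batch, seq_len].
  token_ids = np.array([[1, 2, 3, 4]], dtype=np.int64)
  embeddings = compiled(token_ids)[compiled.output(0)]  # f32, shape [1, 4, 896]
-->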