<?xml version="1.0"?>
<net name="torch_jit" version="11">
<layers>
<layer id="0" name="in" type="Parameter" version="opset1">
<data shape="1,16,128,32" element_type="f32" />
<output>
<port id="0" precision="FP32" names="in">
<dim>1</dim>
<dim>16</dim>
<dim>128</dim>
<dim>32</dim>
</port>
</output>
</layer>
<layer id="1" name="weight_compressed" type="Const" version="opset1">
<data element_type="f16" shape="16, 16, 1, 1" offset="0" size="512" />
<output>
<port id="0" precision="FP16">
<dim>16</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2" name="weight" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>16</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="weight">
<dim>16</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="3" name="/Conv/WithoutBiases" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="0, 0" pads_end="0, 0" auto_pad="explicit" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>128</dim>
<dim>32</dim>
</port>
<port id="1" precision="FP32">
<dim>16</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>128</dim>
<dim>32</dim>
</port>
</output>
</layer>
<layer id="4" name="Reshape_16_compressed" type="Const" version="opset1">
<data element_type="f16" shape="1, 16, 1, 1" offset="512" size="32" />
<output>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="5" name="Reshape_16" type="Convert" version="opset1">
<data destination_type="f32" />
<rt_info>
<attribute name="decompression" version="0" />
</rt_info>
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="6" name="out" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>128</dim>
<dim>32</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="out">
<dim>1</dim>
<dim>16</dim>
<dim>128</dim>
<dim>32</dim>
</port>
</output>
</layer>
<layer id="7" name="out/sink_port_0" type="Result" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>128</dim>
<dim>32</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="3" to-port="0" />
<edge from-layer="1" from-port="0" to-layer="2" to-port="0" />
<edge from-layer="2" from-port="1" to-layer="3" to-port="1" />
<edge from-layer="3" from-port="2" to-layer="6" to-port="0" />
<edge from-layer="4" from-port="0" to-layer="5" to-port="0" />
<edge from-layer="5" from-port="1" to-layer="6" to-port="1" />
<edge from-layer="6" from-port="2" to-layer="7" to-port="0" />
</edges>
<rt_info>
<MO_version value="2024.4.0-16579-c3152d32c9c-releases/2024/4" />
<Runtime_version value="2024.4.0-16579-c3152d32c9c-releases/2024/4" />
<conversion_parameters>
<input_model value="DIR\post_quant_conv.onnx" />
<is_python_api_used value="False" />
</conversion_parameters>
<legacy_frontend value="False" />
</rt_info>
</net>