Update README.md
README.md
```
python <NEMO_ROOT>/examples/asr/speech_classification/frame_vad_infer.py \
    input_manifest=<Path of manifest file of evaluation data, where audio files should have unique names> \
    out_manifest_filepath=<Path of output manifest file>
```
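The `input_manifest` above is a standard NeMo JSON-lines manifest with one entry per audio file. Below is a minimal sketch for generating one; the directory `data/eval_wavs`, the output name `eval_manifest.json`, and the exact field set (`audio_filepath`, `duration`) are assumptions and should be adjusted to what your NeMo version expects.

```python
# Sketch: write a minimal JSON-lines manifest for frame_vad_infer.py.
# Assumptions: audio lives in data/eval_wavs/*.wav (hypothetical path) and the
# script accepts entries with "audio_filepath" and "duration" fields, the usual
# NeMo manifest convention. Audio file names must be unique.
import json
from pathlib import Path

import soundfile as sf

audio_dir = Path("data/eval_wavs")          # hypothetical input directory
manifest_path = Path("eval_manifest.json")  # hypothetical output manifest

with manifest_path.open("w") as fout:
    for wav in sorted(audio_dir.glob("*.wav")):
        entry = {
            "audio_filepath": str(wav.resolve()),
            "duration": sf.info(str(wav)).duration,  # duration in seconds
        }
        fout.write(json.dumps(entry) + "\n")
```

The resulting `eval_manifest.json` can then be passed as `input_manifest=` in the command above.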
### Export a PyTorch model to ONNX

```python
import torch, onnx

# ... (load the VAD model and build the `inputs` dict; elided here) ...

torch.onnx.export(
    model=vad_model,
    args=inputs,
    f=ONNX_EXPORT_PATH,
    input_names=["processed_signal", "processed_signal_length"],
    output_names=["output"],
    dynamic_axes={
        # ... (dynamic axis mappings elided) ...
    },
)

# Validate exported ONNX model
onnx.checker.check_model(onnx.load(ONNX_EXPORT_PATH))
```
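After the export, the graph can be run outside of NeMo with ONNX Runtime. The sketch below is a minimal example, not the official inference path: the input and output names come from the `torch.onnx.export` call above, while the feature shape `[batch, features, frames]`, the feature dimension of 80, the `int64` length dtype, and the `vad_model.onnx` path are assumptions that must match how the model was actually exported.

```python
# Sketch: run the exported VAD graph with ONNX Runtime.
# Input/output names match the export call above; shapes, dtypes, and the
# model path are assumptions and must match your export.
import numpy as np
import onnxruntime as ort

ONNX_EXPORT_PATH = "vad_model.onnx"  # assumed value; use the path from the export step

session = ort.InferenceSession(ONNX_EXPORT_PATH, providers=["CPUExecutionProvider"])

batch, n_features, n_frames = 1, 80, 1000  # assumed preprocessor output dimensions
processed_signal = np.random.randn(batch, n_features, n_frames).astype(np.float32)
processed_signal_length = np.array([n_frames], dtype=np.int64)  # length dtype assumed

(logits,) = session.run(
    ["output"],
    {
        "processed_signal": processed_signal,
        "processed_signal_length": processed_signal_length,
    },
)
print(logits.shape)  # frame-level VAD scores
```

In real use, `processed_signal` should come from the same preprocessor that produced the features during export, not random data.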

## Software Integration:
**Runtime Engine(s):**
* NeMo-2.0.0 <br>