zhengrongzhang and wangfangyuan committed
Commit e0810ef
1 Parent(s): 275bffb

update for regression test (#5)


- update for regression test (1eff13d3eebb0fd4418ec88b8a4017f82e424cd6)


Co-authored-by: fangyuan wang <[email protected]>

Files changed (3)
  1. README.md +3 -3
  2. eval_onnx.py +2 -2
  3. infer_onnx.py +3 -3
README.md CHANGED
@@ -74,7 +74,7 @@ val: val2017.txt # val images (relative to 'path') 5000 images
 
  ### Test & Evaluation
 
- - Code snippet from [`onnx_inference.py`](onnx_inference.py) on how to use
+ - Code snippet from [`infer_onnx.py`](./infer_onnx.py) on how to use
  ```python
  args = make_parser().parse_args()
  source = args.image_path
@@ -106,12 +106,12 @@ for batch in dataset:
 
  - Run inference for a single image
  ```python
- python onnx_inference.py -m ./yolov8m.onnx -i /Path/To/Your/Image --ipu --provider_config /Path/To/Your/Provider_config
+ python infer_onnx.py --onnx_model ./yolov8m.onnx -i /Path/To/Your/Image --ipu --provider_config /Path/To/Your/Provider_config
  ```
  *Note: __vaip_config.json__ is located at the setup package of Ryzen AI (refer to [Installation](#installation))*
  - Test accuracy of the quantized model
  ```python
- python onnx_eval.py -m ./yolov8m.onnx --ipu --provider_config /Path/To/Your/Provider_config
+ python eval_onnx.py --onnx_model ./yolov8m.onnx --ipu --provider_config /Path/To/Your/Provider_config
  ```
 
  ### Performance
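
For orientation, here is a minimal sketch of loading the model that the updated README commands point at, using only onnxruntime and numpy; the provider wiring mirrors the infer_onnx.py hunk in this commit, while the vaip_config.json path and the 1x3x640x640 float32 dummy input are assumptions, not taken from this diff.

```python
# Minimal sketch: load the YOLOv8 ONNX model referenced by the README commands.
# Assumptions (not from this diff): provider config filename, dummy input shape.
import numpy as np
import onnxruntime as ort

onnx_model = "./yolov8m.onnx"          # value passed as --onnx_model in the README
provider_config = "vaip_config.json"   # value passed as --provider_config

# Provider wiring mirrors the infer_onnx.py hunk in this commit.
session = ort.InferenceSession(
    onnx_model,
    providers=["VitisAIExecutionProvider"],
    provider_options=[{"config_file": provider_config}],
)

dummy = np.zeros((1, 3, 640, 640), dtype=np.float32)  # assumed NCHW 640x640 input
outputs = session.run(None, {session.get_inputs()[0].name: dummy})
print([o.shape for o in outputs])
```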
eval_onnx.py CHANGED
@@ -271,7 +271,7 @@ def parse_opt():
      parser = argparse.ArgumentParser()
      parser.add_argument('--ipu', action='store_true', help='flag for ryzen ai')
      parser.add_argument('--provider_config', default='', type=str, help='provider config for ryzen ai')
-     parser.add_argument("-m", "--model", default="./yolov8m_qat.onnx", type=str, help='onnx_weight')
+     parser.add_argument("-m", "--onnx_model", default="./yolov8m.onnx", type=str, help='onnx_weight')
      opt = parser.parse_args()
      return opt
 
@@ -280,7 +280,7 @@ if __name__ == "__main__":
      opt = parse_opt()
      args = get_cfg(DEFAULT_CFG)
      args.ipu = opt.ipu
-     args.onnx_weight = opt.model
+     args.onnx_weight = opt.onnx_model
      args.provider_config = opt.provider_config
      validator = DetectionValidator(args=args)
      validator()
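
A small, hypothetical regression-style check of the renamed flag (not part of this commit); parse_opt below restates the post-commit definition from eval_onnx.py so the snippet is self-contained, and the sys.argv values are made up for illustration.

```python
# Hypothetical check that the renamed --onnx_model flag parses as expected.
import argparse
import sys

def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--ipu', action='store_true', help='flag for ryzen ai')
    parser.add_argument('--provider_config', default='', type=str, help='provider config for ryzen ai')
    parser.add_argument("-m", "--onnx_model", default="./yolov8m.onnx", type=str, help='onnx_weight')
    return parser.parse_args()

sys.argv = ["eval_onnx.py", "--onnx_model", "./yolov8m.onnx",
            "--ipu", "--provider_config", "vaip_config.json"]
opt = parse_opt()
assert opt.onnx_model == "./yolov8m.onnx"   # was opt.model before this commit
assert opt.ipu and opt.provider_config == "vaip_config.json"
```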
infer_onnx.py CHANGED
@@ -75,9 +75,9 @@ def make_parser():
      parser = argparse.ArgumentParser("onnxruntime inference sample")
      parser.add_argument(
          "-m",
-         "--model",
+         "--onnx_model",
          type=str,
-         default="./yolov8m_qat.onnx",
+         default="./yolov8m.onnx",
          help="input your onnx model.",
      )
      parser.add_argument(
@@ -121,7 +121,7 @@ if __name__ == '__main__':
      dataset = LoadImages(
          source, imgsz=imgsz, stride=32, auto=False, transforms=None, vid_stride=1
      )
-     onnx_weight = args.model
+     onnx_weight = args.onnx_model
      if args.ipu:
          providers = ["VitisAIExecutionProvider"]
          provider_options = [{"config_file": args.provider_config}]
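
A self-contained sketch of what plausibly follows this hunk: onnx_weight and the provider lists feeding an onnxruntime InferenceSession. Only the --ipu branch is visible in the diff; the CPUExecutionProvider fallback and the build_session helper name are assumptions for illustration.

```python
# Sketch of how onnx_weight and the provider lists could feed an ONNX Runtime session.
import onnxruntime as ort

def build_session(onnx_weight: str, ipu: bool, provider_config: str) -> ort.InferenceSession:
    if ipu:
        # Matches the --ipu branch shown in the diff above.
        providers = ["VitisAIExecutionProvider"]
        provider_options = [{"config_file": provider_config}]
    else:
        providers = ["CPUExecutionProvider"]   # assumed fallback, not shown in the diff
        provider_options = None
    return ort.InferenceSession(onnx_weight, providers=providers, provider_options=provider_options)

# e.g. session = build_session("./yolov8m.onnx", ipu=True, provider_config="vaip_config.json")
```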