Upload 20 files
- model_farm_real_esrgan_x4plus_qcs6490_qnn2.31_int8_aidlite/README.md +49 -0
- model_farm_real_esrgan_x4plus_qcs6490_qnn2.31_int8_aidlite/models/real_esrgan_x4plus_w8a8.qnn231.ctx.bin +3 -0
- model_farm_real_esrgan_x4plus_qcs6490_qnn2.31_int8_aidlite/python/original_image.jpg +0 -0
- model_farm_real_esrgan_x4plus_qcs6490_qnn2.31_int8_aidlite/python/run_test.py +100 -0
- model_farm_real_esrgan_x4plus_qcs6490_qnn2.31_w8a16_aidlite/README.md +49 -0
- model_farm_real_esrgan_x4plus_qcs6490_qnn2.31_w8a16_aidlite/models/real_esrgan_x4plus_w8a16.qnn231.ctx.bin +3 -0
- model_farm_real_esrgan_x4plus_qcs6490_qnn2.31_w8a16_aidlite/python/original_image.jpg +0 -0
- model_farm_real_esrgan_x4plus_qcs6490_qnn2.31_w8a16_aidlite/python/run_test.py +100 -0
- model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_fp16_aidlite/README.md +49 -0
- model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_fp16_aidlite/models/real_esrgan_x4plus_fp16.qnn231.ctx.bin +3 -0
- model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_fp16_aidlite/python/original_image.jpg +0 -0
- model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_fp16_aidlite/python/run_test.py +100 -0
- model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_int8_aidlite/README.md +48 -0
- model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_int8_aidlite/models/real_esrgan_x4plus_w8a8.qnn231.ctx.bin +3 -0
- model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_int8_aidlite/python/original_image.jpg +0 -0
- model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_int8_aidlite/python/run_test.py +100 -0
- model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_w8a16_aidlite/README.md +49 -0
- model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_w8a16_aidlite/models/real_esrgan_x4plus_w8a16.qnn231.ctx.bin +3 -0
- model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_w8a16_aidlite/python/original_image.jpg +0 -0
- model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_w8a16_aidlite/python/run_test.py +100 -0
model_farm_real_esrgan_x4plus_qcs6490_qnn2.31_int8_aidlite/README.md
ADDED
@@ -0,0 +1,49 @@
## Model Information

### Source model

- Input shape: [1x3x128x128]
- Number of parameters: 15.92M
- Model size: 67.76 MB
- Output shape: [1x3x512x512]

Source model repository: [Real-ESRGAN-x4plus](https://github.com/xinntao/Real-ESRGAN)

### Converted model

- Precision: INT8
- Backend: QNN2.31
- Target Device: FV01 QCS6490

## Model Conversion Reference

Users can find the model conversion reference at [aimo.aidlux.com](https://aimo.aidlux.com/#/public/f0a4a851-b89a-4ccb-9035-ddd7b6494801).

## Inference with AidLite SDK

### SDK installation

Model Farm uses the AidLite SDK as its model inference SDK. For details, please refer to the [AidLite Developer Documentation](https://v2.docs.aidlux.com/en/sdk-api/aidlite-sdk/).

- Install AidLite SDK

```bash
# Install the appropriate version of the AidLite SDK
sudo aid-pkg update
sudo aid-pkg install aidlite-sdk
# Install the QNN package that matches the backend listed above, e.g. for QNN2.23: sudo aid-pkg install aidlite-qnn223
sudo aid-pkg install aidlite-{QNN VERSION}
```

- Verify AidLite SDK

```bash
# Check the AidLite SDK C++ library version
python3 -c "import aidlite ; print(aidlite.get_library_version())"

# Check the AidLite SDK Python library version
python3 -c "import aidlite ; print(aidlite.get_py_library_version())"
```

### Run Demo

```bash
cd model_farm_real_esrgan_x4plus_qcs6490_qnn2.31_int8_aidlite
python3 python/run_test.py --target_model ./models/real_esrgan_x4plus_w8a8.qnn231.ctx.bin --imgs ./python/original_image.jpg --invoke_nums 10
```
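Note: the source-model shapes listed above are NCHW ([1x3x128x128] in, [1x3x512x512] out), while python/run_test.py declares the converted context binary's tensors as NHWC ([1,128,128,3] / [1,512,512,3]). A minimal sketch of the layout conversion, in case you are adapting an existing NCHW preprocessing pipeline (the input array here is hypothetical):

```python
import numpy as np

# Hypothetical NCHW float32 batch, e.g. from a PyTorch-style pipeline.
nchw = np.random.rand(1, 3, 128, 128).astype(np.float32)

# The converted model is fed NHWC (see python/run_test.py); transpose and
# make the buffer contiguous before handing it to set_input_tensor().
nhwc = np.ascontiguousarray(nchw.transpose(0, 2, 3, 1))
assert nhwc.shape == (1, 128, 128, 3)
```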
model_farm_real_esrgan_x4plus_qcs6490_qnn2.31_int8_aidlite/models/real_esrgan_x4plus_w8a8.qnn231.ctx.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:da32810d1b4b81f307f2bd2f717be93ae413f7bbb37ee1f077081f799c3cd679
size 22285152
model_farm_real_esrgan_x4plus_qcs6490_qnn2.31_int8_aidlite/python/original_image.jpg
ADDED
model_farm_real_esrgan_x4plus_qcs6490_qnn2.31_int8_aidlite/python/run_test.py
ADDED
@@ -0,0 +1,100 @@
import time
import argparse

import aidlite
import cv2
import numpy as np


def run(input_shapes, output_shapes, is_quant=True):
    args = parser_args()
    model_path = args.target_model
    img_path = args.imgs
    model_type = args.model_type
    invoke_nums = args.invoke_nums

    # Declare float32 NHWC input/output for the compiled QNN context binary.
    model = aidlite.Model.create_instance(model_path)
    model.set_model_properties(input_shapes, aidlite.DataType.TYPE_FLOAT32,
                               output_shapes, aidlite.DataType.TYPE_FLOAT32)

    config = aidlite.Config.create_instance()
    config.implement_type = aidlite.ImplementType.TYPE_LOCAL
    if model_type.lower() == "qnn":
        config.framework_type = aidlite.FrameworkType.TYPE_QNN231
        config.accelerate_type = aidlite.AccelerateType.TYPE_DSP
    elif model_type.lower() in ("snpe2", "snpe"):
        config.framework_type = aidlite.FrameworkType.TYPE_SNPE2
        config.accelerate_type = aidlite.AccelerateType.TYPE_DSP

    config.number_of_threads = 4
    if is_quant:
        config.is_quantify_model = 1

    interpreter = aidlite.InterpreterBuilder.build_interpretper_from_model_and_config(model, config)
    if interpreter is None:
        print("build_interpretper_from_model_and_config failed !")
        return False
    result = interpreter.init()
    if result != 0:
        print("interpreter init failed !")
        return False
    result = interpreter.load_model()
    if result != 0:
        print("interpreter load model failed !")
        return False
    print("model load success!")

    # Preprocessing: BGR -> RGB, resize to the 128x128 model input, scale to [0, 1].
    frame = cv2.imread(img_path)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    img_input = cv2.resize(frame, (128, 128))
    img_input = (img_input / 255.).astype(np.float32)

    invoke_time = []
    for i in range(invoke_nums):
        result = interpreter.set_input_tensor(0, img_input.data)
        if result != 0:
            print("interpreter set_input_tensor() failed")
        t1 = time.time()
        result = interpreter.invoke()
        cost_time = (time.time() - t1) * 1000  # milliseconds
        invoke_time.append(cost_time)
        if result != 0:
            print("interpreter invoke() failed")
        qnn_out = interpreter.get_output_tensor(0)
        if qnn_out is None:
            print("sample : interpreter->get_output_tensor() 0 failed !")
    interpreter.destory()

    # Latency statistics over all invocations.
    max_invoke_time = max(invoke_time)
    min_invoke_time = min(invoke_time)
    mean_invoke_time = sum(invoke_time) / invoke_nums
    var_invoketime = np.var(invoke_time)
    print("====================================")
    print(f"QNN invoke time:\n --mean_invoke_time is {mean_invoke_time} \n --max_invoke_time is {max_invoke_time} \n --min_invoke_time is {min_invoke_time} \n --var_invoketime is {var_invoketime}")
    print("====================================")

    return qnn_out


def parser_args():
    parser = argparse.ArgumentParser(description="Run model benchmarks")
    parser.add_argument('--target_model', type=str, default='./models/real_esrgan_x4plus_w8a8.qnn231.ctx.bin', help="Inference model path")
    parser.add_argument('--imgs', type=str, default='./python/original_image.jpg', help="Predict images path")
    parser.add_argument('--invoke_nums', type=int, default=10, help="Inference nums")
    parser.add_argument('--model_type', type=str, default='QNN', help="Run backend")
    args = parser.parse_args()
    return args


if __name__ == "__main__":
    input_shapes = [[1, 128, 128, 3]]
    output_shapes = [[1, 512, 512, 3]]
    qnn_out = run(input_shapes, output_shapes)
    # Postprocessing: reshape the flat output to NHWC, rescale to [0, 255], save as BGR.
    qnn_out = qnn_out.reshape(1, 512, 512, 3)
    qnn_img = (qnn_out[0] * 255).astype(np.uint8)
    qnn_img = cv2.cvtColor(qnn_img, cv2.COLOR_RGB2BGR)
    cv2.imwrite("python/results.png", qnn_img)
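The demo resizes the whole input image down to the fixed 128x128 network input, which discards detail on larger photos. If you instead want to upscale a full-resolution image, one common approach is to tile it into 128x128 patches and stitch the 4x outputs. Below is a minimal sketch under that assumption; `infer_128` is a hypothetical callable wrapping the interpreter (a wrapper sketch appears after the next script), and the simple non-overlapping tiling can leave visible seams:

```python
import numpy as np

def upscale_tiled(frame_rgb, infer_128):
    """Tile an RGB uint8 image into 128x128 patches, run each through the x4
    model, and stitch the 512x512 outputs back together.

    infer_128: hypothetical callable taking a float32 (128, 128, 3) array in
    [0, 1] and returning a float32 (512, 512, 3) array.
    """
    h, w, _ = frame_rgb.shape
    # Reflect-pad so both dimensions are multiples of the fixed model input.
    ph, pw = (-h) % 128, (-w) % 128
    padded = np.pad(frame_rgb, ((0, ph), (0, pw), (0, 0)), mode="reflect")
    H, W, _ = padded.shape
    out = np.zeros((H * 4, W * 4, 3), dtype=np.float32)
    for y in range(0, H, 128):
        for x in range(0, W, 128):
            tile = (padded[y:y + 128, x:x + 128] / 255.).astype(np.float32)
            out[y * 4:(y + 128) * 4, x * 4:(x + 128) * 4] = infer_128(tile)
    # Crop the padding back off and return uint8.
    return (out[:h * 4, :w * 4] * 255).clip(0, 255).astype(np.uint8)
```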
model_farm_real_esrgan_x4plus_qcs6490_qnn2.31_w8a16_aidlite/README.md
ADDED
@@ -0,0 +1,49 @@
## Model Information

### Source model

- Input shape: [1x3x128x128]
- Number of parameters: 15.92M
- Model size: 67.76 MB
- Output shape: [1x3x512x512]

Source model repository: [Real-ESRGAN-x4plus](https://github.com/xinntao/Real-ESRGAN)

### Converted model

- Precision: W8A16
- Backend: QNN2.31
- Target Device: FV01 QCS6490

## Model Conversion Reference

Users can find the model conversion reference at [aimo.aidlux.com](https://aimo.aidlux.com/#/public/898d1eba-352d-441a-8b83-663e86cd433b).

## Inference with AidLite SDK

### SDK installation

Model Farm uses the AidLite SDK as its model inference SDK. For details, please refer to the [AidLite Developer Documentation](https://v2.docs.aidlux.com/en/sdk-api/aidlite-sdk/).

- Install AidLite SDK

```bash
# Install the appropriate version of the AidLite SDK
sudo aid-pkg update
sudo aid-pkg install aidlite-sdk
# Install the QNN package that matches the backend listed above, e.g. for QNN2.23: sudo aid-pkg install aidlite-qnn223
sudo aid-pkg install aidlite-{QNN VERSION}
```

- Verify AidLite SDK

```bash
# Check the AidLite SDK C++ library version
python3 -c "import aidlite ; print(aidlite.get_library_version())"

# Check the AidLite SDK Python library version
python3 -c "import aidlite ; print(aidlite.get_py_library_version())"
```

### Run Demo

```bash
cd model_farm_real_esrgan_x4plus_qcs6490_qnn2.31_w8a16_aidlite
python3 python/run_test.py --target_model ./models/real_esrgan_x4plus_w8a16.qnn231.ctx.bin --imgs ./python/original_image.jpg --invoke_nums 10
```
model_farm_real_esrgan_x4plus_qcs6490_qnn2.31_w8a16_aidlite/models/real_esrgan_x4plus_w8a16.qnn231.ctx.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:31b0b4b88d73a8bd16b8a8a7366e07f810dec85bf04b2c91ac1eb4b512e24f70
size 27327328
model_farm_real_esrgan_x4plus_qcs6490_qnn2.31_w8a16_aidlite/python/original_image.jpg
ADDED
model_farm_real_esrgan_x4plus_qcs6490_qnn2.31_w8a16_aidlite/python/run_test.py
ADDED
@@ -0,0 +1,100 @@
import time
import argparse

import aidlite
import cv2
import numpy as np


def run(input_shapes, output_shapes, is_quant=True):
    args = parser_args()
    model_path = args.target_model
    img_path = args.imgs
    model_type = args.model_type
    invoke_nums = args.invoke_nums

    # Declare float32 NHWC input/output for the compiled QNN context binary.
    model = aidlite.Model.create_instance(model_path)
    model.set_model_properties(input_shapes, aidlite.DataType.TYPE_FLOAT32,
                               output_shapes, aidlite.DataType.TYPE_FLOAT32)

    config = aidlite.Config.create_instance()
    config.implement_type = aidlite.ImplementType.TYPE_LOCAL
    if model_type.lower() == "qnn":
        config.framework_type = aidlite.FrameworkType.TYPE_QNN231
        config.accelerate_type = aidlite.AccelerateType.TYPE_DSP
    elif model_type.lower() in ("snpe2", "snpe"):
        config.framework_type = aidlite.FrameworkType.TYPE_SNPE2
        config.accelerate_type = aidlite.AccelerateType.TYPE_DSP

    config.number_of_threads = 4
    if is_quant:
        config.is_quantify_model = 1

    interpreter = aidlite.InterpreterBuilder.build_interpretper_from_model_and_config(model, config)
    if interpreter is None:
        print("build_interpretper_from_model_and_config failed !")
        return False
    result = interpreter.init()
    if result != 0:
        print("interpreter init failed !")
        return False
    result = interpreter.load_model()
    if result != 0:
        print("interpreter load model failed !")
        return False
    print("model load success!")

    # Preprocessing: BGR -> RGB, resize to the 128x128 model input, scale to [0, 1].
    frame = cv2.imread(img_path)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    img_input = cv2.resize(frame, (128, 128))
    img_input = (img_input / 255.).astype(np.float32)

    invoke_time = []
    for i in range(invoke_nums):
        result = interpreter.set_input_tensor(0, img_input.data)
        if result != 0:
            print("interpreter set_input_tensor() failed")
        t1 = time.time()
        result = interpreter.invoke()
        cost_time = (time.time() - t1) * 1000  # milliseconds
        invoke_time.append(cost_time)
        if result != 0:
            print("interpreter invoke() failed")
        qnn_out = interpreter.get_output_tensor(0)
        if qnn_out is None:
            print("sample : interpreter->get_output_tensor() 0 failed !")
    interpreter.destory()

    # Latency statistics over all invocations.
    max_invoke_time = max(invoke_time)
    min_invoke_time = min(invoke_time)
    mean_invoke_time = sum(invoke_time) / invoke_nums
    var_invoketime = np.var(invoke_time)
    print("====================================")
    print(f"QNN invoke time:\n --mean_invoke_time is {mean_invoke_time} \n --max_invoke_time is {max_invoke_time} \n --min_invoke_time is {min_invoke_time} \n --var_invoketime is {var_invoketime}")
    print("====================================")

    return qnn_out


def parser_args():
    parser = argparse.ArgumentParser(description="Run model benchmarks")
    parser.add_argument('--target_model', type=str, default='./models/real_esrgan_x4plus_w8a16.qnn231.ctx.bin', help="Inference model path")
    parser.add_argument('--imgs', type=str, default='./python/original_image.jpg', help="Predict images path")
    parser.add_argument('--invoke_nums', type=int, default=10, help="Inference nums")
    parser.add_argument('--model_type', type=str, default='QNN', help="Run backend")
    args = parser.parse_args()
    return args


if __name__ == "__main__":
    input_shapes = [[1, 128, 128, 3]]
    output_shapes = [[1, 512, 512, 3]]
    qnn_out = run(input_shapes, output_shapes)
    # Postprocessing: reshape the flat output to NHWC, rescale to [0, 255], save as BGR.
    qnn_out = qnn_out.reshape(1, 512, 512, 3)
    qnn_img = (qnn_out[0] * 255).astype(np.uint8)
    qnn_img = cv2.cvtColor(qnn_img, cv2.COLOR_RGB2BGR)
    cv2.imwrite("python/results.png", qnn_img)
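For reuse outside run_test.py's run() (for example as the `infer_128` callable in the tiling sketch above), the interpreter setup can be wrapped in a factory. This is a sketch assuming exactly the AidLite calls the script itself uses, not a separate API reference:

```python
import aidlite
import numpy as np

def make_infer_128(model_path):
    """Build a callable around an AidLite interpreter configured as in run_test.py.

    Returns infer(tile): tile is float32 (128, 128, 3) in [0, 1];
    the result is float32 (512, 512, 3).
    """
    model = aidlite.Model.create_instance(model_path)
    model.set_model_properties([[1, 128, 128, 3]], aidlite.DataType.TYPE_FLOAT32,
                               [[1, 512, 512, 3]], aidlite.DataType.TYPE_FLOAT32)
    config = aidlite.Config.create_instance()
    config.implement_type = aidlite.ImplementType.TYPE_LOCAL
    config.framework_type = aidlite.FrameworkType.TYPE_QNN231
    config.accelerate_type = aidlite.AccelerateType.TYPE_DSP
    config.is_quantify_model = 1
    interpreter = aidlite.InterpreterBuilder.build_interpretper_from_model_and_config(model, config)
    interpreter.init()
    interpreter.load_model()

    def infer(tile):
        interpreter.set_input_tensor(0, np.ascontiguousarray(tile, dtype=np.float32).data)
        interpreter.invoke()
        return interpreter.get_output_tensor(0).reshape(512, 512, 3)

    return infer
```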
model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_fp16_aidlite/README.md
ADDED
@@ -0,0 +1,49 @@
## Model Information

### Source model

- Input shape: [1x3x128x128]
- Number of parameters: 15.92M
- Model size: 67.76 MB
- Output shape: [1x3x512x512]

Source model repository: [Real-ESRGAN-x4plus](https://github.com/xinntao/Real-ESRGAN)

### Converted model

- Precision: FP16
- Backend: QNN2.31
- Target Device: SNM972 QCS8550

## Model Conversion Reference

Users can find the model conversion reference at [aimo.aidlux.com](https://aimo.aidlux.com/#/public/c32bd626-0d40-4367-942f-03576c5118f5).

## Inference with AidLite SDK

### SDK installation

Model Farm uses the AidLite SDK as its model inference SDK. For details, please refer to the [AidLite Developer Documentation](https://v2.docs.aidlux.com/en/sdk-api/aidlite-sdk/).

- Install AidLite SDK

```bash
# Install the appropriate version of the AidLite SDK
sudo aid-pkg update
sudo aid-pkg install aidlite-sdk
# Install the QNN package that matches the backend listed above, e.g. for QNN2.23: sudo aid-pkg install aidlite-qnn223
sudo aid-pkg install aidlite-{QNN VERSION}
```

- Verify AidLite SDK

```bash
# Check the AidLite SDK C++ library version
python3 -c "import aidlite ; print(aidlite.get_library_version())"

# Check the AidLite SDK Python library version
python3 -c "import aidlite ; print(aidlite.get_py_library_version())"
```

### Run Demo

```bash
cd model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_fp16_aidlite
python3 python/run_test.py --target_model ./models/real_esrgan_x4plus_fp16.qnn231.ctx.bin --imgs ./python/original_image.jpg --invoke_nums 10
```
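After running the demo, a quick sanity check is to compare the model's output against plain bicubic upscaling of the same 128x128 input; a low PSNR between the two simply means the network is adding detail beyond bicubic interpolation rather than reproducing it. A minimal sketch using only OpenCV (paths follow the demo's conventions; python/results.png is what run_test.py writes):

```python
import cv2

# Recreate the 128x128 network input and a bicubic x4 baseline from it.
lr = cv2.resize(cv2.imread("python/original_image.jpg"), (128, 128))
bicubic = cv2.resize(lr, (512, 512), interpolation=cv2.INTER_CUBIC)

# Load the super-resolved result written by run_test.py.
sr = cv2.imread("python/results.png")

# PSNR here measures how far the model output deviates from the baseline.
print("PSNR(bicubic, model output):", cv2.PSNR(bicubic, sr))
```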
model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_fp16_aidlite/models/real_esrgan_x4plus_fp16.qnn231.ctx.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b43a3c9d94b80677249c5afbe3502511317117ad96ec1b7ecb9c27f448b64434
size 39803752
model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_fp16_aidlite/python/original_image.jpg
ADDED
model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_fp16_aidlite/python/run_test.py
ADDED
@@ -0,0 +1,100 @@
import time
import argparse

import aidlite
import cv2
import numpy as np


def run(input_shapes, output_shapes, is_quant=True):
    args = parser_args()
    model_path = args.target_model
    img_path = args.imgs
    model_type = args.model_type
    invoke_nums = args.invoke_nums

    # Declare float32 NHWC input/output for the compiled QNN context binary.
    model = aidlite.Model.create_instance(model_path)
    model.set_model_properties(input_shapes, aidlite.DataType.TYPE_FLOAT32,
                               output_shapes, aidlite.DataType.TYPE_FLOAT32)

    config = aidlite.Config.create_instance()
    config.implement_type = aidlite.ImplementType.TYPE_LOCAL
    if model_type.lower() == "qnn":
        config.framework_type = aidlite.FrameworkType.TYPE_QNN231
        config.accelerate_type = aidlite.AccelerateType.TYPE_DSP
    elif model_type.lower() in ("snpe2", "snpe"):
        config.framework_type = aidlite.FrameworkType.TYPE_SNPE2
        config.accelerate_type = aidlite.AccelerateType.TYPE_DSP

    config.number_of_threads = 4
    if is_quant:
        config.is_quantify_model = 1

    interpreter = aidlite.InterpreterBuilder.build_interpretper_from_model_and_config(model, config)
    if interpreter is None:
        print("build_interpretper_from_model_and_config failed !")
        return False
    result = interpreter.init()
    if result != 0:
        print("interpreter init failed !")
        return False
    result = interpreter.load_model()
    if result != 0:
        print("interpreter load model failed !")
        return False
    print("model load success!")

    # Preprocessing: BGR -> RGB, resize to the 128x128 model input, scale to [0, 1].
    frame = cv2.imread(img_path)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    img_input = cv2.resize(frame, (128, 128))
    img_input = (img_input / 255.).astype(np.float32)

    invoke_time = []
    for i in range(invoke_nums):
        result = interpreter.set_input_tensor(0, img_input.data)
        if result != 0:
            print("interpreter set_input_tensor() failed")
        t1 = time.time()
        result = interpreter.invoke()
        cost_time = (time.time() - t1) * 1000  # milliseconds
        invoke_time.append(cost_time)
        if result != 0:
            print("interpreter invoke() failed")
        qnn_out = interpreter.get_output_tensor(0)
        if qnn_out is None:
            print("sample : interpreter->get_output_tensor() 0 failed !")
    interpreter.destory()

    # Latency statistics over all invocations.
    max_invoke_time = max(invoke_time)
    min_invoke_time = min(invoke_time)
    mean_invoke_time = sum(invoke_time) / invoke_nums
    var_invoketime = np.var(invoke_time)
    print("====================================")
    print(f"QNN invoke time:\n --mean_invoke_time is {mean_invoke_time} \n --max_invoke_time is {max_invoke_time} \n --min_invoke_time is {min_invoke_time} \n --var_invoketime is {var_invoketime}")
    print("====================================")

    return qnn_out


def parser_args():
    parser = argparse.ArgumentParser(description="Run model benchmarks")
    parser.add_argument('--target_model', type=str, default='./models/real_esrgan_x4plus_fp16.qnn231.ctx.bin', help="Inference model path")
    parser.add_argument('--imgs', type=str, default='./python/original_image.jpg', help="Predict images path")
    parser.add_argument('--invoke_nums', type=int, default=10, help="Inference nums")
    parser.add_argument('--model_type', type=str, default='QNN', help="Run backend")
    args = parser.parse_args()
    return args


if __name__ == "__main__":
    input_shapes = [[1, 128, 128, 3]]
    output_shapes = [[1, 512, 512, 3]]
    qnn_out = run(input_shapes, output_shapes)
    # Postprocessing: reshape the flat output to NHWC, rescale to [0, 255], save as BGR.
    qnn_out = qnn_out.reshape(1, 512, 512, 3)
    qnn_img = (qnn_out[0] * 255).astype(np.uint8)
    qnn_img = cv2.cvtColor(qnn_img, cv2.COLOR_RGB2BGR)
    cv2.imwrite("python/results.png", qnn_img)
model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_int8_aidlite/README.md
ADDED
@@ -0,0 +1,48 @@
## Model Information

### Source model

- Input shape: [1x3x128x128]
- Number of parameters: 15.92M
- Model size: 67.76 MB
- Output shape: [1x3x512x512]

Source model repository: [Real-ESRGAN-x4plus](https://github.com/xinntao/Real-ESRGAN)

### Converted model

- Precision: INT8
- Backend: QNN2.31
- Target Device: SNM972 QCS8550

## Model Conversion Reference

Users can find the model conversion reference at [aimo.aidlux.com](https://aimo.aidlux.com/#/public/82096bda-553e-42a7-a4b9-39083ea2ce9f).

## Inference with AidLite SDK

### SDK installation

Model Farm uses the AidLite SDK as its model inference SDK. For details, please refer to the [AidLite Developer Documentation](https://v2.docs.aidlux.com/en/sdk-api/aidlite-sdk/).

- Install AidLite SDK

```bash
# Install the appropriate version of the AidLite SDK
sudo aid-pkg update
sudo aid-pkg install aidlite-sdk
# Install the QNN package that matches the backend listed above, e.g. for QNN2.23: sudo aid-pkg install aidlite-qnn223
sudo aid-pkg install aidlite-{QNN VERSION}
```

- Verify AidLite SDK

```bash
# Check the AidLite SDK C++ library version
python3 -c "import aidlite ; print(aidlite.get_library_version())"

# Check the AidLite SDK Python library version
python3 -c "import aidlite ; print(aidlite.get_py_library_version())"
```

### Run Demo

```bash
cd model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_int8_aidlite
python3 python/run_test.py --target_model ./models/real_esrgan_x4plus_w8a8.qnn231.ctx.bin --imgs ./python/original_image.jpg --invoke_nums 10
```
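run_test.py reports mean/max/min/variance over --invoke_nums runs; if percentile latencies are also of interest, a small post-processing sketch (a hypothetical helper, not part of the script):

```python
import numpy as np

def latency_report(invoke_time_ms):
    """Summarize per-invoke latencies (ms) collected the way run_test.py does."""
    t = np.asarray(invoke_time_ms, dtype=np.float64)
    return {
        "mean": float(t.mean()),
        "p50": float(np.percentile(t, 50)),
        "p90": float(np.percentile(t, 90)),
        "max": float(t.max()),
    }

# Example with made-up numbers:
print(latency_report([101.2, 99.8, 100.5, 103.1, 98.9]))
```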
model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_int8_aidlite/models/real_esrgan_x4plus_w8a8.qnn231.ctx.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:272021c149d95af12f8a8964c52abc23ca83a4b89d8f13b24fd7891e55682c3a
size 22109024
model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_int8_aidlite/python/original_image.jpg
ADDED
model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_int8_aidlite/python/run_test.py
ADDED
@@ -0,0 +1,100 @@
import time
import argparse

import aidlite
import cv2
import numpy as np


def run(input_shapes, output_shapes, is_quant=True):
    args = parser_args()
    model_path = args.target_model
    img_path = args.imgs
    model_type = args.model_type
    invoke_nums = args.invoke_nums

    # Declare float32 NHWC input/output for the compiled QNN context binary.
    model = aidlite.Model.create_instance(model_path)
    model.set_model_properties(input_shapes, aidlite.DataType.TYPE_FLOAT32,
                               output_shapes, aidlite.DataType.TYPE_FLOAT32)

    config = aidlite.Config.create_instance()
    config.implement_type = aidlite.ImplementType.TYPE_LOCAL
    if model_type.lower() == "qnn":
        config.framework_type = aidlite.FrameworkType.TYPE_QNN231
        config.accelerate_type = aidlite.AccelerateType.TYPE_DSP
    elif model_type.lower() in ("snpe2", "snpe"):
        config.framework_type = aidlite.FrameworkType.TYPE_SNPE2
        config.accelerate_type = aidlite.AccelerateType.TYPE_DSP

    config.number_of_threads = 4
    if is_quant:
        config.is_quantify_model = 1

    interpreter = aidlite.InterpreterBuilder.build_interpretper_from_model_and_config(model, config)
    if interpreter is None:
        print("build_interpretper_from_model_and_config failed !")
        return False
    result = interpreter.init()
    if result != 0:
        print("interpreter init failed !")
        return False
    result = interpreter.load_model()
    if result != 0:
        print("interpreter load model failed !")
        return False
    print("model load success!")

    # Preprocessing: BGR -> RGB, resize to the 128x128 model input, scale to [0, 1].
    frame = cv2.imread(img_path)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    img_input = cv2.resize(frame, (128, 128))
    img_input = (img_input / 255.).astype(np.float32)

    invoke_time = []
    for i in range(invoke_nums):
        result = interpreter.set_input_tensor(0, img_input.data)
        if result != 0:
            print("interpreter set_input_tensor() failed")
        t1 = time.time()
        result = interpreter.invoke()
        cost_time = (time.time() - t1) * 1000  # milliseconds
        invoke_time.append(cost_time)
        if result != 0:
            print("interpreter invoke() failed")
        qnn_out = interpreter.get_output_tensor(0)
        if qnn_out is None:
            print("sample : interpreter->get_output_tensor() 0 failed !")
    interpreter.destory()

    # Latency statistics over all invocations.
    max_invoke_time = max(invoke_time)
    min_invoke_time = min(invoke_time)
    mean_invoke_time = sum(invoke_time) / invoke_nums
    var_invoketime = np.var(invoke_time)
    print("====================================")
    print(f"QNN invoke time:\n --mean_invoke_time is {mean_invoke_time} \n --max_invoke_time is {max_invoke_time} \n --min_invoke_time is {min_invoke_time} \n --var_invoketime is {var_invoketime}")
    print("====================================")

    return qnn_out


def parser_args():
    parser = argparse.ArgumentParser(description="Run model benchmarks")
    parser.add_argument('--target_model', type=str, default='./models/real_esrgan_x4plus_w8a8.qnn231.ctx.bin', help="Inference model path")
    parser.add_argument('--imgs', type=str, default='./python/original_image.jpg', help="Predict images path")
    parser.add_argument('--invoke_nums', type=int, default=10, help="Inference nums")
    parser.add_argument('--model_type', type=str, default='QNN', help="Run backend")
    args = parser.parse_args()
    return args


if __name__ == "__main__":
    input_shapes = [[1, 128, 128, 3]]
    output_shapes = [[1, 512, 512, 3]]
    qnn_out = run(input_shapes, output_shapes)
    # Postprocessing: reshape the flat output to NHWC, rescale to [0, 255], save as BGR.
    qnn_out = qnn_out.reshape(1, 512, 512, 3)
    qnn_img = (qnn_out[0] * 255).astype(np.uint8)
    qnn_img = cv2.cvtColor(qnn_img, cv2.COLOR_RGB2BGR)
    cv2.imwrite("python/results.png", qnn_img)
model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_w8a16_aidlite/README.md
ADDED
@@ -0,0 +1,49 @@
## Model Information

### Source model

- Input shape: [1x3x128x128]
- Number of parameters: 15.92M
- Model size: 67.76 MB
- Output shape: [1x3x512x512]

Source model repository: [Real-ESRGAN-x4plus](https://github.com/xinntao/Real-ESRGAN)

### Converted model

- Precision: W8A16
- Backend: QNN2.31
- Target Device: SNM972 QCS8550

## Model Conversion Reference

Users can find the model conversion reference at [aimo.aidlux.com](https://aimo.aidlux.com/#/public/51fdc8f0-c87a-4b55-8ac5-bf4a7e637c62).

## Inference with AidLite SDK

### SDK installation

Model Farm uses the AidLite SDK as its model inference SDK. For details, please refer to the [AidLite Developer Documentation](https://v2.docs.aidlux.com/en/sdk-api/aidlite-sdk/).

- Install AidLite SDK

```bash
# Install the appropriate version of the AidLite SDK
sudo aid-pkg update
sudo aid-pkg install aidlite-sdk
# Install the QNN package that matches the backend listed above, e.g. for QNN2.23: sudo aid-pkg install aidlite-qnn223
sudo aid-pkg install aidlite-{QNN VERSION}
```

- Verify AidLite SDK

```bash
# Check the AidLite SDK C++ library version
python3 -c "import aidlite ; print(aidlite.get_library_version())"

# Check the AidLite SDK Python library version
python3 -c "import aidlite ; print(aidlite.get_py_library_version())"
```

### Run Demo

```bash
cd model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_w8a16_aidlite
python3 python/run_test.py --target_model ./models/real_esrgan_x4plus_w8a16.qnn231.ctx.bin --imgs ./python/original_image.jpg --invoke_nums 10
```
model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_w8a16_aidlite/models/real_esrgan_x4plus_w8a16.qnn231.ctx.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0953ed73e63ea73329738a79760f81ee83b4380e9b4aa2219802fb2ccc7a5cb6
size 24025952
model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_w8a16_aidlite/python/original_image.jpg
ADDED
model_farm_real_esrgan_x4plus_qcs8550_qnn2.31_w8a16_aidlite/python/run_test.py
ADDED
@@ -0,0 +1,100 @@
import time
import argparse

import aidlite
import cv2
import numpy as np


def run(input_shapes, output_shapes, is_quant=True):
    args = parser_args()
    model_path = args.target_model
    img_path = args.imgs
    model_type = args.model_type
    invoke_nums = args.invoke_nums

    # Declare float32 NHWC input/output for the compiled QNN context binary.
    model = aidlite.Model.create_instance(model_path)
    model.set_model_properties(input_shapes, aidlite.DataType.TYPE_FLOAT32,
                               output_shapes, aidlite.DataType.TYPE_FLOAT32)

    config = aidlite.Config.create_instance()
    config.implement_type = aidlite.ImplementType.TYPE_LOCAL
    if model_type.lower() == "qnn":
        config.framework_type = aidlite.FrameworkType.TYPE_QNN231
        config.accelerate_type = aidlite.AccelerateType.TYPE_DSP
    elif model_type.lower() in ("snpe2", "snpe"):
        config.framework_type = aidlite.FrameworkType.TYPE_SNPE2
        config.accelerate_type = aidlite.AccelerateType.TYPE_DSP

    config.number_of_threads = 4
    if is_quant:
        config.is_quantify_model = 1

    interpreter = aidlite.InterpreterBuilder.build_interpretper_from_model_and_config(model, config)
    if interpreter is None:
        print("build_interpretper_from_model_and_config failed !")
        return False
    result = interpreter.init()
    if result != 0:
        print("interpreter init failed !")
        return False
    result = interpreter.load_model()
    if result != 0:
        print("interpreter load model failed !")
        return False
    print("model load success!")

    # Preprocessing: BGR -> RGB, resize to the 128x128 model input, scale to [0, 1].
    frame = cv2.imread(img_path)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    img_input = cv2.resize(frame, (128, 128))
    img_input = (img_input / 255.).astype(np.float32)

    invoke_time = []
    for i in range(invoke_nums):
        result = interpreter.set_input_tensor(0, img_input.data)
        if result != 0:
            print("interpreter set_input_tensor() failed")
        t1 = time.time()
        result = interpreter.invoke()
        cost_time = (time.time() - t1) * 1000  # milliseconds
        invoke_time.append(cost_time)
        if result != 0:
            print("interpreter invoke() failed")
        qnn_out = interpreter.get_output_tensor(0)
        if qnn_out is None:
            print("sample : interpreter->get_output_tensor() 0 failed !")
    interpreter.destory()

    # Latency statistics over all invocations.
    max_invoke_time = max(invoke_time)
    min_invoke_time = min(invoke_time)
    mean_invoke_time = sum(invoke_time) / invoke_nums
    var_invoketime = np.var(invoke_time)
    print("====================================")
    print(f"QNN invoke time:\n --mean_invoke_time is {mean_invoke_time} \n --max_invoke_time is {max_invoke_time} \n --min_invoke_time is {min_invoke_time} \n --var_invoketime is {var_invoketime}")
    print("====================================")

    return qnn_out


def parser_args():
    parser = argparse.ArgumentParser(description="Run model benchmarks")
    parser.add_argument('--target_model', type=str, default='./models/real_esrgan_x4plus_w8a16.qnn231.ctx.bin', help="Inference model path")
    parser.add_argument('--imgs', type=str, default='./python/original_image.jpg', help="Predict images path")
    parser.add_argument('--invoke_nums', type=int, default=10, help="Inference nums")
    parser.add_argument('--model_type', type=str, default='QNN', help="Run backend")
    args = parser.parse_args()
    return args


if __name__ == "__main__":
    input_shapes = [[1, 128, 128, 3]]
    output_shapes = [[1, 512, 512, 3]]
    qnn_out = run(input_shapes, output_shapes)
    # Postprocessing: reshape the flat output to NHWC, rescale to [0, 255], save as BGR.
    qnn_out = qnn_out.reshape(1, 512, 512, 3)
    qnn_img = (qnn_out[0] * 255).astype(np.uint8)
    qnn_img = cv2.cvtColor(qnn_img, cv2.COLOR_RGB2BGR)
    cv2.imwrite("python/results.png", qnn_img)