# rknn-toolkit2 example: examples/darknet/yolov3_416x416/test.py
import numpy as np
import cv2
from rknn.api import RKNN

from yolov3_utils import yolov3_post_process, draw, download_yolov3_weight

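# Output-layout constants for YOLOv3 post-processing:
#   GRID0/GRID1/GRID2 - the three detection grid sizes produced for a 416x416 input (13, 26, 52)
#   LISTSIZE          - values per anchor: 4 box coords + 1 objectness score + 80 COCO classes = 85
#   SPAN              - number of anchor boxes predicted per grid cell at each scale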
GRID0 = 13
GRID1 = 26
GRID2 = 52
LISTSIZE = 85
SPAN = 3

if __name__ == '__main__':

    MODEL_PATH = './yolov3.cfg'
    WEIGHT_PATH = './yolov3.weights'
    RKNN_MODEL_PATH = './yolov3_416.rknn'
    im_file = './dog_bike_car_416x416.jpg'
    DATASET = './dataset.txt'

    # Download yolov3.weights
    download_yolov3_weight(WEIGHT_PATH)

    # Create RKNN object
    rknn = RKNN(verbose=True)

    # Pre-process config
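    # mean_values=[0, 0, 0] and std_values=[255, 255, 255] make the toolkit normalize the
    # uint8 RGB input as (pixel - 0) / 255, i.e. into the [0, 1] range the network was trained on.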
    print('--> Config model')
    rknn.config(mean_values=[0, 0, 0], std_values=[255, 255, 255])
    print('done')

    # Load model
    print('--> Loading model')
    ret = rknn.load_darknet(model=MODEL_PATH, weight=WEIGHT_PATH)
    if ret != 0:
        print('Load model failed!')
        exit(ret)
    print('done')

    # Build model
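    # do_quantization=True quantizes the model (8-bit by default in rknn-toolkit2), using the
    # images listed in DATASET (dataset.txt) as the calibration set.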
    print('--> Building model')
    ret = rknn.build(do_quantization=True, dataset=DATASET)
    if ret != 0:
        print('Build model failed!')
        exit(ret)
    print('done')

    # Export rknn model
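    # export_rknn() writes the built model to RKNN_MODEL_PATH; the resulting .rknn file can be
    # reloaded later with rknn.load_rknn() or deployed on the Rockchip NPU (e.g. via RKNN Lite).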
    print('--> Export rknn model')
    ret = rknn.export_rknn(RKNN_MODEL_PATH)
    if ret != 0:
        print('Export rknn model failed!')
        exit(ret)
    print('done')

    # Set inputs
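    # cv2.imread() returns BGR; convert to RGB to match the channel order the Darknet model
    # (and the mean/std config above) assumes. The HWC uint8 array is passed to inference() as-is.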
    img = cv2.imread(im_file)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # Init runtime environment
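    # Called without a target, init_runtime() runs inference on the toolkit's simulator on the
    # host PC; pass a target (e.g. target='rk3568') to run on a connected board instead.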
    print('--> Init runtime environment')
    ret = rknn.init_runtime()
    if ret != 0:
        print('Init runtime environment failed!')
        exit(ret)
    print('done')

    # Inference
    print('--> Running model')
    outputs = rknn.inference(inputs=[img])
    print('done')

    input0_data = outputs[0]
    np.save('./darknet_yolov3_416x416_0.npy', input0_data)
    input1_data = outputs[1]
    np.save('./darknet_yolov3_416x416_1.npy', input1_data)
    input2_data = outputs[2]
    np.save('./darknet_yolov3_416x416_2.npy', input2_data)

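    # Each output holds SPAN * LISTSIZE = 255 channels per grid cell. Reshape each one to
    # (SPAN, LISTSIZE, grid, grid) and transpose to (grid, grid, SPAN, LISTSIZE), the layout
    # yolov3_post_process() expects.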
    input0_data = input0_data.reshape(SPAN, LISTSIZE, GRID0, GRID0)
    input1_data = input1_data.reshape(SPAN, LISTSIZE, GRID1, GRID1)
    input2_data = input2_data.reshape(SPAN, LISTSIZE, GRID2, GRID2)

    input_data = []
    input_data.append(np.transpose(input0_data, (2, 3, 0, 1)))
    input_data.append(np.transpose(input1_data, (2, 3, 0, 1)))
    input_data.append(np.transpose(input2_data, (2, 3, 0, 1)))

    boxes, classes, scores = yolov3_post_process(input_data)

    image = cv2.imread(im_file)
    if boxes is not None:
        draw(image, boxes, scores, classes)

    print('Save results to result.jpg!')
    cv2.imwrite('result.jpg', image)

    rknn.release()