/****************************************************************************
*
*    Copyright (c) 2017 - 2022 by Rockchip Corp.  All rights reserved.
*
*    The material in this file is confidential and contains trade secrets
*    of Rockchip Corporation. This is proprietary information owned by
*    Rockchip Corporation. No part of this work may be disclosed,
*    reproduced, copied, transmitted, or used in any way for any purpose,
*    without the express written permission of Rockchip Corporation.
*
*****************************************************************************/


#ifndef _RKNN_API_H
#define _RKNN_API_H

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>

/*
    Definition of extended flag for rknn_init.
*/
/* set high priority context. */
#define RKNN_FLAG_PRIOR_HIGH                    0x00000000

/* set medium priority context */
#define RKNN_FLAG_PRIOR_MEDIUM                  0x00000001

/* set low priority context. */
#define RKNN_FLAG_PRIOR_LOW                     0x00000002

/* asynchronous mode.
   when enabled, rknn_outputs_get will not block for long because it directly retrieves the result of
   the previous frame, which can increase the frame rate in single-threaded mode, at the cost of
   rknn_outputs_get not retrieving the result of the current frame.
   in multi-threaded mode there is no need to turn this mode on. */
#define RKNN_FLAG_ASYNC_MASK                    0x00000004

/* collect performance mode.
   when enabled, you can get detailed performance reports via rknn_query(ctx, RKNN_QUERY_PERF_DETAIL, ...),
   but it will reduce the frame rate. */
#define RKNN_FLAG_COLLECT_PERF_MASK             0x00000008

/* allocate all memory outside, including weight/internal/inputs/outputs */
#define RKNN_FLAG_MEM_ALLOC_OUTSIDE             0x00000010

/* weight sharing with the same network structure */
#define RKNN_FLAG_SHARE_WEIGHT_MEM              0x00000020

/* send fence fd from outside */
#define RKNN_FLAG_FENCE_IN_OUTSIDE              0x00000040

/* get fence fd from inside */
#define RKNN_FLAG_FENCE_OUT_OUTSIDE             0x00000080
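
/*
    Usage sketch (illustrative, not part of the original header): combining extended
    flags when initializing a context. "model.rknn" is a placeholder model path;
    passing size = 0 tells rknn_init to treat the pointer as a file path.

        rknn_context ctx = 0;
        uint32_t flag = RKNN_FLAG_PRIOR_MEDIUM | RKNN_FLAG_COLLECT_PERF_MASK;
        int ret = rknn_init(&ctx, (void*)"model.rknn", 0, flag, NULL);
*/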

/*
    Error code returned by the RKNN API.
*/
#define RKNN_SUCC                               0       /* execution succeeded. */
#define RKNN_ERR_FAIL                           -1      /* execution failed. */
#define RKNN_ERR_TIMEOUT                        -2      /* execution timed out. */
#define RKNN_ERR_DEVICE_UNAVAILABLE             -3      /* device is unavailable. */
#define RKNN_ERR_MALLOC_FAIL                    -4      /* memory allocation failed. */
#define RKNN_ERR_PARAM_INVALID                  -5      /* parameter is invalid. */
#define RKNN_ERR_MODEL_INVALID                  -6      /* model is invalid. */
#define RKNN_ERR_CTX_INVALID                    -7      /* context is invalid. */
#define RKNN_ERR_INPUT_INVALID                  -8      /* input is invalid. */
#define RKNN_ERR_OUTPUT_INVALID                 -9      /* output is invalid. */
#define RKNN_ERR_DEVICE_UNMATCH                 -10     /* the device does not match, please update the rknn sdk
                                                           and npu driver/firmware. */
#define RKNN_ERR_INCOMPATILE_PRE_COMPILE_MODEL  -11     /* This RKNN model uses pre_compile mode, but is not compatible with the current driver. */
#define RKNN_ERR_INCOMPATILE_OPTIMIZATION_LEVEL_VERSION  -12     /* This RKNN model sets an optimization level, but is not compatible with the current driver. */
#define RKNN_ERR_TARGET_PLATFORM_UNMATCH        -13     /* This RKNN model sets a target platform, but is not compatible with the current platform. */
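
/*
    Usage sketch (illustrative, not part of the original header): every API call
    returns an error code that should be compared against RKNN_SUCC. "model.rknn"
    is a placeholder path.

        int ret = rknn_init(&ctx, (void*)"model.rknn", 0, 0, NULL);
        if (ret != RKNN_SUCC) {
            // handle the error, e.g. log ret and abort initialization
        }
*/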

/*
    Definition for tensor
*/
#define RKNN_MAX_DIMS                           16      /* maximum dimension of tensor. */
#define RKNN_MAX_NUM_CHANNEL                    15      /* maximum channel number of input tensor. */
#define RKNN_MAX_NAME_LEN                       256     /* maximum name length of tensor. */


#ifdef __arm__
typedef uint32_t rknn_context;
#else
typedef uint64_t rknn_context;
#endif


/*
    The query command for rknn_query
*/
typedef enum _rknn_query_cmd {
    RKNN_QUERY_IN_OUT_NUM = 0,                              /* query the number of input & output tensor. */
    RKNN_QUERY_INPUT_ATTR = 1,                              /* query the attribute of input tensor. */
    RKNN_QUERY_OUTPUT_ATTR = 2,                             /* query the attribute of output tensor. */
    RKNN_QUERY_PERF_DETAIL = 3,                             /* query the detailed performance; RKNN_FLAG_COLLECT_PERF_MASK
                                                               must be set when calling rknn_init, and this query is only
                                                               valid after rknn_outputs_get. */
    RKNN_QUERY_PERF_RUN = 4,                                /* query the run time;
                                                               this query is only valid after rknn_outputs_get. */
    RKNN_QUERY_SDK_VERSION = 5,                             /* query the sdk & driver version */

    RKNN_QUERY_MEM_SIZE = 6,                                /* query the weight & internal memory size */
    RKNN_QUERY_CUSTOM_STRING = 7,                           /* query the custom string */

    RKNN_QUERY_NATIVE_INPUT_ATTR = 8,                       /* query the attribute of native input tensor. */
    RKNN_QUERY_NATIVE_OUTPUT_ATTR = 9,                      /* query the attribute of native output tensor. */

    RKNN_QUERY_NATIVE_NC1HWC2_INPUT_ATTR = 8,               /* query the attribute of native input tensor. */
    RKNN_QUERY_NATIVE_NC1HWC2_OUTPUT_ATTR = 9,              /* query the attribute of native output tensor. */

    RKNN_QUERY_NATIVE_NHWC_INPUT_ATTR = 10,                 /* query the attribute of native input tensor. */
    RKNN_QUERY_NATIVE_NHWC_OUTPUT_ATTR = 11,                /* query the attribute of native output tensor. */

    RKNN_QUERY_DEVICE_MEM_INFO = 12,                        /* query the attribute of rknn memory information. */

    RKNN_QUERY_CMD_MAX
} rknn_query_cmd;

/*
    the tensor data type.
*/
typedef enum _rknn_tensor_type {
    RKNN_TENSOR_FLOAT32 = 0,                            /* data type is float32. */
    RKNN_TENSOR_FLOAT16,                                /* data type is float16. */
    RKNN_TENSOR_INT8,                                   /* data type is int8. */
    RKNN_TENSOR_UINT8,                                  /* data type is uint8. */
    RKNN_TENSOR_INT16,                                  /* data type is int16. */
    RKNN_TENSOR_UINT16,                                 /* data type is uint16. */
    RKNN_TENSOR_INT32,                                  /* data type is int32. */
    RKNN_TENSOR_UINT32,                                 /* data type is uint32. */
    RKNN_TENSOR_INT64,                                  /* data type is int64. */
    RKNN_TENSOR_BOOL,

    RKNN_TENSOR_TYPE_MAX
} rknn_tensor_type;

inline static const char* get_type_string(rknn_tensor_type type)
{
    switch(type) {
    case RKNN_TENSOR_FLOAT32: return "FP32";
    case RKNN_TENSOR_FLOAT16: return "FP16";
    case RKNN_TENSOR_INT8: return "INT8";
    case RKNN_TENSOR_UINT8: return "UINT8";
    case RKNN_TENSOR_INT16: return "INT16";
    case RKNN_TENSOR_UINT16: return "UINT16";
    case RKNN_TENSOR_INT32: return "INT32";
    case RKNN_TENSOR_UINT32: return "UINT32";
    case RKNN_TENSOR_INT64: return "INT64";
    case RKNN_TENSOR_BOOL: return "BOOL";
    default: return "UNKNOW";
    }
}

/*
    the quantization type.
*/
typedef enum _rknn_tensor_qnt_type {
    RKNN_TENSOR_QNT_NONE = 0,                           /* none. */
    RKNN_TENSOR_QNT_DFP,                                /* dynamic fixed point. */
    RKNN_TENSOR_QNT_AFFINE_ASYMMETRIC,                  /* asymmetric affine. */

    RKNN_TENSOR_QNT_MAX
} rknn_tensor_qnt_type;

inline static const char* get_qnt_type_string(rknn_tensor_qnt_type type)
{
    switch(type) {
    case RKNN_TENSOR_QNT_NONE: return "NONE";
    case RKNN_TENSOR_QNT_DFP: return "DFP";
    case RKNN_TENSOR_QNT_AFFINE_ASYMMETRIC: return "AFFINE";
    default: return "UNKNOW";
    }
}

/*
    the tensor data format.
*/
typedef enum _rknn_tensor_format {
    RKNN_TENSOR_NCHW = 0,                               /* data format is NCHW. */
    RKNN_TENSOR_NHWC,                                   /* data format is NHWC. */
    RKNN_TENSOR_NC1HWC2,                                /* data format is NC1HWC2. */
    RKNN_TENSOR_UNDEFINED,

    RKNN_TENSOR_FORMAT_MAX
} rknn_tensor_format;

/*
    the mode of running on target NPU core.
*/
typedef enum _rknn_core_mask {
    RKNN_NPU_CORE_AUTO = 0,                              /* default, the NPU core is selected automatically. */
    RKNN_NPU_CORE_0 = 1,                                 /* run on NPU core 0. */
    RKNN_NPU_CORE_1 = 2,                                 /* run on NPU core 1. */
    RKNN_NPU_CORE_2 = 4,                                 /* run on NPU core 2. */
    RKNN_NPU_CORE_0_1 = 3,                               /* run on NPU core 0 and core 1. */
    RKNN_NPU_CORE_0_1_2 = 7,                             /* run on NPU core 0, core 1 and core 2. */

    RKNN_NPU_CORE_UNDEFINED,
} rknn_core_mask;

inline static const char* get_format_string(rknn_tensor_format fmt)
{
    switch(fmt) {
    case RKNN_TENSOR_NCHW: return "NCHW";
    case RKNN_TENSOR_NHWC: return "NHWC";
    case RKNN_TENSOR_NC1HWC2: return "NC1HWC2";
    case RKNN_TENSOR_UNDEFINED: return "UNDEFINED";
    default: return "UNKNOW";
    }
}

/*
    the information for RKNN_QUERY_IN_OUT_NUM.
*/
typedef struct _rknn_input_output_num {
    uint32_t n_input;                                   /* the number of input. */
    uint32_t n_output;                                  /* the number of output. */
} rknn_input_output_num;

/*
    the information for RKNN_QUERY_INPUT_ATTR / RKNN_QUERY_OUTPUT_ATTR.
*/
typedef struct _rknn_tensor_attr {
    uint32_t index;                                     /* input parameter, the index of the input/output tensor,
                                                           must be set before calling rknn_query. */

    uint32_t n_dims;                                    /* the number of dimensions. */
    uint32_t dims[RKNN_MAX_DIMS];                       /* the dimensions array. */
    char name[RKNN_MAX_NAME_LEN];                       /* the name of tensor. */

    uint32_t n_elems;                                   /* the number of elements. */
    uint32_t size;                                      /* the bytes size of tensor. */

    rknn_tensor_format fmt;                             /* the data format of tensor. */
    rknn_tensor_type type;                              /* the data type of tensor. */
    rknn_tensor_qnt_type qnt_type;                      /* the quantization type of tensor. */
    int8_t fl;                                          /* fractional length for RKNN_TENSOR_QNT_DFP. */
    int32_t zp;                                         /* zero point for RKNN_TENSOR_QNT_AFFINE_ASYMMETRIC. */
    float scale;                                        /* scale for RKNN_TENSOR_QNT_AFFINE_ASYMMETRIC. */

    uint32_t w_stride;                                  /* the stride of tensor along the width dimension of input,
                                                           Note: it is read-only, 0 means equal to width. */
    uint32_t size_with_stride;                          /* the bytes size of tensor with stride. */

    uint8_t pass_through;                               /* pass through mode, for the rknn_set_io_mem interface.
                                                           if TRUE, the buf data is passed directly to the input node of the rknn model
                                                                    without any conversion. the following variables do not need to be set.
                                                           if FALSE, the buf data is converted into an input consistent with the model
                                                                     according to the following type and fmt. so the following variables
                                                                     need to be set. */
    uint32_t h_stride;                                  /* the stride along the height dimension of input,
                                                           Note: it is write-only, if it is set to 0, h_stride = height. */
} rknn_tensor_attr;
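
/*
    Usage sketch (illustrative, not part of the original header): querying the
    attribute of input tensor 0. Assumes <string.h> for memset and an initialized
    context "ctx".

        rknn_tensor_attr attr;
        memset(&attr, 0, sizeof(attr));
        attr.index = 0;    // the tensor index must be set before calling rknn_query
        int ret = rknn_query(ctx, RKNN_QUERY_INPUT_ATTR, &attr, sizeof(attr));
        if (ret == RKNN_SUCC) {
            // use attr.n_dims / attr.dims / attr.type / attr.fmt here
        }
*/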

/*
    the information for RKNN_QUERY_PERF_DETAIL.
*/
typedef struct _rknn_perf_detail {
    char* perf_data;                                    /* the string pointer of perf detail. the user does not need to free it. */
    uint64_t data_len;                                  /* the string length. */
} rknn_perf_detail;

/*
    the information for RKNN_QUERY_PERF_RUN.
*/
typedef struct _rknn_perf_run {
    int64_t run_duration;                               /* real inference time (us) */
} rknn_perf_run;

/*
    the information for RKNN_QUERY_SDK_VERSION.
*/
typedef struct _rknn_sdk_version {
    char api_version[256];                              /* the version of rknn api. */
    char drv_version[256];                              /* the version of rknn driver. */
} rknn_sdk_version;

/*
    the information for RKNN_QUERY_MEM_SIZE.
*/
typedef struct _rknn_mem_size {
    uint32_t total_weight_size;                         /* the weight memory size */
    uint32_t total_internal_size;                       /* the internal memory size, excluding inputs/outputs */
    uint64_t total_dma_allocated_size;                  /* total dma memory allocated size */
    uint32_t total_sram_size;                           /* total system sram size reserved for rknn */
    uint32_t free_sram_size;                            /* free system sram size reserved for rknn */
    uint32_t reserved[10];                              /* reserved */
} rknn_mem_size;

/*
    the information for RKNN_QUERY_CUSTOM_STRING.
*/
typedef struct _rknn_custom_string {
    char string[1024];                                  /* the custom string, up to 1024 bytes long */
} rknn_custom_string;

/*
   The flags of rknn_tensor_mem.
*/
typedef enum _rknn_tensor_mem_flags {
    RKNN_TENSOR_MEMORY_FLAGS_ALLOC_INSIDE = 1,           /* Used to mark in rknn_destroy_mem() whether it is necessary to release the "mem" pointer itself.
                                                            If the flag RKNN_TENSOR_MEMORY_FLAGS_ALLOC_INSIDE is set, rknn_destroy_mem() will call free(mem). */
    RKNN_TENSOR_MEMORY_FLAGS_FROM_FD      = 2,           /* Used to mark in rknn_create_mem_from_fd() whether it is necessary to release the "mem" pointer itself.
                                                            If the flag RKNN_TENSOR_MEMORY_FLAGS_FROM_FD is set, rknn_destroy_mem() will call free(mem). */
    RKNN_TENSOR_MEMORY_FLAGS_FROM_PHYS    = 3,           /* Used to mark in rknn_create_mem_from_phys() whether it is necessary to release the "mem" pointer itself.
                                                            If the flag RKNN_TENSOR_MEMORY_FLAGS_FROM_PHYS is set, rknn_destroy_mem() will call free(mem). */
    RKNN_TENSOR_MEMORY_FLAGS_UNKNOWN
} rknn_tensor_mem_flags;

/*
    the memory information of tensor.
*/
typedef struct _rknn_tensor_memory {
    void*            virt_addr;                         /* the virtual address of tensor buffer. */
    uint64_t         phys_addr;                         /* the physical address of tensor buffer. */
    int32_t          fd;                                /* the fd of tensor buffer. */
    int32_t          offset;                            /* indicates the offset of the memory. */
    uint32_t         size;                              /* the size of tensor buffer. */
    uint32_t         flags;                             /* the flags of tensor buffer, reserved */
    void *           priv_data;                         /* the private data of tensor buffer. */
} rknn_tensor_mem;

/*
    the input information for rknn_inputs_set.
*/
typedef struct _rknn_input {
    uint32_t index;                                     /* the input index. */
    void* buf;                                          /* the input buf for index. */
    uint32_t size;                                      /* the size of input buf. */
    uint8_t pass_through;                               /* pass through mode.
                                                           if TRUE, the buf data is passed directly to the input node of the rknn model
                                                                    without any conversion. the following variables do not need to be set.
                                                           if FALSE, the buf data is converted into an input consistent with the model
                                                                     according to the following type and fmt. so the following variables
                                                                     need to be set. */
    rknn_tensor_type type;                              /* the data type of input buf. */
    rknn_tensor_format fmt;                             /* the data format of input buf.
                                                           currently the internal input format of NPU is NCHW by default.
                                                           so entering NCHW data can avoid the format conversion in the driver. */
} rknn_input;

/*
    the output information for rknn_outputs_get.
*/
typedef struct _rknn_output {
    uint8_t want_float;                                 /* whether to convert the output data to float */
    uint8_t is_prealloc;                                /* whether buf is pre-allocated.
                                                           if TRUE, the following variables need to be set.
                                                           if FALSE, the following variables do not need to be set. */
    uint32_t index;                                     /* the output index. */
    void* buf;                                          /* the output buf for index.
                                                           when is_prealloc = FALSE and rknn_outputs_release is called,
                                                           this buf pointer will be freed; do not use it anymore. */
    uint32_t size;                                      /* the size of output buf. */
} rknn_output;

/*
    the extend information for rknn_init.
*/
typedef struct _rknn_init_extend {
    rknn_context ctx;                                    /* rknn context */
    int32_t      real_model_offset;                      /* real rknn model file offset, only valid when init context with rknn file path */
    uint32_t     real_model_size;                        /* real rknn model file size, only valid when init context with rknn file path */
    uint8_t      reserved[120];                          /* reserved */
} rknn_init_extend;

/*
    the extend information for rknn_run.
*/
typedef struct _rknn_run_extend {
    uint64_t frame_id;                                  /* output parameter, indicates the current frame id of run. */
    int32_t non_block;                                  /* blocking flag of run, 0 means blocking, 1 means non-blocking */
    int32_t timeout_ms;                                 /* timeout for block mode, in milliseconds */
    int32_t fence_fd;                                   /* fence fd from other unit */
} rknn_run_extend;

/*
    the extend information for rknn_outputs_get.
*/
typedef struct _rknn_output_extend {
    uint64_t frame_id;                                  /* output parameter, indicates the frame id of outputs, corresponds to
                                                           struct rknn_run_extend.frame_id. */
} rknn_output_extend;


/*  rknn_init

    initialize the context and load the rknn model.

    input:
        rknn_context* context       the pointer of context handle.
        void* model                 if size > 0, pointer to the rknn model; if size = 0, filepath to the rknn model.
        uint32_t size               the size of rknn model.
        uint32_t flag               extend flag, see the define of RKNN_FLAG_XXX_XXX.
        rknn_init_extend* extend    the extend information of init.
    return:
        int                         error code.
*/
int rknn_init(rknn_context* context, void* model, uint32_t size, uint32_t flag, rknn_init_extend* extend);
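
/*
    Usage sketch (illustrative, not part of the original header): initializing a
    context from a model file path (size = 0) and releasing it again. "model.rknn"
    is a placeholder path.

        rknn_context ctx = 0;
        int ret = rknn_init(&ctx, (void*)"model.rknn", 0, 0, NULL);
        if (ret != RKNN_SUCC) {
            // initialization failed, inspect ret (see the RKNN_ERR_* codes above)
        }
        // ... use the context ...
        rknn_destroy(ctx);
*/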

/*  rknn_dup_context

    duplicate the context from an existing, initialized context.

    input:
        rknn_context* context_in       the pointer of the existing context handle.
        rknn_context* context_out      the pointer of the new context handle.
    return:
        int                         error code.
*/
int rknn_dup_context(rknn_context* context_in, rknn_context* context_out);

/*  rknn_destroy

    unload the rknn model and destroy the context.

    input:
        rknn_context context        the handle of context.
    return:
        int                         error code.
*/
int rknn_destroy(rknn_context context);


/*  rknn_query

    query the information about the model or others. see rknn_query_cmd.

    input:
        rknn_context context        the handle of context.
        rknn_query_cmd cmd          the command of query.
        void* info                  the buffer pointer of information.
        uint32_t size               the size of information.
    return:
        int                         error code.
*/
int rknn_query(rknn_context context, rknn_query_cmd cmd, void* info, uint32_t size);
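
/*
    Usage sketch (illustrative, not part of the original header): querying the
    number of inputs/outputs and the SDK/driver version of an initialized context.

        rknn_input_output_num io_num;
        int ret = rknn_query(ctx, RKNN_QUERY_IN_OUT_NUM, &io_num, sizeof(io_num));
        // io_num.n_input / io_num.n_output now hold the tensor counts

        rknn_sdk_version ver;
        ret = rknn_query(ctx, RKNN_QUERY_SDK_VERSION, &ver, sizeof(ver));
        // ver.api_version / ver.drv_version hold the version strings
*/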


/*  rknn_inputs_set

    set the input data by the input index of the rknn model.
    for the input information, see rknn_input.

    input:
        rknn_context context        the handle of context.
        uint32_t n_inputs           the number of inputs.
        rknn_input inputs[]         the arrays of inputs information, see rknn_input.
    return:
        int                         error code
*/
int rknn_inputs_set(rknn_context context, uint32_t n_inputs, rknn_input inputs[]);
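
/*
    Usage sketch (illustrative, not part of the original header): feeding one
    uint8 NHWC image buffer to input 0. "img_data" and "img_size" are placeholder
    variables for the caller's image buffer; <string.h> provides memset.

        rknn_input input;
        memset(&input, 0, sizeof(input));
        input.index        = 0;
        input.buf          = img_data;
        input.size         = img_size;
        input.pass_through = 0;                 // let the runtime convert type/format
        input.type         = RKNN_TENSOR_UINT8;
        input.fmt          = RKNN_TENSOR_NHWC;
        int ret = rknn_inputs_set(ctx, 1, &input);
*/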

/*
    rknn_set_batch_core_num

    set rknn batch core_num.

    input:
        rknn_context context        the handle of context.
        int core_num                the core number.
    return:
        int                         error code.

*/
int rknn_set_batch_core_num(rknn_context context, int core_num);

/*  rknn_set_core_mask

    set rknn core mask. (only supported on RK3588 now)

    RKNN_NPU_CORE_AUTO: auto mode, default value
    RKNN_NPU_CORE_0: core 0 mode
    RKNN_NPU_CORE_1: core 1 mode
    RKNN_NPU_CORE_2: core 2 mode
    RKNN_NPU_CORE_0_1: combine core 0/1 mode
    RKNN_NPU_CORE_0_1_2: combine core 0/1/2 mode

    input:
        rknn_context context        the handle of context.
        rknn_core_mask core_mask    the core mask.
    return:
        int                         error code.
*/
int rknn_set_core_mask(rknn_context context, rknn_core_mask core_mask);
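
/*
    Usage sketch (illustrative, not part of the original header): pinning an
    initialized context to NPU core 0 on RK3588.

        int ret = rknn_set_core_mask(ctx, RKNN_NPU_CORE_0);
        if (ret != RKNN_SUCC) {
            // the platform may not support core masking
        }
*/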

/*  rknn_run

    run the model to execute inference.

    input:
        rknn_context context        the handle of context.
        rknn_run_extend* extend     the extend information of run.
    return:
        int                         error code.
*/
int rknn_run(rknn_context context, rknn_run_extend* extend);


/*  rknn_wait

    wait for the model to finish executing inference.

    input:
        rknn_context context        the handle of context.
        rknn_run_extend* extend     the extend information of run.
    return:
        int                         error code.
*/
int rknn_wait(rknn_context context, rknn_run_extend* extend);


/*  rknn_outputs_get

    wait for the inference to finish and get the outputs.
    this function will block until inference finishes.
    the results will be set to outputs[].

    input:
        rknn_context context        the handle of context.
        uint32_t n_outputs          the number of outputs.
        rknn_output outputs[]       the arrays of output, see rknn_output.
        rknn_output_extend* extend  the extend information of output.
    return:
        int                         error code.
*/
int rknn_outputs_get(rknn_context context, uint32_t n_outputs, rknn_output outputs[], rknn_output_extend* extend);


/*  rknn_outputs_release

    release the outputs obtained by rknn_outputs_get.
    after this call, the rknn_output[x].buf obtained from rknn_outputs_get will
    also be freed when rknn_output[x].is_prealloc = FALSE.

    input:
        rknn_context context        the handle of context.
        uint32_t n_outputs          the number of outputs.
        rknn_output outputs[]       the arrays of output.
    return:
        int                         error code
*/
int rknn_outputs_release(rknn_context context, uint32_t n_outputs, rknn_output outputs[]);
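
/*
    Usage sketch (illustrative, not part of the original header): a typical
    inference round after rknn_inputs_set, letting the runtime allocate the
    output buffer and convert it to float. Assumes <string.h> for memset.

        int ret = rknn_run(ctx, NULL);

        rknn_output outputs[1];
        memset(outputs, 0, sizeof(outputs));
        outputs[0].want_float = 1;
        ret = rknn_outputs_get(ctx, 1, outputs, NULL);

        // ... read outputs[0].buf (outputs[0].size bytes) ...

        rknn_outputs_release(ctx, 1, outputs);
*/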


/* new api for zero copy */

/*  rknn_create_mem_from_phys (memory allocated outside)

    initialize tensor memory from physical address.

    input:
        rknn_context ctx            the handle of context.
        uint64_t phys_addr          physical address.
        void *virt_addr             virtual address.
        uint32_t size               the size of tensor buffer.
    return:
        rknn_tensor_mem             the pointer of tensor memory information.
*/
rknn_tensor_mem* rknn_create_mem_from_phys(rknn_context ctx, uint64_t phys_addr, void *virt_addr, uint32_t size);


/*  rknn_create_mem_from_fd (memory allocated outside)

    initialize tensor memory from a file descriptor.

    input:
        rknn_context ctx            the handle of context.
        int32_t fd                  file descriptor.
        void *virt_addr             virtual address.
        uint32_t size               the size of tensor buffer.
        int32_t offset              indicates the offset of the memory (virt_addr without offset).
    return:
        rknn_tensor_mem             the pointer of tensor memory information.
*/
rknn_tensor_mem* rknn_create_mem_from_fd(rknn_context ctx, int32_t fd, void *virt_addr, uint32_t size, int32_t offset);


/*  rknn_create_mem_from_mb_blk (memory allocated outside)

    create tensor memory from mb_blk.

    input:
        rknn_context ctx            the handle of context.
        void *mb_blk                mb_blk allocated from system api.
        int32_t offset              indicates the offset of the memory.
    return:
        rknn_tensor_mem             the pointer of tensor memory information.
*/
rknn_tensor_mem* rknn_create_mem_from_mb_blk(rknn_context ctx, void *mb_blk, int32_t offset);


/*  rknn_create_mem (memory allocated inside)

    create tensor memory.

    input:
        rknn_context ctx            the handle of context.
        uint32_t size               the size of tensor buffer.
    return:
        rknn_tensor_mem             the pointer of tensor memory information.
*/
rknn_tensor_mem* rknn_create_mem(rknn_context ctx, uint32_t size);
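
/*
    Usage sketch (illustrative, not part of the original header): letting the
    runtime allocate a tensor buffer and releasing it again. "input_attr" is a
    placeholder for an attribute previously filled by rknn_query.

        rknn_tensor_mem* in_mem = rknn_create_mem(ctx, input_attr.size_with_stride);
        // ... fill in_mem->virt_addr with input data ...
        rknn_destroy_mem(ctx, in_mem);
*/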


/*  rknn_destroy_mem (support allocate inside and outside)

    destroy tensor memory.

    input:
        rknn_context ctx            the handle of context.
        rknn_tensor_mem *mem        the pointer of tensor memory information.
    return:
        int                         error code
*/
int rknn_destroy_mem(rknn_context ctx, rknn_tensor_mem *mem);


/*  rknn_set_weight_mem

    set the weight memory.

    input:
        rknn_context ctx            the handle of context.
        rknn_tensor_mem *mem        the array of tensor memory information
    return:
        int                         error code.
*/
int rknn_set_weight_mem(rknn_context ctx, rknn_tensor_mem *mem);


/*  rknn_set_internal_mem

    set the internal memory.

    input:
        rknn_context ctx            the handle of context.
        rknn_tensor_mem *mem        the array of tensor memory information
    return:
        int                         error code.
*/
int rknn_set_internal_mem(rknn_context ctx, rknn_tensor_mem *mem);


/*  rknn_set_io_mem

    set the input and output tensors buffer.

    input:
        rknn_context ctx            the handle of context.
        rknn_tensor_mem *mem        the array of tensor memory information.
        rknn_tensor_attr *attr      the attribute of input or output tensor buffer.
    return:
        int                         error code.
*/
int rknn_set_io_mem(rknn_context ctx, rknn_tensor_mem *mem, rknn_tensor_attr *attr);
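
/*
    Usage sketch (illustrative, not part of the original header): a minimal
    zero-copy setup that binds runtime-allocated buffers to input 0 and output 0,
    then runs inference. Assumes <string.h> for memset and that buffer sizes based
    on size_with_stride are acceptable for this model.

        rknn_tensor_attr in_attr;
        memset(&in_attr, 0, sizeof(in_attr));
        in_attr.index = 0;
        rknn_query(ctx, RKNN_QUERY_INPUT_ATTR, &in_attr, sizeof(in_attr));
        rknn_tensor_mem* in_mem = rknn_create_mem(ctx, in_attr.size_with_stride);
        rknn_set_io_mem(ctx, in_mem, &in_attr);

        rknn_tensor_attr out_attr;
        memset(&out_attr, 0, sizeof(out_attr));
        out_attr.index = 0;
        rknn_query(ctx, RKNN_QUERY_OUTPUT_ATTR, &out_attr, sizeof(out_attr));
        rknn_tensor_mem* out_mem = rknn_create_mem(ctx, out_attr.size_with_stride);
        rknn_set_io_mem(ctx, out_mem, &out_attr);

        // fill in_mem->virt_addr with input data, then:
        rknn_run(ctx, NULL);
        // results are now available in out_mem->virt_addr

        rknn_destroy_mem(ctx, in_mem);
        rknn_destroy_mem(ctx, out_mem);
*/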


#ifdef __cplusplus
} //extern "C"
#endif

#endif  //_RKNN_API_H