xref: /rockchip-linux_mpp/osal/allocator/allocator_ion.c (revision 437bfbeb9567cca9cd9080e3f6954aa9d6a94f18)
/* SPDX-License-Identifier: Apache-2.0 OR MIT */
/*
 * Copyright (c) 2015 Rockchip Electronics Co., Ltd.
 */

#define MODULE_TAG "mpp_ion"

#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#if defined(ARMLINUX)
#include <ion.h>
#else
#include "ion.h"
#endif

#include "os_mem.h"
#include "allocator_ion.h"

#include "mpp_env.h"
#include "mpp_mem.h"
#include "mpp_debug.h"
#include "mpp_common.h"
#include "mpp_thread.h"

static RK_U32 ion_debug = 0;
static pthread_once_t once = PTHREAD_ONCE_INIT;
static pthread_mutex_t scandir_lock;

#define ION_FUNCTION                (0x00000001)
#define ION_DEVICE                  (0x00000002)
#define ION_CLIENT                  (0x00000004)
#define ION_IOCTL                   (0x00000008)

#define ION_DETECT_IOMMU_DISABLE    (0x0)   /* use ION_HEAP_TYPE_DMA */
#define ION_DETECT_IOMMU_ENABLE     (0x1)   /* use ION_HEAP_TYPE_SYSTEM */
#define ION_DETECT_NO_DTS           (0x2)   /* use ION_HEAP_TYPE_CARVEOUT */

#define ion_dbg(flag, fmt, ...)     _mpp_dbg(ion_debug, flag, fmt, ## __VA_ARGS__)
#define ion_dbg_f(flag, fmt, ...)   _mpp_dbg_f(ion_debug, flag, fmt, ## __VA_ARGS__)
#define ion_dbg_func(fmt, ...)      ion_dbg_f(ION_FUNCTION, fmt, ## __VA_ARGS__)

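/*
 * Thin wrapper around ioctl(): logs the failing request and returns
 * -errno on error so callers can propagate a negative errno code.
 */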
static int ion_ioctl(int fd, int req, void *arg)
{
    int ret = ioctl(fd, req, arg);
    if (ret < 0) {
        mpp_err("ion_ioctl %x failed with code %d: %s\n", req,
                ret, strerror(errno));
        return -errno;
    }
    return ret;
}

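/*
 * Allocate a buffer through ION_IOC_ALLOC and return the kernel handle
 * in *handle. Returns 0 on success, a negative errno code on ioctl
 * failure and -EINVAL when handle is NULL.
 */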
static int ion_alloc(int fd, size_t len, size_t align, unsigned int heap_mask,
                     unsigned int flags, ion_user_handle_t *handle)
{
    int ret = -EINVAL;
    struct ion_allocation_data data = {
        .len = len,
        .align = align,
        .heap_id_mask = heap_mask,
        .flags = flags,
    };

    ion_dbg_func("enter: fd %d len %d align %d heap_mask %x flags %x",
                 fd, len, align, heap_mask, flags);

    if (handle) {
        ret = ion_ioctl(fd, ION_IOC_ALLOC, &data);
        if (ret >= 0)
            *handle = data.handle;
    }

    ion_dbg_func("leave: ret %d\n", ret);

    return ret;
}

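/* Release a kernel handle obtained from ion_alloc() via ION_IOC_FREE. */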
static int ion_free(int fd, ion_user_handle_t handle)
{
    int ret;
    struct ion_handle_data data = {
        .handle = handle,
    };

    ion_dbg_func("enter: fd %d\n", fd);
    ret = ion_ioctl(fd, ION_IOC_FREE, &data);
    ion_dbg_func("leave: ret %d\n", ret);
    return ret;
}

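/*
 * Export a handle as a mappable file descriptor with ION_IOC_MAP and
 * store it in *map_fd. Returns 0 on success or a negative errno code.
 */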
static int ion_map_fd(int fd, ion_user_handle_t handle, int *map_fd)
{
    int ret;
    struct ion_fd_data data = {
        .handle = handle,
    };

    if (map_fd == NULL)
        return -EINVAL;

    ret = ion_ioctl(fd, ION_IOC_MAP, &data);
    if (ret < 0)
        return ret;

    *map_fd = data.fd;
    if (*map_fd < 0) {
        mpp_err("map ioctl returned negative fd\n");
        return -EINVAL;
    }

    return 0;
}

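/*
 * mmap() a buffer fd into the process address space. The offset is
 * rounded down to a page boundary before mapping; on failure *ptr is
 * set to NULL and a negative errno code is returned.
 */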
static int ion_mmap(int fd, size_t length, int prot, int flags, off_t offset,
                    void **ptr)
{
    static unsigned long pagesize_mask = 0;
    if (ptr == NULL)
        return -EINVAL;

    if (!pagesize_mask)
        pagesize_mask = sysconf(_SC_PAGESIZE) - 1;
    offset = offset & (~pagesize_mask);

    *ptr = mmap(NULL, length, prot, flags, fd, offset);
    if (*ptr == MAP_FAILED) {
        mpp_err("mmap failed: %s\n", strerror(errno));
        *ptr = NULL;
        return -errno;
    }
    return 0;
}

#include <dirent.h>

static const char *search_name = NULL;

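/*
 * scandir() filter: match directory entries whose name contains the
 * string in the module-level search_name. Callers must hold
 * scandir_lock while search_name is set.
 */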
static int _compare_name(const struct dirent *dir)
{
    if (search_name && strstr(dir->d_name, search_name))
        return 1;

    return 0;
}

static void scandir_lock_init(void)
{
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&scandir_lock, &attr);
    pthread_mutexattr_destroy(&attr);
}

/*
 * Directory search helper:
 * scan 'path' for an entry whose name contains 'dir_name'.
 * If a matching entry is found, its name is appended to 'path'.
 *
 * Returns 0 on failure.
 * Returns the length of the new path on success.
 */
static RK_S32 find_dir_in_path(char *path, const char *dir_name,
                               size_t max_length)
{
    struct dirent **dir;
    RK_S32 path_len = strnlen(path, max_length);
    RK_S32 new_path_len = 0;
    RK_S32 n;

    pthread_once(&once, scandir_lock_init);
    pthread_mutex_lock(&scandir_lock);
    search_name = dir_name;
    n = scandir(path, &dir, _compare_name, alphasort);
    if (n <= 0) {
        mpp_log("scan %s for %s return %d\n", path, dir_name, n);
    } else {
        mpp_assert(n == 1);

        new_path_len = path_len;
        new_path_len += snprintf(path + path_len, max_length - path_len - 1,
                                 "/%s", dir[0]->d_name);
        free(dir[0]);
        free(dir);
    }
    search_name = NULL;
    pthread_mutex_unlock(&scandir_lock);
    return new_path_len;
}

#define MAX_PATH_NAME_SIZE  256

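/*
 * Probe the platform for IOMMU support:
 * first look for a vpu device node in /proc/device-tree and read its
 * iommu_enabled property, then fall back to scanning the ION debugfs
 * heap list. Returns one of the ION_DETECT_* codes above.
 */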
static RK_S32 check_sysfs_iommu()
{
    RK_U32 i = 0;
    RK_U32 dts_info_found = 0;
    RK_U32 ion_info_found = 0;
    RK_S32 ret = ION_DETECT_IOMMU_DISABLE;
    char path[MAX_PATH_NAME_SIZE];
    static char *dts_devices[] = {
        "vpu_service",
        "hevc_service",
        "rkvdec",
        "rkvenc",
        "vpu_combo",
    };
    static char *system_heaps[] = {
        "vmalloc",
        "system-heap",
    };

    mpp_env_get_u32("ion_debug", &ion_debug, 0);
#ifdef SOFIA_3GR_LINUX
    return ret;
#endif

    for (i = 0; i < MPP_ARRAY_ELEMS(dts_devices); i++) {
        snprintf(path, sizeof(path), "/proc/device-tree");
        if (find_dir_in_path(path, dts_devices[i], sizeof(path))) {
            if (find_dir_in_path(path, "iommu_enabled", sizeof(path))) {
                FILE *iommu_fp = fopen(path, "rb");

                if (iommu_fp) {
                    RK_U32 iommu_enabled = 0;
                    if (fread(&iommu_enabled, sizeof(RK_U32), 1, iommu_fp))
                        mpp_log("%s iommu_enabled %d\n", dts_devices[i],
                                (iommu_enabled > 0));
                    fclose(iommu_fp);
                    if (iommu_enabled)
                        ret = ION_DETECT_IOMMU_ENABLE;
                }
                dts_info_found = 1;
                break;
            }
        }
    }

    if (!dts_info_found) {
        for (i = 0; i < MPP_ARRAY_ELEMS(system_heaps); i++) {
            snprintf(path, sizeof(path), "/sys/kernel/debug/ion/heaps");
            if (find_dir_in_path(path, system_heaps[i], sizeof(path))) {
                mpp_log("%s found\n", system_heaps[i]);
                ret = ION_DETECT_IOMMU_ENABLE;
                ion_info_found = 1;
                break;
            }
        }
    }

    if (!dts_info_found && !ion_info_found) {
        mpp_err("can not find any hint from all possible devices\n");
        ret = ION_DETECT_NO_DTS;
    }

    return ret;
}

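/* Per-open allocator context: alignment, /dev/ion fd and the open flags. */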
typedef struct {
    size_t              alignment;
    RK_S32              ion_device;
    MppAllocFlagType    flags;
} allocator_ctx_ion;

static const char *dev_ion = "/dev/ion";
static RK_S32 ion_heap_id = -1;
static RK_U32 ion_heap_mask = (1 << ION_HEAP_TYPE_SYSTEM);
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

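/*
 * Open /dev/ion and create an allocator context. The ION heap is chosen
 * once per process, based on the IOMMU detection result, and cached in
 * ion_heap_id / ion_heap_mask.
 */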
static MPP_RET allocator_ion_open(void **ctx, size_t alignment, MppAllocFlagType flags)
{
    RK_S32 fd;
    allocator_ctx_ion *p;

    if (NULL == ctx) {
        mpp_err("os_allocator_open Android does not accept NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    *ctx = NULL;

    fd = open(dev_ion, O_RDWR | O_CLOEXEC);
    if (fd < 0) {
        mpp_err("open %s failed!\n", dev_ion);
        return MPP_ERR_UNKNOW;
    }

    ion_dbg(ION_DEVICE, "open ion dev fd %d\n", fd);

    p = mpp_malloc(allocator_ctx_ion, 1);
    if (NULL == p) {
        close(fd);
        mpp_err("os_allocator_open Android failed to allocate context\n");
        return MPP_ERR_MALLOC;
    } else {
        /*
         * Heap id detection:
         * if no vpu device node is found in the device tree fall back to
         * ION_HEAP_TYPE_CARVEOUT, otherwise pick ION_HEAP_TYPE_SYSTEM or
         * ION_HEAP_TYPE_DMA depending on the reported iommu_enabled status.
         */
        pthread_mutex_lock(&lock);
        if (ion_heap_id < 0) {
            int detect_result = check_sysfs_iommu();
            const char *heap_name = NULL;

            switch (detect_result) {
            case ION_DETECT_IOMMU_DISABLE : {
                ion_heap_mask   = (1 << ION_HEAP_TYPE_DMA);
                ion_heap_id     = ION_HEAP_TYPE_DMA;
                heap_name = "ION_HEAP_TYPE_DMA";
            } break;
            case ION_DETECT_IOMMU_ENABLE : {
                ion_heap_mask   = (1 << ION_HEAP_TYPE_SYSTEM);
                ion_heap_id     = ION_HEAP_TYPE_SYSTEM;
                heap_name = "ION_HEAP_TYPE_SYSTEM";
            } break;
            case ION_DETECT_NO_DTS : {
                ion_heap_mask   = (1 << ION_HEAP_TYPE_CARVEOUT);
                ion_heap_id     = ION_HEAP_TYPE_CARVEOUT;
                heap_name = "ION_HEAP_TYPE_CARVEOUT";
            } break;
            default : {
                mpp_err("invalid detect result %d\n", detect_result);
                ion_heap_mask   = (1 << ION_HEAP_TYPE_DMA);
                ion_heap_id     = ION_HEAP_TYPE_DMA;
                heap_name = "ION_HEAP_TYPE_DMA";
            } break;
            }
            mpp_log("using ion heap %s\n", heap_name);
        }
        pthread_mutex_unlock(&lock);
        p->alignment    = alignment;
        p->flags        = flags;
        p->ion_device   = fd;
        *ctx = p;
    }

    return MPP_OK;
}

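/*
 * Allocate info->size bytes from the selected ION heap, export the
 * buffer as a file descriptor and record the fd and kernel handle
 * in *info.
 */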
static MPP_RET allocator_ion_alloc(void *ctx, MppBufferInfo *info)
{
    MPP_RET ret = MPP_OK;
    int fd = -1;
    ion_user_handle_t hnd = -1;
    allocator_ctx_ion *p = NULL;

    if (NULL == ctx) {
        mpp_err("os_allocator_alloc Android does not accept NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    ion_dbg_func("enter: ctx %p size %d\n", ctx, info->size);

    p = (allocator_ctx_ion *)ctx;
    ret = ion_alloc(p->ion_device, info->size, p->alignment, ion_heap_mask,
                    0, &hnd);
    if (ret)
        mpp_err_f("ion_alloc failed ret %d\n", ret);
    else {
        ret = ion_map_fd(p->ion_device, hnd, &fd);
        if (ret)
            mpp_err_f("ion_map_fd failed ret %d\n", ret);
    }

    info->fd  = fd;
    info->ptr = NULL;
    info->hnd = (void *)(intptr_t)hnd;

    ion_dbg_func("leave: ret %d handle %d fd %d\n", ret, hnd, fd);
    return ret;
}

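/*
 * Import an external buffer fd: register it with the ion client via
 * ION_IOC_IMPORT, keep the returned handle in data->hnd and replace
 * data->fd with a freshly exported fd owned by this allocator.
 */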
static MPP_RET allocator_ion_import(void *ctx, MppBufferInfo *data)
{
    MPP_RET ret = MPP_NOK;
    allocator_ctx_ion *p = (allocator_ctx_ion *)ctx;
    struct ion_fd_data fd_data;

    ion_dbg_func("enter: ctx %p dev %d fd %d size %d\n",
                 ctx, p->ion_device, data->fd, data->size);

    fd_data.fd = data->fd;
    ret = ion_ioctl(p->ion_device, ION_IOC_IMPORT, &fd_data);
    if (0 > fd_data.handle) {
        mpp_err_f("fd %d import failed for %s\n", data->fd, strerror(errno));
        goto RET;
    }

    data->hnd = (void *)(intptr_t)fd_data.handle;
    ret = ion_map_fd(p->ion_device, fd_data.handle, &data->fd);
    data->ptr = NULL;
RET:
    ion_dbg_func("leave: ret %d handle %p\n", ret, data->hnd);
    return ret;
}

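/*
 * Map the buffer into user space on demand; data->ptr is left untouched
 * if the buffer has already been mapped.
 */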
static MPP_RET allocator_ion_mmap(void *ctx, MppBufferInfo *data)
{
    MPP_RET ret = MPP_OK;

    if (NULL == ctx) {
        mpp_err_f("do not accept NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    ion_dbg_func("enter: ctx %p fd %d size %d\n", ctx, data->fd, data->size);

    if (NULL == data->ptr)
        ret = ion_mmap(data->fd, data->size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, 0, &data->ptr);

    ion_dbg_func("leave: ret %d ptr %p\n", ret, data->ptr);
    return ret;
}

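/*
 * Release everything attached to a buffer: unmap the virtual address,
 * close the buffer fd and free the kernel handle.
 */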
static MPP_RET allocator_ion_free(void *ctx, MppBufferInfo *data)
{
    allocator_ctx_ion *p = NULL;
    if (NULL == ctx) {
        mpp_err_f("do not accept NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    ion_dbg_func("enter: ctx %p fd %d ptr %p size %d\n",
                 ctx, data->fd, data->ptr, data->size);

    p = (allocator_ctx_ion *)ctx;
    if (data->ptr) {
        munmap(data->ptr, data->size);
        data->ptr = NULL;
    }

    if (data->fd > 0) {
        close(data->fd);
        data->fd = -1;
    }

    if (data->hnd) {
        ion_free(p->ion_device, (ion_user_handle_t)((intptr_t)data->hnd));
        data->hnd = NULL;
    }

    ion_dbg_func("leave\n");
    return MPP_OK;
}

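/*
 * Close the /dev/ion fd and free the allocator context. Returns MPP_OK
 * or a negative errno code if close() failed.
 */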
static MPP_RET allocator_ion_close(void *ctx)
{
    int ret;
    allocator_ctx_ion *p;

    if (NULL == ctx) {
        mpp_err("os_allocator_close Android does not accept NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    ion_dbg_func("enter: ctx %p\n", ctx);

    p = (allocator_ctx_ion *)ctx;
    ret = close(p->ion_device);
    mpp_free(p);
    if (ret < 0)
        ret = (MPP_RET)(-errno);

    ion_dbg_func("leave: ret %d\n", ret);

    return ret;
}

static MppAllocFlagType os_allocator_ion_flags(void *ctx)
{
    allocator_ctx_ion *p = (allocator_ctx_ion *)ctx;

    return p ? p->flags : MPP_ALLOC_FLAG_NONE;
}

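/*
 * Allocator vtable registered for MPP_BUFFER_TYPE_ION. A minimal usage
 * sketch, assuming the os_allocator callbacks are driven directly (the
 * real call sites are the MPP buffer layer, not user code):
 *
 *     void *actx = NULL;
 *     MppBufferInfo info = { .size = 1920 * 1088 * 3 / 2 };
 *
 *     allocator_ion.open(&actx, 4096, MPP_ALLOC_FLAG_NONE);
 *     allocator_ion.alloc(actx, &info);    // fills info.fd / info.hnd
 *     allocator_ion.mmap(actx, &info);     // fills info.ptr on demand
 *     // ... use info.ptr / info.fd ...
 *     allocator_ion.free(actx, &info);
 *     allocator_ion.close(actx);
 *
 * The snippet is illustrative only; error handling is omitted.
 */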
os_allocator allocator_ion = {
    .type = MPP_BUFFER_TYPE_ION,
    .name = "ion",
    .open = allocator_ion_open,
    .close = allocator_ion_close,
    .alloc = allocator_ion_alloc,
    .free = allocator_ion_free,
    .import = allocator_ion_import,
    .release = allocator_ion_free,
    .mmap = allocator_ion_mmap,
    .flags = os_allocator_ion_flags,
};