/*
 * Copyright 2015 Rockchip Electronics Co. LTD
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define MODULE_TAG "mpp_ion"

#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#if defined(ARMLINUX)
#include <ion.h>
#else
#include "ion.h"
#endif

#include "os_mem.h"
#include "allocator_ion.h"

#include "mpp_env.h"
#include "mpp_mem.h"
#include "mpp_debug.h"
#include "mpp_common.h"
#include "mpp_thread.h"

static RK_U32 ion_debug = 0;
static pthread_once_t once = PTHREAD_ONCE_INIT;
static pthread_mutex_t scandir_lock;

#define ION_FUNCTION                (0x00000001)
#define ION_DEVICE                  (0x00000002)
#define ION_CLIENT                  (0x00000004)
#define ION_IOCTL                   (0x00000008)

#define ION_DETECT_IOMMU_DISABLE    (0x0)   /* use ION_HEAP_TYPE_DMA */
#define ION_DETECT_IOMMU_ENABLE     (0x1)   /* use ION_HEAP_TYPE_SYSTEM */
#define ION_DETECT_NO_DTS           (0x2)   /* use ION_HEAP_TYPE_CARVEOUT */

#define ion_dbg(flag, fmt, ...)     _mpp_dbg(ion_debug, flag, fmt, ## __VA_ARGS__)
#define ion_dbg_f(flag, fmt, ...)   _mpp_dbg_f(ion_debug, flag, fmt, ## __VA_ARGS__)
#define ion_dbg_func(fmt, ...)      ion_dbg_f(ION_FUNCTION, fmt, ## __VA_ARGS__)

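/*
 * Thin wrappers around the legacy handle-based ION ioctl interface:
 * ion_ioctl  - issue an ioctl on the ion device fd and log failures
 * ion_alloc  - ION_IOC_ALLOC, fills an ion_user_handle_t on success
 * ion_free   - ION_IOC_FREE, releases a handle
 * ion_map_fd - ION_IOC_MAP, turns a handle into a mmap-able fd
 * ion_mmap   - map an ion buffer fd into user space
 */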
static int ion_ioctl(int fd, int req, void *arg)
{
    int ret = ioctl(fd, req, arg);
    if (ret < 0) {
        mpp_err("ion_ioctl %x failed with code %d: %s\n", req,
                ret, strerror(errno));
        return -errno;
    }
    return ret;
}

static int ion_alloc(int fd, size_t len, size_t align, unsigned int heap_mask,
                     unsigned int flags, ion_user_handle_t *handle)
{
    int ret = -EINVAL;
    struct ion_allocation_data data = {
        .len = len,
        .align = align,
        .heap_id_mask = heap_mask,
        .flags = flags,
    };

    ion_dbg_func("enter: fd %d len %d align %d heap_mask %x flags %x",
                 fd, len, align, heap_mask, flags);

    if (handle) {
        ret = ion_ioctl(fd, ION_IOC_ALLOC, &data);
        if (ret >= 0)
            *handle = data.handle;
    }

    ion_dbg_func("leave: ret %d\n", ret);

    return ret;
}

static int ion_free(int fd, ion_user_handle_t handle)
{
    int ret;
    struct ion_handle_data data = {
        .handle = handle,
    };

    ion_dbg_func("enter: fd %d\n", fd);
    ret = ion_ioctl(fd, ION_IOC_FREE, &data);
    ion_dbg_func("leave: ret %d\n", ret);
    return ret;
}

static int ion_map_fd(int fd, ion_user_handle_t handle, int *map_fd)
{
    int ret;
    struct ion_fd_data data = {
        .handle = handle,
    };

    if (map_fd == NULL)
        return -EINVAL;

    ret = ion_ioctl(fd, ION_IOC_MAP, &data);
    if (ret < 0)
        return ret;

    *map_fd = data.fd;
    if (*map_fd < 0) {
        mpp_err("map ioctl returned negative fd\n");
        return -EINVAL;
    }

    return 0;
}

static int ion_mmap(int fd, size_t length, int prot, int flags, off_t offset,
                    void **ptr)
{
    static unsigned long pagesize_mask = 0;
    if (ptr == NULL)
        return -EINVAL;

    if (!pagesize_mask)
        pagesize_mask = sysconf(_SC_PAGESIZE) - 1;
    offset = offset & (~pagesize_mask);

    *ptr = mmap(NULL, length, prot, flags, fd, offset);
    if (*ptr == MAP_FAILED) {
        mpp_err("mmap failed: %s\n", strerror(errno));
        *ptr = NULL;
        return -errno;
    }
    return 0;
}

#include <dirent.h>

static const char *search_name = NULL;

static int _compare_name(const struct dirent *dir)
{
    if (search_name && strstr(dir->d_name, search_name))
        return 1;

    return 0;
}

static void scandir_lock_init(void)
{
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&scandir_lock, &attr);
    pthread_mutexattr_destroy(&attr);
}

/*
 * Directory search helper:
 * scan the directory at 'path' for an entry whose name contains 'dir_name'.
 * If a match is found, append the matched name to 'path'.
 *
 * Returns 0 on failure.
 * Returns a positive value (the length of the new path) on success.
 */
static RK_S32 find_dir_in_path(char *path, const char *dir_name,
                               size_t max_length)
{
    struct dirent **dir;
    RK_S32 path_len = strnlen(path, max_length);
    RK_S32 new_path_len = 0;
    RK_S32 n;

    pthread_once(&once, scandir_lock_init);
    pthread_mutex_lock(&scandir_lock);
    search_name = dir_name;
    n = scandir(path, &dir, _compare_name, alphasort);
    if (n <= 0) {
        mpp_log("scan %s for %s return %d\n", path, dir_name, n);
    } else {
        mpp_assert(n == 1);

        new_path_len = path_len;
        new_path_len += snprintf(path + path_len, max_length - path_len - 1,
                                 "/%s", dir[0]->d_name);
        free(dir[0]);
        free(dir);
    }
    search_name = NULL;
    pthread_mutex_unlock(&scandir_lock);
    return new_path_len;
}
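
/*
 * Illustrative use of find_dir_in_path() (a sketch only; it mirrors what
 * check_sysfs_iommu() below does, and the matched node name depends on the
 * actual device tree):
 *
 *   char path[256];
 *   snprintf(path, sizeof(path), "/proc/device-tree");
 *   if (find_dir_in_path(path, "vpu_service", sizeof(path)))
 *       mpp_log("matched node: %s\n", path);
 */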

#define MAX_PATH_NAME_SIZE  256

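/*
 * Probe the platform to decide which ion heap to use:
 * 1. look for a known video device node (vpu_service, hevc_service, rkvdec,
 *    rkvenc, vpu_combo) under /proc/device-tree and read its iommu_enabled
 *    property;
 * 2. if no device-tree hint is found, look for a system heap (vmalloc or
 *    system-heap) under /sys/kernel/debug/ion/heaps;
 * 3. if neither source gives an answer, return ION_DETECT_NO_DTS so the
 *    caller falls back to the carveout heap.
 */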
static RK_S32 check_sysfs_iommu()
{
    RK_U32 i = 0;
    RK_U32 dts_info_found = 0;
    RK_U32 ion_info_found = 0;
    RK_S32 ret = ION_DETECT_IOMMU_DISABLE;
    char path[MAX_PATH_NAME_SIZE];
    static char *dts_devices[] = {
        "vpu_service",
        "hevc_service",
        "rkvdec",
        "rkvenc",
        "vpu_combo",
    };
    static char *system_heaps[] = {
        "vmalloc",
        "system-heap",
    };

    mpp_env_get_u32("ion_debug", &ion_debug, 0);
#ifdef SOFIA_3GR_LINUX
    return ret;
#endif

    for (i = 0; i < MPP_ARRAY_ELEMS(dts_devices); i++) {
        snprintf(path, sizeof(path), "/proc/device-tree");
        if (find_dir_in_path(path, dts_devices[i], sizeof(path))) {
            if (find_dir_in_path(path, "iommu_enabled", sizeof(path))) {
                FILE *iommu_fp = fopen(path, "rb");

                if (iommu_fp) {
                    RK_U32 iommu_enabled = 0;
                    if (fread(&iommu_enabled, sizeof(RK_U32), 1, iommu_fp))
                        mpp_log("%s iommu_enabled %d\n", dts_devices[i],
                                (iommu_enabled > 0));
                    fclose(iommu_fp);
                    if (iommu_enabled)
                        ret = ION_DETECT_IOMMU_ENABLE;
                }
                dts_info_found = 1;
                break;
            }
        }
    }

    if (!dts_info_found) {
        for (i = 0; i < MPP_ARRAY_ELEMS(system_heaps); i++) {
            snprintf(path, sizeof(path), "/sys/kernel/debug/ion/heaps");
            if (find_dir_in_path(path, system_heaps[i], sizeof(path))) {
                mpp_log("%s found\n", system_heaps[i]);
                ret = ION_DETECT_IOMMU_ENABLE;
                ion_info_found = 1;
                break;
            }
        }
    }

    if (!dts_info_found && !ion_info_found) {
        mpp_err("can not find any hint from all possible devices\n");
        ret = ION_DETECT_NO_DTS;
    }

    return ret;
}

typedef struct {
    RK_U32  alignment;
    RK_S32  ion_device;
} allocator_ctx_ion;

static const char *dev_ion = "/dev/ion";
static RK_S32 ion_heap_id = -1;
static RK_U32 ion_heap_mask = (1 << ION_HEAP_TYPE_SYSTEM);
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

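/*
 * Open the allocator: open /dev/ion and, once per process (under 'lock'),
 * run check_sysfs_iommu() to pick the heap id/mask used by all later
 * allocations. The requested alignment and the device fd are kept in the
 * per-instance context.
 */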
static MPP_RET allocator_ion_open(void **ctx, MppAllocatorCfg *cfg)
{
    RK_S32 fd;
    allocator_ctx_ion *p;

    if (NULL == ctx) {
        mpp_err("allocator_ion_open does not accept NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    *ctx = NULL;

    fd = open(dev_ion, O_RDWR | O_CLOEXEC);
    if (fd < 0) {
        mpp_err("open %s failed!\n", dev_ion);
        return MPP_ERR_UNKNOW;
    }

    ion_dbg(ION_DEVICE, "open ion dev fd %d\n", fd);

    p = mpp_malloc(allocator_ctx_ion, 1);
    if (NULL == p) {
        close(fd);
        mpp_err("allocator_ion_open failed to allocate context\n");
        return MPP_ERR_MALLOC;
    } else {
        /*
         * do heap id detection here:
         * if a video device node (e.g. vpu_service) is found, check its
         * iommu_enabled status; otherwise fall back as check_sysfs_iommu()
         * decides (system heap, DMA heap or carveout heap).
         */
        pthread_mutex_lock(&lock);
        if (ion_heap_id < 0) {
            int detect_result = check_sysfs_iommu();
            const char *heap_name = NULL;

            switch (detect_result) {
            case ION_DETECT_IOMMU_DISABLE : {
                ion_heap_mask   = (1 << ION_HEAP_TYPE_DMA);
                ion_heap_id     = ION_HEAP_TYPE_DMA;
                heap_name = "ION_HEAP_TYPE_DMA";
            } break;
            case ION_DETECT_IOMMU_ENABLE : {
                ion_heap_mask   = (1 << ION_HEAP_TYPE_SYSTEM);
                ion_heap_id     = ION_HEAP_TYPE_SYSTEM;
                heap_name = "ION_HEAP_TYPE_SYSTEM";
            } break;
            case ION_DETECT_NO_DTS : {
                ion_heap_mask   = (1 << ION_HEAP_TYPE_CARVEOUT);
                ion_heap_id     = ION_HEAP_TYPE_CARVEOUT;
                heap_name = "ION_HEAP_TYPE_CARVEOUT";
            } break;
            default : {
                mpp_err("invalid detect result %d\n", detect_result);
                ion_heap_mask   = (1 << ION_HEAP_TYPE_DMA);
                ion_heap_id     = ION_HEAP_TYPE_DMA;
                heap_name = "ION_HEAP_TYPE_DMA";
            } break;
            }
            mpp_log("using ion heap %s\n", heap_name);
        }
        pthread_mutex_unlock(&lock);
        p->alignment    = cfg->alignment;
        p->ion_device   = fd;
        *ctx = p;
    }

    return MPP_OK;
}

static MPP_RET allocator_ion_alloc(void *ctx, MppBufferInfo *info)
{
    MPP_RET ret = MPP_OK;
    int fd = -1;
    ion_user_handle_t hnd = -1;
    allocator_ctx_ion *p = NULL;

    if (NULL == ctx) {
        mpp_err("allocator_ion_alloc does not accept NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    ion_dbg_func("enter: ctx %p size %d\n", ctx, info->size);

    p = (allocator_ctx_ion *)ctx;
    ret = ion_alloc(p->ion_device, info->size, p->alignment, ion_heap_mask,
                    0, &hnd);
    if (ret)
        mpp_err_f("ion_alloc failed ret %d\n", ret);
    else {
        ret = ion_map_fd(p->ion_device, hnd, &fd);
        if (ret)
            mpp_err_f("ion_map_fd failed ret %d\n", ret);
    }

    info->fd  = fd;
    info->ptr = NULL;
    info->hnd = (void *)(intptr_t)hnd;

    ion_dbg_func("leave: ret %d handle %d fd %d\n", ret, hnd, fd);
    return ret;
}

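/*
 * Import an externally allocated buffer: ION_IOC_IMPORT converts the
 * incoming dma-buf fd into an ion handle, then ion_map_fd() produces a new
 * fd that replaces data->fd. The original fd is not closed here.
 */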
static MPP_RET allocator_ion_import(void *ctx, MppBufferInfo *data)
{
    MPP_RET ret = MPP_NOK;
    allocator_ctx_ion *p = (allocator_ctx_ion *)ctx;
    struct ion_fd_data fd_data;

    ion_dbg_func("enter: ctx %p dev %d fd %d size %d\n",
                 ctx, p->ion_device, data->fd, data->size);

    fd_data.fd = data->fd;
    ret = ion_ioctl(p->ion_device, ION_IOC_IMPORT, &fd_data);
    if (ret < 0 || fd_data.handle < 0) {
        mpp_err_f("fd %d import failed for %s\n", data->fd, strerror(errno));
        goto RET;
    }

    data->hnd = (void *)(intptr_t)fd_data.handle;
    ret = ion_map_fd(p->ion_device, fd_data.handle, &data->fd);
    data->ptr = NULL;
RET:
    ion_dbg_func("leave: ret %d handle %d\n", ret, data->hnd);
    return ret;
}

static MPP_RET allocator_ion_mmap(void *ctx, MppBufferInfo *data)
{
    MPP_RET ret = MPP_OK;

    if (NULL == ctx) {
        mpp_err_f("does not accept NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    ion_dbg_func("enter: ctx %p fd %d size %d\n", ctx, data->fd, data->size);

    if (NULL == data->ptr)
        ret = ion_mmap(data->fd, data->size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, 0, &data->ptr);

    ion_dbg_func("leave: ret %d ptr %p\n", ret, data->ptr);
    return ret;
}

static MPP_RET allocator_ion_free(void *ctx, MppBufferInfo *data)
{
    allocator_ctx_ion *p = NULL;
    if (NULL == ctx) {
        mpp_err_f("does not accept NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    ion_dbg_func("enter: ctx %p fd %d ptr %p size %d\n",
                 ctx, data->fd, data->ptr, data->size);

    p = (allocator_ctx_ion *)ctx;
    if (data->ptr) {
        munmap(data->ptr, data->size);
        data->ptr = NULL;
    }

    if (data->fd > 0) {
        close(data->fd);
        data->fd = -1;
    }

    if (data->hnd) {
        ion_free(p->ion_device, (ion_user_handle_t)((intptr_t)data->hnd));
        data->hnd = NULL;
    }

    ion_dbg_func("leave\n");
    return MPP_OK;
}

static MPP_RET allocator_ion_close(void *ctx)
{
    int ret;
    allocator_ctx_ion *p;

    if (NULL == ctx) {
        mpp_err("allocator_ion_close does not accept NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    ion_dbg_func("enter: ctx %p\n", ctx);

    p = (allocator_ctx_ion *)ctx;
    ret = close(p->ion_device);
    mpp_free(p);
    if (ret < 0)
        ret = (MPP_RET)(-errno);

    ion_dbg_func("leave: ret %d\n", ret);

    return ret;
}

os_allocator allocator_ion = {
    .type = MPP_BUFFER_TYPE_ION,
    .open = allocator_ion_open,
    .close = allocator_ion_close,
    .alloc = allocator_ion_alloc,
    .free = allocator_ion_free,
    .import = allocator_ion_import,
    .release = allocator_ion_free,
    .mmap = allocator_ion_mmap,
};
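
/*
 * Typical call sequence through this ops table (a sketch only; inside MPP
 * these functions are normally driven by the buffer layer rather than
 * called directly, and the field values below are illustrative):
 *
 *   void *ctx = NULL;
 *   MppAllocatorCfg cfg = { .alignment = 4096 };
 *   MppBufferInfo info = { .size = 1 * 1024 * 1024 };
 *
 *   allocator_ion.open(&ctx, &cfg);    // open /dev/ion, pick a heap
 *   allocator_ion.alloc(ctx, &info);   // info.fd / info.hnd filled in
 *   allocator_ion.mmap(ctx, &info);    // info.ptr mapped read/write
 *   ...
 *   allocator_ion.free(ctx, &info);    // munmap + close fd + ION_IOC_FREE
 *   allocator_ion.close(ctx);          // close /dev/ion, free the context
 */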