1 /* SPDX-License-Identifier: Apache-2.0 OR MIT */
2 /*
3 * Copyright (c) 2010 Rockchip Electronics Co., Ltd.
4 */
5
6 #define MODULE_TAG "mpp_dma_heap"
7
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include "os_mem.h"
#include "allocator_dma_heap.h"

#include "mpp_env.h"
#include "mpp_mem.h"
#include "mpp_lock.h"
#include "mpp_debug.h"
#include "mpp_common.h"
#include "mpp_thread.h"
#include "mpp_runtime.h"
24
25 #define DMA_HEAP_VALID_FD_FLAGS (O_CLOEXEC | O_ACCMODE)
26 #define DMA_HEAP_VALID_HEAP_FLAGS (0)
27
/*
 * Local mirror of the kernel's struct dma_heap_allocation_data from
 * uapi <linux/dma-heap.h>, so DMA_HEAP_IOCTL_ALLOC can be issued
 * without depending on kernel headers being installed.
 */
struct dma_heap_allocation_data {
    RK_U64 len;        /* in:  requested allocation length in bytes */
    RK_U32 fd;         /* out: dma-buf fd returned by the kernel */
    RK_U32 fd_flags;   /* in:  fd creation flags (O_CLOEXEC | access mode) */
    RK_U64 heap_flags; /* in:  heap flags - kernel requires zero */
};
34
35 #define DMA_HEAP_IOC_MAGIC 'H'
36 #define DMA_HEAP_IOCTL_ALLOC _IOWR(DMA_HEAP_IOC_MAGIC, 0x0, struct dma_heap_allocation_data)
37
38 static RK_U32 dma_heap_debug = 0;
39
40 #define DMA_HEAP_OPS (0x00000001)
41 #define DMA_HEAP_DEVICE (0x00000002)
42 #define DMA_HEAP_IOCTL (0x00000004)
43 #define DMA_HEAP_CHECK (0x00000008)
44
45 #define dma_heap_dbg(flag, fmt, ...) _mpp_dbg(dma_heap_debug, flag, fmt, ## __VA_ARGS__)
46 #define dma_heap_dbg_f(flag, fmt, ...) _mpp_dbg_f(dma_heap_debug, flag, fmt, ## __VA_ARGS__)
47
48 #define dma_heap_dbg_ops(fmt, ...) dma_heap_dbg(DMA_HEAP_OPS, fmt, ## __VA_ARGS__)
49 #define dma_heap_dbg_dev(fmt, ...) dma_heap_dbg(DMA_HEAP_DEVICE, fmt, ## __VA_ARGS__)
50 #define dma_heap_dbg_ioctl(fmt, ...) dma_heap_dbg(DMA_HEAP_IOCTL, fmt, ## __VA_ARGS__)
51 #define dma_heap_dbg_chk(fmt, ...) dma_heap_dbg(DMA_HEAP_CHECK, fmt, ## __VA_ARGS__)
52
/* Per-open allocator context created by os_allocator_dma_heap_open(). */
typedef struct {
    RK_U32 alignment;  /* requested alignment (stored, not applied here) */
    RK_S32 device;     /* dma-heap device fd borrowed from heap_infos */
    RK_U32 flags;      /* effective MppAllocFlagType bits of the heap in use */
} allocator_ctx_dmaheap;
58
/* One /dev/dma_heap node: name, cached fd (-1 when unavailable) and the
 * MppAllocFlagType combination it serves. */
typedef struct DmaHeapInfo_t {
    const char *name;  /* node name under /dev/dma_heap/ */
    RK_S32 fd;         /* open fd, or -1 / <=0 when the heap is absent */
    RK_U32 flags;      /* flag bits actually provided by this heap */
} DmaHeapInfo;
64
/*
 * Heap lookup table. The array index IS the MppAllocFlagType bit
 * combination (CACHABLE / CMA / DMA32), so heap_infos[flags] selects the
 * matching kernel heap directly. fds are filled by the constructor
 * dma_heap_init() and may be dup'ed remaps when a heap is missing.
 */
static DmaHeapInfo heap_infos[MPP_ALLOC_FLAG_TYPE_NB] = {
    { "system-uncached", -1, MPP_ALLOC_FLAG_NONE , }, /* 0 */
    { "system-uncached-dma32", -1, MPP_ALLOC_FLAG_DMA32, }, /* 1 */
    { "system", -1, MPP_ALLOC_FLAG_CACHABLE , }, /* 2 */
    { "system-dma32", -1, MPP_ALLOC_FLAG_CACHABLE | MPP_ALLOC_FLAG_DMA32, }, /* 3 */
    { "cma-uncached", -1, MPP_ALLOC_FLAG_CMA , }, /* 4 */
    { "cma-uncached-dma32", -1, MPP_ALLOC_FLAG_CMA | MPP_ALLOC_FLAG_DMA32, }, /* 5 */
    { "cma", -1, MPP_ALLOC_FLAG_CMA | MPP_ALLOC_FLAG_CACHABLE , }, /* 6 */
    { "cma-dma32", -1, MPP_ALLOC_FLAG_CMA | MPP_ALLOC_FLAG_CACHABLE | MPP_ALLOC_FLAG_DMA32, }, /* 7 */
};
75
try_open_path(const char * name)76 static int try_open_path(const char *name)
77 {
78 static const char *heap_path = "/dev/dma_heap/";
79 char buf[64];
80 int fd;
81
82 snprintf(buf, sizeof(buf) - 1, "%s%s", heap_path, name);
83 fd = open(buf, O_RDONLY | O_CLOEXEC); // read permission is enough
84
85 dma_heap_dbg_ops("open dma_heap %-24s -> fd %d\n", name, fd);
86
87 return fd;
88 }
89
try_flip_flag(RK_U32 orig,RK_U32 flag)90 static MPP_RET try_flip_flag(RK_U32 orig, RK_U32 flag)
91 {
92 DmaHeapInfo *dst = &heap_infos[orig];
93 DmaHeapInfo *src;
94 RK_U32 used;
95
96 if (orig & flag)
97 used = (RK_U32)(orig & (~flag));
98 else
99 used = (RK_U32)(orig | flag);
100
101 src = &heap_infos[used];
102 if (src->fd > 0) {
103 /* found valid heap use it */
104 dst->fd = mpp_dup(src->fd);
105 dst->flags = src->flags;
106
107 dma_heap_dbg_chk("dma-heap type %x %s remap to %x %s\n",
108 orig, dst->name, used, src->name);
109 }
110
111 return dst->fd > 0 ? MPP_OK : MPP_NOK;
112 }
113
114 __attribute__ ((constructor))
dma_heap_init(void)115 void dma_heap_init(void)
116 {
117 DmaHeapInfo *info = NULL;
118 RK_U32 all_success = 1;
119 RK_U32 i;
120
121 mpp_env_get_u32("dma_heap_debug", &dma_heap_debug, 0);
122
123 /* go through all heap first */
124 for (i = 0; i < MPP_ARRAY_ELEMS(heap_infos); i++) {
125 info = &heap_infos[i];
126
127 if (info->fd > 0)
128 continue;
129
130 info->fd = try_open_path(info->name);
131 if (info->fd <= 0)
132 all_success = 0;
133 }
134
135 if (!all_success) {
136 /* check remaining failed heap mapping */
137 for (i = 0; i < MPP_ARRAY_ELEMS(heap_infos); i++) {
138 info = &heap_infos[i];
139
140 if (info->fd > 0)
141 continue;
142
143 /* if original heap failed then try revert cacheable flag */
144 if (MPP_OK == try_flip_flag((RK_U32)i, MPP_ALLOC_FLAG_CACHABLE))
145 continue;
146
147 /* if cacheable heap failed then try revert dma32 flag */
148 if (MPP_OK == try_flip_flag((RK_U32)i, MPP_ALLOC_FLAG_DMA32))
149 continue;
150
151 /* if dma32 heap failed then try revert both cacheable and dma32 flag */
152 if (MPP_OK == try_flip_flag((RK_U32)i, MPP_ALLOC_FLAG_CACHABLE | MPP_ALLOC_FLAG_DMA32))
153 continue;
154
155 dma_heap_dbg_chk("dma-heap type %x - %s remap failed\n", i, info->name);
156 }
157 }
158 }
159
160 __attribute__ ((destructor))
dma_heap_deinit(void)161 void dma_heap_deinit(void)
162 {
163 RK_U32 i;
164
165 for (i = 0; i < MPP_ARRAY_ELEMS(heap_infos); i++) {
166 DmaHeapInfo *info = &heap_infos[i];
167
168 if (info->fd > 0) {
169 close(info->fd);
170 info->fd = -1;
171 }
172 }
173 }
174
/*
 * Allocate 'len' bytes from the heap device 'fd' via
 * DMA_HEAP_IOCTL_ALLOC. On success *dmabuf_fd receives the new dma-buf
 * fd. 'flags' is currently unused. Returns the ioctl result (0 on
 * success, negative on failure).
 */
static int dma_heap_alloc(int fd, size_t len, RK_S32 *dmabuf_fd, RK_U32 flags)
{
    struct dma_heap_allocation_data data = {
        .len = len,
        .fd_flags = O_RDWR | O_CLOEXEC,
        .heap_flags = 0, /* kernel requires heap_flags to be zero */
    };
    int ret = ioctl(fd, DMA_HEAP_IOCTL_ALLOC, &data);

    if (ret < 0) {
        mpp_err("ioctl alloc failed for %s\n", strerror(errno));
        return ret;
    }

    dma_heap_dbg_ioctl("ioctl alloc get fd %d\n", data.fd);

    *dmabuf_fd = data.fd;

    (void) flags;
    return ret;
}
198
os_allocator_dma_heap_open(void ** ctx,size_t alignment,MppAllocFlagType flags)199 static MPP_RET os_allocator_dma_heap_open(void **ctx, size_t alignment, MppAllocFlagType flags)
200 {
201 allocator_ctx_dmaheap *p;
202 DmaHeapInfo *info = NULL;
203 RK_U32 type = 0;
204
205 mpp_env_get_u32("dma_heap_debug", &dma_heap_debug, dma_heap_debug);
206
207 if (NULL == ctx) {
208 mpp_err_f("does not accept NULL input\n");
209 return MPP_ERR_NULL_PTR;
210 }
211
212 *ctx = NULL;
213
214 info = &heap_infos[flags];
215 if (info->fd <= 0) {
216 mpp_err_f("open dma heap type %x %s failed!\n", type, info->name);
217 return MPP_ERR_UNKNOW;
218 }
219
220 p = mpp_malloc(allocator_ctx_dmaheap, 1);
221 if (NULL == p) {
222 mpp_err_f("failed to allocate context\n");
223 return MPP_ERR_MALLOC;
224 } else {
225 p->alignment = alignment;
226 p->flags = info->flags;
227 p->device = info->fd;
228 *ctx = p;
229 }
230
231 dma_heap_dbg_ops("dev %d open heap type %x:%x\n", p->device, flags, info->flags);
232
233 return MPP_OK;
234 }
235
os_allocator_dma_heap_alloc(void * ctx,MppBufferInfo * info)236 static MPP_RET os_allocator_dma_heap_alloc(void *ctx, MppBufferInfo *info)
237 {
238 MPP_RET ret = MPP_OK;
239 allocator_ctx_dmaheap *p = NULL;
240
241 if (NULL == ctx) {
242 mpp_err_f("does not accept NULL input\n");
243 return MPP_ERR_NULL_PTR;
244 }
245
246 p = (allocator_ctx_dmaheap *)ctx;
247
248 ret = dma_heap_alloc(p->device, info->size, (RK_S32 *)&info->fd, p->flags);
249
250 dma_heap_dbg_ops("dev %d alloc %3d size %d\n", p->device, info->fd, info->size);
251
252 if (ret) {
253 mpp_err_f("dma_heap_alloc failed ret %d\n", ret);
254 return ret;
255 }
256
257 info->ptr = NULL;
258 return ret;
259 }
260
os_allocator_dma_heap_import(void * ctx,MppBufferInfo * data)261 static MPP_RET os_allocator_dma_heap_import(void *ctx, MppBufferInfo *data)
262 {
263 allocator_ctx_dmaheap *p = (allocator_ctx_dmaheap *)ctx;
264 RK_S32 fd_ext = data->fd;
265 MPP_RET ret = MPP_OK;
266
267 mpp_assert(fd_ext > 0);
268
269 data->fd = mpp_dup(fd_ext);
270 data->ptr = NULL;
271
272 dma_heap_dbg_ops("dev %d import %3d -> %3d\n", p->device, fd_ext, data->fd);
273
274 mpp_assert(data->fd > 0);
275
276 return ret;
277 }
278
os_allocator_dma_heap_free(void * ctx,MppBufferInfo * data)279 static MPP_RET os_allocator_dma_heap_free(void *ctx, MppBufferInfo *data)
280 {
281 allocator_ctx_dmaheap *p = NULL;
282 MPP_RET ret = MPP_OK;
283
284 if (NULL == ctx) {
285 mpp_err_f("does not accept NULL input\n");
286 return MPP_ERR_NULL_PTR;
287 }
288
289 p = (allocator_ctx_dmaheap *)ctx;
290
291 dma_heap_dbg_ops("dev %d free %3d size %d ptr %p\n", p->device,
292 data->fd, data->size, data->ptr);
293
294 if (data->ptr) {
295 munmap(data->ptr, data->size);
296 data->ptr = NULL;
297 }
298 close(data->fd);
299
300 return ret;
301 }
302
os_allocator_dma_heap_close(void * ctx)303 static MPP_RET os_allocator_dma_heap_close(void *ctx)
304 {
305 if (NULL == ctx) {
306 mpp_err("os_allocator_close doesn't accept NULL input\n");
307 return MPP_ERR_NULL_PTR;
308 }
309
310 MPP_FREE(ctx);
311
312 return MPP_OK;
313 }
314
os_allocator_dma_heap_mmap(void * ctx,MppBufferInfo * data)315 static MPP_RET os_allocator_dma_heap_mmap(void *ctx, MppBufferInfo *data)
316 {
317 allocator_ctx_dmaheap *p;
318 MPP_RET ret = MPP_OK;
319 if (NULL == ctx) {
320 mpp_err("os_allocator_close do not accept NULL input\n");
321 return MPP_ERR_NULL_PTR;
322 }
323 p = (allocator_ctx_dmaheap *)ctx;
324
325 if (NULL == ctx)
326 return MPP_ERR_NULL_PTR;
327
328 if (NULL == data->ptr) {
329 int flags = PROT_READ;
330
331 if (fcntl(data->fd, F_GETFL) & O_RDWR)
332 flags |= PROT_WRITE;
333
334 data->ptr = mmap(NULL, data->size, flags, MAP_SHARED, data->fd, 0);
335 if (data->ptr == MAP_FAILED) {
336 mpp_err("mmap failed: %s\n", strerror(errno));
337 data->ptr = NULL;
338 return -errno;
339 }
340
341 dma_heap_dbg_ops("dev %d mmap %3d ptr %p (%s)\n", p->device,
342 data->fd, data->ptr,
343 flags & PROT_WRITE ? "RDWR" : "RDONLY");
344 }
345
346 return ret;
347 }
348
os_allocator_dma_heap_flags(void * ctx)349 static MppAllocFlagType os_allocator_dma_heap_flags(void *ctx)
350 {
351 allocator_ctx_dmaheap *p = (allocator_ctx_dmaheap *)ctx;
352
353 return p ? (MppAllocFlagType)p->flags : MPP_ALLOC_FLAG_NONE;
354 }
355
/*
 * Public ops table for the dma-heap allocator backend.
 * Note: .release and .free intentionally share the same handler - a
 * released buffer is torn down the same way as a freed one.
 */
os_allocator allocator_dma_heap = {
    .type = MPP_BUFFER_TYPE_DMA_HEAP,
    .name = "dma_heap",
    .open = os_allocator_dma_heap_open,
    .close = os_allocator_dma_heap_close,
    .alloc = os_allocator_dma_heap_alloc,
    .free = os_allocator_dma_heap_free,
    .import = os_allocator_dma_heap_import,
    .release = os_allocator_dma_heap_free,
    .mmap = os_allocator_dma_heap_mmap,
    .flags = os_allocator_dma_heap_flags,
};
368