xref: /OK3568_Linux_fs/external/mpp/osal/allocator/allocator_dma_heap.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright 2010 Rockchip Electronics S.LSI Co. LTD
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define MODULE_TAG "mpp_dma_heap"

#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include "os_mem.h"
#include "allocator_dma_heap.h"

#include "mpp_env.h"
#include "mpp_mem.h"
#include "mpp_lock.h"
#include "mpp_debug.h"
#include "mpp_common.h"
#include "mpp_thread.h"
#include "mpp_runtime.h"

#define DMA_HEAP_VALID_FD_FLAGS         (O_CLOEXEC | O_ACCMODE)
#define DMA_HEAP_VALID_HEAP_FLAGS       (0)

struct dma_heap_allocation_data {
    RK_U64 len;
    RK_U32 fd;
    RK_U32 fd_flags;
    RK_U64 heap_flags;
};

#define DMA_HEAP_IOC_MAGIC              'H'
#define DMA_HEAP_IOCTL_ALLOC            _IOWR(DMA_HEAP_IOC_MAGIC, 0x0, struct dma_heap_allocation_data)
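
/*
 * The struct and ioctl above mirror the Linux dma-heap UAPI (struct
 * dma_heap_allocation_data and DMA_HEAP_IOCTL_ALLOC from <linux/dma-heap.h>):
 * userspace fills in len, fd_flags and heap_flags, and on success the kernel
 * returns a dma-buf file descriptor in .fd. Duplicating the definitions here
 * presumably avoids a build-time dependency on kernel headers that ship the
 * dma-heap UAPI.
 */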

static RK_U32 dma_heap_debug = 0;

#define DMA_HEAP_OPS                    (0x00000001)
#define DMA_HEAP_DEVICE                 (0x00000002)
#define DMA_HEAP_IOCTL                  (0x00000004)

#define dma_heap_dbg(flag, fmt, ...)    _mpp_dbg(dma_heap_debug, flag, fmt, ## __VA_ARGS__)
#define dma_heap_dbg_f(flag, fmt, ...)  _mpp_dbg_f(dma_heap_debug, flag, fmt, ## __VA_ARGS__)

#define dma_heap_dbg_ops(fmt, ...)      dma_heap_dbg(DMA_HEAP_OPS, fmt, ## __VA_ARGS__)
#define dma_heap_dbg_dev(fmt, ...)      dma_heap_dbg(DMA_HEAP_DEVICE, fmt, ## __VA_ARGS__)
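
/*
 * dma_heap_debug is a bitmask read from the "dma_heap_debug" environment
 * variable in os_allocator_dma_heap_open(); each DMA_HEAP_* bit above enables
 * one category of trace output through the mpp debug macros.
 */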

typedef struct {
    RK_U32  alignment;
    RK_S32  device;
    RK_U32  flags;
} allocator_ctx_dmaheap;

typedef enum DmaHeapType_e {
    DMA_HEAP_CMA        = (1 << 0),
    DMA_HEAP_CACHABLE   = (1 << 1),
    DMA_HEAP_DMA32      = (1 << 2),
    DMA_HEAP_TYPE_MASK  = DMA_HEAP_CMA | DMA_HEAP_CACHABLE | DMA_HEAP_DMA32,
    DMA_HEAP_TYPE_NB,
} DmaHeapType;

static const char *heap_names[] = {
    "system-uncached",          /* 0 - default */
    "cma-uncached",             /* 1 -                                      DMA_HEAP_CMA */
    "system",                   /* 2 -                  DMA_HEAP_CACHABLE                */
    "cma",                      /* 3 -                  DMA_HEAP_CACHABLE | DMA_HEAP_CMA */
    "system-uncached-dma32",    /* 4 - DMA_HEAP_DMA32                                    */
    "cma-uncached",             /* 5 - DMA_HEAP_DMA32                     | DMA_HEAP_CMA */
    "system-dma32",             /* 6 - DMA_HEAP_DMA32 | DMA_HEAP_CACHABLE                */
    "cma",                      /* 7 - DMA_HEAP_DMA32 | DMA_HEAP_CACHABLE | DMA_HEAP_CMA */
};
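
/*
 * The index into heap_names[] is the bitwise OR of the DmaHeapType bits, so a
 * flag combination selects its heap node directly as /dev/dma_heap/<name>.
 * Which of these heaps actually exist depends on the kernel configuration of
 * the target board.
 */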

static int heap_fds[DMA_HEAP_TYPE_NB];
static pthread_once_t dma_heap_once = PTHREAD_ONCE_INIT;
static spinlock_t dma_heap_lock;

static int dma_heap_alloc(int fd, size_t len, RK_S32 *dmabuf_fd, RK_U32 flags)
{
    int ret;
    struct dma_heap_allocation_data data = {
        .len = len,
        .fd_flags = O_RDWR | O_CLOEXEC,
        .heap_flags = 0,    /* kernel accepts only heap_flags == 0 (DMA_HEAP_VALID_HEAP_FLAGS) */
    };

    (void)flags;            /* caller flags select the heap device, not the ioctl heap_flags */

    ret = ioctl(fd, DMA_HEAP_IOCTL_ALLOC, &data);
    if (ret < 0) {
        mpp_err("ioctl alloc failed for %s\n", strerror(errno));
        return ret;
    }

    dma_heap_dbg(DMA_HEAP_IOCTL, "ioctl alloc get fd %d\n", data.fd);

    *dmabuf_fd = data.fd;

    return ret;
}
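
/*
 * The fd returned by DMA_HEAP_IOCTL_ALLOC is an ordinary dma-buf file
 * descriptor: it can be mmap()ed for CPU access, imported by other drivers,
 * or passed to another process. The memory itself is released once every
 * descriptor and kernel attachment referring to it is gone.
 */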

static void heap_fds_init(void)
{
    memset(heap_fds, -1, sizeof(heap_fds));
    mpp_spinlock_init(&dma_heap_lock);
}

static int heap_fd_open(DmaHeapType type)
{
    mpp_assert(type < DMA_HEAP_TYPE_NB);

    mpp_spinlock_lock(&dma_heap_lock);

    if (heap_fds[type] <= 0) {
        static const char *heap_path = "/dev/dma_heap/";
        char name[64];
        int fd;

        snprintf(name, sizeof(name) - 1, "%s%s", heap_path, heap_names[type]);
        fd = open(name, O_RDONLY | O_CLOEXEC); // read permission is enough
        if (fd <= 0)
            mpp_err("dma-heap open %s %s\n", name, strerror(errno));

        mpp_assert(fd > 0);

        dma_heap_dbg(DMA_HEAP_DEVICE, "open dma heap dev %s fd %d\n", name, fd);
        heap_fds[type] = fd;
    }

    mpp_spinlock_unlock(&dma_heap_lock);

    return heap_fds[type];
}
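
/*
 * Heap device fds are opened lazily and cached per process in heap_fds[],
 * guarded by dma_heap_lock; heap_fds_init() runs once via pthread_once() to
 * mark every slot as unopened (-1). As the comment in heap_fd_open() notes,
 * O_RDONLY is sufficient for issuing DMA_HEAP_IOCTL_ALLOC on the device.
 */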


static MPP_RET os_allocator_dma_heap_open(void **ctx, MppAllocatorCfg *cfg)
{
    allocator_ctx_dmaheap *p;
    DmaHeapType type = 0;
    RK_S32 fd;

    mpp_env_get_u32("dma_heap_debug", &dma_heap_debug, 0);

    pthread_once(&dma_heap_once, heap_fds_init);

    if (NULL == ctx) {
        mpp_err_f("does not accept NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    *ctx = NULL;

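    /*
     * cfg->flags carries the MPP_BUFFER_FLAGS_* bits already shifted right by
     * 16, so each buffer flag lines up with one DmaHeapType bit and the
     * resulting combination picks the matching entry of heap_names[].
     */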
    if (cfg->flags & (MPP_BUFFER_FLAGS_CONTIG >> 16))
        type |= DMA_HEAP_CMA;

    if (cfg->flags & (MPP_BUFFER_FLAGS_CACHABLE >> 16))
        type |= DMA_HEAP_CACHABLE;

    if (cfg->flags & (MPP_BUFFER_FLAGS_DMA32 >> 16))
        type |= DMA_HEAP_DMA32;

    fd = heap_fd_open(type);
    if (fd < 0) {
        mpp_err_f("open dma heap type %x failed!\n", type);
        return MPP_ERR_UNKNOW;
    }

    p = mpp_malloc(allocator_ctx_dmaheap, 1);
    if (NULL == p) {
        close(fd);
        mpp_err_f("failed to allocate context\n");
        return MPP_ERR_MALLOC;
    } else {
        /* record the allocator configuration and the cached heap device fd */
        p->alignment    = cfg->alignment;
        p->flags        = cfg->flags;
        p->device       = fd;
        *ctx = p;
    }

    dma_heap_dbg_ops("dev %d open heap type %x:%x\n", fd, cfg->flags, type);

    return MPP_OK;
}

static MPP_RET os_allocator_dma_heap_alloc(void *ctx, MppBufferInfo *info)
{
    MPP_RET ret = MPP_OK;
    allocator_ctx_dmaheap *p = NULL;

    if (NULL == ctx) {
        mpp_err_f("does not accept NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    p = (allocator_ctx_dmaheap *)ctx;

    ret = dma_heap_alloc(p->device, info->size, (RK_S32 *)&info->fd, p->flags);

    dma_heap_dbg_ops("dev %d alloc %3d size %d\n", p->device, info->fd, info->size);

    if (ret) {
        mpp_err_f("dma_heap_alloc failed ret %d\n", ret);
        return ret;
    }

    info->ptr = NULL;
    return ret;
}

static MPP_RET os_allocator_dma_heap_import(void *ctx, MppBufferInfo *data)
{
    allocator_ctx_dmaheap *p = (allocator_ctx_dmaheap *)ctx;
    RK_S32 fd_ext = data->fd;
    MPP_RET ret = MPP_OK;

    mpp_assert(fd_ext > 0);

    data->fd = dup(fd_ext);
    data->ptr = NULL;

    dma_heap_dbg_ops("dev %d import %3d -> %3d\n", p->device, fd_ext, data->fd);

    mpp_assert(data->fd > 0);

    return ret;
}
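
/*
 * Importing an external dma-buf only dup()s the caller's fd, so this allocator
 * holds its own reference and os_allocator_dma_heap_free() can close it
 * unconditionally without touching the descriptor owned by the caller.
 */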

static MPP_RET os_allocator_dma_heap_free(void *ctx, MppBufferInfo *data)
{
    allocator_ctx_dmaheap *p = NULL;
    MPP_RET ret = MPP_OK;

    if (NULL == ctx) {
        mpp_err_f("does not accept NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    p = (allocator_ctx_dmaheap *)ctx;

    dma_heap_dbg_ops("dev %d free  %3d size %d ptr %p\n", p->device,
                     data->fd, data->size, data->ptr);

    if (data->ptr) {
        munmap(data->ptr, data->size);
        data->ptr = NULL;
    }
    close(data->fd);

    return ret;
}
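
/*
 * free() only drops this side's references: the CPU mapping is removed and the
 * fd is closed, while the underlying dma-buf pages stay alive until the last
 * user (fd, mapping or kernel attachment) releases them.
 */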

static MPP_RET os_allocator_dma_heap_close(void *ctx)
{
    int ret;
    allocator_ctx_dmaheap *p;

    if (NULL == ctx) {
        mpp_err("os_allocator_close doesn't accept NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    p = (allocator_ctx_dmaheap *)ctx;
    dma_heap_dbg_ops("dev %d close\n", p->device);

    ret = close(p->device);
    mpp_free(p);
    if (ret < 0)
        return (MPP_RET)(-errno);

    return MPP_OK;
}

static MPP_RET os_allocator_dma_heap_mmap(void *ctx, MppBufferInfo *data)
{
    allocator_ctx_dmaheap *p;
    MPP_RET ret = MPP_OK;

    if (NULL == ctx) {
        mpp_err("os_allocator_mmap does not accept NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    p = (allocator_ctx_dmaheap *)ctx;

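    /*
     * The mapping protection is derived from how the dma-buf fd was opened:
     * PROT_WRITE is requested only when fcntl(F_GETFL) reports O_RDWR, so an
     * imported read-only buffer is never mapped writable.
     */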
    if (NULL == data->ptr) {
        int flags = PROT_READ;

        if (fcntl(data->fd, F_GETFL) & O_RDWR)
            flags |= PROT_WRITE;

        data->ptr = mmap(NULL, data->size, flags, MAP_SHARED, data->fd, 0);
        if (data->ptr == MAP_FAILED) {
            mpp_err("mmap failed: %s\n", strerror(errno));
            data->ptr = NULL;
            return -errno;
        }

        dma_heap_dbg_ops("dev %d mmap  %3d ptr  %p (%s)\n", p->device,
                         data->fd, data->ptr,
                         flags & PROT_WRITE ? "RDWR" : "RDONLY");
    }

    return ret;
}

os_allocator allocator_dma_heap = {
    .type = MPP_BUFFER_TYPE_DMA_HEAP,
    .open = os_allocator_dma_heap_open,
    .close = os_allocator_dma_heap_close,
    .alloc = os_allocator_dma_heap_alloc,
    .free = os_allocator_dma_heap_free,
    .import = os_allocator_dma_heap_import,
    .release = os_allocator_dma_heap_free,
    .mmap = os_allocator_dma_heap_mmap,
};
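
/*
 * Rough usage sketch (hypothetical caller code, going through the public
 * MppBuffer API rather than this ops table directly): a buffer group created
 * with MPP_BUFFER_TYPE_DMA_HEAP routes its allocations through allocator_dma_heap.
 *
 *     MppBufferGroup group = NULL;
 *     MppBuffer buffer = NULL;
 *
 *     mpp_buffer_group_get_internal(&group, MPP_BUFFER_TYPE_DMA_HEAP);
 *     mpp_buffer_get(group, &buffer, 1920 * 1080 * 3 / 2);   // -> ...dma_heap_alloc()
 *     void *ptr = mpp_buffer_get_ptr(buffer);                // typically ends up in ...dma_heap_mmap()
 *     int fd = mpp_buffer_get_fd(buffer);                    // dma-buf fd for other drivers/processes
 *     mpp_buffer_put(buffer);                                // -> ...dma_heap_free()
 *     mpp_buffer_group_put(group);
 */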