/* SPDX-License-Identifier: Apache-2.0 OR MIT */
/*
 * Copyright (c) 2010 Rockchip Electronics Co., Ltd.
 */

#define MODULE_TAG "mpp_drm"

#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/drm.h>
#include <linux/drm_mode.h>

#include "os_mem.h"
#include "allocator_drm.h"

#include "mpp_env.h"
#include "mpp_mem.h"
#include "mpp_debug.h"
#include "mpp_common.h"
#include "mpp_runtime.h"
#define DRM_FUNCTION    (0x00000001)
#define DRM_DEVICE      (0x00000002)
#define DRM_IOCTL       (0x00000004)

#define drm_dbg(flag, fmt, ...)  _mpp_dbg_f(drm_debug, flag, fmt, ## __VA_ARGS__)
#define drm_dbg_func(fmt, ...)   drm_dbg(DRM_FUNCTION, fmt, ## __VA_ARGS__)
#define drm_dbg_dev(fmt, ...)    drm_dbg(DRM_DEVICE, fmt, ## __VA_ARGS__)
#define drm_dbg_ioctl(fmt, ...)  drm_dbg(DRM_IOCTL, fmt, ## __VA_ARGS__)

static RK_U32 drm_debug = 0;
/* memory type definitions. */
enum drm_rockchip_gem_mem_type {
    /* physically contiguous memory. */
    ROCKCHIP_BO_CONTIG      = 1 << 0,
    /* cacheable mapping. */
    ROCKCHIP_BO_CACHABLE    = 1 << 1,
    /* write-combine mapping. */
    ROCKCHIP_BO_WC          = 1 << 2,
    ROCKCHIP_BO_SECURE      = 1 << 3,
    /* keep kmap for cma buffer or alloc kmap for other type memory */
    ROCKCHIP_BO_ALLOC_KMAP  = 1 << 4,
    /* alloc page with gfp_dma32 */
    ROCKCHIP_BO_DMA32       = 1 << 5,
    ROCKCHIP_BO_MASK        = ROCKCHIP_BO_CONTIG | ROCKCHIP_BO_CACHABLE |
                              ROCKCHIP_BO_WC | ROCKCHIP_BO_SECURE |
                              ROCKCHIP_BO_ALLOC_KMAP | ROCKCHIP_BO_DMA32,
};
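
/*
 * These flags form a bitmask and may be combined, e.g. a cacheable CMA
 * allocation is requested with ROCKCHIP_BO_CONTIG | ROCKCHIP_BO_CACHABLE;
 * to_rockchip_gem_mem_flag() below builds such masks from MPP alloc flags.
 */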

typedef struct {
    RK_U32  alignment;
    RK_S32  drm_device;
    RK_U32  flags;
} allocator_ctx_drm;

/* try renderD128 first to avoid GKI kernel permission issues */
static const char *dev_drm[] = {
    "/dev/dri/renderD128",
    "/dev/dri/card0",
};
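
/*
 * Render nodes (/dev/dri/renderD*) expose only unprivileged render ioctls
 * and have no DRM-master semantics, so they are usually accessible where
 * the primary node (/dev/dri/card0) is locked down, e.g. on Android GKI
 * kernels. Hence renderD128 is tried before card0.
 */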

static RK_U32 to_rockchip_gem_mem_flag(RK_U32 flags)
{
    RK_U32 ret = 0;

    if (flags & MPP_ALLOC_FLAG_DMA32)
        ret |= ROCKCHIP_BO_DMA32;

    if (flags & MPP_ALLOC_FLAG_CACHABLE)
        ret |= ROCKCHIP_BO_CACHABLE;

    if (flags & MPP_ALLOC_FLAG_CMA)
        ret |= ROCKCHIP_BO_CONTIG;

    return ret;
}

static int drm_ioctl(int fd, int req, void *arg)
{
    int ret;

    /* retry when the ioctl is interrupted by a signal or asked to try again */
    do {
        ret = ioctl(fd, req, arg);
    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

    drm_dbg_ioctl("%x ret %d: %s\n", req, ret, strerror(errno));

    return ret;
}

static int drm_handle_to_fd(int fd, RK_U32 handle, int *map_fd, RK_U32 flags)
{
    int ret;
    struct drm_prime_handle dph;

    if (map_fd == NULL)
        return -EINVAL;

    memset(&dph, 0, sizeof(struct drm_prime_handle));
    dph.handle = handle;
    dph.fd = -1;
    dph.flags = flags;

    /* export the GEM handle as a dma-buf file descriptor */
    ret = drm_ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &dph);
    if (ret < 0)
        return ret;

    *map_fd = dph.fd;

    drm_dbg_func("dev %d handle %d flags %x get fd %d\n", fd, handle, dph.flags, *map_fd);

    if (*map_fd < 0) {
        mpp_err_f("map ioctl returned negative fd\n");
        return -EINVAL;
    }

    return ret;
}

static int drm_alloc(int fd, size_t len, size_t align, RK_U32 *handle, RK_U32 flags)
{
    int ret;
    struct drm_mode_create_dumb dmcb;

    if (handle == NULL)
        return -EINVAL;

    /* pack the aligned length into a width * height * bpp dumb buffer request */
    memset(&dmcb, 0, sizeof(struct drm_mode_create_dumb));
    dmcb.bpp = 8;
    dmcb.width = (len + align - 1) & (~(align - 1));
    dmcb.height = 1;
    dmcb.flags = to_rockchip_gem_mem_flag(flags);

    ret = drm_ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &dmcb);
    if (ret < 0)
        return ret;

    *handle = dmcb.handle;
    drm_dbg_func("dev %d alloc aligned %zu flags %x|%x handle %d size %llu\n", fd,
                 align, flags, dmcb.flags, dmcb.handle,
                 (unsigned long long)dmcb.size);

    return ret;
}

static int drm_free(int fd, RK_U32 handle)
{
    struct drm_mode_destroy_dumb data = {
        .handle = handle,
    };
    return drm_ioctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &data);
}
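
/*
 * Allocation lifecycle used below: create a dumb buffer, export it as a
 * dma-buf fd, then destroy the GEM handle right away. The exported fd holds
 * its own reference to the buffer, so dropping the handle early reduces
 * per-device handle and iova usage while the fd keeps the memory alive.
 */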

static MPP_RET os_allocator_drm_open(void **ctx, size_t alignment, MppAllocFlagType flags)
{
    allocator_ctx_drm *p;
    RK_S32 fd = -1;
    RK_S32 i;

    if (NULL == ctx) {
        mpp_err_f("does not accept NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    *ctx = NULL;

    mpp_env_get_u32("drm_debug", &drm_debug, 0);

    for (i = 0; i < (RK_S32)MPP_ARRAY_ELEMS(dev_drm); i++) {
        fd = open(dev_drm[i], O_RDWR | O_CLOEXEC);
        if (fd >= 0)
            break;
    }

    if (fd < 0) {
        mpp_err_f("failed to open all drm devices.\n");
        mpp_err("Please check the following device paths and access permissions:\n");
        for (i = 0; i < (RK_S32)MPP_ARRAY_ELEMS(dev_drm); i++)
            mpp_err("%s\n", dev_drm[i]);
        return MPP_ERR_UNKNOW;
    }

    /* drop master by default to avoid becoming the drm master */
    drm_ioctl(fd, DRM_IOCTL_DROP_MASTER, 0);

    drm_dbg_dev("open drm dev fd %d flags %x\n", fd, flags);

    p = mpp_malloc(allocator_ctx_drm, 1);
    if (NULL == p) {
        close(fd);
        mpp_err_f("failed to allocate context\n");
        return MPP_ERR_MALLOC;
    }

    /* the default drm allocation is cma, nothing extra to configure here */
    p->alignment = alignment;
    p->flags = flags;
    p->drm_device = fd;
    *ctx = p;

    return MPP_OK;
}

static MPP_RET os_allocator_drm_alloc(void *ctx, MppBufferInfo *info)
{
    MPP_RET ret = MPP_OK;
    allocator_ctx_drm *p = NULL;

    if (NULL == ctx) {
        mpp_err_f("does not accept NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    p = (allocator_ctx_drm *)ctx;

    drm_dbg_func("dev %d alloc alignment %d size %zu\n", p->drm_device,
                 p->alignment, info->size);

    /* the 32-bit GEM handle is stored in the low bits of the hnd pointer */
    ret = drm_alloc(p->drm_device, info->size, p->alignment,
                    (RK_U32 *)&info->hnd, p->flags);
    if (ret) {
        mpp_err_f("drm_alloc failed ret %d\n", ret);
        return ret;
    }

    ret = drm_handle_to_fd(p->drm_device, (RK_U32)((intptr_t)info->hnd),
                           &info->fd, DRM_CLOEXEC | DRM_RDWR);
    if (ret) {
        mpp_err_f("handle_to_fd failed ret %d\n", ret);
        drm_free(p->drm_device, (RK_U32)((intptr_t)info->hnd));
        info->hnd = NULL;
        return ret;
    }

    drm_dbg_func("dev %d get handle %d with fd %d\n", p->drm_device,
                 (RK_U32)((intptr_t)info->hnd), info->fd);

    /* release the handle to reduce iova usage; the dma-buf fd keeps the buffer alive */
    drm_free(p->drm_device, (RK_U32)((intptr_t)info->hnd));
    info->hnd = NULL;
    info->ptr = NULL;

    return ret;
}
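
/*
 * Note that after a successful alloc the MppBufferInfo carries only the
 * dma-buf fd: hnd and ptr are reset to NULL and the CPU mapping is created
 * lazily by os_allocator_drm_mmap() on first use.
 */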

static MPP_RET os_allocator_drm_import(void *ctx, MppBufferInfo *data)
{
    allocator_ctx_drm *p = (allocator_ctx_drm *)ctx;
    RK_S32 fd_ext = data->fd;
    MPP_RET ret = MPP_OK;

    drm_dbg_func("enter dev %d fd %d\n", p->drm_device, fd_ext);

    mpp_assert(fd_ext > 0);

    /* duplicate the external fd so the caller keeps ownership of its own fd */
    data->fd = mpp_dup(fd_ext);
    data->ptr = NULL;

    if (data->fd <= 0) {
        mpp_err_f("fd dup returned invalid fd %d\n", data->fd);
        ret = MPP_NOK;
    }

    drm_dbg_func("leave dev %d fd %d -> %d\n", p->drm_device, fd_ext, data->fd);

    return ret;
}

static MPP_RET os_allocator_drm_free(void *ctx, MppBufferInfo *data)
{
    allocator_ctx_drm *p = NULL;

    if (NULL == ctx) {
        mpp_err_f("does not accept NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    p = (allocator_ctx_drm *)ctx;

    drm_dbg_func("dev %d handle %p unmap %p fd %d size %zu\n", p->drm_device,
                 data->hnd, data->ptr, data->fd, data->size);

    if (data->ptr) {
        munmap(data->ptr, data->size);
        data->ptr = NULL;
    }

    if (data->fd > 0) {
        close(data->fd);
        data->fd = -1;
    } else {
        mpp_err_f("cannot close invalid fd %d\n", data->fd);
    }

    return MPP_OK;
}

static MPP_RET os_allocator_drm_close(void *ctx)
{
    int ret;
    allocator_ctx_drm *p;

    if (NULL == ctx) {
        mpp_err("os_allocator_drm_close does not accept NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    p = (allocator_ctx_drm *)ctx;
    drm_dbg_func("dev %d\n", p->drm_device);

    ret = close(p->drm_device);
    mpp_free(p);
    if (ret < 0)
        return (MPP_RET)(-errno);

    return MPP_OK;
}

static MPP_RET os_allocator_drm_mmap(void *ctx, MppBufferInfo *data)
{
    allocator_ctx_drm *p;
    MPP_RET ret = MPP_OK;

    if (NULL == ctx) {
        mpp_err("os_allocator_drm_mmap does not accept NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    p = (allocator_ctx_drm *)ctx;

    if (NULL == data->ptr) {
        int flags = PROT_READ;

        /* map writable only when the dma-buf fd was opened for read/write */
        if (fcntl(data->fd, F_GETFL) & O_RDWR)
            flags |= PROT_WRITE;

        data->ptr = mmap(NULL, data->size, flags, MAP_SHARED, data->fd, 0);
        if (data->ptr == MAP_FAILED) {
            mpp_err("mmap failed: %s\n", strerror(errno));
            data->ptr = NULL;
            return (MPP_RET)(-errno);
        }

        drm_dbg_func("dev %d mmap fd %d to %p (%s)\n", p->drm_device,
                     data->fd, data->ptr,
                     flags & PROT_WRITE ? "RDWR" : "RDONLY");
    }

    return ret;
}

static MppAllocFlagType os_allocator_drm_flags(void *ctx)
{
    allocator_ctx_drm *p = (allocator_ctx_drm *)ctx;

    return p ? (MppAllocFlagType)p->flags : MPP_ALLOC_FLAG_NONE;
}

os_allocator allocator_drm = {
    .type = MPP_BUFFER_TYPE_DRM,
    .name = "drm",
    .open = os_allocator_drm_open,
    .close = os_allocator_drm_close,
    .alloc = os_allocator_drm_alloc,
    .free = os_allocator_drm_free,
    .import = os_allocator_drm_import,
    .release = os_allocator_drm_free,
    .mmap = os_allocator_drm_mmap,
    .flags = os_allocator_drm_flags,
};
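
/*
 * A minimal usage sketch of this allocator through its os_allocator ops,
 * assuming the MppBufferInfo layout from the MPP headers; illustrative
 * only, not part of this file:
 *
 *   void *actx = NULL;
 *   MppBufferInfo info;
 *
 *   memset(&info, 0, sizeof(info));
 *   info.size = 4096;
 *
 *   if (allocator_drm.open(&actx, 4096, MPP_ALLOC_FLAG_NONE) == MPP_OK &&
 *       allocator_drm.alloc(actx, &info) == MPP_OK) {
 *       allocator_drm.mmap(actx, &info);  // lazy CPU mapping
 *       // ... use info.ptr, or share info.fd with another process ...
 *       allocator_drm.free(actx, &info);  // munmap + close fd
 *   }
 *   if (actx)
 *       allocator_drm.close(actx);
 */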