/*
 * Copyright (c) 2022 Rockchip Electronics Co. Ltd.
 */
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <xf86drm.h>
#include <rockchip_drm.h>
#include <fcntl.h>
#include <errno.h>
#include <pthread.h>

#include "rkcrypto_mem.h"
#include "rkcrypto_trace.h"
#include "rk_list.h"
#include "dma-heap.h"

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x)		(sizeof(x) / sizeof((x)[0]))
#endif

#define DRM_MODULE_NAME		"rockchip"
#define DRM_CARD_PATH		"/dev/dri/card0"
#define DMA_HEAP_PATH		"/dev/rk_dma_heap/rk-dma-heap-cma"

#define IS_DMA_INVALID()	(dma_node_fd < 0)

struct mem_pool_node {
	rk_crypto_mem		mem;
	uint32_t		handle;
	uint32_t		flags;
	struct list_head	list;
};

struct mem_ops {
	int (*init)(void);
	void (*deinit)(int dma_node_fd);
	struct mem_pool_node *(*alloc_node)(int dma_node_fd, uint32_t size);
	void (*free_node)(int dma_node_fd, struct mem_pool_node *node);
};

static int dma_node_fd = -1;
static struct list_head mem_pool_list;
pthread_mutex_t dma_mutex = PTHREAD_MUTEX_INITIALIZER;
static int mem_init_cnt;
struct mem_ops *cur_mem_pos;

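/*
 * Open the legacy Rockchip DRM card node; returns the fd on success or a
 * negative value if it cannot be opened.
 */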
static int crypto_init_drm(void)
{
	int fd;

	fd = open(DRM_CARD_PATH, O_RDWR);
	if (fd < 0)
		D_TRACE("failed to open drm!\n");

	return fd;
}

static void crypto_deinit_drm(int dma_node_fd)
{
	if (dma_node_fd >= 0)
		close(dma_node_fd);
}

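/*
 * Allocate one buffer through the Rockchip DRM GEM interface: create a GEM
 * object, export it as a dma-buf fd and map it into this process. CMA needs
 * the allocation to span at least two pages, so small requests are rounded up.
 */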
static struct mem_pool_node *crypto_alloc_node_drm(int dma_node_fd, uint32_t size)
{
	int ret = -1;
	size_t min_size;
	struct mem_pool_node *node = NULL;
	struct drm_rockchip_gem_create req = {
		.size  = size,
		.flags = 1,
	};
	struct drm_rockchip_gem_map_off map_req;
	struct drm_gem_close close_req;

	/* CMA must allocate at least two pages. */
	min_size = 2 * getpagesize();
	req.size = size < min_size ? min_size : size;

	node = malloc(sizeof(*node));
	if (!node)
		return NULL;

	memset(node, 0x00, sizeof(*node));
	memset(&map_req, 0x00, sizeof(map_req));
	node->mem.dma_fd = -1;

	ret = drmIoctl(dma_node_fd, DRM_IOCTL_ROCKCHIP_GEM_CREATE, &req);
	if (ret) {
		free(node);
		return NULL;
	}

	ret = drmPrimeHandleToFD(dma_node_fd, req.handle, 0, &node->mem.dma_fd);
	if (ret) {
		E_TRACE("failed to get dma_fd.\n");
		goto error;
	}

	map_req.handle = req.handle;
	ret = drmIoctl(dma_node_fd, DRM_IOCTL_ROCKCHIP_GEM_MAP_OFFSET, &map_req);
	if (ret) {
		E_TRACE("failed to ioctl gem map offset.\n");
		goto error;
	}

	D_TRACE("handle = %u, dma_fd = %d, alloc_size = %u, real_size = %"PRIu64"\n",
		req.handle, node->mem.dma_fd, size, req.size);

#ifdef __ANDROID__
	node->mem.vaddr = mmap64(0, req.size, PROT_READ | PROT_WRITE, MAP_SHARED,
				 dma_node_fd, map_req.offset);
#else
	node->mem.vaddr = mmap(0, req.size, PROT_READ | PROT_WRITE, MAP_SHARED,
			       dma_node_fd, map_req.offset);
#endif
	if (node->mem.vaddr == MAP_FAILED) {
		E_TRACE("failed to mmap buffer. offset = %"PRIu64", reason: %s\n",
			map_req.offset, strerror(errno));
		ret = -1;
		goto error;
	}

	node->handle   = req.handle;
	node->flags    = req.flags;
	node->mem.size = size;

	return node;
error:
	/* Release the exported dma-buf fd (if any), then drop the GEM handle. */
	if (node->mem.dma_fd >= 0)
		close(node->mem.dma_fd);

	memset(&close_req, 0x00, sizeof(close_req));
	close_req.handle = req.handle;
	drmIoctl(dma_node_fd, DRM_IOCTL_GEM_CLOSE, &close_req);

	free(node);

	return NULL;
}

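/*
 * Unmap and close a buffer allocated by crypto_alloc_node_drm(), drop its GEM
 * handle and free the tracking node.
 */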
static void crypto_free_node_drm(int dma_node_fd, struct mem_pool_node *node)
{
	struct drm_gem_close req;
	size_t min_size;

	min_size = 2 * getpagesize();

	if (!node || node->mem.size == 0)
		return;

	memset(&req, 0x00, sizeof(req));

	req.handle = node->handle;

	if (node->mem.vaddr)
		munmap(node->mem.vaddr, node->mem.size < min_size ? min_size : node->mem.size);

	if (node->mem.dma_fd >= 0)
		close(node->mem.dma_fd);

	drmIoctl(dma_node_fd, DRM_IOCTL_GEM_CLOSE, &req);

	free(node);
}

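/*
 * Open the Rockchip CMA dma-heap node; returns the fd on success or a
 * negative value if it cannot be opened.
 */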
static int crypto_init_dma_heap(void)
{
	int fd;

	fd = open(DMA_HEAP_PATH, O_RDWR);
	if (fd < 0)
		D_TRACE("failed to open cma heap!\n");

	return fd;
}

static void crypto_deinit_dma_heap(int dma_node_fd)
{
	if (dma_node_fd >= 0)
		close(dma_node_fd);
}

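/*
 * Allocate one buffer from the CMA dma-heap: DMA_HEAP_IOCTL_ALLOC hands back a
 * dma-buf fd which is then mapped into this process. As with the DRM path,
 * small requests are rounded up to two pages.
 */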
static struct mem_pool_node *crypto_alloc_node_dma_heap(int dma_node_fd, uint32_t size)
{
	int ret = -1;
	size_t min_size;
	struct mem_pool_node *node = NULL;
	struct dma_heap_allocation_data req = {
		.len      = size,
		.fd_flags = O_CLOEXEC | O_RDWR,
	};

	/* CMA must allocate at least two pages. */
	min_size = 2 * getpagesize();
	req.len = size < min_size ? min_size : size;

	node = malloc(sizeof(*node));
	if (!node)
		return NULL;

	memset(node, 0x00, sizeof(*node));

	req.fd = 0;
	ret = ioctl(dma_node_fd, DMA_HEAP_IOCTL_ALLOC, &req);
	if (ret < 0) {
		E_TRACE("DMA_HEAP_IOCTL_ALLOC failed\n");
		free(node);
		return NULL;
	}

	node->mem.dma_fd = req.fd;

	D_TRACE("dma_fd = %d, alloc_size = %u, real_size = %"PRIu64"\n",
		node->mem.dma_fd, size, req.len);

#ifdef __ANDROID__
	node->mem.vaddr = mmap64(0, req.len, PROT_READ | PROT_WRITE, MAP_SHARED, req.fd, 0);
#else
	node->mem.vaddr = mmap(0, req.len, PROT_READ | PROT_WRITE, MAP_SHARED, req.fd, 0);
#endif
	if (node->mem.vaddr == MAP_FAILED) {
		E_TRACE("failed to mmap buffer. fd = %u, reason: %s\n",
			req.fd, strerror(errno));
		ret = -1;
		goto error;
	}

	node->flags    = req.fd_flags;
	node->mem.size = size;

	return node;
error:
	close(req.fd);
	free(node);

	return NULL;
}

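/*
 * Unmap and close a buffer allocated by crypto_alloc_node_dma_heap() and free
 * the tracking node.
 */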
static void crypto_free_node_dma_heap(int dma_node_fd, struct mem_pool_node *node)
{
	size_t min_size;

	min_size = 2 * getpagesize();

	if (!node || node->mem.size == 0)
		return;

	if (node->mem.vaddr)
		munmap(node->mem.vaddr, node->mem.size < min_size ? min_size : node->mem.size);

	if (node->mem.dma_fd >= 0)
		close(node->mem.dma_fd);

	free(node);
}

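/*
 * Backends in order of preference: the CMA dma-heap is tried first and the
 * legacy DRM GEM path is used as a fallback.
 */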
struct mem_ops rk_mem_ops_tbl[] = {
	{
		.init       = crypto_init_dma_heap,
		.deinit     = crypto_deinit_dma_heap,
		.alloc_node = crypto_alloc_node_dma_heap,
		.free_node  = crypto_free_node_dma_heap,
	},
	{
		.init       = crypto_init_drm,
		.deinit     = crypto_deinit_drm,
		.alloc_node = crypto_alloc_node_drm,
		.free_node  = crypto_free_node_drm,
	},
};

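/*
 * Initialize the shared memory pool. The first backend in rk_mem_ops_tbl that
 * opens successfully is selected; further calls only bump the reference count.
 *
 * Typical call sequence (illustrative sketch only; "data" and "len" are
 * placeholders):
 *
 *	rk_crypto_mem *buf;
 *
 *	if (rk_crypto_mem_init() != 0)
 *		return -1;
 *
 *	buf = rk_crypto_mem_alloc(len);
 *	if (buf) {
 *		memcpy(buf->vaddr, data, len);	// CPU access through the mapping
 *		// hand buf->dma_fd to the crypto driver, then release the buffer
 *		rk_crypto_mem_free(buf);
 *	}
 *
 *	rk_crypto_mem_deinit();
 */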
int rk_crypto_mem_init(void)
{
	int ret = -1;
	uint32_t i;

	pthread_mutex_lock(&dma_mutex);

	/* Already initialized: just take another reference. */
	if (mem_init_cnt > 0) {
		mem_init_cnt++;
		ret = 0;
		goto exit;
	}

	INIT_LIST_HEAD(&mem_pool_list);

	for (i = 0; i < ARRAY_SIZE(rk_mem_ops_tbl); i++) {
		dma_node_fd = rk_mem_ops_tbl[i].init();
		if (dma_node_fd >= 0)
			break;
	}

	if (dma_node_fd < 0) {
		ret = -1;
		goto exit;
	}

	cur_mem_pos = &rk_mem_ops_tbl[i];

	mem_init_cnt++;

	ret = 0;
exit:
	pthread_mutex_unlock(&dma_mutex);

	return ret;
}

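/*
 * Drop one reference to the memory pool. When the last reference is gone,
 * every buffer still in the pool is released and the backend node is closed.
 */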
void rk_crypto_mem_deinit(void)
{
	struct mem_pool_node *node;
	struct list_head *pos = NULL, *n = NULL;

	pthread_mutex_lock(&dma_mutex);

	mem_init_cnt--;
	if (mem_init_cnt > 0)
		goto exit;

	if (IS_DMA_INVALID())
		goto exit;

	/* Free every node still tracked in the pool list. */
	list_for_each_safe(pos, n, &mem_pool_list) {
		node = list_entry(pos, struct mem_pool_node, list);
		list_del(pos);

		if (cur_mem_pos)
			cur_mem_pos->free_node(dma_node_fd, node);
	}

	if (cur_mem_pos)
		cur_mem_pos->deinit(dma_node_fd);

	dma_node_fd = -1;
	cur_mem_pos = NULL;
exit:
	pthread_mutex_unlock(&dma_mutex);
}

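/*
 * Allocate a DMA-able buffer of the requested size from the current backend
 * and track it in the pool list. Returns NULL if the pool is not initialized
 * or the allocation fails.
 */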
rk_crypto_mem *rk_crypto_mem_alloc(size_t size)
{
	struct mem_pool_node *node;

	pthread_mutex_lock(&dma_mutex);

	if (IS_DMA_INVALID() || !cur_mem_pos)
		goto error;

	node = cur_mem_pos->alloc_node(dma_node_fd, size);
	if (!node)
		goto error;

	list_add_tail(&node->list, &mem_pool_list);

	pthread_mutex_unlock(&dma_mutex);

	return &node->mem;
error:
	pthread_mutex_unlock(&dma_mutex);

	return NULL;
}

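/*
 * Return a buffer obtained from rk_crypto_mem_alloc() to the pool. Pointers
 * that do not belong to the pool are silently ignored.
 */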
void rk_crypto_mem_free(rk_crypto_mem *memory)
{
	struct mem_pool_node *node;
	struct list_head *pos = NULL, *n = NULL;

	pthread_mutex_lock(&dma_mutex);

	if (IS_DMA_INVALID() || !cur_mem_pos)
		goto exit;

	if (!memory)
		goto exit;

	list_for_each_safe(pos, n, &mem_pool_list) {
		node = list_entry(pos, struct mem_pool_node, list);

		if (&node->mem == memory) {
			list_del(pos);
			cur_mem_pos->free_node(dma_node_fd, node);
			goto exit;
		}
	}

exit:
	pthread_mutex_unlock(&dma_mutex);
}