/*
 * Copyright (c) 2022 Rockchip Electronics Co. Ltd.
 */
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <xf86drm.h>
#include <rockchip_drm.h>
#include <fcntl.h>
#include <errno.h>
#include <pthread.h>

#include "rkcrypto_mem.h"
#include "rkcrypto_trace.h"
#include "rk_list.h"
#include "dma-heap.h"

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

#define DRM_MODULE_NAME "rockchip"
#define DRM_CARD_PATH "/dev/dri/card0"
#define DMA_HEAP_PATH "/dev/rk_dma_heap/rk-dma-heap-cma"

#define IS_DMA_INVALID()	(dma_node_fd < 0)

struct mem_pool_node {
	rk_crypto_mem		mem;
	uint32_t		handle;
	uint32_t		flags;
	struct list_head	list;
};

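/*
 * Allocator backend operations.  Two implementations follow: one backed by
 * the CMA dma-heap (DMA_HEAP_IOCTL_ALLOC) and one backed by Rockchip DRM GEM
 * objects exported as dma-buf fds.
 */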
struct mem_ops {
	int (*init)(void);
	void (*deinit)(int dma_node_fd);
	struct mem_pool_node *(*alloc_node)(int dma_node_fd, uint32_t size);
	void (*free_node)(int dma_node_fd, struct mem_pool_node *node);
};

static int dma_node_fd = -1;
static struct list_head mem_pool_list;
static pthread_mutex_t dma_mutex = PTHREAD_MUTEX_INITIALIZER;
static int mem_init_cnt;
static struct mem_ops *cur_mem_pos;

static int crypto_init_drm(void)
{
	int fd;

	fd = open(DRM_CARD_PATH, O_RDWR);
	if (fd < 0)
		D_TRACE("failed to open drm!\n");

	return fd;
}

static void crypto_deinit_drm(int dma_node_fd)
{
	if (dma_node_fd >= 0)
		close(dma_node_fd);
}

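/*
 * DRM backend allocation: create a GEM object, export it as a dma-buf fd via
 * PRIME, query its fake mmap offset, then map it for CPU access.
 */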
static struct mem_pool_node *crypto_alloc_node_drm(int dma_node_fd, uint32_t size)
{
	int ret = -1;
	size_t min_size;
	struct mem_pool_node *node = NULL;
	struct drm_gem_close close_req;
	struct drm_rockchip_gem_create req = {
		.size = size,
		.flags = 1,
	};
	struct drm_rockchip_gem_map_off map_req;

	/* cma must alloc at least two pages */
	min_size = 2 * getpagesize();
	req.size = size < min_size ? min_size : size;

	node = malloc(sizeof(*node));
	if (!node)
		return NULL;

	memset(node, 0x00, sizeof(*node));
	memset(&map_req, 0x00, sizeof(map_req));
	node->mem.dma_fd = -1;

	ret = drmIoctl(dma_node_fd, DRM_IOCTL_ROCKCHIP_GEM_CREATE, &req);
	if (ret) {
		free(node);
		return NULL;
	}

	ret = drmPrimeHandleToFD(dma_node_fd, req.handle, 0, &node->mem.dma_fd);
	if (ret) {
		E_TRACE("failed to export GEM handle as a dma-buf fd.\n");
		goto error;
	}

	map_req.handle = req.handle;
	ret = drmIoctl(dma_node_fd, DRM_IOCTL_ROCKCHIP_GEM_MAP_OFFSET, &map_req);
	if (ret) {
		E_TRACE("failed to ioctl gem map offset.\n");
		goto error;
	}

	D_TRACE("handle = %u, dma_fd = %d, alloc_size = %u, real_size = %"PRIu64"\n",
		req.handle, node->mem.dma_fd, size, req.size);

#ifdef __ANDROID__
	node->mem.vaddr = mmap64(0, req.size, PROT_READ | PROT_WRITE, MAP_SHARED,
				 dma_node_fd, map_req.offset);
#else
	node->mem.vaddr = mmap(0, req.size, PROT_READ | PROT_WRITE, MAP_SHARED,
			       dma_node_fd, map_req.offset);
#endif
	if (node->mem.vaddr == MAP_FAILED) {
		E_TRACE("failed to mmap buffer. offset = %"PRIu64", reason: %s\n",
			map_req.offset, strerror(errno));
		goto error;
	}

	node->handle   = req.handle;
	node->flags    = req.flags;
	node->mem.size = size;

	return node;
error:
	if (node->mem.dma_fd >= 0)
		close(node->mem.dma_fd);

	/* DRM_IOCTL_GEM_CLOSE expects a struct drm_gem_close, not the create request */
	memset(&close_req, 0x00, sizeof(close_req));
	close_req.handle = req.handle;
	drmIoctl(dma_node_fd, DRM_IOCTL_GEM_CLOSE, &close_req);

	free(node);

	return NULL;
}

static void crypto_free_node_drm(int dma_node_fd, struct mem_pool_node *node)
{
	struct drm_gem_close req;
	size_t min_size;

	if (!node || node->mem.size == 0)
		return;

	min_size = 2 * getpagesize();

	memset(&req, 0x00, sizeof(req));
	req.handle = node->handle;

	if (node->mem.vaddr)
		munmap(node->mem.vaddr, node->mem.size < min_size ? min_size : node->mem.size);

	if (node->mem.dma_fd >= 0)
		close(node->mem.dma_fd);

	drmIoctl(dma_node_fd, DRM_IOCTL_GEM_CLOSE, &req);

	free(node);
}

static int crypto_init_dma_heap(void)
{
	int fd;

	fd = open(DMA_HEAP_PATH, O_RDWR);
	if (fd < 0)
		D_TRACE("failed to open cma heap!\n");

	return fd;
}

static void crypto_deinit_dma_heap(int dma_node_fd)
{
	if (dma_node_fd >= 0)
		close(dma_node_fd);
}

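/*
 * dma-heap backend allocation: DMA_HEAP_IOCTL_ALLOC returns a dma-buf fd that
 * is mapped directly; the node owns both the fd and the mapping.
 */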
static struct mem_pool_node *crypto_alloc_node_dma_heap(int dma_node_fd, uint32_t size)
{
	int ret = -1;
	size_t min_size;
	struct mem_pool_node *node = NULL;
	struct dma_heap_allocation_data req = {
		.len = size,
		.fd_flags = O_CLOEXEC | O_RDWR,
	};

	/* cma must alloc at least two pages */
	min_size = 2 * getpagesize();
	req.len = size < min_size ? min_size : size;

	node = malloc(sizeof(*node));
	if (!node)
		return NULL;

	memset(node, 0x00, sizeof(*node));

	req.fd = 0;
	ret = ioctl(dma_node_fd, DMA_HEAP_IOCTL_ALLOC, &req);
	if (ret < 0) {
		E_TRACE("DMA_HEAP_IOCTL_ALLOC failed\n");
		free(node);
		return NULL;
	}

	node->mem.dma_fd = req.fd;

	D_TRACE("dma_fd = %d, alloc_size = %u, real_size = %"PRIu64"\n",
		node->mem.dma_fd, size, req.len);

#ifdef __ANDROID__
	node->mem.vaddr = mmap64(0, req.len, PROT_READ | PROT_WRITE, MAP_SHARED, req.fd, 0);
#else
	node->mem.vaddr = mmap(0, req.len, PROT_READ | PROT_WRITE, MAP_SHARED, req.fd, 0);
#endif
	if (node->mem.vaddr == MAP_FAILED) {
		E_TRACE("failed to mmap buffer. fd = %u, reason: %s\n",
			req.fd, strerror(errno));
		goto error;
	}

	node->flags    = req.fd_flags;
	node->mem.size = size;

	return node;
error:
	close(req.fd);
	free(node);

	return NULL;
}

static void crypto_free_node_dma_heap(int dma_node_fd, struct mem_pool_node *node)
{
	size_t min_size;

	if (!node || node->mem.size == 0)
		return;

	min_size = 2 * getpagesize();

	if (node->mem.vaddr)
		munmap(node->mem.vaddr, node->mem.size < min_size ? min_size : node->mem.size);

	if (node->mem.dma_fd >= 0)
		close(node->mem.dma_fd);

	free(node);
}

/* Backends in order of preference: the CMA dma-heap first, DRM GEM as a fallback. */
static struct mem_ops rk_mem_ops_tbl[] = {
	{
		.init       = crypto_init_dma_heap,
		.deinit     = crypto_deinit_dma_heap,
		.alloc_node = crypto_alloc_node_dma_heap,
		.free_node  = crypto_free_node_dma_heap,
	},
	{
		.init       = crypto_init_drm,
		.deinit     = crypto_deinit_drm,
		.alloc_node = crypto_alloc_node_drm,
		.free_node  = crypto_free_node_drm,
	},
};

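/*
 * Reference-counted global init: try each backend in rk_mem_ops_tbl order and
 * remember the first one that opens successfully in cur_mem_pos.
 */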
int rk_crypto_mem_init(void)
{
	int ret = -1;
	uint32_t i;

	pthread_mutex_lock(&dma_mutex);

	/* check the refcount under the lock so concurrent callers cannot both initialize */
	if (mem_init_cnt > 0) {
		mem_init_cnt++;
		ret = 0;
		goto exit;
	}

	INIT_LIST_HEAD(&mem_pool_list);

	for (i = 0; i < ARRAY_SIZE(rk_mem_ops_tbl); i++) {
		dma_node_fd = rk_mem_ops_tbl[i].init();
		if (dma_node_fd >= 0)
			break;
	}

	if (dma_node_fd < 0) {
		ret = -1;
		goto exit;
	}

	cur_mem_pos = &rk_mem_ops_tbl[i];

	mem_init_cnt++;

	ret = 0;
exit:
	pthread_mutex_unlock(&dma_mutex);

	return ret;
}

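/* Drop one reference; on the last one, free every pooled buffer and close the backend. */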
void rk_crypto_mem_deinit(void)
{
	/* free list */
	struct mem_pool_node *node;
	struct list_head *pos = NULL, *n = NULL;

	pthread_mutex_lock(&dma_mutex);

	if (mem_init_cnt <= 0)
		goto exit;

	mem_init_cnt--;
	if (mem_init_cnt > 0)
		goto exit;

	if (IS_DMA_INVALID())
		goto exit;

	list_for_each_safe(pos, n, &mem_pool_list) {
		node = list_entry(pos, struct mem_pool_node, list);
		list_del(pos);

		if (cur_mem_pos)
			cur_mem_pos->free_node(dma_node_fd, node);
	}

	if (cur_mem_pos)
		cur_mem_pos->deinit(dma_node_fd);

	/* reset the globals so a later rk_crypto_mem_init() starts clean */
	dma_node_fd = -1;
	cur_mem_pos = NULL;
exit:
	pthread_mutex_unlock(&dma_mutex);
}

rk_crypto_mem *rk_crypto_mem_alloc(size_t size)
{
	struct mem_pool_node *node;

	pthread_mutex_lock(&dma_mutex);

	if (IS_DMA_INVALID() || !cur_mem_pos)
		goto error;

	/* the backend allocators take a uint32_t size; reject anything larger */
	if (size > (size_t)UINT32_MAX)
		goto error;

	node = cur_mem_pos->alloc_node(dma_node_fd, size);
	if (!node)
		goto error;

	list_add_tail(&node->list, &mem_pool_list);

	pthread_mutex_unlock(&dma_mutex);

	return &node->mem;
error:
	pthread_mutex_unlock(&dma_mutex);

	return NULL;
}

void rk_crypto_mem_free(rk_crypto_mem *memory)
{
	struct mem_pool_node *node;
	struct list_head *pos = NULL, *n = NULL;

	pthread_mutex_lock(&dma_mutex);

	if (IS_DMA_INVALID() || !cur_mem_pos)
		goto exit;

	if (!memory)
		goto exit;

	list_for_each_safe(pos, n, &mem_pool_list) {
		node = list_entry(pos, struct mem_pool_node, list);

		if (&node->mem == memory) {
			list_del(pos);
			cur_mem_pos->free_node(dma_node_fd, node);
			goto exit;
		}
	}

exit:
	pthread_mutex_unlock(&dma_mutex);
}
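
/*
 * Typical usage (a minimal sketch; the vaddr/dma_fd/size fields match how
 * rk_crypto_mem is used above, see rkcrypto_mem.h for the definition):
 *
 *	if (rk_crypto_mem_init() == 0) {
 *		rk_crypto_mem *buf = rk_crypto_mem_alloc(4096);
 *
 *		if (buf) {
 *			memset(buf->vaddr, 0, buf->size);	// CPU access
 *			// hand buf->dma_fd to the crypto driver for DMA access
 *			rk_crypto_mem_free(buf);
 *		}
 *
 *		rk_crypto_mem_deinit();
 *	}
 */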