xref: /OK3568_Linux_fs/kernel/drivers/gpu/arm/mali400/mali/linux/mali_memory_manager.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /*
2  * Copyright (C) 2013-2017 ARM Limited. All rights reserved.
3  *
4  * This program is free software and is provided to you under the terms of the GNU General Public License version 2
5  * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
6  *
7  * A copy of the licence is included with the program, and can also be obtained from Free Software
8  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
9  */
10 
11 #include <linux/list.h>
12 #include <linux/mm.h>
13 #include <linux/mm_types.h>
14 #include <linux/fs.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/slab.h>
17 #include <linux/version.h>
18 #include <linux/sched.h>
19 
20 #include <linux/platform_device.h>
21 #if defined(CONFIG_DMA_SHARED_BUFFER)
22 #include <linux/dma-buf.h>
23 #endif
24 #include <linux/idr.h>
25 
26 #include "mali_osk.h"
27 #include "mali_osk_mali.h"
28 #include "mali_kernel_linux.h"
29 #include "mali_scheduler.h"
30 #include "mali_memory.h"
31 #include "mali_memory_os_alloc.h"
32 #if defined(CONFIG_DMA_SHARED_BUFFER)
33 #include "mali_memory_dma_buf.h"
34 #include "mali_memory_secure.h"
35 #endif
36 #if defined(CONFIG_MALI400_UMP)
37 #include "mali_memory_ump.h"
38 #endif
39 #include "mali_memory_manager.h"
40 #include "mali_memory_virtual.h"
41 #include "mali_memory_util.h"
42 #include "mali_memory_external.h"
43 #include "mali_memory_cow.h"
44 #include "mali_memory_block_alloc.h"
45 #include "mali_ukk.h"
46 #include "mali_memory_swap_alloc.h"
47 
48 /*
49 * New memory system interface
50 */
51 
/* init idr for backend memory */
53 struct idr mali_backend_idr;
54 struct mutex mali_idr_mutex;
55 
56 /* init allocation manager */
mali_memory_manager_init(struct mali_allocation_manager * mgr)57 int mali_memory_manager_init(struct mali_allocation_manager *mgr)
58 {
59 	/* init Locks */
60 	rwlock_init(&mgr->vm_lock);
61 	mutex_init(&mgr->list_mutex);
62 
63 	/* init link */
64 	INIT_LIST_HEAD(&mgr->head);
65 
66 	/* init RB tree */
67 	mgr->allocation_mgr_rb = RB_ROOT;
68 	mgr->mali_allocation_num = 0;
69 	return 0;
70 }
71 
/* Deinit allocation manager.
 * Debug-only sanity checks that the session released everything:
 * both the RB lookup tree and the allocation list must be empty.
 */
void mali_memory_manager_uninit(struct mali_allocation_manager *mgr)
{
	/* Empty-tree check: rb_last() of an empty tree is NULL, matching a
	 * NULL root. NOTE(review): the comparison also holds for a tree whose
	 * root is its own rightmost node — confirm this corner is acceptable. */
	MALI_DEBUG_ASSERT(((void *)(mgr->allocation_mgr_rb.rb_node) == (void *)rb_last(&mgr->allocation_mgr_rb)));
	/* check allocation List */
	MALI_DEBUG_ASSERT(list_empty(&mgr->head));
}
82 
83 /* Prepare memory descriptor */
mali_mem_allocation_struct_create(struct mali_session_data * session)84 static mali_mem_allocation *mali_mem_allocation_struct_create(struct mali_session_data *session)
85 {
86 	mali_mem_allocation *mali_allocation;
87 
88 	/* Allocate memory */
89 	mali_allocation = (mali_mem_allocation *)kzalloc(sizeof(mali_mem_allocation), GFP_KERNEL);
90 	if (NULL == mali_allocation) {
91 		MALI_DEBUG_PRINT(1, ("mali_mem_allocation_struct_create: descriptor was NULL\n"));
92 		return NULL;
93 	}
94 
95 	MALI_DEBUG_CODE(mali_allocation->magic = MALI_MEM_ALLOCATION_VALID_MAGIC);
96 
97 	/* do init */
98 	mali_allocation->flags = 0;
99 	mali_allocation->session = session;
100 
101 	INIT_LIST_HEAD(&mali_allocation->list);
102 	_mali_osk_atomic_init(&mali_allocation->mem_alloc_refcount, 1);
103 
104 	/**
105 	*add to session list
106 	*/
107 	mutex_lock(&session->allocation_mgr.list_mutex);
108 	list_add_tail(&mali_allocation->list, &session->allocation_mgr.head);
109 	session->allocation_mgr.mali_allocation_num++;
110 	mutex_unlock(&session->allocation_mgr.list_mutex);
111 
112 	return mali_allocation;
113 }
114 
/* Unlink an allocation descriptor from its owning session's list and
 * free it. The descriptor and its session pointer must be valid. */
void  mali_mem_allocation_struct_destory(mali_mem_allocation *alloc)
{
	struct mali_allocation_manager *mgr;

	MALI_DEBUG_ASSERT_POINTER(alloc);
	MALI_DEBUG_ASSERT_POINTER(alloc->session);

	mgr = &alloc->session->allocation_mgr;

	/* Remove from the session list under the list lock. */
	mutex_lock(&mgr->list_mutex);
	list_del(&alloc->list);
	mgr->mali_allocation_num--;
	mutex_unlock(&mgr->list_mutex);

	kfree(alloc);
}
126 
mali_mem_backend_struct_create(mali_mem_backend ** backend,u32 psize)127 int mali_mem_backend_struct_create(mali_mem_backend **backend, u32 psize)
128 {
129 	mali_mem_backend *mem_backend = NULL;
130 	s32 ret = -ENOSPC;
131 	s32 index = -1;
132 	*backend = (mali_mem_backend *)kzalloc(sizeof(mali_mem_backend), GFP_KERNEL);
133 	if (NULL == *backend) {
134 		MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_create: backend descriptor was NULL\n"));
135 		return -1;
136 	}
137 	mem_backend = *backend;
138 	mem_backend->size = psize;
139 	mutex_init(&mem_backend->mutex);
140 	INIT_LIST_HEAD(&mem_backend->list);
141 	mem_backend->using_count = 0;
142 
143 
144 	/* link backend with id */
145 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
146 again:
147 	if (!idr_pre_get(&mali_backend_idr, GFP_KERNEL)) {
148 		kfree(mem_backend);
149 		return -ENOMEM;
150 	}
151 	mutex_lock(&mali_idr_mutex);
152 	ret = idr_get_new_above(&mali_backend_idr, mem_backend, 1, &index);
153 	mutex_unlock(&mali_idr_mutex);
154 
155 	if (-ENOSPC == ret) {
156 		kfree(mem_backend);
157 		return -ENOSPC;
158 	}
159 	if (-EAGAIN == ret)
160 		goto again;
161 #else
162 	mutex_lock(&mali_idr_mutex);
163 	ret = idr_alloc(&mali_backend_idr, mem_backend, 1, MALI_S32_MAX, GFP_KERNEL);
164 	mutex_unlock(&mali_idr_mutex);
165 	index = ret;
166 	if (ret < 0) {
167 		MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_create: Can't allocate idr for backend! \n"));
168 		kfree(mem_backend);
169 		return -ENOSPC;
170 	}
171 #endif
172 	return index;
173 }
174 
175 
/* Remove a backend from the global idr and free it.
 * @backend_handle must be the idr index returned by
 * mali_mem_backend_struct_create(). *backend is cleared on return so the
 * caller cannot reuse the stale pointer. */
static void mali_mem_backend_struct_destory(mali_mem_backend **backend, s32 backend_handle)
{
	mali_mem_backend *mem_backend = *backend;

	/* idr updates are serialised by the global mali_idr_mutex */
	mutex_lock(&mali_idr_mutex);
	idr_remove(&mali_backend_idr, backend_handle);
	mutex_unlock(&mali_idr_mutex);
	kfree(mem_backend);
	*backend = NULL;
}
186 
/* Look up the memory backend covering a GPU virtual address.
 * Resolves the address to an allocation via the session's vma-offset tree,
 * then maps the allocation's backend_handle to the backend through the
 * global idr. Returns NULL when no allocation covers @mali_address; in
 * debug builds asserts that a found allocation always has a live backend. */
mali_mem_backend *mali_mem_backend_struct_search(struct mali_session_data *session, u32 mali_address)
{
	struct mali_vma_node *mali_vma_node = NULL;
	mali_mem_backend *mem_bkend = NULL;
	mali_mem_allocation *mali_alloc = NULL;
	MALI_DEBUG_ASSERT_POINTER(session);
	mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_address, 0);
	if (NULL == mali_vma_node)  {
		MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_search:vma node was NULL\n"));
		return NULL;
	}
	/* the vma node is embedded in the allocation, so recover the container */
	mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
	/* Get backend memory & Map on CPU */
	mutex_lock(&mali_idr_mutex);
	mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
	mutex_unlock(&mali_idr_mutex);
	MALI_DEBUG_ASSERT(NULL != mem_bkend);
	return mem_bkend;
}
206 
/* Grow or shrink a resizable MALI_MEM_OS backend to @physical_size bytes
 * (must be page aligned). Updates the OS page list, the GPU (mali) mapping
 * and — when one exists — the CPU mapping, then adjusts the session's
 * allocated-page accounting. Runs with the backend mutex held throughout;
 * the session memory_lock is taken only around GPU map updates.
 * Returns _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_NOMEM /
 * _MALI_OSK_ERR_FAULT on failure with the page lists restored. */
static _mali_osk_errcode_t mali_mem_resize(struct mali_session_data *session, mali_mem_backend *mem_backend, u32 physical_size)
{
	_mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
	int retval = 0;
	mali_mem_allocation *mali_allocation = NULL;
	mali_mem_os_mem tmp_os_mem;	/* staging list for pages entering/leaving the backend */
	s32 change_page_count;

	MALI_DEBUG_ASSERT_POINTER(session);
	MALI_DEBUG_ASSERT_POINTER(mem_backend);
	MALI_DEBUG_PRINT(4, (" mali_mem_resize_memory called! \n"));
	MALI_DEBUG_ASSERT(0 == physical_size %  MALI_MMU_PAGE_SIZE);

	mali_allocation = mem_backend->mali_allocation;
	MALI_DEBUG_ASSERT_POINTER(mali_allocation);

	/* only MALI_MEM_OS allocations created with the resize flag may resize */
	MALI_DEBUG_ASSERT(MALI_MEM_FLAG_CAN_RESIZE & mali_allocation->flags);
	MALI_DEBUG_ASSERT(MALI_MEM_OS == mali_allocation->type);

	mutex_lock(&mem_backend->mutex);

	/* Do resize*/
	if (physical_size > mem_backend->size) {
		/* --- grow path --- */
		u32 add_size = physical_size - mem_backend->size;

		MALI_DEBUG_ASSERT(0 == add_size %  MALI_MMU_PAGE_SIZE);

		/* Allocate new pages from os mem */
		retval = mali_mem_os_alloc_pages(&tmp_os_mem, add_size);

		if (retval) {
			if (-ENOMEM == retval) {
				ret = _MALI_OSK_ERR_NOMEM;
			} else {
				ret = _MALI_OSK_ERR_FAULT;
			}
			MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory allocation failed !\n"));
			goto failed_alloc_memory;
		}

		MALI_DEBUG_ASSERT(tmp_os_mem.count == add_size / MALI_MMU_PAGE_SIZE);

		/* Move the new pages from the staging list into the backend */
		ret = mali_mem_os_resize_pages(&tmp_os_mem, &mem_backend->os_mem, 0, tmp_os_mem.count);

		if (ret) {
			MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory	resizing failed !\n"));
			goto failed_resize_pages;
		}

		/* Extend the CPU mapping, but only if the allocation is CPU mapped */
		if (NULL != mali_allocation->cpu_mapping.vma) {
			ret = mali_mem_os_resize_cpu_map_locked(mem_backend, mali_allocation->cpu_mapping.vma, mali_allocation->cpu_mapping.vma->vm_start  + mem_backend->size, add_size);
			if (unlikely(ret != _MALI_OSK_ERR_OK)) {
				MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: cpu mapping failed !\n"));
				goto  failed_cpu_map;
			}
		}

		/* Extend the GPU mapping under the session memory lock */
		_mali_osk_mutex_wait(session->memory_lock);
		ret = mali_mem_mali_map_resize(mali_allocation, physical_size);

		if (ret) {
			MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_resize: mali map resize fail !\n"));
			goto failed_gpu_map;
		}

		/* Map the added pages starting at the old end (psize / PAGE_SIZE offset) */
		ret = mali_mem_os_mali_map(&mem_backend->os_mem, session, mali_allocation->mali_vma_node.vm_node.start,
					   mali_allocation->psize / MALI_MMU_PAGE_SIZE, add_size / MALI_MMU_PAGE_SIZE, mali_allocation->mali_mapping.properties);
		if (ret) {
			MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: mali mapping failed !\n"));
			goto failed_gpu_map;
		}

		_mali_osk_mutex_signal(session->memory_lock);
	} else {
		/* --- shrink path --- */
		u32 dec_size, page_count;
		u32 vaddr = 0;
		INIT_LIST_HEAD(&tmp_os_mem.pages);
		tmp_os_mem.count = 0;

		dec_size = mem_backend->size - physical_size;
		MALI_DEBUG_ASSERT(0 == dec_size %  MALI_MMU_PAGE_SIZE);

		page_count = dec_size / MALI_MMU_PAGE_SIZE;
		/* GPU address of the first page to unmap */
		vaddr = mali_allocation->mali_vma_node.vm_node.start + physical_size;

		/* Move the trailing pages out of the backend into the staging list */
		ret = mali_mem_os_resize_pages(&mem_backend->os_mem, &tmp_os_mem, physical_size / MALI_MMU_PAGE_SIZE, page_count);

		if (ret) {
			MALI_DEBUG_PRINT(4, ("_mali_ukk_mem_resize: mali map resize failed!\n"));
			goto failed_resize_pages;
		}

		/* Unmap the shrunk range from the GPU */
		_mali_osk_mutex_wait(session->memory_lock);
		mali_mem_mali_map_free(session, dec_size, vaddr, mali_allocation->flags);
		_mali_osk_mutex_signal(session->memory_lock);

		/* Zap the corresponding CPU PTEs if the allocation is CPU mapped */
		if (0 != mali_allocation->cpu_mapping.addr) {
			MALI_DEBUG_ASSERT(NULL != mali_allocation->cpu_mapping.vma);
			zap_vma_ptes(mali_allocation->cpu_mapping.vma, mali_allocation->cpu_mapping.vma->vm_start + physical_size, dec_size);
		}

		/* Free those extra pages */
		mali_mem_os_free(&tmp_os_mem.pages, tmp_os_mem.count, MALI_FALSE);
	}

	/* Commit the new size and adjust per-session page accounting */
	change_page_count = (s32)(physical_size - mem_backend->size) / MALI_MMU_PAGE_SIZE;
	mali_allocation->psize = physical_size;
	mem_backend->size = physical_size;
	mutex_unlock(&mem_backend->mutex);

	if (change_page_count > 0) {
		atomic_add(change_page_count, &session->mali_mem_allocated_pages);
		/* track the session's high-water mark */
		if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
			session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
		}

	} else {
		atomic_sub((s32)(-change_page_count), &session->mali_mem_allocated_pages);
	}

	return _MALI_OSK_ERR_OK;

/* Error unwinding: mem_backend->size is still the OLD size here, so the
 * grow/shrink direction can be re-derived from physical_size below. */
failed_gpu_map:
	_mali_osk_mutex_signal(session->memory_lock);
failed_cpu_map:
	if (physical_size > mem_backend->size) {
		/* grow failed: pull the pages just added back out of the backend */
		mali_mem_os_resize_pages(&mem_backend->os_mem, &tmp_os_mem, mem_backend->size / MALI_MMU_PAGE_SIZE,
					 (physical_size - mem_backend->size) / MALI_MMU_PAGE_SIZE);
	} else {
		/* shrink failed: put the staged pages back into the backend */
		mali_mem_os_resize_pages(&tmp_os_mem, &mem_backend->os_mem, 0, tmp_os_mem.count);
	}
failed_resize_pages:
	if (0 != tmp_os_mem.count)
		mali_mem_os_free(&tmp_os_mem.pages, tmp_os_mem.count, MALI_FALSE);
failed_alloc_memory:

	mutex_unlock(&mem_backend->mutex);
	return ret;
}
353 
354 
355 /* Set GPU MMU properties */
_mali_memory_gpu_map_property_set(u32 * properties,u32 flags)356 static void _mali_memory_gpu_map_property_set(u32 *properties, u32 flags)
357 {
358 	if (_MALI_MEMORY_GPU_READ_ALLOCATE & flags) {
359 		*properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
360 	} else {
361 		*properties = MALI_MMU_FLAGS_DEFAULT;
362 	}
363 }
364 
/* Grow the backend mapped at @mali_addr by @add_size bytes (page aligned).
 * Fails (_MALI_OSK_ERR_FAULT) when no backend covers the address or when the
 * new physical size would exceed the allocation's reserved virtual range.
 * NOTE(review): add_size of 0 only trips a debug assert; in release builds
 * it would fall into mali_mem_resize()'s shrink path — confirm callers
 * never pass 0. */
_mali_osk_errcode_t mali_mem_add_mem_size(struct mali_session_data *session, u32 mali_addr, u32 add_size)
{
	mali_mem_backend *mem_backend = NULL;
	_mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
	mali_mem_allocation *mali_allocation = NULL;
	u32 new_physical_size;
	MALI_DEBUG_ASSERT_POINTER(session);
	MALI_DEBUG_ASSERT(0 == add_size %  MALI_MMU_PAGE_SIZE);

	/* Get the memory backend that need to be resize. */
	mem_backend = mali_mem_backend_struct_search(session, mali_addr);

	if (NULL == mem_backend)  {
		MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory backend = NULL!\n"));
		return ret;
	}

	mali_allocation = mem_backend->mali_allocation;

	MALI_DEBUG_ASSERT_POINTER(mali_allocation);

	new_physical_size = add_size + mem_backend->size;

	/* the grown backing must still fit inside the reserved virtual range */
	if (new_physical_size > (mali_allocation->mali_vma_node.vm_node.size))
		return ret;

	MALI_DEBUG_ASSERT(new_physical_size != mem_backend->size);

	ret = mali_mem_resize(session, mem_backend, new_physical_size);

	return ret;
}
397 
/**
 * _mali_ukk_mem_allocate - allocate mali memory for a user request.
 *
 * Creates an allocation descriptor and a backend for the requested GPU
 * virtual range, picks a backend type from args->flags (swap / resizable
 * OS / secure / block / plain OS), allocates the backing pages, maps them
 * on the GPU unless _MALI_MEMORY_ALLOCATE_NO_BIND_GPU is set, and updates
 * the session's page accounting.
 * Returns _MALI_OSK_ERR_OK on success; on failure everything created so
 * far is unwound via the goto chain at the bottom.
 */
_mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args)
{
	struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
	mali_mem_backend *mem_backend = NULL;
	_mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
	int retval = 0;
	mali_mem_allocation *mali_allocation = NULL;
	struct mali_vma_node *mali_vma_node = NULL;

	MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_allocate, vaddr=0x%x, size =0x%x! \n", args->gpu_vaddr, args->psize));

	/* Reject the request if the GPU virtual address is already in use */
	mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->gpu_vaddr, 0);

	if (unlikely(mali_vma_node)) {
		MALI_DEBUG_PRINT_ERROR(("The mali virtual address has already been used ! \n"));
		return _MALI_OSK_ERR_FAULT;
	}
	/**
	*create mali memory allocation
	*/

	mali_allocation = mali_mem_allocation_struct_create(session);

	if (mali_allocation == NULL) {
		MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_allocate: Failed to create allocation struct! \n"));
		return _MALI_OSK_ERR_NOMEM;
	}
	mali_allocation->psize = args->psize;
	mali_allocation->vsize = args->vsize;

	/* Choose backend type from the request flags, in priority order:
	 * swap, resizable OS, secure, dedicated block memory, plain OS.
	 */
	if (args->flags & _MALI_MEMORY_ALLOCATE_SWAPPABLE) {
		mali_allocation->type = MALI_MEM_SWAP;
	} else if (args->flags & _MALI_MEMORY_ALLOCATE_RESIZEABLE) {
		mali_allocation->type = MALI_MEM_OS;
		mali_allocation->flags |= MALI_MEM_FLAG_CAN_RESIZE;
	} else if (args->flags & _MALI_MEMORY_ALLOCATE_SECURE) {
		mali_allocation->type = MALI_MEM_SECURE;
	} else if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
		mali_allocation->type = MALI_MEM_BLOCK;
	} else {
		mali_allocation->type = MALI_MEM_OS;
	}

	/**
	*add allocation node to RB tree for index
	*/
	mali_allocation->mali_vma_node.vm_node.start = args->gpu_vaddr;
	mali_allocation->mali_vma_node.vm_node.size = args->vsize;

	mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);

	/* backend_handle is the idr index of the new backend, or negative on error */
	mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, args->psize);
	if (mali_allocation->backend_handle < 0) {
		ret = _MALI_OSK_ERR_NOMEM;
		MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
		goto failed_alloc_backend;
	}


	mem_backend->mali_allocation = mali_allocation;
	mem_backend->type = mali_allocation->type;

	mali_allocation->mali_mapping.addr = args->gpu_vaddr;

	/* set gpu mmu propery */
	_mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);
	/* Reserve GPU page tables for the range unless binding is deferred/skipped */
	if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
		_mali_osk_mutex_wait(session->memory_lock);

		ret = mali_mem_mali_map_prepare(mali_allocation);
		if (0 != ret) {
			_mali_osk_mutex_signal(session->memory_lock);
			goto failed_prepare_map;
		}
		_mali_osk_mutex_signal(session->memory_lock);
	}

	/* Zero-sized allocation: nothing to back or map */
	if (mali_allocation->psize == 0) {
		mem_backend->os_mem.count = 0;
		INIT_LIST_HEAD(&mem_backend->os_mem.pages);
		goto done;
	}

	if (args->flags & _MALI_MEMORY_ALLOCATE_DEFER_BIND) {
		mali_allocation->flags |= _MALI_MEMORY_ALLOCATE_DEFER_BIND;
		mem_backend->flags |= MALI_MEM_BACKEND_FLAG_NOT_BINDED;
		/* init for defer bind backend*/
		mem_backend->os_mem.count = 0;
		INIT_LIST_HEAD(&mem_backend->os_mem.pages);

		goto done;
	}

	if (likely(mali_allocation->psize > 0)) {

		if (MALI_MEM_SECURE == mem_backend->type) {
#if defined(CONFIG_DMA_SHARED_BUFFER)
			ret = mali_mem_secure_attach_dma_buf(&mem_backend->secure_mem, mem_backend->size, args->secure_shared_fd);
			if (_MALI_OSK_ERR_OK != ret) {
				MALI_DEBUG_PRINT(1, ("Failed to attach dma buf for secure memory! \n"));
				goto failed_alloc_pages;
			}
#else
			ret = _MALI_OSK_ERR_UNSUPPORTED;
			MALI_DEBUG_PRINT(1, ("DMA not supported for mali secure memory! \n"));
			goto failed_alloc_pages;
#endif
		} else {

			/**
			*allocate physical memory
			*/
			if (mem_backend->type == MALI_MEM_OS) {
				retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
			} else if (mem_backend->type == MALI_MEM_BLOCK) {
				/* try to allocated from BLOCK memory first, then try OS memory if failed.*/
				if (mali_mem_block_alloc(&mem_backend->block_mem, mem_backend->size)) {
					retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
					mem_backend->type = MALI_MEM_OS;
					mali_allocation->type = MALI_MEM_OS;
				}
			} else if (MALI_MEM_SWAP == mem_backend->type) {
				retval = mali_mem_swap_alloc_pages(&mem_backend->swap_mem, mali_allocation->mali_vma_node.vm_node.size, &mem_backend->start_idx);
			}  else {
				/* ONLY support mem_os type */
				MALI_DEBUG_ASSERT(0);
			}

			if (retval) {
				ret = _MALI_OSK_ERR_NOMEM;
				MALI_DEBUG_PRINT(1, (" can't allocate enough pages! \n"));
				goto failed_alloc_pages;
			}
		}
	}

	/**
	*map to GPU side
	*/
	if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
		_mali_osk_mutex_wait(session->memory_lock);
		/* Map on Mali */

		if (mem_backend->type == MALI_MEM_OS) {
			ret = mali_mem_os_mali_map(&mem_backend->os_mem, session, args->gpu_vaddr, 0,
						   mem_backend->size / MALI_MMU_PAGE_SIZE, mali_allocation->mali_mapping.properties);

		} else if (mem_backend->type == MALI_MEM_BLOCK) {
			mali_mem_block_mali_map(&mem_backend->block_mem, session, args->gpu_vaddr,
						mali_allocation->mali_mapping.properties);
		} else if (mem_backend->type == MALI_MEM_SWAP) {
			ret = mali_mem_swap_mali_map(&mem_backend->swap_mem, session, args->gpu_vaddr,
						     mali_allocation->mali_mapping.properties);
		} else if (mem_backend->type == MALI_MEM_SECURE) {
#if defined(CONFIG_DMA_SHARED_BUFFER)
			ret = mali_mem_secure_mali_map(&mem_backend->secure_mem, session, args->gpu_vaddr, mali_allocation->mali_mapping.properties);
#endif
		} else { /* unsupport type */
			MALI_DEBUG_ASSERT(0);
		}

		_mali_osk_mutex_signal(session->memory_lock);
	}
done:
	/* Account the allocated pages against the session.
	 * NOTE(review): only the swap type also updates mali_mem_array here —
	 * confirm other types are accounted there elsewhere. */
	if (MALI_MEM_OS == mem_backend->type) {
		atomic_add(mem_backend->os_mem.count, &session->mali_mem_allocated_pages);
	} else if (MALI_MEM_BLOCK == mem_backend->type) {
		atomic_add(mem_backend->block_mem.count, &session->mali_mem_allocated_pages);
	} else if (MALI_MEM_SECURE == mem_backend->type) {
		atomic_add(mem_backend->secure_mem.count, &session->mali_mem_allocated_pages);
	} else {
		MALI_DEBUG_ASSERT(MALI_MEM_SWAP == mem_backend->type);
		atomic_add(mem_backend->swap_mem.count, &session->mali_mem_allocated_pages);
		atomic_add(mem_backend->swap_mem.count, &session->mali_mem_array[mem_backend->type]);
	}

	if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
		session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
	}
	return _MALI_OSK_ERR_OK;

failed_alloc_pages:
	/* NOTE(review): this also runs when NO_BIND_GPU was set, in which case
	 * mali_mem_mali_map_prepare() never ran — confirm map_free is safe then. */
	mali_mem_mali_map_free(session, mali_allocation->psize, mali_allocation->mali_vma_node.vm_node.start, mali_allocation->flags);
failed_prepare_map:
	mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
failed_alloc_backend:
	mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
	mali_mem_allocation_struct_destory(mali_allocation);

	return ret;
}
600 
601 
_mali_ukk_mem_free(_mali_uk_free_mem_s * args)602 _mali_osk_errcode_t _mali_ukk_mem_free(_mali_uk_free_mem_s *args)
603 {
604 	struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
605 	u32 vaddr = args->gpu_vaddr;
606 	mali_mem_allocation *mali_alloc = NULL;
607 	struct mali_vma_node *mali_vma_node = NULL;
608 
609 	/* find mali allocation structure by vaddress*/
610 	mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, vaddr, 0);
611 	if (NULL == mali_vma_node) {
612 		MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_free: invalid addr: 0x%x\n", vaddr));
613 		return _MALI_OSK_ERR_INVALID_ARGS;
614 	}
615 	MALI_DEBUG_ASSERT(NULL != mali_vma_node);
616 	mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
617 
618 	if (mali_alloc)
619 		/* check ref_count */
620 		args->free_pages_nr = mali_allocation_unref(&mali_alloc);
621 
622 	return _MALI_OSK_ERR_OK;
623 }
624 
625 
/**
 * _mali_ukk_mem_bind -- bind external memory to a new GPU address.
 * Creates a new allocation and backend, then binds the external memory
 * described by args to it. Supported backend types:
 * _MALI_MEMORY_BIND_BACKEND_UMP (CONFIG_MALI400_UMP only)
 * _MALI_MEMORY_BIND_BACKEND_DMA_BUF (CONFIG_DMA_SHARED_BUFFER only)
 * _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY
 * CPU access is not supported yet. A zero-size request is rejected.
 * On any failure the backend and allocation are torn down before return.
 */
_mali_osk_errcode_t _mali_ukk_mem_bind(_mali_uk_bind_mem_s *args)
{
	struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
	mali_mem_backend *mem_backend = NULL;
	_mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
	mali_mem_allocation *mali_allocation = NULL;
	MALI_DEBUG_PRINT(5, (" _mali_ukk_mem_bind, vaddr=0x%x, size =0x%x! \n", args->vaddr, args->size));

	/**
	* allocate mali allocation.
	*/
	mali_allocation = mali_mem_allocation_struct_create(session);

	if (mali_allocation == NULL) {
		return _MALI_OSK_ERR_NOMEM;
	}
	mali_allocation->psize = args->size;
	mali_allocation->vsize = args->size;
	mali_allocation->mali_mapping.addr = args->vaddr;

	/* add allocation node to RB tree for index  */
	mali_allocation->mali_vma_node.vm_node.start = args->vaddr;
	mali_allocation->mali_vma_node.vm_node.size = args->size;
	mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);

	/* allocate backend; zero-size bind requests are rejected here */
	if (mali_allocation->psize > 0) {
		mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, mali_allocation->psize);
		if (mali_allocation->backend_handle < 0) {
			goto Failed_alloc_backend;
		}

	} else {
		goto Failed_alloc_backend;
	}

	mem_backend->size = mali_allocation->psize;
	mem_backend->mali_allocation = mali_allocation;

	/* dispatch on the requested external-memory backend type */
	switch (args->flags & _MALI_MEMORY_BIND_BACKEND_MASK) {
	case  _MALI_MEMORY_BIND_BACKEND_UMP:
#if defined(CONFIG_MALI400_UMP)
		mali_allocation->type = MALI_MEM_UMP;
		mem_backend->type = MALI_MEM_UMP;
		ret = mali_mem_bind_ump_buf(mali_allocation, mem_backend,
					    args->mem_union.bind_ump.secure_id, args->mem_union.bind_ump.flags);
		if (_MALI_OSK_ERR_OK != ret) {
			MALI_DEBUG_PRINT(1, ("Bind ump buf failed\n"));
			goto  Failed_bind_backend;
		}
#else
		MALI_DEBUG_PRINT(1, ("UMP not supported\n"));
		goto Failed_bind_backend;
#endif
		break;
	case  _MALI_MEMORY_BIND_BACKEND_DMA_BUF:
#if defined(CONFIG_DMA_SHARED_BUFFER)
		mali_allocation->type = MALI_MEM_DMA_BUF;
		mem_backend->type = MALI_MEM_DMA_BUF;
		ret = mali_mem_bind_dma_buf(mali_allocation, mem_backend,
					    args->mem_union.bind_dma_buf.mem_fd, args->mem_union.bind_dma_buf.flags);
		if (_MALI_OSK_ERR_OK != ret) {
			MALI_DEBUG_PRINT(1, ("Bind dma buf failed\n"));
			goto Failed_bind_backend;
		}
#else
		MALI_DEBUG_PRINT(1, ("DMA not supported\n"));
		goto Failed_bind_backend;
#endif
		break;
	case _MALI_MEMORY_BIND_BACKEND_MALI_MEMORY:
		/* not allowed */
		MALI_DEBUG_PRINT_ERROR(("Mali internal memory type not supported !\n"));
		goto Failed_bind_backend;
		break;

	case _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY:
		mali_allocation->type = MALI_MEM_EXTERNAL;
		mem_backend->type = MALI_MEM_EXTERNAL;
		ret = mali_mem_bind_ext_buf(mali_allocation, mem_backend, args->mem_union.bind_ext_memory.phys_addr,
					    args->mem_union.bind_ext_memory.flags);
		if (_MALI_OSK_ERR_OK != ret) {
			MALI_DEBUG_PRINT(1, ("Bind external buf failed\n"));
			goto Failed_bind_backend;
		}
		break;

	case _MALI_MEMORY_BIND_BACKEND_EXT_COW:
		/* not allowed */
		MALI_DEBUG_PRINT_ERROR(("External cow memory  type not supported !\n"));
		goto Failed_bind_backend;
		break;

	default:
		MALI_DEBUG_PRINT_ERROR(("Invalid memory type  not supported !\n"));
		goto Failed_bind_backend;
		break;
	}
	/* per-type page accounting for bound memory */
	MALI_DEBUG_ASSERT(0 == mem_backend->size % MALI_MMU_PAGE_SIZE);
	atomic_add(mem_backend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_backend->type]);
	return _MALI_OSK_ERR_OK;

Failed_bind_backend:
	mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);

Failed_alloc_backend:
	mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
	mali_mem_allocation_struct_destory(mali_allocation);

	MALI_DEBUG_PRINT(1, (" _mali_ukk_mem_bind, return ERROR! \n"));
	return ret;
}
747 
748 
749 /*
750 * Function _mali_ukk_mem_unbind -- unbind a external memory to a new GPU address
751 * This function unbind the backend memory and free the allocation
752 * no ref_count for this type of memory
753 */
_mali_ukk_mem_unbind(_mali_uk_unbind_mem_s * args)754 _mali_osk_errcode_t _mali_ukk_mem_unbind(_mali_uk_unbind_mem_s *args)
755 {
756 	/**/
757 	struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
758 	mali_mem_allocation *mali_allocation = NULL;
759 	struct mali_vma_node *mali_vma_node = NULL;
760 	u32 mali_addr = args->vaddr;
761 	MALI_DEBUG_PRINT(5, (" _mali_ukk_mem_unbind, vaddr=0x%x! \n", args->vaddr));
762 
763 	/* find the allocation by vaddr */
764 	mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
765 	if (likely(mali_vma_node)) {
766 		MALI_DEBUG_ASSERT(mali_addr == mali_vma_node->vm_node.start);
767 		mali_allocation = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
768 	} else {
769 		MALI_DEBUG_ASSERT(NULL != mali_vma_node);
770 		return _MALI_OSK_ERR_INVALID_ARGS;
771 	}
772 
773 	if (NULL != mali_allocation)
774 		/* check ref_count */
775 		mali_allocation_unref(&mali_allocation);
776 	return _MALI_OSK_ERR_OK;
777 }
778 
779 /*
780 * Function _mali_ukk_mem_cow --  COW for an allocation
781 * This function allocate new pages for  a range (range, range+size) of allocation
782 *  And Map it(keep use the not in range pages from target allocation ) to an GPU vaddr
783 */
/**
 * _mali_ukk_mem_cow - create a new copy-on-write allocation from an existing backend.
 * @args: user-kernel args; ->ctx is the session, ->target_handle names the source
 *        backend, ->vaddr is the new (unused) mali virtual address, ->target_offset/
 *        ->target_size describe the source range and ->range_start/->range_size the
 *        sub-range that gets private page copies.
 *
 * Builds a new MALI_MEM_COW allocation + backend, shares/refs the target's pages
 * (copying only the modified range), maps the result on the GPU and accounts the
 * newly allocated pages to the session.
 *
 * Returns _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_NOMEM or _MALI_OSK_ERR_FAULT
 * on failure. On failure all partially created state is unwound via the goto
 * cleanup chain at the bottom (unwind order mirrors creation order).
 */
_mali_osk_errcode_t _mali_ukk_mem_cow(_mali_uk_cow_mem_s *args)
{
	_mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
	mali_mem_backend *target_backend = NULL;
	mali_mem_backend *mem_backend = NULL;
	struct mali_vma_node *mali_vma_node = NULL;
	mali_mem_allocation *mali_allocation = NULL;

	struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
	/* Get the target backend for cow */
	target_backend = mali_mem_backend_struct_search(session, args->target_handle);

	if (NULL == target_backend || 0 == target_backend->size) {
		MALI_DEBUG_ASSERT_POINTER(target_backend);
		MALI_DEBUG_ASSERT(0 != target_backend->size);
		return ret;
	}

	/*Cow not support resized mem */
	MALI_DEBUG_ASSERT(MALI_MEM_FLAG_CAN_RESIZE != (MALI_MEM_FLAG_CAN_RESIZE & target_backend->mali_allocation->flags));

	/* Check if the new mali address is allocated */
	mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->vaddr, 0);

	if (unlikely(mali_vma_node)) {
		MALI_DEBUG_PRINT_ERROR(("The mali virtual address has already been used ! \n"));
		return ret;
	}

	/* create new alloction for COW*/
	mali_allocation = mali_mem_allocation_struct_create(session);
	if (mali_allocation == NULL) {
		MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_cow: Failed to create allocation struct!\n"));
		return _MALI_OSK_ERR_NOMEM;
	}
	mali_allocation->psize = args->target_size;
	mali_allocation->vsize = args->target_size;
	mali_allocation->type = MALI_MEM_COW;

	/*add allocation node to RB tree for index*/
	mali_allocation->mali_vma_node.vm_node.start = args->vaddr;
	mali_allocation->mali_vma_node.vm_node.size = mali_allocation->vsize;
	mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);

	/* create new backend for COW memory; backend_handle is an idr id, negative on failure */
	mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, mali_allocation->psize);
	if (mali_allocation->backend_handle < 0) {
		ret = _MALI_OSK_ERR_NOMEM;
		MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
		goto failed_alloc_backend;
	}
	mem_backend->mali_allocation = mali_allocation;
	mem_backend->type = mali_allocation->type;

	/* A swap-backed target (or an already swap-CoWed target) makes the new
	 * backend swap-CoWed as well. */
	if (target_backend->type == MALI_MEM_SWAP ||
	    (MALI_MEM_COW == target_backend->type && (MALI_MEM_BACKEND_FLAG_SWAP_COWED & target_backend->flags))) {
		mem_backend->flags |= MALI_MEM_BACKEND_FLAG_SWAP_COWED;
		/**
		 *     CoWed swap backends couldn't be mapped as non-linear vma, because if one
		 * vma is set with flag VM_NONLINEAR, the vma->vm_private_data will be used by kernel,
		 * while in mali driver, we use this variable to store the pointer of mali_allocation, so there
		 * is a conflict.
		 *     To resolve this problem, we have to do some fake things, we reserved about 64MB
		 * space from index 0, there isn't really page's index will be set from 0 to (64MB>>PAGE_SHIFT_NUM),
		 * and all of CoWed swap memory backends' start_idx will be assigned with 0, and these
		 * backends will be mapped as linear and will add to priority tree of global swap file, while
		 * these vmas will never be found by using normal page->index, these pages in those vma
		 * also couldn't be swapped out.
		 */
		mem_backend->start_idx = 0;
	}

	/* Add the target backend's cow count, also allocate new pages for COW backend from os mem
	*for a modified range and keep the page which not in the modified range and Add ref to it
	*/
	MALI_DEBUG_PRINT(3, ("Cow mapping: target_addr: 0x%x;  cow_addr: 0x%x,  size: %u\n", target_backend->mali_allocation->mali_vma_node.vm_node.start,
			     mali_allocation->mali_vma_node.vm_node.start, mali_allocation->mali_vma_node.vm_node.size));

	ret = mali_memory_do_cow(target_backend, args->target_offset, args->target_size, mem_backend, args->range_start, args->range_size);
	if (_MALI_OSK_ERR_OK != ret) {
		MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_cow: Failed to cow!\n"));
		goto failed_do_cow;
	}

	/**
	*map to GPU side
	*/
	mali_allocation->mali_mapping.addr = args->vaddr;
	/* set gpu mmu propery */
	_mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);

	_mali_osk_mutex_wait(session->memory_lock);
	/* Map on Mali */
	ret = mali_mem_mali_map_prepare(mali_allocation);
	if (0 != ret) {
		MALI_DEBUG_PRINT(1, (" prepare map fail! \n"));
		/* NOTE: memory_lock is still held here; failed_gpu_map releases it. */
		goto failed_gpu_map;
	}

	/* Swap-CoWed backends are not mapped here (see block comment above). */
	if (!(mem_backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) {
		mali_mem_cow_mali_map(mem_backend, 0, mem_backend->size);
	}

	_mali_osk_mutex_signal(session->memory_lock);

	/* Mark the source backend as having been CoWed (under its own mutex). */
	mutex_lock(&target_backend->mutex);
	target_backend->flags |= MALI_MEM_BACKEND_FLAG_COWED;
	mutex_unlock(&target_backend->mutex);

	/* Account only the privately copied range; shared pages were already counted. */
	atomic_add(args->range_size / MALI_MMU_PAGE_SIZE, &session->mali_mem_allocated_pages);
	if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
		session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
	}
	return _MALI_OSK_ERR_OK;

/* Error unwind: release in reverse order of creation. */
failed_gpu_map:
	_mali_osk_mutex_signal(session->memory_lock);
	mali_mem_cow_release(mem_backend, MALI_FALSE);
	mem_backend->cow_mem.count = 0;
failed_do_cow:
	mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
failed_alloc_backend:
	mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
	mali_mem_allocation_struct_destory(mali_allocation);

	return ret;
}
911 
_mali_ukk_mem_cow_modify_range(_mali_uk_cow_modify_range_s * args)912 _mali_osk_errcode_t _mali_ukk_mem_cow_modify_range(_mali_uk_cow_modify_range_s *args)
913 {
914 	_mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
915 	mali_mem_backend *mem_backend = NULL;
916 	struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
917 
918 	MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_cow_modify_range called! \n"));
919 	/* Get the backend that need to be modified. */
920 	mem_backend = mali_mem_backend_struct_search(session, args->vaddr);
921 
922 	if (NULL == mem_backend || 0 == mem_backend->size) {
923 		MALI_DEBUG_ASSERT_POINTER(mem_backend);
924 		MALI_DEBUG_ASSERT(0 != mem_backend->size);
925 		return ret;
926 	}
927 
928 	MALI_DEBUG_ASSERT(MALI_MEM_COW  == mem_backend->type);
929 
930 	ret =  mali_memory_cow_modify_range(mem_backend, args->range_start, args->size);
931 	args->change_pages_nr = mem_backend->cow_mem.change_pages_nr;
932 	if (_MALI_OSK_ERR_OK != ret)
933 		return  ret;
934 	_mali_osk_mutex_wait(session->memory_lock);
935 	if (!(mem_backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) {
936 		mali_mem_cow_mali_map(mem_backend, args->range_start, args->size);
937 	}
938 	_mali_osk_mutex_signal(session->memory_lock);
939 
940 	atomic_add(args->change_pages_nr, &session->mali_mem_allocated_pages);
941 	if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
942 		session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
943 	}
944 
945 	return _MALI_OSK_ERR_OK;
946 }
947 
948 
_mali_ukk_mem_resize(_mali_uk_mem_resize_s * args)949 _mali_osk_errcode_t _mali_ukk_mem_resize(_mali_uk_mem_resize_s *args)
950 {
951 	mali_mem_backend *mem_backend = NULL;
952 	_mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
953 
954 	struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
955 
956 	MALI_DEBUG_ASSERT_POINTER(session);
957 	MALI_DEBUG_PRINT(4, (" mali_mem_resize_memory called! \n"));
958 	MALI_DEBUG_ASSERT(0 == args->psize %  MALI_MMU_PAGE_SIZE);
959 
960 	/* Get the memory backend that need to be resize. */
961 	mem_backend = mali_mem_backend_struct_search(session, args->vaddr);
962 
963 	if (NULL == mem_backend)  {
964 		MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory backend = NULL!\n"));
965 		return ret;
966 	}
967 
968 	MALI_DEBUG_ASSERT(args->psize != mem_backend->size);
969 
970 	ret = mali_mem_resize(session, mem_backend, args->psize);
971 
972 	return ret;
973 }
974 
_mali_ukk_mem_usage_get(_mali_uk_profiling_memory_usage_get_s * args)975 _mali_osk_errcode_t _mali_ukk_mem_usage_get(_mali_uk_profiling_memory_usage_get_s *args)
976 {
977 	args->memory_usage = _mali_ukk_report_memory_usage();
978 	if (0 != args->vaddr) {
979 		mali_mem_backend *mem_backend = NULL;
980 		struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
981 		/* Get the backend that need to be modified. */
982 		mem_backend = mali_mem_backend_struct_search(session, args->vaddr);
983 		if (NULL == mem_backend) {
984 			MALI_DEBUG_ASSERT_POINTER(mem_backend);
985 			return _MALI_OSK_ERR_FAULT;
986 		}
987 
988 		if (MALI_MEM_COW == mem_backend->type)
989 			args->change_pages_nr = mem_backend->cow_mem.change_pages_nr;
990 	}
991 	return _MALI_OSK_ERR_OK;
992 }
993