xref: /OK3568_Linux_fs/kernel/drivers/gpu/arm/mali400/mali/linux/mali_memory.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright (C) 2013-2017 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/fs.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/platform_device.h>
#include <linux/idr.h>

#include "mali_osk.h"
#include "mali_executor.h"

#include "mali_memory.h"
#include "mali_memory_os_alloc.h"
#include "mali_memory_block_alloc.h"
#include "mali_memory_util.h"
#include "mali_memory_virtual.h"
#include "mali_memory_manager.h"
#include "mali_memory_cow.h"
#include "mali_memory_swap_alloc.h"
#include "mali_memory_defer_bind.h"
#if defined(CONFIG_DMA_SHARED_BUFFER)
#include "mali_memory_secure.h"
#endif

extern unsigned int mali_dedicated_mem_size;
extern unsigned int mali_shared_mem_size;

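/* Number of pages the COW fault handler maps ahead of the faulting page. */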
#define MALI_VM_NUM_FAULT_PREFETCH (0x8)

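/*
 * VMA open handler. Called when a userspace mapping of a Mali
 * allocation is duplicated (for example by a VMA split or mremap());
 * takes an extra reference so the allocation outlives every mapping.
 */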
static void mali_mem_vma_open(struct vm_area_struct *vma)
{
	mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data;
	MALI_DEBUG_PRINT(4, ("Open called on vma %p\n", vma));

	/* The mapping is being shared, so take a reference on the allocation. */
	mali_allocation_ref(alloc);
}

static void mali_mem_vma_close(struct vm_area_struct *vma)
{
	mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data;

	/* Drop the reference taken in mali_mmap() or mali_mem_vma_open(). */
	mali_allocation_unref(&alloc);
	vma->vm_private_data = NULL;
}

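/*
 * Page fault handler for Mali CPU mappings. Only COW and swappable
 * backends fault pages in on demand; every other backend type is
 * mapped up front in mali_mmap() and should never reach this handler.
 */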
static vm_fault_t mali_mem_vma_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data;
	mali_mem_backend *mem_bkend = NULL;
	int ret;
	int prefetch_num = MALI_VM_NUM_FAULT_PREFETCH;

	unsigned long address = (unsigned long)vmf->address;
	MALI_DEBUG_ASSERT(alloc->backend_handle);
	MALI_DEBUG_ASSERT((unsigned long)alloc->cpu_mapping.addr <= address);

	/* Look up the backend memory so it can be mapped on the CPU. */
	mutex_lock(&mali_idr_mutex);
	if (!(mem_bkend = idr_find(&mali_backend_idr, alloc->backend_handle))) {
		MALI_DEBUG_PRINT(1, ("Can't find memory backend in page fault!\n"));
		mutex_unlock(&mali_idr_mutex);
		return VM_FAULT_SIGBUS;
	}
	mutex_unlock(&mali_idr_mutex);
	MALI_DEBUG_ASSERT(mem_bkend->type == alloc->type);

	if ((mem_bkend->type == MALI_MEM_COW && (MALI_MEM_BACKEND_FLAG_SWAP_COWED !=
			(mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) &&
	    (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE)) {
		/* Non-swappable COW backend: copy the page on demand. */
		MALI_DEBUG_PRINT(4, ("mali_vma_fault: do cow allocate on demand!, address=0x%lx\n", address));
		mutex_lock(&mem_bkend->mutex);
		ret = mali_mem_cow_allocate_on_demand(mem_bkend,
						      (address - vma->vm_start) / PAGE_SIZE);
		mutex_unlock(&mem_bkend->mutex);

		if (ret != _MALI_OSK_ERR_OK) {
			return VM_FAULT_OOM;
		}
		prefetch_num = 1;

		/* Handle the COW-modified range of the CPU mapping. The mapping
		 * was zapped in cow_modify_range(), which triggers a page fault
		 * on the next CPU access, so map the pages back in here. */
		mutex_lock(&mem_bkend->mutex);
		ret = mali_mem_cow_cpu_map_pages_locked(mem_bkend, vma, address, prefetch_num);
		mutex_unlock(&mem_bkend->mutex);

		if (unlikely(ret != _MALI_OSK_ERR_OK)) {
			return VM_FAULT_SIGBUS;
		}
	} else if ((mem_bkend->type == MALI_MEM_SWAP) ||
		   (mem_bkend->type == MALI_MEM_COW && (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) {
		u32 offset_in_bkend = (address - vma->vm_start) / PAGE_SIZE;

		mutex_lock(&mem_bkend->mutex);
		if (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE) {
			ret = mali_mem_swap_cow_page_on_demand(mem_bkend, offset_in_bkend, &vmf->page);
		} else {
			ret = mali_mem_swap_allocate_page_on_demand(mem_bkend, offset_in_bkend, &vmf->page);
		}
		mutex_unlock(&mem_bkend->mutex);

		if (ret != _MALI_OSK_ERR_OK) {
			MALI_DEBUG_PRINT(2, ("Mali swap memory page fault process failed, address=0x%lx\n", address));
			return VM_FAULT_OOM;
		} else {
			return VM_FAULT_LOCKED;
		}
	} else {
		MALI_PRINT_ERROR(("Mali vma fault! This should never happen; it indicates a logic error in the caller.\n"));
		/* Not supported yet, or OOM. */
		return VM_FAULT_OOM;
	}
	return VM_FAULT_NOPAGE;
}

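/* CPU mapping callbacks installed on each Mali VMA by mali_mmap(). */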
static const struct vm_operations_struct mali_kernel_vm_ops = {
	.open = mali_mem_vma_open,
	.close = mali_mem_vma_close,
	.fault = mali_mem_vma_fault,
};

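/*
 * Illustrative sketch (not part of the driver) of how userspace is
 * expected to reach this code: the mmap() offset encodes the Mali GPU
 * virtual address of an existing allocation, which mali_mmap() looks
 * up via mali_vma_offset_search() below.
 *
 *   void *cpu_va = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, mali_fd, (off_t)mali_gpu_va);
 */
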
/**
 * Map a Mali allocation into the CPU address space.
 *
 * Supported backend types:
 * - MALI_MEM_OS
 * - MALI_MEM_COW
 * - MALI_MEM_BLOCK
 * - MALI_MEM_SWAP (CPU page tables are populated by the fault handler)
 * - MALI_MEM_SECURE (only with CONFIG_DMA_SHARED_BUFFER)
 *
 * Not supported backend types:
 * - _MALI_MEMORY_BIND_BACKEND_UMP
 * - _MALI_MEMORY_BIND_BACKEND_DMA_BUF
 * - _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY
 */
int mali_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct mali_session_data *session;
	mali_mem_allocation *mali_alloc = NULL;
	u32 mali_addr = vma->vm_pgoff << PAGE_SHIFT;
	struct mali_vma_node *mali_vma_node = NULL;
	mali_mem_backend *mem_bkend = NULL;
	int ret = -EFAULT;

	session = (struct mali_session_data *)filp->private_data;
	if (NULL == session) {
		MALI_PRINT_ERROR(("mmap called without any session data available\n"));
		return -EFAULT;
	}

	MALI_DEBUG_PRINT(4, ("MMap() handler: start=0x%08X, phys=0x%08X, size=0x%08X vma->flags 0x%08lx\n",
			     (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT),
			     (unsigned int)(vma->vm_end - vma->vm_start), vma->vm_flags));

	/* Find the mali allocation structure by its Mali virtual address. */
	mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
	if (likely(mali_vma_node)) {
		mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
		MALI_DEBUG_ASSERT(mali_addr == mali_vma_node->vm_node.start);
		if (unlikely(mali_addr != mali_vma_node->vm_node.start)) {
			/* Only the start address of an allocation may be mmap'ed. */
			MALI_DEBUG_PRINT(1, ("mali_addr != mali_vma_node->vm_node.start\n"));
			return -EFAULT;
		}
	} else {
		MALI_DEBUG_ASSERT(NULL == mali_vma_node);
		return -EFAULT;
	}

	mali_alloc->cpu_mapping.addr = (void __user *)vma->vm_start;

	if (mali_alloc->flags & _MALI_MEMORY_ALLOCATE_DEFER_BIND) {
		MALI_DEBUG_PRINT(1, ("ERROR: trying to access varying memory from the CPU!\n"));
		return -EFAULT;
	}

	/* Look up the backend memory so it can be mapped on the CPU. */
	mutex_lock(&mali_idr_mutex);
	if (!(mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle))) {
		MALI_DEBUG_PRINT(1, ("Can't find memory backend in mmap!\n"));
		mutex_unlock(&mali_idr_mutex);
		return -EFAULT;
	}
	mutex_unlock(&mali_idr_mutex);

	if (!(MALI_MEM_SWAP == mali_alloc->type ||
	      (MALI_MEM_COW == mali_alloc->type && (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)))) {
		/* Set some bits which indicate that the memory is IO memory,
		 * meaning that no paging is to be performed and the memory
		 * should not be included in crash dumps, and that the memory
		 * is reserved, meaning that it is present and can never be
		 * paged out (see also the previous entry).
		 */
		vma->vm_flags |= VM_IO;
		vma->vm_flags |= VM_DONTCOPY;
		vma->vm_flags |= VM_PFNMAP;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
		vma->vm_flags |= VM_RESERVED;
#else
		vma->vm_flags |= VM_DONTDUMP;
		vma->vm_flags |= VM_DONTEXPAND;
#endif
	} else if (MALI_MEM_SWAP == mali_alloc->type) {
		vma->vm_pgoff = mem_bkend->start_idx;
	}

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &mali_kernel_vm_ops;

	/* For a read-only mapping, map read-only now and let the page fault
	 * handler perform copy-on-write on the first write access. */
	if (!(vma->vm_flags & VM_WRITE)) {
		MALI_DEBUG_PRINT(4, ("mmap allocation read only!\n"));
		/* Add VM_WRITE so that do_page_fault() will accept a later write fault. */
		vma->vm_flags |= VM_WRITE | VM_READ;
		vma->vm_page_prot = PAGE_READONLY;
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		mem_bkend->flags |= MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE;
		goto out;
	}

	if (mem_bkend->type == MALI_MEM_OS) {
		ret = mali_mem_os_cpu_map(mem_bkend, vma);
	} else if (mem_bkend->type == MALI_MEM_COW &&
		   (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) {
		ret = mali_mem_cow_cpu_map(mem_bkend, vma);
	} else if (mem_bkend->type == MALI_MEM_BLOCK) {
		ret = mali_mem_block_cpu_map(mem_bkend, vma);
	} else if ((mem_bkend->type == MALI_MEM_SWAP) || (mem_bkend->type == MALI_MEM_COW &&
			(MALI_MEM_BACKEND_FLAG_SWAP_COWED == (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)))) {
		/* For swappable memory, the CPU page table is populated by the page fault handler. */
		ret = 0;
	} else if (mem_bkend->type == MALI_MEM_SECURE) {
#if defined(CONFIG_DMA_SHARED_BUFFER)
		ret = mali_mem_secure_cpu_map(mem_bkend, vma);
#else
		MALI_DEBUG_PRINT(1, ("CONFIG_DMA_SHARED_BUFFER is required for mali secure memory\n"));
		return -EFAULT;
#endif
	} else {
		/* Not supported yet. */
		MALI_DEBUG_PRINT_ERROR(("Invalid type of backend memory!\n"));
		return -EFAULT;
	}

	if (ret != 0) {
		MALI_DEBUG_PRINT(1, ("CPU mapping of backend memory failed\n"));
		return -EFAULT;
	}
out:
	MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == mali_alloc->magic);

	vma->vm_private_data = (void *)mali_alloc;
	mali_alloc->cpu_mapping.vma = vma;

	mali_allocation_ref(mali_alloc);

	return 0;
}

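/*
 * Create GPU (MMU) page table entries for an allocation, covering its
 * whole virtual range plus the optional guard page.
 */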
_mali_osk_errcode_t mali_mem_mali_map_prepare(mali_mem_allocation *descriptor)
{
	u32 size = descriptor->psize;
	struct mali_session_data *session = descriptor->session;

	MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);

	/* Map the allocation into this session's GPU page tables. */

	if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
		size += MALI_MMU_PAGE_SIZE;
	}

	return mali_mmu_pagedir_map(session->page_directory, descriptor->mali_vma_node.vm_node.start, size);
}

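/*
 * Grow the GPU mapping of an allocation from its current physical size
 * to new_size; calls that do not grow the mapping return
 * _MALI_OSK_ERR_OK without touching the page tables.
 */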
_mali_osk_errcode_t mali_mem_mali_map_resize(mali_mem_allocation *descriptor, u32 new_size)
{
	u32 old_size = descriptor->psize;
	struct mali_session_data *session = descriptor->session;

	MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);

	if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
		new_size += MALI_MMU_PAGE_SIZE;
	}

	if (new_size > old_size) {
		MALI_DEBUG_ASSERT(new_size <= descriptor->mali_vma_node.vm_node.size);
		return mali_mmu_pagedir_map(session->page_directory, descriptor->mali_vma_node.vm_node.start + old_size, new_size - old_size);
	}
	return _MALI_OSK_ERR_OK;
}

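/*
 * Tear down the GPU mapping for a virtual range, then zap the session
 * on the executor so stale MMU/L2 entries are not used afterwards.
 */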
void mali_mem_mali_map_free(struct mali_session_data *session, u32 size, mali_address_t vaddr, u32 flags)
{
	if (flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
		size += MALI_MMU_PAGE_SIZE;
	}

	/* Unmap and flush L2 */
	mali_mmu_pagedir_unmap(session->page_directory, vaddr, size);
	mali_executor_zap_all_active(session);
}

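/*
 * Report how much memory the driver currently has allocated: dedicated
 * block allocator usage (when configured) plus OS allocator usage.
 */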
u32 _mali_ukk_report_memory_usage(void)
{
	u32 sum = 0;

	if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
		sum += mali_mem_block_allocator_stat();
	}

	sum += mali_mem_os_stat();

	return sum;
}

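/* Total memory available to Mali: dedicated memory plus shared OS memory. */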
u32 _mali_ukk_report_total_memory_size(void)
{
	return mali_dedicated_mem_size + mali_shared_mem_size;
}


/**
 * Per-session memory descriptor mapping table sizes
 */
#define MALI_MEM_DESCRIPTORS_INIT 64
#define MALI_MEM_DESCRIPTORS_MAX 65536

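/*
 * Set up per-session memory state: the ordered memory lock, the COW
 * lock and the per-session allocation manager.
 */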
_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data *session_data)
{
	MALI_DEBUG_PRINT(5, ("Memory session begin\n"));

	session_data->memory_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED,
				    _MALI_OSK_LOCK_ORDER_MEM_SESSION);

	if (NULL == session_data->memory_lock) {
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}

	session_data->cow_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_UNORDERED, 0);
	if (NULL == session_data->cow_lock) {
		_mali_osk_mutex_term(session_data->memory_lock);
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}

	mali_memory_manager_init(&session_data->allocation_mgr);

	MALI_DEBUG_PRINT(5, ("MMU session begin: success\n"));
	MALI_SUCCESS;
}

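/*
 * Tear down per-session memory state: free every allocation still owned
 * by the session, uninitialize the allocation manager and release the
 * session locks.
 */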
void mali_memory_session_end(struct mali_session_data *session)
{
	MALI_DEBUG_PRINT(3, ("MMU session end\n"));

	if (NULL == session) {
		MALI_DEBUG_PRINT(1, ("No session data found during session end\n"));
		return;
	}
	/* Free all allocations owned by the session. */
	mali_free_session_allocations(session);
	/* Run consistency checks during uninit. */
	mali_memory_manager_uninit(&session->allocation_mgr);

	/* Free the locks. */
	_mali_osk_mutex_term(session->memory_lock);
	_mali_osk_mutex_term(session->cow_lock);
}

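/*
 * Driver-wide memory setup: the backend IDR, swap support, the OS
 * allocator and the deferred-bind manager.
 */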
_mali_osk_errcode_t mali_memory_initialize(void)
{
	_mali_osk_errcode_t err;

	idr_init(&mali_backend_idr);
	mutex_init(&mali_idr_mutex);

	err = mali_mem_swap_init();
	if (err != _MALI_OSK_ERR_OK) {
		return err;
	}
	err = mali_mem_os_init();
	if (_MALI_OSK_ERR_OK == err) {
		err = mali_mem_defer_bind_manager_init();
	}

	return err;
}

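/* Driver-wide memory teardown; the reverse of mali_memory_initialize(). */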
void mali_memory_terminate(void)
{
	mali_mem_swap_term();
	mali_mem_defer_bind_manager_destory();
	mali_mem_os_term();
	if (mali_memory_have_dedicated_memory()) {
		mali_mem_block_allocator_destroy();
	}
}


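/*
 * mali_page_node helpers. A page node wraps a single backing page,
 * which may come from the OS allocator, the dedicated block allocator
 * or the swap allocator; the helpers below dispatch on that type.
 */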
struct mali_page_node *_mali_page_node_allocate(mali_page_node_type type)
{
	mali_page_node *page_node = NULL;

	page_node = kzalloc(sizeof(mali_page_node), GFP_KERNEL);
	MALI_DEBUG_ASSERT(NULL != page_node);

	if (page_node) {
		page_node->type = type;
		INIT_LIST_HEAD(&page_node->list);
	}

	return page_node;
}

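/* Take a reference on the backing page of a page node. */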
void _mali_page_node_ref(struct mali_page_node *node)
{
	if (node->type == MALI_PAGE_NODE_OS) {
		/* Add a reference to this page. */
		get_page(node->page);
	} else if (node->type == MALI_PAGE_NODE_BLOCK) {
		mali_mem_block_add_ref(node);
	} else if (node->type == MALI_PAGE_NODE_SWAP) {
		atomic_inc(&node->swap_it->ref_count);
	} else {
		MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node!\n"));
	}
}

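/*
 * Drop a reference on the backing page of a page node. Unlike
 * _mali_page_node_ref() there is no MALI_PAGE_NODE_SWAP case here;
 * swap item references appear to be dropped through the swap
 * allocator's own paths instead.
 */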
void _mali_page_node_unref(struct mali_page_node *node)
{
	if (node->type == MALI_PAGE_NODE_OS) {
		/* Drop the reference to this page. */
		put_page(node->page);
	} else if (node->type == MALI_PAGE_NODE_BLOCK) {
		mali_mem_block_dec_ref(node);
	} else {
		MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node!\n"));
	}
}


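/* Setters attaching the type-specific backing item to a page node. */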
void _mali_page_node_add_page(struct mali_page_node *node, struct page *page)
{
	MALI_DEBUG_ASSERT(MALI_PAGE_NODE_OS == node->type);
	node->page = page;
}


void _mali_page_node_add_swap_item(struct mali_page_node *node, struct mali_swap_item *item)
{
	MALI_DEBUG_ASSERT(MALI_PAGE_NODE_SWAP == node->type);
	node->swap_it = item;
}

void _mali_page_node_add_block_item(struct mali_page_node *node, mali_block_item *item)
{
	MALI_DEBUG_ASSERT(MALI_PAGE_NODE_BLOCK == node->type);
	node->blk_it = item;
}

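/* Return the reference count of the backing page, or -1 for an invalid type. */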
int _mali_page_node_get_ref_count(struct mali_page_node *node)
{
	if (node->type == MALI_PAGE_NODE_OS) {
		/* Get the reference count of this page. */
		return page_count(node->page);
	} else if (node->type == MALI_PAGE_NODE_BLOCK) {
		return mali_mem_block_get_ref_count(node);
	} else if (node->type == MALI_PAGE_NODE_SWAP) {
		return atomic_read(&node->swap_it->ref_count);
	} else {
		MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node!\n"));
	}
	return -1;
}

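/*
 * Return the DMA address of the backing page. For OS pages the DMA
 * address is stored in page_private() by the OS allocator when the
 * page is mapped for the device.
 */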
dma_addr_t _mali_page_node_get_dma_addr(struct mali_page_node *node)
{
	if (node->type == MALI_PAGE_NODE_OS) {
		return page_private(node->page);
	} else if (node->type == MALI_PAGE_NODE_BLOCK) {
		return _mali_blk_item_get_phy_addr(node->blk_it);
	} else if (node->type == MALI_PAGE_NODE_SWAP) {
		return node->swap_it->dma_addr;
	} else {
		MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node!\n"));
	}
	return 0;
}

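/* Return the page frame number (PFN) of the backing page, or 0 for an invalid type. */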
unsigned long _mali_page_node_get_pfn(struct mali_page_node *node)
{
	if (node->type == MALI_PAGE_NODE_OS) {
		return page_to_pfn(node->page);
	} else if (node->type == MALI_PAGE_NODE_BLOCK) {
		/* Derive the PFN from the block item's physical address. */
		return _mali_blk_item_get_pfn(node->blk_it);
	} else if (node->type == MALI_PAGE_NODE_SWAP) {
		return page_to_pfn(node->swap_it->page);
	} else {
		MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node!\n"));
	}
	return 0;
}