/*
 * Copyright (C) 2013-2017 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/mm_types.h>
#include <linux/fs.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <linux/sched.h>
#ifdef CONFIG_ARM
#include <asm/outercache.h>
#endif
#include <asm/dma-mapping.h>

#include "mali_memory.h"
#include "mali_kernel_common.h"
#include "mali_uk_types.h"
#include "mali_osk.h"
#include "mali_kernel_linux.h"
#include "mali_memory_cow.h"
#include "mali_memory_block_alloc.h"
#include "mali_memory_swap_alloc.h"

/**
* allocate pages for COW backend and flush cache
*/
static struct page *mali_mem_cow_alloc_page(void)
{
	mali_mem_os_mem os_mem;
	struct mali_page_node *node;
	struct page *new_page;

	int ret = 0;
	/* allocate pages from os mem */
	ret = mali_mem_os_alloc_pages(&os_mem, _MALI_OSK_MALI_PAGE_SIZE);

	if (ret) {
		return NULL;
	}

	MALI_DEBUG_ASSERT(1 == os_mem.count);

	node = _MALI_OSK_CONTAINER_OF(os_mem.pages.next, struct mali_page_node, list);
	new_page = node->page;
	node->page = NULL;
	list_del(&node->list);
	kfree(node);

	return new_page;
}


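/**
* Return the page-node list of the target backend for a COW range.
* Supports OS, COW, BLOCK and SWAP backends; returns NULL for any other type.
*/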
static struct list_head *_mali_memory_cow_get_node_list(mali_mem_backend *target_bk,
		u32 target_offset,
		u32 target_size)
{
	MALI_DEBUG_ASSERT(MALI_MEM_OS == target_bk->type || MALI_MEM_COW == target_bk->type ||
			  MALI_MEM_BLOCK == target_bk->type || MALI_MEM_SWAP == target_bk->type);

	if (MALI_MEM_OS == target_bk->type) {
		MALI_DEBUG_ASSERT(&target_bk->os_mem);
		MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->os_mem.count);
		return &target_bk->os_mem.pages;
	} else if (MALI_MEM_COW == target_bk->type) {
		MALI_DEBUG_ASSERT(&target_bk->cow_mem);
		MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->cow_mem.count);
		return &target_bk->cow_mem.pages;
	} else if (MALI_MEM_BLOCK == target_bk->type) {
		MALI_DEBUG_ASSERT(&target_bk->block_mem);
		MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->block_mem.count);
		return &target_bk->block_mem.pfns;
	} else if (MALI_MEM_SWAP == target_bk->type) {
		MALI_DEBUG_ASSERT(&target_bk->swap_mem);
		MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->swap_mem.count);
		return &target_bk->swap_mem.pages;
	}

	return NULL;
}

/**
* Do COW for OS memory - also supports COW of memory that was allocated from
* bank (block) memory.
* The range_start/size can be zero, in which case cow_modify_range is
* expected to be called later.
* This function allocates new pages from OS memory for the modified range of
* the COW backend; pages outside the modified range are kept and a reference
* is added to them instead of copying.
*
* @target_bk - target allocation's backend (the allocation to COW)
* @target_offset - offset in the target allocation to COW (4K aligned; supports COW of memory allocated from a memory bank)
* @target_size - size of the target allocation to COW (supports memory bank)
* @backend - COW backend
* @range_start - offset of the modified range (4K aligned)
* @range_size - size of the modified range (in bytes)
*/
_mali_osk_errcode_t mali_memory_cow_os_memory(mali_mem_backend *target_bk,
		u32 target_offset,
		u32 target_size,
		mali_mem_backend *backend,
		u32 range_start,
		u32 range_size)
{
	mali_mem_cow *cow = &backend->cow_mem;
	struct mali_page_node *m_page, *m_tmp, *page_node;
	int target_page = 0;
	struct page *new_page;
	struct list_head *pages = NULL;

	pages = _mali_memory_cow_get_node_list(target_bk, target_offset, target_size);

	if (NULL == pages) {
		MALI_DEBUG_PRINT_ERROR(("No memory pages need to COW!\n"));
		return _MALI_OSK_ERR_FAULT;
	}

	MALI_DEBUG_ASSERT(0 == cow->count);

	INIT_LIST_HEAD(&cow->pages);
	mutex_lock(&target_bk->mutex);
	list_for_each_entry_safe(m_page, m_tmp, pages, list) {
		/* add pages in [target_offset, target_offset + target_size) to the COW backend */
		if ((target_page >= target_offset / _MALI_OSK_MALI_PAGE_SIZE) &&
		    (target_page < ((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE))) {

			/* allocate a new page node; always use OS memory for COW */
			page_node = _mali_page_node_allocate(MALI_PAGE_NODE_OS);

			if (NULL == page_node) {
				mutex_unlock(&target_bk->mutex);
				goto error;
			}

			INIT_LIST_HEAD(&page_node->list);

			/* check if the page is in the modified range */
			if ((cow->count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) &&
			    (cow->count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) {
				/* need to allocate a new page; to keep things simple,
				 * all COW memory is allocated from OS memory */
				new_page = mali_mem_cow_alloc_page();

				if (NULL == new_page) {
					kfree(page_node);
					mutex_unlock(&target_bk->mutex);
					goto error;
				}

				_mali_page_node_add_page(page_node, new_page);
			} else {
				/* handle the block memory case */
				if (m_page->type != MALI_PAGE_NODE_BLOCK) {
					_mali_page_node_add_page(page_node, m_page->page);
				} else {
					page_node->type = MALI_PAGE_NODE_BLOCK;
					_mali_page_node_add_block_item(page_node, m_page->blk_it);
				}

				/* add a ref to this page */
				_mali_page_node_ref(m_page);
			}

			/* add it to the COW backend page list */
			list_add_tail(&page_node->list, &cow->pages);
			cow->count++;
		}
		target_page++;
	}
	mutex_unlock(&target_bk->mutex);
	return _MALI_OSK_ERR_OK;
error:
	mali_mem_cow_release(backend, MALI_FALSE);
	return _MALI_OSK_ERR_FAULT;
}

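/**
* Do COW for swap memory - the swap-backed counterpart of
* mali_memory_cow_os_memory(). Pages in the modified range get a newly
* allocated swap item; pages outside it are shared by taking a reference.
* The backend is flagged MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN.
*
* @target_bk - target allocation's backend (the allocation to COW)
* @target_offset - offset in the target allocation to COW (4K aligned)
* @target_size - size of the target allocation to COW (in bytes)
* @backend - COW backend
* @range_start - offset of the modified range (4K aligned)
* @range_size - size of the modified range (in bytes)
*/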
_mali_osk_errcode_t mali_memory_cow_swap_memory(mali_mem_backend *target_bk,
		u32 target_offset,
		u32 target_size,
		mali_mem_backend *backend,
		u32 range_start,
		u32 range_size)
{
	mali_mem_cow *cow = &backend->cow_mem;
	struct mali_page_node *m_page, *m_tmp, *page_node;
	int target_page = 0;
	struct mali_swap_item *swap_item;
	struct list_head *pages = NULL;

	pages = _mali_memory_cow_get_node_list(target_bk, target_offset, target_size);
	if (NULL == pages) {
		MALI_DEBUG_PRINT_ERROR(("No swap memory pages need to COW!\n"));
		return _MALI_OSK_ERR_FAULT;
	}

	MALI_DEBUG_ASSERT(0 == cow->count);

	INIT_LIST_HEAD(&cow->pages);
	mutex_lock(&target_bk->mutex);

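	/* Mark this COW backend as not yet swapped in. */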
	backend->flags |= MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN;

	list_for_each_entry_safe(m_page, m_tmp, pages, list) {
		/* add pages in [target_offset, target_offset + target_size) to the COW backend */
		if ((target_page >= target_offset / _MALI_OSK_MALI_PAGE_SIZE) &&
		    (target_page < ((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE))) {

			/* allocate a new page node; swap-backed COW always uses swap memory */
			page_node = _mali_page_node_allocate(MALI_PAGE_NODE_SWAP);

			if (NULL == page_node) {
				mutex_unlock(&target_bk->mutex);
				goto error;
			}

			/* check if the page is in the modified range */
			if ((cow->count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) &&
			    (cow->count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) {
				/* need to allocate a new swap item for the copied page */
				swap_item = mali_mem_swap_alloc_swap_item();

				if (NULL == swap_item) {
					kfree(page_node);
					mutex_unlock(&target_bk->mutex);
					goto error;
				}

				swap_item->idx = mali_mem_swap_idx_alloc();

				if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == swap_item->idx) {
					MALI_DEBUG_PRINT(1, ("Failed to allocate swap index in swap CoW.\n"));
					kfree(page_node);
					kfree(swap_item);
					mutex_unlock(&target_bk->mutex);
					goto error;
				}

				_mali_page_node_add_swap_item(page_node, swap_item);
			} else {
				_mali_page_node_add_swap_item(page_node, m_page->swap_it);

				/* add a ref to this page */
				_mali_page_node_ref(m_page);
			}

			list_add_tail(&page_node->list, &cow->pages);
			cow->count++;
		}
		target_page++;
	}
	mutex_unlock(&target_bk->mutex);

	return _MALI_OSK_ERR_OK;
error:
	mali_mem_swap_release(backend, MALI_FALSE);
	return _MALI_OSK_ERR_FAULT;
}


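/**
* Drop one reference on a page node, dispatching on the node type
* (OS, BLOCK or SWAP).
*/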
_mali_osk_errcode_t _mali_mem_put_page_node(mali_page_node *node)
{
	if (node->type == MALI_PAGE_NODE_OS) {
		return mali_mem_os_put_page(node->page);
	} else if (node->type == MALI_PAGE_NODE_BLOCK) {
		return mali_mem_block_unref_node(node);
	} else if (node->type == MALI_PAGE_NODE_SWAP) {
		return _mali_mem_swap_put_page_node(node);
	} else
		MALI_DEBUG_ASSERT(0);
	return _MALI_OSK_ERR_FAULT;
}


/**
* Modify a range of an existing COW backend: pages inside the modified range
* are replaced with newly allocated ones. Typically used after a COW call
* made with range_start/size of zero.
* @backend - COW backend
* @range_start - offset of the modified range (4K aligned)
* @range_size - size of the modified range (in bytes)
*/
_mali_osk_errcode_t mali_memory_cow_modify_range(mali_mem_backend *backend,
		u32 range_start,
		u32 range_size)
{
	mali_mem_allocation *alloc = NULL;
	struct mali_session_data *session;
	mali_mem_cow *cow = &backend->cow_mem;
	struct mali_page_node *m_page, *m_tmp;
	LIST_HEAD(pages);
	struct page *new_page;
	u32 count = 0;
	s32 change_pages_nr = 0;
	_mali_osk_errcode_t ret = _MALI_OSK_ERR_OK;

	if (range_start % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
	if (range_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);

	alloc = backend->mali_allocation;
	MALI_DEBUG_ASSERT_POINTER(alloc);

	session = alloc->session;
	MALI_DEBUG_ASSERT_POINTER(session);

	MALI_DEBUG_ASSERT(MALI_MEM_COW == backend->type);
	MALI_DEBUG_ASSERT(((range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE) <= cow->count);

	mutex_lock(&backend->mutex);

	/* free pages */
	list_for_each_entry_safe(m_page, m_tmp, &cow->pages, list) {

		/* check if the page is in the modified range */
		if ((count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) &&
		    (count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) {
			if (MALI_PAGE_NODE_SWAP != m_page->type) {
				new_page = mali_mem_cow_alloc_page();

				if (NULL == new_page) {
					ret = _MALI_OSK_ERR_NOMEM;
					goto error;
				}
				if (1 != _mali_page_node_get_ref_count(m_page))
					change_pages_nr++;
				/* unref the old page */
				_mali_osk_mutex_wait(session->cow_lock);
				if (_mali_mem_put_page_node(m_page)) {
					__free_page(new_page);
					_mali_osk_mutex_signal(session->cow_lock);
					ret = _MALI_OSK_ERR_FAULT;
					goto error;
				}
				_mali_osk_mutex_signal(session->cow_lock);
				/* add the new page; always use OS memory for COW */
				m_page->type = MALI_PAGE_NODE_OS;
				_mali_page_node_add_page(m_page, new_page);
			} else {
				struct mali_swap_item *swap_item;

				swap_item = mali_mem_swap_alloc_swap_item();

				if (NULL == swap_item) {
					ret = _MALI_OSK_ERR_NOMEM;
					goto error;
				}

				swap_item->idx = mali_mem_swap_idx_alloc();

				if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == swap_item->idx) {
					MALI_DEBUG_PRINT(1, ("Failed to allocate swap index in swap CoW modify range.\n"));
					kfree(swap_item);
					ret = _MALI_OSK_ERR_NOMEM;
					goto error;
				}

				if (1 != _mali_page_node_get_ref_count(m_page)) {
					change_pages_nr++;
				}

				if (_mali_mem_put_page_node(m_page)) {
					mali_mem_swap_free_swap_item(swap_item);
					ret = _MALI_OSK_ERR_FAULT;
					goto error;
				}

				_mali_page_node_add_swap_item(m_page, swap_item);
			}
		}
		count++;
	}
	cow->change_pages_nr = change_pages_nr;

	MALI_DEBUG_ASSERT(MALI_MEM_COW == alloc->type);

	/* Zap the CPU mapping of the modified range, and redo the CPU mapping here if needed. */
	if (NULL != alloc->cpu_mapping.vma) {
		MALI_DEBUG_ASSERT(0 != alloc->backend_handle);
		MALI_DEBUG_ASSERT(NULL != alloc->cpu_mapping.vma);
		MALI_DEBUG_ASSERT(alloc->cpu_mapping.vma->vm_end - alloc->cpu_mapping.vma->vm_start >= range_size);

		if (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) {
			zap_vma_ptes(alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size);

			ret = mali_mem_cow_cpu_map_pages_locked(backend, alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size / _MALI_OSK_MALI_PAGE_SIZE);

			if (unlikely(ret != _MALI_OSK_ERR_OK)) {
				MALI_DEBUG_PRINT(2, ("mali_memory_cow_modify_range: cpu mapping failed!\n"));
				ret = _MALI_OSK_ERR_FAULT;
			}
		} else {
			/* Set these flags to trigger a page fault on the next CPU access to the swappable COWed memory. */
			alloc->cpu_mapping.vma->vm_flags |= VM_PFNMAP;
			alloc->cpu_mapping.vma->vm_flags |= VM_MIXEDMAP;

			zap_vma_ptes(alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size);
			/* Clear the flags again so the swappable memory is unmapped with respect to the struct page, not the page frame. */
			alloc->cpu_mapping.vma->vm_flags &= ~VM_PFNMAP;
			alloc->cpu_mapping.vma->vm_flags &= ~VM_MIXEDMAP;
		}
	}

error:
	mutex_unlock(&backend->mutex);
	return ret;
}


/**
* Allocate pages for a COW backend.
* @alloc - allocation for the COW allocation
* @target_bk - target allocation's backend (the allocation to COW)
* @target_offset - offset in the target allocation to COW (4K aligned; supports COW of memory allocated from a memory bank)
* @target_size - size of the target allocation to COW (in bytes; supports memory bank)
* @backend - COW backend
* @range_start - offset of the modified range (4K aligned)
* @range_size - size of the modified range (in bytes)
*/
_mali_osk_errcode_t mali_memory_do_cow(mali_mem_backend *target_bk,
				       u32 target_offset,
				       u32 target_size,
				       mali_mem_backend *backend,
				       u32 range_start,
				       u32 range_size)
{
	struct mali_session_data *session = backend->mali_allocation->session;

	MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);

	/* size & offset must be a multiple of the system page size */
	if (target_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
	if (range_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
	if (target_offset % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
	if (range_start % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);

	/* check the backend type */
	MALI_DEBUG_ASSERT(MALI_MEM_COW == backend->type);

	switch (target_bk->type) {
	case MALI_MEM_OS:
	case MALI_MEM_BLOCK:
		return mali_memory_cow_os_memory(target_bk, target_offset, target_size, backend, range_start, range_size);
	case MALI_MEM_COW:
		if (backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED) {
			return mali_memory_cow_swap_memory(target_bk, target_offset, target_size, backend, range_start, range_size);
		} else {
			return mali_memory_cow_os_memory(target_bk, target_offset, target_size, backend, range_start, range_size);
		}
	case MALI_MEM_SWAP:
		return mali_memory_cow_swap_memory(target_bk, target_offset, target_size, backend, range_start, range_size);
	case MALI_MEM_EXTERNAL:
		/* not supported yet */
		MALI_DEBUG_PRINT_ERROR(("External physical memory not supported!\n"));
		return _MALI_OSK_ERR_UNSUPPORTED;
	case MALI_MEM_DMA_BUF:
		/* not supported yet */
		MALI_DEBUG_PRINT_ERROR(("DMA buffer not supported!\n"));
		return _MALI_OSK_ERR_UNSUPPORTED;
	case MALI_MEM_UMP:
		/* not supported yet */
		MALI_DEBUG_PRINT_ERROR(("UMP buffer not supported!\n"));
		return _MALI_OSK_ERR_UNSUPPORTED;
	default:
		/* not supported yet */
		MALI_DEBUG_PRINT_ERROR(("Invalid memory type, not supported!\n"));
		return _MALI_OSK_ERR_UNSUPPORTED;
	}
	return _MALI_OSK_ERR_OK;
}

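/*
 * Illustrative usage sketch (not part of this driver; cow_bkend and the
 * surrounding setup are assumptions for illustration only). A caller that
 * wants to COW a whole target backend, treating its first page as the
 * initially modified range, might do something like:
 *
 *	mali_mem_backend *cow_bkend;	// hypothetical backend with type == MALI_MEM_COW
 *	_mali_osk_errcode_t err;
 *
 *	err = mali_memory_do_cow(target_bk, 0, target_bk->size,
 *				 cow_bkend, 0, _MALI_OSK_MALI_PAGE_SIZE);
 *	if (_MALI_OSK_ERR_OK != err)
 *		return err;
 *
 *	// later, map the COW pages to the GPU:
 *	mali_mem_cow_mali_map(cow_bkend, 0, cow_bkend->size);
 */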

/**
* Map COW backend memory to Mali.
* Supports OS/BLOCK mali_page_node types.
*/
int mali_mem_cow_mali_map(mali_mem_backend *mem_bkend, u32 range_start, u32 range_size)
{
	mali_mem_allocation *cow_alloc;
	struct mali_page_node *m_page;
	struct mali_session_data *session;
	struct mali_page_directory *pagedir;
	u32 virt, start;

	MALI_DEBUG_ASSERT_POINTER(mem_bkend);
	MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);

	cow_alloc = mem_bkend->mali_allocation;
	MALI_DEBUG_ASSERT_POINTER(cow_alloc);

	virt = cow_alloc->mali_vma_node.vm_node.start;
	start = virt;

	session = cow_alloc->session;
	MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
	pagedir = session->page_directory;
	list_for_each_entry(m_page, &mem_bkend->cow_mem.pages, list) {
		if ((virt - start >= range_start) && (virt - start < range_start + range_size)) {
			dma_addr_t phys = _mali_page_node_get_dma_addr(m_page);
#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
			MALI_DEBUG_ASSERT(0 == (phys >> 32));
#endif
			mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys,
						MALI_MMU_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
		}
		virt += MALI_MMU_PAGE_SIZE;
	}
	return 0;
}

/**
* Map a COW backend to the CPU.
* Supports OS/BLOCK memory.
*/
int mali_mem_cow_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)
{
	mali_mem_cow *cow = &mem_bkend->cow_mem;
	struct mali_page_node *m_page;
	int ret;
	unsigned long addr = vma->vm_start;
	MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_COW);

	list_for_each_entry(m_page, &cow->pages, list) {
		/* We should use vm_insert_page, but it does a dcache
		 * flush which makes it way slower than remap_pfn_range or vmf_insert_pfn.
		ret = vm_insert_page(vma, addr, page);
		*/
		ret = vmf_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));

		if (unlikely(VM_FAULT_NOPAGE != ret)) {
			return -EFAULT;
		}
		addr += _MALI_OSK_MALI_PAGE_SIZE;
	}

	return 0;
}

/**
* Map some pages of a COW backend to the CPU vma at vaddr.
* @mem_bkend - COW backend
* @vma - CPU vma to map into
* @vaddr - start CPU virtual address to map to
* @num - maximum number of pages to map
*/
_mali_osk_errcode_t mali_mem_cow_cpu_map_pages_locked(mali_mem_backend *mem_bkend,
		struct vm_area_struct *vma,
		unsigned long vaddr,
		int num)
{
	mali_mem_cow *cow = &mem_bkend->cow_mem;
	struct mali_page_node *m_page;
	int ret;
	int offset;
	int count = 0;
	unsigned long vstart = vma->vm_start;

	MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_COW);
	MALI_DEBUG_ASSERT(0 == vaddr % _MALI_OSK_MALI_PAGE_SIZE);
	MALI_DEBUG_ASSERT(0 == vstart % _MALI_OSK_MALI_PAGE_SIZE);
	offset = (vaddr - vstart) / _MALI_OSK_MALI_PAGE_SIZE;

	list_for_each_entry(m_page, &cow->pages, list) {
		if ((count >= offset) && (count < offset + num)) {
			ret = vmf_insert_pfn(vma, vaddr, _mali_page_node_get_pfn(m_page));

			if (unlikely(VM_FAULT_NOPAGE != ret)) {
				if (count == offset) {
					return _MALI_OSK_ERR_FAULT;
				} else {
					/* ret is EBUSY when the page isn't in the modified range; that is OK here */
					return _MALI_OSK_ERR_OK;
				}
			}
			vaddr += _MALI_OSK_MALI_PAGE_SIZE;
		}
		count++;
	}
	return _MALI_OSK_ERR_OK;
}

/**
* Release COW backend memory.
* Pages are freed directly (put_page to drop the reference), not returned to a pool.
*/
u32 mali_mem_cow_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped)
{
	mali_mem_allocation *alloc;
	struct mali_session_data *session;
	u32 free_pages_nr = 0;
	MALI_DEBUG_ASSERT_POINTER(mem_bkend);
	MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
	alloc = mem_bkend->mali_allocation;
	MALI_DEBUG_ASSERT_POINTER(alloc);

	session = alloc->session;
	MALI_DEBUG_ASSERT_POINTER(session);

	if (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (MALI_MEM_BACKEND_FLAG_SWAP_COWED & mem_bkend->flags)) {
		/* Unmap the memory from the mali virtual address space. */
		if (MALI_TRUE == is_mali_mapped)
			mali_mem_os_mali_unmap(alloc);
		/* free the cow backend list */
		_mali_osk_mutex_wait(session->cow_lock);
		free_pages_nr = mali_mem_os_free(&mem_bkend->cow_mem.pages, mem_bkend->cow_mem.count, MALI_TRUE);
		_mali_osk_mutex_signal(session->cow_lock);

		free_pages_nr += mali_mem_block_free_list(&mem_bkend->cow_mem.pages);

		MALI_DEBUG_ASSERT(list_empty(&mem_bkend->cow_mem.pages));
	} else {
		free_pages_nr = mali_mem_swap_release(mem_bkend, is_mali_mapped);
	}

	MALI_DEBUG_PRINT(4, ("COW Mem free : allocated size = 0x%x, free size = 0x%x\n", mem_bkend->cow_mem.count * _MALI_OSK_MALI_PAGE_SIZE,
			     free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE));

	mem_bkend->cow_mem.count = 0;
	return free_pages_nr;
}


/* The dst node can be an OS node or a swap node. */
void _mali_mem_cow_copy_page(mali_page_node *src_node, mali_page_node *dst_node)
{
	void *dst, *src;
	struct page *dst_page;
	dma_addr_t dma_addr;

	MALI_DEBUG_ASSERT(src_node != NULL);
	MALI_DEBUG_ASSERT(dst_node != NULL);
	MALI_DEBUG_ASSERT(dst_node->type == MALI_PAGE_NODE_OS
			  || dst_node->type == MALI_PAGE_NODE_SWAP);

	if (dst_node->type == MALI_PAGE_NODE_OS) {
		dst_page = dst_node->page;
	} else {
		dst_page = dst_node->swap_it->page;
	}

	dma_unmap_page(&mali_platform_device->dev, _mali_page_node_get_dma_addr(dst_node),
		       _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);

	/* map it and copy the content */
	dst = kmap_atomic(dst_page);

	if (src_node->type == MALI_PAGE_NODE_OS ||
	    src_node->type == MALI_PAGE_NODE_SWAP) {
		struct page *src_page;

		if (src_node->type == MALI_PAGE_NODE_OS) {
			src_page = src_node->page;
		} else {
			src_page = src_node->swap_it->page;
		}

		/* Clean and invalidate the cache.
		 * On ARM, speculative reads through the kernel linear mapping may pull
		 * stale data into the L1 cache. Unmapping with DMA_BIDIRECTIONAL
		 * invalidates the L1 cache so the following read sees the latest data.
		 */
		dma_unmap_page(&mali_platform_device->dev, _mali_page_node_get_dma_addr(src_node),
			       _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);

		src = kmap_atomic(src_page);
		memcpy(dst, src, _MALI_OSK_MALI_PAGE_SIZE);
		kunmap_atomic(src);
		dma_addr = dma_map_page(&mali_platform_device->dev, src_page,
					0, _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);

		if (src_node->type == MALI_PAGE_NODE_SWAP) {
			src_node->swap_it->dma_addr = dma_addr;
		}
	} else if (src_node->type == MALI_PAGE_NODE_BLOCK) {
		/*
		 * use ioremap to map src for BLOCK memory
		 */
		src = ioremap(_mali_page_node_get_dma_addr(src_node), _MALI_OSK_MALI_PAGE_SIZE);
		memcpy(dst, src, _MALI_OSK_MALI_PAGE_SIZE);
		iounmap(src);
	}
	kunmap_atomic(dst);
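	/* Re-establish the DMA mapping of the destination page so the device sees the new contents. */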
	dma_addr = dma_map_page(&mali_platform_device->dev, dst_page,
				0, _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);

	if (dst_node->type == MALI_PAGE_NODE_SWAP) {
		dst_node->swap_it->dma_addr = dma_addr;
	}
}


/*
* Allocate a page on demand when the CPU accesses it.
* This is used in the page fault handler.
*/
_mali_osk_errcode_t mali_mem_cow_allocate_on_demand(mali_mem_backend *mem_bkend, u32 offset_page)
{
	struct page *new_page = NULL;
	struct mali_page_node *new_node = NULL;
	int i = 0;
	struct mali_page_node *m_page, *found_node = NULL;
	struct mali_session_data *session = NULL;
	mali_mem_cow *cow = &mem_bkend->cow_mem;
	MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
	MALI_DEBUG_ASSERT(offset_page < mem_bkend->size / _MALI_OSK_MALI_PAGE_SIZE);
	MALI_DEBUG_PRINT(4, ("mali_mem_cow_allocate_on_demand, offset_page = 0x%x\n", offset_page));

	/* allocate the new page here */
	new_page = mali_mem_cow_alloc_page();
	if (!new_page)
		return _MALI_OSK_ERR_NOMEM;

	new_node = _mali_page_node_allocate(MALI_PAGE_NODE_OS);
	if (!new_node) {
		__free_page(new_page);
		return _MALI_OSK_ERR_NOMEM;
	}

	/* find the page in the backend */
	list_for_each_entry(m_page, &cow->pages, list) {
		if (i == offset_page) {
			found_node = m_page;
			break;
		}
		i++;
	}
	MALI_DEBUG_ASSERT(found_node);
	if (NULL == found_node) {
		__free_page(new_page);
		kfree(new_node);
		return _MALI_OSK_ERR_ITEM_NOT_FOUND;
	}

	_mali_page_node_add_page(new_node, new_page);

	/* copy the src page's content to the new page */
	_mali_mem_cow_copy_page(found_node, new_node);

	MALI_DEBUG_ASSERT_POINTER(mem_bkend->mali_allocation);
	session = mem_bkend->mali_allocation->session;
	MALI_DEBUG_ASSERT_POINTER(session);
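	/* If the old node was shared (ref count > 1), this COW consumes a new
	 * physical page, so account for it against the session. */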
	if (1 != _mali_page_node_get_ref_count(found_node)) {
		atomic_add(1, &session->mali_mem_allocated_pages);
		if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
			session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
		}
		mem_bkend->cow_mem.change_pages_nr++;
	}

	_mali_osk_mutex_wait(session->cow_lock);
	if (_mali_mem_put_page_node(found_node)) {
		__free_page(new_page);
		kfree(new_node);
		_mali_osk_mutex_signal(session->cow_lock);
		return _MALI_OSK_ERR_NOMEM;
	}
	_mali_osk_mutex_signal(session->cow_lock);

	list_replace(&found_node->list, &new_node->list);

	kfree(found_node);

	/* map to the GPU side */
	_mali_osk_mutex_wait(session->memory_lock);
	mali_mem_cow_mali_map(mem_bkend, offset_page * _MALI_OSK_MALI_PAGE_SIZE, _MALI_OSK_MALI_PAGE_SIZE);
	_mali_osk_mutex_signal(session->memory_lock);
	return _MALI_OSK_ERR_OK;
}