/*
 * Copyright (C) 2013-2017 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/fs.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include "mali_osk.h"
#include "mali_osk_mali.h"
#include "mali_memory.h"
#include "mali_memory_manager.h"
#include "mali_memory_virtual.h"
#include "mali_memory_cow.h"
#include "mali_ukk.h"
#include "mali_kernel_utilization.h"
#include "mali_memory_swap_alloc.h"


static struct _mali_osk_bitmap idx_mgr;
static struct file *global_swap_file;
static struct address_space *global_swap_space;
static _mali_osk_wq_work_t *mali_mem_swap_out_workq = NULL;
static u32 mem_backend_swapped_pool_size;
#ifdef MALI_MEM_SWAP_TRACKING
static u32 mem_backend_swapped_unlock_size;
#endif
/* Lock order: mem_backend_swapped_pool_lock > each memory backend's mutex lock.
 * This lock is used to protect mem_backend_swapped_pool_size and mem_backend_swapped_pool. */
static struct mutex mem_backend_swapped_pool_lock;
static struct list_head mem_backend_swapped_pool;

extern struct mali_mem_os_allocator mali_mem_os_allocator;

#define MALI_SWAP_LOW_MEM_DEFAULT_VALUE (60*1024*1024)
#define MALI_SWAP_INVALIDATE_MALI_ADDRESS (0) /* Used to mark the given memory cookie as invalid. */
#define MALI_SWAP_GLOBAL_SWAP_FILE_SIZE (0xFFFFFFFF)
#define MALI_SWAP_GLOBAL_SWAP_FILE_INDEX \
	((MALI_SWAP_GLOBAL_SWAP_FILE_SIZE) >> PAGE_SHIFT)
#define MALI_SWAP_GLOBAL_SWAP_FILE_INDEX_RESERVE (1 << 15) /* Reserved for CoW nonlinear swap backend memory, the space size is 128MB. */

unsigned int mali_mem_swap_out_threshold_value = MALI_SWAP_LOW_MEM_DEFAULT_VALUE;

/**
 * There are two situations in which we shrink the swapped pool. One is low GPU
 * utilization, which indicates that the GPU will not touch the swappable
 * backends in the near future; the other is adding new swappable backends,
 * which can push the total pool size over the swapped pool threshold value.
 */
typedef enum {
	MALI_MEM_SWAP_SHRINK_WITH_LOW_UTILIZATION = 100,
	MALI_MEM_SWAP_SHRINK_FOR_ADDING_NEW_BACKENDS = 257,
} _mali_mem_swap_pool_shrink_type_t;

static void mali_mem_swap_swapped_bkend_pool_check_for_low_utilization(void *arg);

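/* Set up the global swap machinery: the swap-index bitmap, the backing shmem
 * file, the low-utilization shrink work item, and the GFP mask the shmem
 * mapping will use when it has to allocate new pages. */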
_mali_osk_errcode_t mali_mem_swap_init(void)
{
	gfp_t flags = __GFP_NORETRY | __GFP_NOWARN;

	if (_MALI_OSK_ERR_OK != _mali_osk_bitmap_init(&idx_mgr, MALI_SWAP_GLOBAL_SWAP_FILE_INDEX, MALI_SWAP_GLOBAL_SWAP_FILE_INDEX_RESERVE)) {
		return _MALI_OSK_ERR_NOMEM;
	}

	global_swap_file = shmem_file_setup("mali_swap", MALI_SWAP_GLOBAL_SWAP_FILE_SIZE, VM_NORESERVE);
	if (IS_ERR(global_swap_file)) {
		_mali_osk_bitmap_term(&idx_mgr);
		return _MALI_OSK_ERR_NOMEM;
	}

	global_swap_space = global_swap_file->f_path.dentry->d_inode->i_mapping;

	mali_mem_swap_out_workq = _mali_osk_wq_create_work(mali_mem_swap_swapped_bkend_pool_check_for_low_utilization, NULL);
	if (NULL == mali_mem_swap_out_workq) {
		_mali_osk_bitmap_term(&idx_mgr);
		fput(global_swap_file);
		return _MALI_OSK_ERR_NOMEM;
	}

#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_LPAE)
	flags |= GFP_HIGHUSER;
#else
#ifdef CONFIG_ZONE_DMA32
	flags |= GFP_DMA32;
#else
#ifdef CONFIG_ZONE_DMA
	flags |= GFP_DMA;
#else
	/* The arm64 Utgard GPU only works with memory below 4GB, but the
	 * kernel provides no method here to allocate memory below 4GB. */
	MALI_DEBUG_ASSERT(0);
#endif
#endif
#endif

	/* When we use shmem_read_mapping_page to allocate/swap-in, it will
	 * use these flags to allocate new pages if needed. */
	mapping_set_gfp_mask(global_swap_space, flags);

	mem_backend_swapped_pool_size = 0;
#ifdef MALI_MEM_SWAP_TRACKING
	mem_backend_swapped_unlock_size = 0;
#endif
	mutex_init(&mem_backend_swapped_pool_lock);
	INIT_LIST_HEAD(&mem_backend_swapped_pool);

	MALI_DEBUG_PRINT(2, ("Mali SWAP: Swap out threshold value is %uM\n", mali_mem_swap_out_threshold_value >> 20));

	return _MALI_OSK_ERR_OK;
}

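/* Tear down the swap machinery. The asserts below expect every swappable
 * backend to have been removed from the swapped pool before this is called. */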
void mali_mem_swap_term(void)
{
	_mali_osk_bitmap_term(&idx_mgr);

	fput(global_swap_file);

	_mali_osk_wq_delete_work(mali_mem_swap_out_workq);

	MALI_DEBUG_ASSERT(list_empty(&mem_backend_swapped_pool));
	MALI_DEBUG_ASSERT(0 == mem_backend_swapped_pool_size);

	return;
}

struct file *mali_mem_swap_get_global_swap_file(void)
{
	return global_swap_file;
}

/* Check whether the swappable backend is in the swapped pool. */
static mali_bool mali_memory_swap_backend_in_swapped_pool(mali_mem_backend *mem_bkend)
{
	MALI_DEBUG_ASSERT_POINTER(mem_bkend);

	return !list_empty(&mem_bkend->list);
}

void mali_memory_swap_list_backend_delete(mali_mem_backend *mem_bkend)
{
	MALI_DEBUG_ASSERT_POINTER(mem_bkend);

	mutex_lock(&mem_backend_swapped_pool_lock);
	mutex_lock(&mem_bkend->mutex);

	if (MALI_FALSE == mali_memory_swap_backend_in_swapped_pool(mem_bkend)) {
		mutex_unlock(&mem_bkend->mutex);
		mutex_unlock(&mem_backend_swapped_pool_lock);
		return;
	}

	MALI_DEBUG_ASSERT(!list_empty(&mem_bkend->list));

	list_del_init(&mem_bkend->list);

	mutex_unlock(&mem_bkend->mutex);

	mem_backend_swapped_pool_size -= mem_bkend->size;

	mutex_unlock(&mem_backend_swapped_pool_lock);
}

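/* "Swap out" one page: unmap it from the device, mark it dirty so the shmem
 * layer can write it to swap under memory pressure, and drop the reference
 * taken when the page was swapped in. */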
static void mali_mem_swap_out_page_node(mali_page_node *page_node)
{
	MALI_DEBUG_ASSERT(page_node);

	dma_unmap_page(&mali_platform_device->dev, page_node->swap_it->dma_addr,
		       _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
	set_page_dirty(page_node->swap_it->page);
	put_page(page_node->swap_it->page);
}

void mali_mem_swap_unlock_single_mem_backend(mali_mem_backend *mem_bkend)
{
	mali_page_node *m_page;

	MALI_DEBUG_ASSERT(1 == mutex_is_locked(&mem_bkend->mutex));

	if (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN == (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN)) {
		return;
	}

	mem_bkend->flags |= MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN;

	list_for_each_entry(m_page, &mem_bkend->swap_mem.pages, list) {
		mali_mem_swap_out_page_node(m_page);
	}

	return;
}

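/* Roll back a partially swapped-in backend: swap out every page preceding
 * page_node in the backend's page list (page_node itself failed to swap in). */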
static void mali_mem_swap_unlock_partial_locked_mem_backend(mali_mem_backend *mem_bkend, mali_page_node *page_node)
{
	mali_page_node *m_page;

	MALI_DEBUG_ASSERT(1 == mutex_is_locked(&mem_bkend->mutex));

	list_for_each_entry(m_page, &mem_bkend->swap_mem.pages, list) {
		if (m_page == page_node) {
			break;
		}
		mali_mem_swap_out_page_node(m_page);
	}
}

static void mali_mem_swap_swapped_bkend_pool_shrink(_mali_mem_swap_pool_shrink_type_t shrink_type)
{
	mali_mem_backend *bkend, *tmp_bkend;
	long system_free_size;
	u32 last_gpu_utilization, gpu_utilization_threshold_value, temp_swap_out_threshold_value;

	MALI_DEBUG_ASSERT(1 == mutex_is_locked(&mem_backend_swapped_pool_lock));

	if (MALI_MEM_SWAP_SHRINK_WITH_LOW_UTILIZATION == shrink_type) {
		/**
		 * When system memory is very low, the swappable memory locked by Mali
		 * exceeds a quarter of the threshold value, and at the same time GPU
		 * load is low so high performance is not needed, we can unlock more
		 * swap memory backends from the swapped backend pool.
		 */
		gpu_utilization_threshold_value = MALI_MEM_SWAP_SHRINK_WITH_LOW_UTILIZATION;
		temp_swap_out_threshold_value = (mali_mem_swap_out_threshold_value >> 2);
	} else {
		/* When we add swappable memory backends to the swapped pool, we cannot
		 * hold too many swappable backends in the Mali driver, but we also need
		 * to consider performance. So there is a balance for swapping out memory
		 * backends, and we follow these conditions:
		 * 1. The total memory size in the global backend swapped pool exceeds the defined threshold value.
		 * 2. The system-level free memory size is below the defined threshold value.
		 * 3. Note that GPU utilization is not considered in this case.
		 */
		gpu_utilization_threshold_value = MALI_MEM_SWAP_SHRINK_FOR_ADDING_NEW_BACKENDS;
		temp_swap_out_threshold_value = mali_mem_swap_out_threshold_value;
	}

	/* Get the amount of free system memory, in bytes. */
	system_free_size = global_zone_page_state(NR_FREE_PAGES) * PAGE_SIZE;
	last_gpu_utilization = _mali_ukk_utilization_gp_pp();

	if ((last_gpu_utilization < gpu_utilization_threshold_value)
	    && (system_free_size < mali_mem_swap_out_threshold_value)
	    && (mem_backend_swapped_pool_size > temp_swap_out_threshold_value)) {
		list_for_each_entry_safe(bkend, tmp_bkend, &mem_backend_swapped_pool, list) {
			if (mem_backend_swapped_pool_size <= temp_swap_out_threshold_value) {
				break;
			}

			mutex_lock(&bkend->mutex);

			/* check if backend is in use. */
			if (0 < bkend->using_count) {
				mutex_unlock(&bkend->mutex);
				continue;
			}

			mali_mem_swap_unlock_single_mem_backend(bkend);
			list_del_init(&bkend->list);
			mem_backend_swapped_pool_size -= bkend->size;
#ifdef MALI_MEM_SWAP_TRACKING
			mem_backend_swapped_unlock_size += bkend->size;
#endif
			mutex_unlock(&bkend->mutex);
		}
	}

	return;
}

static void mali_mem_swap_swapped_bkend_pool_check_for_low_utilization(void *arg)
{
	MALI_IGNORE(arg);

	mutex_lock(&mem_backend_swapped_pool_lock);

	mali_mem_swap_swapped_bkend_pool_shrink(MALI_MEM_SWAP_SHRINK_WITH_LOW_UTILIZATION);

	mutex_unlock(&mem_backend_swapped_pool_lock);
}

/**
 * After a PP job finishes, we add all swappable memory backends used by the
 * job to the tail of the global swapped pool, and if the total size of
 * swappable memory exceeds the threshold value, we also shrink the swapped
 * pool, starting from the head of the list.
 */
void mali_memory_swap_list_backend_add(mali_mem_backend *mem_bkend)
{
	mutex_lock(&mem_backend_swapped_pool_lock);
	mutex_lock(&mem_bkend->mutex);

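	/* Already in the pool: just move the backend to the tail, so the least
	 * recently used backends stay at the head where shrinking starts. */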
	if (mali_memory_swap_backend_in_swapped_pool(mem_bkend)) {
		MALI_DEBUG_ASSERT(!list_empty(&mem_bkend->list));

		list_del_init(&mem_bkend->list);
		list_add_tail(&mem_bkend->list, &mem_backend_swapped_pool);
		mutex_unlock(&mem_bkend->mutex);
		mutex_unlock(&mem_backend_swapped_pool_lock);
		return;
	}

	list_add_tail(&mem_bkend->list, &mem_backend_swapped_pool);

	mutex_unlock(&mem_bkend->mutex);
	mem_backend_swapped_pool_size += mem_bkend->size;

	mali_mem_swap_swapped_bkend_pool_shrink(MALI_MEM_SWAP_SHRINK_FOR_ADDING_NEW_BACKENDS);

	mutex_unlock(&mem_backend_swapped_pool_lock);
	return;
}


u32 mali_mem_swap_idx_alloc(void)
{
	return _mali_osk_bitmap_alloc(&idx_mgr);
}

void mali_mem_swap_idx_free(u32 idx)
{
	_mali_osk_bitmap_free(&idx_mgr, idx);
}

static u32 mali_mem_swap_idx_range_alloc(u32 count)
{
	u32 index;

	index = _mali_osk_bitmap_alloc_range(&idx_mgr, count);

	return index;
}

static void mali_mem_swap_idx_range_free(u32 idx, int num)
{
	_mali_osk_bitmap_free_range(&idx_mgr, idx, num);
}

struct mali_swap_item *mali_mem_swap_alloc_swap_item(void)
{
	mali_swap_item *swap_item;

	swap_item = kzalloc(sizeof(mali_swap_item), GFP_KERNEL);

	if (NULL == swap_item) {
		return NULL;
	}

	atomic_set(&swap_item->ref_count, 1);
	swap_item->page = NULL;
	atomic_add(1, &mali_mem_os_allocator.allocated_pages);

	return swap_item;
}

void mali_mem_swap_free_swap_item(mali_swap_item *swap_item)
{
	struct inode *file_node;
	long long start, end;

	/* If this swap item is shared, we just reduce the reference counter. */
	if (0 == atomic_dec_return(&swap_item->ref_count)) {
		file_node = global_swap_file->f_path.dentry->d_inode;
		start = swap_item->idx;
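		/* idx << 12 converts the swap index into a byte offset in the
		 * global shmem file; the shift assumes 4KB (PAGE_SHIFT) pages. */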
		start = start << 12;
		end = start + PAGE_SIZE;

		shmem_truncate_range(file_node, start, (end - 1));

		mali_mem_swap_idx_free(swap_item->idx);

		atomic_sub(1, &mali_mem_os_allocator.allocated_pages);

		kfree(swap_item);
	}
}

/* Used to allocate a new swap item for new memory allocations and CoW pages for write. */
struct mali_page_node *_mali_mem_swap_page_node_allocate(void)
{
	struct mali_page_node *m_page;

	m_page = _mali_page_node_allocate(MALI_PAGE_NODE_SWAP);

	if (NULL == m_page) {
		return NULL;
	}

	m_page->swap_it = mali_mem_swap_alloc_swap_item();

	if (NULL == m_page->swap_it) {
		kfree(m_page);
		return NULL;
	}

	return m_page;
}

_mali_osk_errcode_t _mali_mem_swap_put_page_node(struct mali_page_node *m_page)
{
	mali_mem_swap_free_swap_item(m_page->swap_it);

	return _MALI_OSK_ERR_OK;
}

void _mali_mem_swap_page_node_free(struct mali_page_node *m_page)
{
	_mali_mem_swap_put_page_node(m_page);

	kfree(m_page);

	return;
}

u32 mali_mem_swap_free(mali_mem_swap *swap_mem)
{
	struct mali_page_node *m_page, *m_tmp;
	u32 free_pages_nr = 0;

	MALI_DEBUG_ASSERT_POINTER(swap_mem);

	list_for_each_entry_safe(m_page, m_tmp, &swap_mem->pages, list) {
		MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_SWAP);

		/* Free the page node and drop the swap item reference; if the
		 * ref count was 1, the swap item itself is freed as well. */
		list_del(&m_page->list);
		if (1 == _mali_page_node_get_ref_count(m_page)) {
			free_pages_nr++;
		}

		_mali_mem_swap_page_node_free(m_page);
	}

	return free_pages_nr;
}

static u32 mali_mem_swap_cow_free(mali_mem_cow *cow_mem)
{
	struct mali_page_node *m_page, *m_tmp;
	u32 free_pages_nr = 0;

	MALI_DEBUG_ASSERT_POINTER(cow_mem);

	list_for_each_entry_safe(m_page, m_tmp, &cow_mem->pages, list) {
		MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_SWAP);

		/* Free the page node and drop the swap item reference; if the
		 * ref count was 1, the swap item itself is freed as well. */
		list_del(&m_page->list);
		if (1 == _mali_page_node_get_ref_count(m_page)) {
			free_pages_nr++;
		}

		_mali_mem_swap_page_node_free(m_page);
	}

	return free_pages_nr;
}

u32 mali_mem_swap_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped)
{
	mali_mem_allocation *alloc;
	u32 free_pages_nr = 0;

	MALI_DEBUG_ASSERT_POINTER(mem_bkend);
	alloc = mem_bkend->mali_allocation;
	MALI_DEBUG_ASSERT_POINTER(alloc);

	if (is_mali_mapped) {
		mali_mem_swap_mali_unmap(alloc);
	}

	mali_memory_swap_list_backend_delete(mem_bkend);

	mutex_lock(&mem_bkend->mutex);
	/* Make sure the given memory backend is unlocked from the Mali side
	 * before freeing this memory block. */
	mali_mem_swap_unlock_single_mem_backend(mem_bkend);
	mutex_unlock(&mem_bkend->mutex);

	if (MALI_MEM_SWAP == mem_bkend->type) {
		free_pages_nr = mali_mem_swap_free(&mem_bkend->swap_mem);
	} else {
		free_pages_nr = mali_mem_swap_cow_free(&mem_bkend->cow_mem);
	}

	return free_pages_nr;
}

mali_bool mali_mem_swap_in_page_node(struct mali_page_node *page_node)
{
	MALI_DEBUG_ASSERT(NULL != page_node);

	page_node->swap_it->page = shmem_read_mapping_page(global_swap_space, page_node->swap_it->idx);

	if (IS_ERR(page_node->swap_it->page)) {
		MALI_DEBUG_PRINT_ERROR(("SWAP Mem: failed to swap in page with index: %d.\n", page_node->swap_it->idx));
		return MALI_FALSE;
	}

	/* Ensure page is flushed from CPU caches. */
	page_node->swap_it->dma_addr = dma_map_page(&mali_platform_device->dev, page_node->swap_it->page,
						    0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);

	return MALI_TRUE;
}

int mali_mem_swap_alloc_pages(mali_mem_swap *swap_mem, u32 size, u32 *bkend_idx)
{
	size_t page_count = PAGE_ALIGN(size) / PAGE_SIZE;
	struct mali_page_node *m_page;
	long system_free_size;
	u32 i, index;
	mali_bool ret;

	MALI_DEBUG_ASSERT(NULL != swap_mem);
	MALI_DEBUG_ASSERT(NULL != bkend_idx);
	MALI_DEBUG_ASSERT(page_count <= MALI_SWAP_GLOBAL_SWAP_FILE_INDEX_RESERVE);

	if (atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE + size > mali_mem_os_allocator.allocation_limit) {
		MALI_DEBUG_PRINT(2, ("Mali Mem: Unable to allocate %u bytes. Currently allocated: %lu, max limit %lu\n",
				     size,
				     atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE,
				     mali_mem_os_allocator.allocation_limit));
		return _MALI_OSK_ERR_NOMEM;
	}

	INIT_LIST_HEAD(&swap_mem->pages);
	swap_mem->count = page_count;
	index = mali_mem_swap_idx_range_alloc(page_count);

	if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == index) {
		MALI_PRINT_ERROR(("Mali Swap: Failed to allocate continuous index for swappable Mali memory."));
		return _MALI_OSK_ERR_FAULT;
	}

	for (i = 0; i < page_count; i++) {
		m_page = _mali_mem_swap_page_node_allocate();

		if (NULL == m_page) {
			MALI_DEBUG_PRINT_ERROR(("SWAP Mem: Failed to allocate mali page node."));
			swap_mem->count = i;

			mali_mem_swap_free(swap_mem);
			mali_mem_swap_idx_range_free(index + i, page_count - i);
			return _MALI_OSK_ERR_FAULT;
		}

		m_page->swap_it->idx = index + i;

		ret = mali_mem_swap_in_page_node(m_page);

		if (MALI_FALSE == ret) {
			MALI_DEBUG_PRINT_ERROR(("SWAP Mem: Allocate new page from SHMEM file failed."));
			_mali_mem_swap_page_node_free(m_page);
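			/* Indices index..index+i are released through the page nodes
			 * themselves (the failed node above, the rest via
			 * mali_mem_swap_free() below); only the untouched tail of the
			 * range is freed here. */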
			mali_mem_swap_idx_range_free(index + i + 1, page_count - i - 1);

			swap_mem->count = i;
			mali_mem_swap_free(swap_mem);
			return _MALI_OSK_ERR_NOMEM;
		}

		list_add_tail(&m_page->list, &swap_mem->pages);
	}

	system_free_size = global_zone_page_state(NR_FREE_PAGES) * PAGE_SIZE;

	if ((system_free_size < mali_mem_swap_out_threshold_value)
	    && (mem_backend_swapped_pool_size > (mali_mem_swap_out_threshold_value >> 2))
	    && mali_utilization_enabled()) {
		_mali_osk_wq_schedule_work(mali_mem_swap_out_workq);
	}

	*bkend_idx = index;
	return 0;
}

void mali_mem_swap_mali_unmap(mali_mem_allocation *alloc)
{
	struct mali_session_data *session;

	MALI_DEBUG_ASSERT_POINTER(alloc);
	session = alloc->session;
	MALI_DEBUG_ASSERT_POINTER(session);

	mali_session_memory_lock(session);
	mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
			       alloc->flags);
	mali_session_memory_unlock(session);
}


/* Insert these pages from shmem into the Mali page table. */
_mali_osk_errcode_t mali_mem_swap_mali_map(mali_mem_swap *swap_mem, struct mali_session_data *session, u32 vaddr, u32 props)
{
	struct mali_page_directory *pagedir = session->page_directory;
	struct mali_page_node *m_page;
	dma_addr_t phys;
	u32 virt = vaddr;
	u32 prop = props;

	list_for_each_entry(m_page, &swap_mem->pages, list) {
		MALI_DEBUG_ASSERT(NULL != m_page->swap_it->page);
		phys = m_page->swap_it->dma_addr;

		mali_mmu_pagedir_update(pagedir, virt, phys, MALI_MMU_PAGE_SIZE, prop);
		virt += MALI_MMU_PAGE_SIZE;
	}

	return _MALI_OSK_ERR_OK;
}

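/* Swap in every swappable backend referenced by the PP job's memory cookies
 * and map its pages into the GPU page table. Each backend's using_count is
 * raised so a concurrent shrink cannot unlock it while the job runs; the
 * matching decrement happens in mali_mem_swap_out_pages(). */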
int mali_mem_swap_in_pages(struct mali_pp_job *job)
{
	u32 num_memory_cookies;
	struct mali_session_data *session;
	struct mali_vma_node *mali_vma_node = NULL;
	mali_mem_allocation *mali_alloc = NULL;
	mali_mem_backend *mem_bkend = NULL;
	struct mali_page_node *m_page;
	mali_bool swap_in_success = MALI_TRUE;
	int i;

	MALI_DEBUG_ASSERT_POINTER(job);

	num_memory_cookies = mali_pp_job_num_memory_cookies(job);
	session = mali_pp_job_get_session(job);

	MALI_DEBUG_ASSERT_POINTER(session);

	for (i = 0; i < num_memory_cookies; i++) {

		u32 mali_addr = mali_pp_job_get_memory_cookie(job, i);

		mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
		if (NULL == mali_vma_node) {
			job->memory_cookies[i] = MALI_SWAP_INVALIDATE_MALI_ADDRESS;
			swap_in_success = MALI_FALSE;
			MALI_PRINT_ERROR(("SWAP Mem: failed to find mali_vma_node through Mali address: 0x%08x.\n", mali_addr));
			continue;
		}

		mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
		MALI_DEBUG_ASSERT(NULL != mali_alloc);

		if (MALI_MEM_SWAP != mali_alloc->type &&
		    MALI_MEM_COW != mali_alloc->type) {
			continue;
		}

		/* Get backend memory & Map on GPU */
		mutex_lock(&mali_idr_mutex);
		mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
		mutex_unlock(&mali_idr_mutex);
		MALI_DEBUG_ASSERT(NULL != mem_bkend);

		/* We need not hold the backend's lock here; this check is race safe. */
		if ((MALI_MEM_COW == mem_bkend->type) &&
		    (!(mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) {
			continue;
		}

		mutex_lock(&mem_bkend->mutex);

		/* When swap_in_success is MALI_FALSE, this job has a memory backend that
		 * could not be swapped in and will be aborted by the Mali scheduler, so
		 * here we just mark as invalid those memory cookies which should not be
		 * swapped out when the job is deleted. */
		if (MALI_FALSE == swap_in_success) {
			job->memory_cookies[i] = MALI_SWAP_INVALIDATE_MALI_ADDRESS;
			mutex_unlock(&mem_bkend->mutex);
			continue;
		}

		/* Before swapping in, check whether this memory backend has already been swapped in by recently flushed jobs. */
		++mem_bkend->using_count;

		if (1 < mem_bkend->using_count) {
			MALI_DEBUG_ASSERT(MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN != (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN & mem_bkend->flags));
			mutex_unlock(&mem_bkend->mutex);
			continue;
		}

		if (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN != (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN & mem_bkend->flags)) {
			mutex_unlock(&mem_bkend->mutex);
			continue;
		}

		list_for_each_entry(m_page, &mem_bkend->swap_mem.pages, list) {
			if (MALI_FALSE == mali_mem_swap_in_page_node(m_page)) {
				/* Not enough memory to swap in the page; release the pages
				 * that have already been swapped in and mark this PP job as failed. */
				mali_mem_swap_unlock_partial_locked_mem_backend(mem_bkend, m_page);
				swap_in_success = MALI_FALSE;
				break;
			}
		}

		if (swap_in_success) {
#ifdef MALI_MEM_SWAP_TRACKING
			mem_backend_swapped_unlock_size -= mem_bkend->size;
#endif
			_mali_osk_mutex_wait(session->memory_lock);
			mali_mem_swap_mali_map(&mem_bkend->swap_mem, session, mali_alloc->mali_mapping.addr, mali_alloc->mali_mapping.properties);
			_mali_osk_mutex_signal(session->memory_lock);

			/* Clear the unlocked flag in the backend flags, marking this backend as swapped in. */
			mem_bkend->flags &= ~(MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN);
			mutex_unlock(&mem_bkend->mutex);
		} else {
			--mem_bkend->using_count;
			/* Mark that this backend is not swapped in and need not be processed anymore. */
			job->memory_cookies[i] = MALI_SWAP_INVALIDATE_MALI_ADDRESS;
			mutex_unlock(&mem_bkend->mutex);
		}
	}

	job->swap_status = swap_in_success ? MALI_SWAP_IN_SUCC : MALI_SWAP_IN_FAIL;

	return _MALI_OSK_ERR_OK;
}

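/* Counterpart of mali_mem_swap_in_pages(), called when a PP job is done or
 * deleted: drop each backend's using_count and, once a backend is idle, add
 * it to the global swapped pool as a candidate for swap-out. */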
int mali_mem_swap_out_pages(struct mali_pp_job *job)
{
	u32 num_memory_cookies;
	struct mali_session_data *session;
	struct mali_vma_node *mali_vma_node = NULL;
	mali_mem_allocation *mali_alloc = NULL;
	mali_mem_backend *mem_bkend = NULL;
	int i;

	MALI_DEBUG_ASSERT_POINTER(job);

	num_memory_cookies = mali_pp_job_num_memory_cookies(job);
	session = mali_pp_job_get_session(job);

	MALI_DEBUG_ASSERT_POINTER(session);

	for (i = 0; i < num_memory_cookies; i++) {
		u32 mali_addr = mali_pp_job_get_memory_cookie(job, i);

		if (MALI_SWAP_INVALIDATE_MALI_ADDRESS == mali_addr) {
			continue;
		}

		mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);

		if (NULL == mali_vma_node) {
			MALI_PRINT_ERROR(("SWAP Mem: failed to find mali_vma_node through Mali address: 0x%08x.\n", mali_addr));
			continue;
		}

		mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
		MALI_DEBUG_ASSERT(NULL != mali_alloc);

		if (MALI_MEM_SWAP != mali_alloc->type &&
		    MALI_MEM_COW != mali_alloc->type) {
			continue;
		}

		mutex_lock(&mali_idr_mutex);
		mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
		mutex_unlock(&mali_idr_mutex);
		MALI_DEBUG_ASSERT(NULL != mem_bkend);

		/* We need not hold the backend's lock here; this check is race safe. */
		if ((MALI_MEM_COW == mem_bkend->type) &&
		    (!(mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) {
			continue;
		}

		mutex_lock(&mem_bkend->mutex);

		MALI_DEBUG_ASSERT(0 < mem_bkend->using_count);

		/* Decrementing using_count means fewer PP jobs are using this memory
		 * backend; when it reaches zero, no PP job is using it anymore and it
		 * can be put on the swap-out list. */
		--mem_bkend->using_count;

		if (0 < mem_bkend->using_count) {
			mutex_unlock(&mem_bkend->mutex);
			continue;
		}
		mutex_unlock(&mem_bkend->mutex);

		mali_memory_swap_list_backend_add(mem_bkend);
	}

	return _MALI_OSK_ERR_OK;
}

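/* Presumably called from the CPU page-fault handler for a swappable
 * allocation mapped into user space: find the page node at the given page
 * offset, read the page back from the shmem swap file, and return it locked
 * via *pagep. */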
int mali_mem_swap_allocate_page_on_demand(mali_mem_backend *mem_bkend, u32 offset, struct page **pagep)
{
	struct mali_page_node *m_page, *found_node = NULL;
	struct page *found_page;
	mali_mem_swap *swap = NULL;
	mali_mem_cow *cow = NULL;
	dma_addr_t dma_addr;
	u32 i = 0;

	if (MALI_MEM_SWAP == mem_bkend->type) {
		swap = &mem_bkend->swap_mem;
		list_for_each_entry(m_page, &swap->pages, list) {
			if (i == offset) {
				found_node = m_page;
				break;
			}
			i++;
		}
	} else {
		MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
		MALI_DEBUG_ASSERT(MALI_MEM_BACKEND_FLAG_SWAP_COWED == (MALI_MEM_BACKEND_FLAG_SWAP_COWED & mem_bkend->flags));

		cow = &mem_bkend->cow_mem;
		list_for_each_entry(m_page, &cow->pages, list) {
			if (i == offset) {
				found_node = m_page;
				break;
			}
			i++;
		}
	}

	if (NULL == found_node) {
		return _MALI_OSK_ERR_FAULT;
	}

	found_page = shmem_read_mapping_page(global_swap_space, found_node->swap_it->idx);

	if (!IS_ERR(found_page)) {
		lock_page(found_page);
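		/* Mapping the page for DMA and immediately unmapping it appears to
		 * serve only to flush the page out of the CPU caches; the DMA
		 * address itself is not kept. */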
		dma_addr = dma_map_page(&mali_platform_device->dev, found_page,
					0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
		dma_unmap_page(&mali_platform_device->dev, dma_addr,
			       _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);

		*pagep = found_page;
	} else {
		return _MALI_OSK_ERR_NOMEM;
	}

	return _MALI_OSK_ERR_OK;
}

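/* CPU write-fault path for a swap-backed CoW page: allocate a fresh swap item
 * and page, copy the original page's contents into it, replace the old node in
 * the backend's page list, and return the new page locked to the fault handler. */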
int mali_mem_swap_cow_page_on_demand(mali_mem_backend *mem_bkend, u32 offset, struct page **pagep)
{
	struct mali_page_node *m_page, *found_node = NULL, *new_node = NULL;
	mali_mem_cow *cow = NULL;
	u32 i = 0;

	MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
	MALI_DEBUG_ASSERT(MALI_MEM_BACKEND_FLAG_SWAP_COWED == (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED));
	MALI_DEBUG_ASSERT(MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN == (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN & mem_bkend->flags));
	MALI_DEBUG_ASSERT(!mali_memory_swap_backend_in_swapped_pool(mem_bkend));

	cow = &mem_bkend->cow_mem;
	list_for_each_entry(m_page, &cow->pages, list) {
		if (i == offset) {
			found_node = m_page;
			break;
		}
		i++;
	}

	if (NULL == found_node) {
		return _MALI_OSK_ERR_FAULT;
	}

	new_node = _mali_mem_swap_page_node_allocate();

	if (NULL == new_node) {
		return _MALI_OSK_ERR_FAULT;
	}

	new_node->swap_it->idx = mali_mem_swap_idx_alloc();

	if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == new_node->swap_it->idx) {
		MALI_DEBUG_PRINT(1, ("Failed to allocate swap index in swap CoW on demand.\n"));
		kfree(new_node->swap_it);
		kfree(new_node);
		return _MALI_OSK_ERR_FAULT;
	}

	if (MALI_FALSE == mali_mem_swap_in_page_node(new_node)) {
		_mali_mem_swap_page_node_free(new_node);
		return _MALI_OSK_ERR_FAULT;
	}

	/* Swap in the found node so its contents can be copied in kernel space. */
	if (MALI_FALSE == mali_mem_swap_in_page_node(found_node)) {
		mali_mem_swap_out_page_node(new_node);
		_mali_mem_swap_page_node_free(new_node);
		return _MALI_OSK_ERR_FAULT;
	}

	_mali_mem_cow_copy_page(found_node, new_node);

	list_replace(&found_node->list, &new_node->list);

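	/* If the old node was shared (ref count > 1), this CoW broke the sharing
	 * and the session now owns one more physical page, so update the session's
	 * page accounting and high-water mark. */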
	if (1 != _mali_page_node_get_ref_count(found_node)) {
		atomic_add(1, &mem_bkend->mali_allocation->session->mali_mem_allocated_pages);
		if (atomic_read(&mem_bkend->mali_allocation->session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > mem_bkend->mali_allocation->session->max_mali_mem_allocated_size) {
			mem_bkend->mali_allocation->session->max_mali_mem_allocated_size = atomic_read(&mem_bkend->mali_allocation->session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
		}
		mem_bkend->cow_mem.change_pages_nr++;
	}

	mali_mem_swap_out_page_node(found_node);
	_mali_mem_swap_page_node_free(found_node);

	/* dma_map_page was called for this page when the new page node was swapped
	 * in; undo that mapping before handing the page to the CPU. */
	dma_unmap_page(&mali_platform_device->dev, new_node->swap_it->dma_addr,
		       _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);

	lock_page(new_node->swap_it->page);

	*pagep = new_node->swap_it->page;

	return _MALI_OSK_ERR_OK;
}

#ifdef MALI_MEM_SWAP_TRACKING
void mali_mem_swap_tracking(u32 *swap_pool_size, u32 *unlock_size)
{
	*swap_pool_size = mem_backend_swapped_pool_size;
	*unlock_size = mem_backend_swapped_unlock_size;
}
#endif