// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2022-2023 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/**
 * DOC: Base kernel page migration implementation.
 */
#include <linux/migrate.h>

#include <mali_kbase.h>
#include <mali_kbase_mem_migrate.h>
#include <mmu/mali_kbase_mmu.h>

/* Global integer used to determine if module parameter value has been
 * provided and if page migration feature is enabled.
 * Feature is disabled on all platforms by default.
 */
int kbase_page_migration_enabled;
module_param(kbase_page_migration_enabled, int, 0444);
KBASE_EXPORT_TEST_API(kbase_page_migration_enabled);

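/* Forward declaration of the movable_operations registered with the kernel at
 * the bottom of this file, so that kbase_alloc_page_metadata() can pass them
 * to __SetPageMovable() on kernels 6.0 and newer.
 */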
#if (KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE)
static const struct movable_operations movable_ops;
#endif

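/* Allocate migration metadata for @p, attach it through the page private field
 * and, where possible, mark the page as movable.
 */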
bool kbase_alloc_page_metadata(struct kbase_device *kbdev, struct page *p, dma_addr_t dma_addr,
                               u8 group_id)
{
        struct kbase_page_metadata *page_md =
                kzalloc(sizeof(struct kbase_page_metadata), GFP_KERNEL);

        if (!page_md)
                return false;

        SetPagePrivate(p);
        set_page_private(p, (unsigned long)page_md);
        page_md->dma_addr = dma_addr;
        page_md->status = PAGE_STATUS_SET(page_md->status, (u8)ALLOCATE_IN_PROGRESS);
        page_md->vmap_count = 0;
        page_md->group_id = group_id;
        spin_lock_init(&page_md->migrate_lock);

        lock_page(p);
#if (KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE)
        __SetPageMovable(p, &movable_ops);
        page_md->status = PAGE_MOVABLE_SET(page_md->status);
#else
        /* In some corner cases, the driver may attempt to allocate memory pages
         * even before the device file is open and the mapping for address space
         * operations is created. In that case, it is impossible to assign address
         * space operations to memory pages: simply pretend that they are movable,
         * even if they are not.
         *
         * The page will go through all state transitions but it will never be
         * actually considered movable by the kernel. This is due to the fact that
         * the page cannot be marked as NOT_MOVABLE upon creation, otherwise the
         * memory pool will always refuse to add it to the pool and schedule
         * a worker thread to free it later.
         *
         * Page metadata may seem redundant in this case, but it is not, because
         * memory pools expect metadata to be present when page migration is
         * enabled and because the pages may always return to memory pools and
         * gain the movable property later on in their life cycle.
         */
        if (kbdev->mem_migrate.inode && kbdev->mem_migrate.inode->i_mapping) {
                __SetPageMovable(p, kbdev->mem_migrate.inode->i_mapping);
                page_md->status = PAGE_MOVABLE_SET(page_md->status);
        }
#endif
        unlock_page(p);

        return true;
}

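/* Release the migration metadata of @p: unmap its DMA address, optionally
 * report the memory group through @group_id, then detach and free the metadata.
 */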
static void kbase_free_page_metadata(struct kbase_device *kbdev, struct page *p, u8 *group_id)
{
        struct device *const dev = kbdev->dev;
        struct kbase_page_metadata *page_md;
        dma_addr_t dma_addr;

        page_md = kbase_page_private(p);
        if (!page_md)
                return;

        if (group_id)
                *group_id = page_md->group_id;
        dma_addr = kbase_dma_addr(p);
        dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

        kfree(page_md);
        set_page_private(p, 0);
        ClearPagePrivate(p);
}

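/* Worker draining the deferred free list: every queued page loses its movable
 * property and metadata before being handed back to the memory group manager.
 */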
static void kbase_free_pages_worker(struct work_struct *work)
{
        struct kbase_mem_migrate *mem_migrate =
                container_of(work, struct kbase_mem_migrate, free_pages_work);
        struct kbase_device *kbdev = container_of(mem_migrate, struct kbase_device, mem_migrate);
        struct page *p, *tmp;
        struct kbase_page_metadata *page_md;
        LIST_HEAD(free_list);

        spin_lock(&mem_migrate->free_pages_lock);
        list_splice_init(&mem_migrate->free_pages_list, &free_list);
        spin_unlock(&mem_migrate->free_pages_lock);

        list_for_each_entry_safe(p, tmp, &free_list, lru) {
                u8 group_id = 0;
                list_del_init(&p->lru);

                lock_page(p);
                page_md = kbase_page_private(p);
                if (IS_PAGE_MOVABLE(page_md->status)) {
                        __ClearPageMovable(p);
                        page_md->status = PAGE_MOVABLE_CLEAR(page_md->status);
                }
                unlock_page(p);

                kbase_free_page_metadata(kbdev, p, &group_id);
                kbdev->mgm_dev->ops.mgm_free_page(kbdev->mgm_dev, group_id, p, 0);
        }
}

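/* Add @p to the deferred free list. Callers queue mem_migrate->free_pages_work
 * so that kbase_free_pages_worker() eventually releases the page.
 */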
void kbase_free_page_later(struct kbase_device *kbdev, struct page *p)
{
        struct kbase_mem_migrate *mem_migrate = &kbdev->mem_migrate;

        spin_lock(&mem_migrate->free_pages_lock);
        list_add(&p->lru, &mem_migrate->free_pages_list);
        spin_unlock(&mem_migrate->free_pages_lock);
}

/**
 * kbasep_migrate_page_pt_mapped - Migrate a memory page that is mapped
 *                                 in a PGD of kbase_mmu_table.
 *
 * @old_page: Existing PGD page to remove.
 * @new_page: Destination for migrating the existing PGD page to.
 *
 * Replace an existing PGD page with a new page by migrating its content. More specifically:
 * the new page shall replace the existing PGD page in the MMU page table. Before returning,
 * the new page shall be set as movable and not isolated, while the old page shall lose
 * the movable property. The metadata attached to the PGD page is transferred to the
 * new (replacement) page.
 *
 * Return: 0 on migration success, or -EAGAIN for a later retry. Otherwise it's a failure
 * and the migration is aborted.
 */
static int kbasep_migrate_page_pt_mapped(struct page *old_page, struct page *new_page)
{
        struct kbase_page_metadata *page_md = kbase_page_private(old_page);
        struct kbase_context *kctx = page_md->data.pt_mapped.mmut->kctx;
        struct kbase_device *kbdev = kctx->kbdev;
        dma_addr_t old_dma_addr = page_md->dma_addr;
        dma_addr_t new_dma_addr;
        int ret;

        /* Create a new dma map for the new page */
        new_dma_addr = dma_map_page(kbdev->dev, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(kbdev->dev, new_dma_addr))
                return -ENOMEM;

        /* Lock context to protect access to the page in physical allocation.
         * This blocks the CPU page fault handler from remapping pages.
         * Only MCU's mmut is device wide, i.e. no corresponding kctx.
         */
        kbase_gpu_vm_lock(kctx);

        ret = kbase_mmu_migrate_page(
                as_tagged(page_to_phys(old_page)), as_tagged(page_to_phys(new_page)), old_dma_addr,
                new_dma_addr, PGD_VPFN_LEVEL_GET_LEVEL(page_md->data.pt_mapped.pgd_vpfn_level));

        if (ret == 0) {
                dma_unmap_page(kbdev->dev, old_dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
                __ClearPageMovable(old_page);
                page_md->status = PAGE_MOVABLE_CLEAR(page_md->status);
                ClearPagePrivate(old_page);
                put_page(old_page);

                page_md = kbase_page_private(new_page);
#if (KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE)
                __SetPageMovable(new_page, &movable_ops);
                page_md->status = PAGE_MOVABLE_SET(page_md->status);
#else
                if (kbdev->mem_migrate.inode->i_mapping) {
                        __SetPageMovable(new_page, kbdev->mem_migrate.inode->i_mapping);
                        page_md->status = PAGE_MOVABLE_SET(page_md->status);
                }
#endif
                SetPagePrivate(new_page);
                get_page(new_page);
        } else
                dma_unmap_page(kbdev->dev, new_dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

        /* Page fault handler for CPU mapping unblocked. */
        kbase_gpu_vm_unlock(kctx);

        return ret;
}

/**
 * kbasep_migrate_page_allocated_mapped - Migrate a memory page that is both
 *                                        allocated and mapped.
 *
 * @old_page: Page to remove.
 * @new_page: Page to add.
 *
 * Replace an old page with a new page by migrating its content and all its
 * CPU and GPU mappings. More specifically: the new page shall replace the
 * old page in the MMU page table, as well as in the page array of the physical
 * allocation, which is used to create CPU mappings. Before returning, the new
 * page shall be set as movable and not isolated, while the old page shall lose
 * the movable property.
 *
 * Return: 0 on migration success, or an error code otherwise.
 */
static int kbasep_migrate_page_allocated_mapped(struct page *old_page, struct page *new_page)
{
        struct kbase_page_metadata *page_md = kbase_page_private(old_page);
        struct kbase_context *kctx = page_md->data.mapped.mmut->kctx;
        dma_addr_t old_dma_addr, new_dma_addr;
        int ret;

        old_dma_addr = page_md->dma_addr;
        new_dma_addr = dma_map_page(kctx->kbdev->dev, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(kctx->kbdev->dev, new_dma_addr))
                return -ENOMEM;

        /* Lock context to protect access to array of pages in physical allocation.
         * This blocks the CPU page fault handler from remapping pages.
         */
        kbase_gpu_vm_lock(kctx);

        /* Unmap the old physical range. */
        unmap_mapping_range(kctx->filp->f_inode->i_mapping, page_md->data.mapped.vpfn << PAGE_SHIFT,
                            PAGE_SIZE, 1);

        ret = kbase_mmu_migrate_page(as_tagged(page_to_phys(old_page)),
                                     as_tagged(page_to_phys(new_page)), old_dma_addr, new_dma_addr,
                                     MIDGARD_MMU_BOTTOMLEVEL);

        if (ret == 0) {
                dma_unmap_page(kctx->kbdev->dev, old_dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

                SetPagePrivate(new_page);
                get_page(new_page);

                /* Clear PG_movable from the old page and release reference. */
                ClearPagePrivate(old_page);
                __ClearPageMovable(old_page);
                page_md->status = PAGE_MOVABLE_CLEAR(page_md->status);
                put_page(old_page);

                page_md = kbase_page_private(new_page);
                /* Set PG_movable to the new page. */
#if (KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE)
                __SetPageMovable(new_page, &movable_ops);
                page_md->status = PAGE_MOVABLE_SET(page_md->status);
#else
                if (kctx->kbdev->mem_migrate.inode->i_mapping) {
                        __SetPageMovable(new_page, kctx->kbdev->mem_migrate.inode->i_mapping);
                        page_md->status = PAGE_MOVABLE_SET(page_md->status);
                }
#endif
        } else
                dma_unmap_page(kctx->kbdev->dev, new_dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

        /* Page fault handler for CPU mapping unblocked. */
        kbase_gpu_vm_unlock(kctx);

        return ret;
}

/**
 * kbase_page_isolate - Isolate a page for migration.
 *
 * @p:    Pointer to the page struct of the page to isolate.
 * @mode: LRU isolation modes.
 *
 * Callback function for Linux to isolate a page and prepare it for migration.
 *
 * Return: true on success, false otherwise.
 */
static bool kbase_page_isolate(struct page *p, isolate_mode_t mode)
{
        bool status_mem_pool = false;
        struct kbase_mem_pool *mem_pool = NULL;
        struct kbase_page_metadata *page_md = kbase_page_private(p);

        CSTD_UNUSED(mode);

        if (!page_md || !IS_PAGE_MOVABLE(page_md->status))
                return false;

        if (!spin_trylock(&page_md->migrate_lock))
                return false;

        if (WARN_ON(IS_PAGE_ISOLATED(page_md->status))) {
                spin_unlock(&page_md->migrate_lock);
                return false;
        }

        switch (PAGE_STATUS_GET(page_md->status)) {
        case MEM_POOL:
                /* Prepare to remove page from memory pool later only if pool is not
                 * in the process of termination.
                 */
                mem_pool = page_md->data.mem_pool.pool;
                status_mem_pool = true;
                preempt_disable();
                atomic_inc(&mem_pool->isolation_in_progress_cnt);
                break;
        case ALLOCATED_MAPPED:
                /* Mark the page into isolated state, but only if it has no
                 * kernel CPU mappings.
                 */
                if (page_md->vmap_count == 0)
                        page_md->status = PAGE_ISOLATE_SET(page_md->status, 1);
                break;
        case PT_MAPPED:
                /* Mark the page into isolated state. */
                page_md->status = PAGE_ISOLATE_SET(page_md->status, 1);
                break;
        case SPILL_IN_PROGRESS:
        case ALLOCATE_IN_PROGRESS:
        case FREE_IN_PROGRESS:
                break;
        case NOT_MOVABLE:
                /* Opportunistically clear the movable property for these pages. */
                __ClearPageMovable(p);
                page_md->status = PAGE_MOVABLE_CLEAR(page_md->status);
                break;
        default:
                /* State should always fall in one of the previous cases!
                 * Also notice that FREE_ISOLATED_IN_PROGRESS or
                 * FREE_PT_ISOLATED_IN_PROGRESS is impossible because
                 * that state only applies to pages that are already isolated.
                 */
                page_md->status = PAGE_ISOLATE_SET(page_md->status, 0);
                break;
        }

        spin_unlock(&page_md->migrate_lock);

        /* If the page is still in the memory pool: try to remove it. This will
         * fail if the pool lock is already held, which could mean the page no
         * longer exists in the pool.
         */
        if (status_mem_pool) {
                if (!spin_trylock(&mem_pool->pool_lock)) {
                        atomic_dec(&mem_pool->isolation_in_progress_cnt);
                        preempt_enable();
                        return false;
                }

                spin_lock(&page_md->migrate_lock);
                /* Check status again to ensure page has not been removed from memory pool. */
                if (PAGE_STATUS_GET(page_md->status) == MEM_POOL) {
                        page_md->status = PAGE_ISOLATE_SET(page_md->status, 1);
                        list_del_init(&p->lru);
                        mem_pool->cur_size--;
                }
                spin_unlock(&page_md->migrate_lock);
                spin_unlock(&mem_pool->pool_lock);
                atomic_dec(&mem_pool->isolation_in_progress_cnt);
                preempt_enable();
        }

        return IS_PAGE_ISOLATED(page_md->status);
}

/**
 * kbase_page_migrate - Migrate content of old page to new page provided.
 *
 * @mapping:  Pointer to address_space struct associated with pages.
 * @new_page: Pointer to the page struct of the new page.
 * @old_page: Pointer to the page struct of the old page.
 * @mode:     Mode to determine if migration will be synchronised.
 *
 * Callback function for Linux to migrate the content of the old page to the
 * new page provided.
 *
 * Return: 0 on success, error code otherwise.
 */
#if (KERNEL_VERSION(6, 0, 0) > LINUX_VERSION_CODE)
static int kbase_page_migrate(struct address_space *mapping, struct page *new_page,
                              struct page *old_page, enum migrate_mode mode)
#else
static int kbase_page_migrate(struct page *new_page, struct page *old_page, enum migrate_mode mode)
#endif
{
        int err = 0;
        bool status_mem_pool = false;
        bool status_free_pt_isolated_in_progress = false;
        bool status_free_isolated_in_progress = false;
        bool status_pt_mapped = false;
        bool status_mapped = false;
        bool status_not_movable = false;
        struct kbase_page_metadata *page_md = kbase_page_private(old_page);
        struct kbase_device *kbdev = NULL;

#if (KERNEL_VERSION(6, 0, 0) > LINUX_VERSION_CODE)
        CSTD_UNUSED(mapping);
#endif
        CSTD_UNUSED(mode);

        if (!page_md || !IS_PAGE_MOVABLE(page_md->status))
                return -EINVAL;

        if (!spin_trylock(&page_md->migrate_lock))
                return -EAGAIN;

        if (WARN_ON(!IS_PAGE_ISOLATED(page_md->status))) {
                spin_unlock(&page_md->migrate_lock);
                return -EINVAL;
        }

        switch (PAGE_STATUS_GET(page_md->status)) {
        case MEM_POOL:
                status_mem_pool = true;
                kbdev = page_md->data.mem_pool.kbdev;
                break;
        case ALLOCATED_MAPPED:
                status_mapped = true;
                break;
        case PT_MAPPED:
                status_pt_mapped = true;
                break;
        case FREE_ISOLATED_IN_PROGRESS:
                status_free_isolated_in_progress = true;
                kbdev = page_md->data.free_isolated.kbdev;
                break;
        case FREE_PT_ISOLATED_IN_PROGRESS:
                status_free_pt_isolated_in_progress = true;
                kbdev = page_md->data.free_pt_isolated.kbdev;
                break;
        case NOT_MOVABLE:
                status_not_movable = true;
                break;
        default:
                /* State should always fall in one of the previous cases! */
                err = -EAGAIN;
                break;
        }

        spin_unlock(&page_md->migrate_lock);

        if (status_mem_pool || status_free_isolated_in_progress ||
            status_free_pt_isolated_in_progress) {
                struct kbase_mem_migrate *mem_migrate = &kbdev->mem_migrate;

                kbase_free_page_metadata(kbdev, old_page, NULL);
                __ClearPageMovable(old_page);
                page_md->status = PAGE_MOVABLE_CLEAR(page_md->status);
                put_page(old_page);

                /* Just free new page to avoid lock contention. */
                INIT_LIST_HEAD(&new_page->lru);
                get_page(new_page);
                set_page_private(new_page, 0);
                kbase_free_page_later(kbdev, new_page);
                queue_work(mem_migrate->free_pages_workq, &mem_migrate->free_pages_work);
        } else if (status_not_movable) {
                err = -EINVAL;
        } else if (status_mapped) {
                err = kbasep_migrate_page_allocated_mapped(old_page, new_page);
        } else if (status_pt_mapped) {
                err = kbasep_migrate_page_pt_mapped(old_page, new_page);
        }

        /* While we want to preserve the movability of pages for which we return
         * EAGAIN, according to the kernel docs, movable pages for which a critical
         * error is returned have putback called on them, which may not be what we
         * expect.
         */
        if (err < 0 && err != -EAGAIN) {
                __ClearPageMovable(old_page);
                page_md->status = PAGE_MOVABLE_CLEAR(page_md->status);
        }

        return err;
}

/**
 * kbase_page_putback - Return an isolated page back to kbase.
 *
 * @p: Pointer to the page struct of the page.
 *
 * Callback function for Linux to return an isolated page back to kbase. This
 * will only be called for a page that has been isolated but failed to
 * migrate. This function will put back the given page to the state it was
 * in before it was isolated.
 */
static void kbase_page_putback(struct page *p)
{
        bool status_mem_pool = false;
        bool status_free_isolated_in_progress = false;
        bool status_free_pt_isolated_in_progress = false;
        struct kbase_page_metadata *page_md = kbase_page_private(p);
        struct kbase_device *kbdev = NULL;

        /* If we don't have page metadata, the page may not belong to the
         * driver or may already have been freed, and there's nothing we can do.
         */
        if (!page_md)
                return;

        spin_lock(&page_md->migrate_lock);

        if (WARN_ON(!IS_PAGE_ISOLATED(page_md->status))) {
                spin_unlock(&page_md->migrate_lock);
                return;
        }

        switch (PAGE_STATUS_GET(page_md->status)) {
        case MEM_POOL:
                status_mem_pool = true;
                kbdev = page_md->data.mem_pool.kbdev;
                break;
        case ALLOCATED_MAPPED:
                page_md->status = PAGE_ISOLATE_SET(page_md->status, 0);
                break;
        case PT_MAPPED:
        case NOT_MOVABLE:
                /* Pages should no longer be isolated if they are in a stable state
                 * and used by the driver.
                 */
                page_md->status = PAGE_ISOLATE_SET(page_md->status, 0);
                break;
        case FREE_ISOLATED_IN_PROGRESS:
                status_free_isolated_in_progress = true;
                kbdev = page_md->data.free_isolated.kbdev;
                break;
        case FREE_PT_ISOLATED_IN_PROGRESS:
                status_free_pt_isolated_in_progress = true;
                kbdev = page_md->data.free_pt_isolated.kbdev;
                break;
        default:
                /* State should always fall in one of the previous cases! */
                break;
        }

        spin_unlock(&page_md->migrate_lock);

        /* If the page was in a memory pool then just free it to avoid lock
         * contention. The same applies to pages in the FREE_ISOLATED_IN_PROGRESS
         * and FREE_PT_ISOLATED_IN_PROGRESS states.
         */
        if (status_mem_pool || status_free_isolated_in_progress ||
            status_free_pt_isolated_in_progress) {
                __ClearPageMovable(p);
                page_md->status = PAGE_MOVABLE_CLEAR(page_md->status);

                if (!WARN_ON_ONCE(!kbdev)) {
                        struct kbase_mem_migrate *mem_migrate = &kbdev->mem_migrate;

                        kbase_free_page_later(kbdev, p);
                        queue_work(mem_migrate->free_pages_workq, &mem_migrate->free_pages_work);
                }
        }
}

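/* Callbacks through which the kernel isolates, migrates and puts back pages
 * owned by the driver. On kernels older than 6.0 they are exposed via the
 * address_space_operations of the device file mapping rather than via
 * movable_operations.
 */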
#if (KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE)
static const struct movable_operations movable_ops = {
        .isolate_page = kbase_page_isolate,
        .migrate_page = kbase_page_migrate,
        .putback_page = kbase_page_putback,
};
#else
static const struct address_space_operations kbase_address_space_ops = {
        .isolate_page = kbase_page_isolate,
        .migratepage = kbase_page_migrate,
        .putback_page = kbase_page_putback,
};
#endif

#if (KERNEL_VERSION(6, 0, 0) > LINUX_VERSION_CODE)
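/* Install kbase_address_space_ops on the device file mapping and keep a
 * reference to its inode, so that the migration callbacks stay reachable for
 * the lifetime of the device.
 */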
void kbase_mem_migrate_set_address_space_ops(struct kbase_device *kbdev, struct file *const filp)
{
        mutex_lock(&kbdev->fw_load_lock);

        if (filp) {
                filp->f_inode->i_mapping->a_ops = &kbase_address_space_ops;

                if (!kbdev->mem_migrate.inode) {
                        kbdev->mem_migrate.inode = filp->f_inode;
                        /* This reference count increment is balanced by iput()
                         * upon termination.
                         */
                        atomic_inc(&filp->f_inode->i_count);
                } else {
                        WARN_ON(kbdev->mem_migrate.inode != filp->f_inode);
                }
        }

        mutex_unlock(&kbdev->fw_load_lock);
}
#endif

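/* Initialise the deferred page freeing machinery: the free list, its lock and
 * the workqueue used by kbase_free_pages_worker().
 */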
void kbase_mem_migrate_init(struct kbase_device *kbdev)
{
        struct kbase_mem_migrate *mem_migrate = &kbdev->mem_migrate;

        if (kbase_page_migration_enabled < 0)
                kbase_page_migration_enabled = 0;

        spin_lock_init(&mem_migrate->free_pages_lock);
        INIT_LIST_HEAD(&mem_migrate->free_pages_list);

#if (KERNEL_VERSION(6, 0, 0) > LINUX_VERSION_CODE)
        mem_migrate->inode = NULL;
#endif
        mem_migrate->free_pages_workq =
                alloc_workqueue("free_pages_workq", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
        INIT_WORK(&mem_migrate->free_pages_work, kbase_free_pages_worker);
}

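/* Tear down the deferred page freeing machinery and, on kernels older than 6.0,
 * drop the inode reference taken by kbase_mem_migrate_set_address_space_ops().
 */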
void kbase_mem_migrate_term(struct kbase_device *kbdev)
{
        struct kbase_mem_migrate *mem_migrate = &kbdev->mem_migrate;

        if (mem_migrate->free_pages_workq)
                destroy_workqueue(mem_migrate->free_pages_workq);
#if (KERNEL_VERSION(6, 0, 0) > LINUX_VERSION_CODE)
        iput(mem_migrate->inode);
#endif
}