xref: /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/mali_kbase_mem_pool.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
2 /*
3  *
4  * (C) COPYRIGHT 2015-2022 ARM Limited. All rights reserved.
5  *
6  * This program is free software and is provided to you under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation, and any use by you of this program is subject to the terms
9  * of such GNU license.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, you can access it online at
18  * http://www.gnu.org/licenses/gpl-2.0.html.
19  *
20  */
21 
22 #include <mali_kbase.h>
23 #include <linux/mm.h>
24 #include <linux/migrate.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/highmem.h>
27 #include <linux/spinlock.h>
28 #include <linux/shrinker.h>
29 #include <linux/atomic.h>
30 #include <linux/version.h>
31 #if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
32 #include <linux/sched/signal.h>
33 #else
34 #include <linux/signal.h>
35 #endif
36 
37 #define pool_dbg(pool, format, ...) \
38 	dev_dbg(pool->kbdev->dev, "%s-pool [%zu/%zu]: " format,	\
39 		(pool->next_pool) ? "kctx" : "kbdev",	\
40 		kbase_mem_pool_size(pool),	\
41 		kbase_mem_pool_max_size(pool),	\
42 		##__VA_ARGS__)
43 
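/* Self-documenting names for the boolean arguments passed to
 * kbase_mem_pool_free_pages() on the error rollback path below.
 */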
44 #define NOT_DIRTY false
45 #define NOT_RECLAIMED false
46 
47 /**
48  * can_alloc_page() - Check if the current thread can allocate a physical page
49  *
50  * @pool:                Pointer to the memory pool.
51  * @page_owner:          Pointer to the task/process that created the Kbase context
52  *                       for which a page needs to be allocated. It can be NULL if
53  *                       the page won't be associated with a Kbase context.
54  * @alloc_from_kthread:  Flag indicating that the current thread is a kernel thread.
55  *
56  * This function checks whether the current thread is a kernel thread that can ask
57  * the kernel to allocate a physical page. If a kernel thread is allocating a page
58  * for a Kbase context and the process that created that context is exiting or is
59  * being killed, there is no point in performing the allocation.
60  *
61  * The check is particularly helpful when the system is running low on memory. When
62  * a page is allocated from the context of a kernel thread, the OoM killer does not
63  * consider the kernel thread for killing, and the kernel keeps retrying the
64  * allocation as long as the OoM killer is able to kill processes.
65  * The check lets the kernel thread exit the page allocation loop quickly once the
66  * OoM killer has started killing @page_owner, thereby unblocking context
67  * termination for @page_owner and the freeing of the GPU memory it allocated.
68  * This helps prevent a kernel panic and also limits the number of innocent
69  * processes that get killed.
70  *
71  * Return: true if the page can be allocated otherwise false.
72  */
73 static inline bool can_alloc_page(struct kbase_mem_pool *pool, struct task_struct *page_owner,
74 				  const bool alloc_from_kthread)
75 {
76 	if (likely(!alloc_from_kthread || !page_owner))
77 		return true;
78 
79 	if ((page_owner->flags & PF_EXITING) || fatal_signal_pending(page_owner)) {
80 		dev_info(pool->kbdev->dev, "%s : Process %s/%d exiting",
81 			__func__, page_owner->comm, task_pid_nr(page_owner));
82 		return false;
83 	}
84 
85 	return true;
86 }
87 
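/* kbase_mem_pool_capacity() - Return how many more pages the pool can hold
 * before it reaches its configured maximum size (0 if it is already full).
 */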
88 static size_t kbase_mem_pool_capacity(struct kbase_mem_pool *pool)
89 {
90 	ssize_t max_size = kbase_mem_pool_max_size(pool);
91 	ssize_t cur_size = kbase_mem_pool_size(pool);
92 
93 	return max(max_size - cur_size, (ssize_t)0);
94 }
95 
96 static bool kbase_mem_pool_is_full(struct kbase_mem_pool *pool)
97 {
98 	return kbase_mem_pool_size(pool) >= kbase_mem_pool_max_size(pool);
99 }
100 
101 static bool kbase_mem_pool_is_empty(struct kbase_mem_pool *pool)
102 {
103 	return kbase_mem_pool_size(pool) == 0;
104 }
105 
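/* set_pool_new_page_metadata() - Mark a page as owned by a memory pool.
 *
 * Tags the page's migration metadata with the MEM_POOL status and links the
 * page into @page_list, bumping *@list_size. NOT_MOVABLE pages are not added:
 * they are handed to kbase_free_page_later() for deferred freeing instead.
 *
 * Return: true if the page was not added and the deferred free worker needs
 * to be kicked, false otherwise.
 */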
106 static bool set_pool_new_page_metadata(struct kbase_mem_pool *pool, struct page *p,
107 				       struct list_head *page_list, size_t *list_size)
108 {
109 	struct kbase_page_metadata *page_md = kbase_page_private(p);
110 	bool not_movable = false;
111 
112 	lockdep_assert_held(&pool->pool_lock);
113 
114 	/* Free the page instead of adding it to the pool if it's not movable.
115 	 * Only update page status and add the page to the memory pool if
116 	 * it is not isolated.
117 	 */
118 	spin_lock(&page_md->migrate_lock);
119 	if (PAGE_STATUS_GET(page_md->status) == (u8)NOT_MOVABLE) {
120 		not_movable = true;
121 	} else if (!WARN_ON_ONCE(IS_PAGE_ISOLATED(page_md->status))) {
122 		page_md->status = PAGE_STATUS_SET(page_md->status, (u8)MEM_POOL);
123 		page_md->data.mem_pool.pool = pool;
124 		page_md->data.mem_pool.kbdev = pool->kbdev;
125 		list_add(&p->lru, page_list);
126 		(*list_size)++;
127 	}
128 	spin_unlock(&page_md->migrate_lock);
129 
130 	if (not_movable) {
131 		kbase_free_page_later(pool->kbdev, p);
132 		pool_dbg(pool, "skipping a non-movable page\n");
133 	}
134 
135 	return not_movable;
136 }
137 
138 static void kbase_mem_pool_add_locked(struct kbase_mem_pool *pool,
139 		struct page *p)
140 {
141 	bool queue_work_to_free = false;
142 
143 	lockdep_assert_held(&pool->pool_lock);
144 
145 	if (!pool->order && kbase_page_migration_enabled) {
146 		if (set_pool_new_page_metadata(pool, p, &pool->page_list, &pool->cur_size))
147 			queue_work_to_free = true;
148 	} else {
149 		list_add(&p->lru, &pool->page_list);
150 		pool->cur_size++;
151 	}
152 
153 	if (queue_work_to_free) {
154 		struct kbase_mem_migrate *mem_migrate = &pool->kbdev->mem_migrate;
155 
156 		queue_work(mem_migrate->free_pages_workq, &mem_migrate->free_pages_work);
157 	}
158 
159 	pool_dbg(pool, "added page\n");
160 }
161 
162 static void kbase_mem_pool_add(struct kbase_mem_pool *pool, struct page *p)
163 {
164 	kbase_mem_pool_lock(pool);
165 	kbase_mem_pool_add_locked(pool, p);
166 	kbase_mem_pool_unlock(pool);
167 }
168 
169 static void kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool,
170 		struct list_head *page_list, size_t nr_pages)
171 {
172 	bool queue_work_to_free = false;
173 
174 	lockdep_assert_held(&pool->pool_lock);
175 
176 	if (!pool->order && kbase_page_migration_enabled) {
177 		struct page *p, *tmp;
178 
179 		list_for_each_entry_safe(p, tmp, page_list, lru) {
180 			list_del_init(&p->lru);
181 			if (set_pool_new_page_metadata(pool, p, &pool->page_list, &pool->cur_size))
182 				queue_work_to_free = true;
183 		}
184 	} else {
185 		list_splice(page_list, &pool->page_list);
186 		pool->cur_size += nr_pages;
187 	}
188 
189 	if (queue_work_to_free) {
190 		struct kbase_mem_migrate *mem_migrate = &pool->kbdev->mem_migrate;
191 
192 		queue_work(mem_migrate->free_pages_workq, &mem_migrate->free_pages_work);
193 	}
194 
195 	pool_dbg(pool, "added %zu pages\n", nr_pages);
196 }
197 
198 static void kbase_mem_pool_add_list(struct kbase_mem_pool *pool,
199 		struct list_head *page_list, size_t nr_pages)
200 {
201 	kbase_mem_pool_lock(pool);
202 	kbase_mem_pool_add_list_locked(pool, page_list, nr_pages);
203 	kbase_mem_pool_unlock(pool);
204 }
205 
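/* kbase_mem_pool_remove_locked() - Take the first page off the pool.
 *
 * For order-0 pools with page migration enabled, the page's migration
 * metadata is moved from MEM_POOL to @status before the page is returned.
 *
 * Return: the removed page, or NULL if the pool is empty.
 */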
206 static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool,
207 						 enum kbase_page_status status)
208 {
209 	struct page *p;
210 
211 	lockdep_assert_held(&pool->pool_lock);
212 
213 	if (kbase_mem_pool_is_empty(pool))
214 		return NULL;
215 
216 	p = list_first_entry(&pool->page_list, struct page, lru);
217 
218 	if (!pool->order && kbase_page_migration_enabled) {
219 		struct kbase_page_metadata *page_md = kbase_page_private(p);
220 
221 		spin_lock(&page_md->migrate_lock);
222 		WARN_ON(PAGE_STATUS_GET(page_md->status) != (u8)MEM_POOL);
223 		page_md->status = PAGE_STATUS_SET(page_md->status, (u8)status);
224 		spin_unlock(&page_md->migrate_lock);
225 	}
226 
227 	list_del_init(&p->lru);
228 	pool->cur_size--;
229 
230 	pool_dbg(pool, "removed page\n");
231 
232 	return p;
233 }
234 
235 static struct page *kbase_mem_pool_remove(struct kbase_mem_pool *pool,
236 					  enum kbase_page_status status)
237 {
238 	struct page *p;
239 
240 	kbase_mem_pool_lock(pool);
241 	p = kbase_mem_pool_remove_locked(pool, status);
242 	kbase_mem_pool_unlock(pool);
243 
244 	return p;
245 }
246 
247 static void kbase_mem_pool_sync_page(struct kbase_mem_pool *pool,
248 		struct page *p)
249 {
250 	struct device *dev = pool->kbdev->dev;
251 	dma_addr_t dma_addr = pool->order ? kbase_dma_addr_as_priv(p) : kbase_dma_addr(p);
252 
253 	dma_sync_single_for_device(dev, dma_addr, (PAGE_SIZE << pool->order), DMA_BIDIRECTIONAL);
254 }
255 
256 static void kbase_mem_pool_zero_page(struct kbase_mem_pool *pool,
257 		struct page *p)
258 {
259 	int i;
260 
261 	for (i = 0; i < (1U << pool->order); i++)
262 		clear_highpage(p+i);
263 
264 	kbase_mem_pool_sync_page(pool, p);
265 }
266 
267 static void kbase_mem_pool_spill(struct kbase_mem_pool *next_pool,
268 		struct page *p)
269 {
270 	/* Zero page before spilling */
271 	kbase_mem_pool_zero_page(next_pool, p);
272 
273 	kbase_mem_pool_add(next_pool, p);
274 }
275 
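/* kbase_mem_alloc_page() - Allocate a page of order pool->order directly from
 * the kernel, via the memory group manager, and DMA-map it for the GPU.
 *
 * For order-0 pools with page migration enabled the page also gets migration
 * metadata attached; otherwise the DMA address is stored in the private data
 * of each 4KB sub-page.
 *
 * Return: the new page, or NULL on allocation or DMA-mapping failure.
 */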
276 struct page *kbase_mem_alloc_page(struct kbase_mem_pool *pool)
277 {
278 	struct page *p;
279 	gfp_t gfp = __GFP_ZERO;
280 	struct kbase_device *const kbdev = pool->kbdev;
281 	struct device *const dev = kbdev->dev;
282 	dma_addr_t dma_addr;
283 	int i;
284 
285 	/* don't warn on higher order failures */
286 	if (pool->order)
287 		gfp |= GFP_HIGHUSER | __GFP_NOWARN;
288 	else
289 		gfp |= kbase_page_migration_enabled ? GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER;
290 
291 	p = kbdev->mgm_dev->ops.mgm_alloc_page(kbdev->mgm_dev,
292 		pool->group_id, gfp, pool->order);
293 	if (!p)
294 		return NULL;
295 
296 	dma_addr = dma_map_page(dev, p, 0, (PAGE_SIZE << pool->order),
297 				DMA_BIDIRECTIONAL);
298 
299 	if (dma_mapping_error(dev, dma_addr)) {
300 		kbdev->mgm_dev->ops.mgm_free_page(kbdev->mgm_dev,
301 			pool->group_id, p, pool->order);
302 		return NULL;
303 	}
304 
305 	/* Set up page metadata for 4KB pages when page migration is enabled */
306 	if (!pool->order && kbase_page_migration_enabled) {
307 		INIT_LIST_HEAD(&p->lru);
308 		if (!kbase_alloc_page_metadata(kbdev, p, dma_addr, pool->group_id)) {
309 			dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
310 			kbdev->mgm_dev->ops.mgm_free_page(kbdev->mgm_dev, pool->group_id, p,
311 							  pool->order);
312 			return NULL;
313 		}
314 	} else {
315 		WARN_ON(dma_addr != page_to_phys(p));
316 		for (i = 0; i < (1u << pool->order); i++)
317 			kbase_set_dma_addr_as_priv(p + i, dma_addr + PAGE_SIZE * i);
318 	}
319 
320 	return p;
321 }
322 
323 static void enqueue_free_pool_pages_work(struct kbase_mem_pool *pool)
324 {
325 	struct kbase_mem_migrate *mem_migrate = &pool->kbdev->mem_migrate;
326 
327 	if (!pool->order && kbase_page_migration_enabled)
328 		queue_work(mem_migrate->free_pages_workq, &mem_migrate->free_pages_work);
329 }
330 
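/* kbase_mem_pool_free_page() - Release a page back to the kernel.
 *
 * With page migration enabled on an order-0 pool, the page is queued for the
 * deferred free worker; otherwise it is DMA-unmapped and returned to the
 * memory group manager immediately.
 */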
331 void kbase_mem_pool_free_page(struct kbase_mem_pool *pool, struct page *p)
332 {
333 	struct kbase_device *kbdev;
334 
335 	if (WARN_ON(!pool))
336 		return;
337 	if (WARN_ON(!p))
338 		return;
339 
340 	kbdev = pool->kbdev;
341 
342 	if (!pool->order && kbase_page_migration_enabled) {
343 		kbase_free_page_later(kbdev, p);
344 		pool_dbg(pool, "page to be freed to kernel later\n");
345 	} else {
346 		int i;
347 		dma_addr_t dma_addr = kbase_dma_addr_as_priv(p);
348 
349 		for (i = 0; i < (1u << pool->order); i++)
350 			kbase_clear_dma_addr_as_priv(p + i);
351 
352 		dma_unmap_page(kbdev->dev, dma_addr, (PAGE_SIZE << pool->order), DMA_BIDIRECTIONAL);
353 
354 		kbdev->mgm_dev->ops.mgm_free_page(kbdev->mgm_dev, pool->group_id, p, pool->order);
355 
356 		pool_dbg(pool, "freed page to kernel\n");
357 	}
358 }
359 
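/* kbase_mem_pool_shrink_locked() - Free up to @nr_to_shrink pages from the
 * pool back to the kernel. The caller must hold the pool lock.
 *
 * Return: the number of pages actually removed from the pool.
 */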
360 static size_t kbase_mem_pool_shrink_locked(struct kbase_mem_pool *pool,
361 		size_t nr_to_shrink)
362 {
363 	struct page *p;
364 	size_t i;
365 
366 	lockdep_assert_held(&pool->pool_lock);
367 
368 	for (i = 0; i < nr_to_shrink && !kbase_mem_pool_is_empty(pool); i++) {
369 		p = kbase_mem_pool_remove_locked(pool, FREE_IN_PROGRESS);
370 		kbase_mem_pool_free_page(pool, p);
371 	}
372 
373 	/* Freeing of pages will be deferred when page migration is enabled. */
374 	enqueue_free_pool_pages_work(pool);
375 
376 	return i;
377 }
378 
379 static size_t kbase_mem_pool_shrink(struct kbase_mem_pool *pool,
380 		size_t nr_to_shrink)
381 {
382 	size_t nr_freed;
383 
384 	kbase_mem_pool_lock(pool);
385 	nr_freed = kbase_mem_pool_shrink_locked(pool, nr_to_shrink);
386 	kbase_mem_pool_unlock(pool);
387 
388 	return nr_freed;
389 }
390 
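/* kbase_mem_pool_grow() - Add @nr_to_grow pages to the pool by allocating
 * them from the kernel.
 *
 * The pool lock is dropped around each allocation, and reclaim from this pool
 * is suppressed via the dont_reclaim flag while growing is in progress.
 *
 * Return: 0 on success, or -ENOMEM if the pool is dying, @page_owner is
 * exiting, or a page allocation fails.
 */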
391 int kbase_mem_pool_grow(struct kbase_mem_pool *pool, size_t nr_to_grow,
392 			struct task_struct *page_owner)
393 {
394 	struct page *p;
395 	size_t i;
396 	const bool alloc_from_kthread = !!(current->flags & PF_KTHREAD);
397 
398 	kbase_mem_pool_lock(pool);
399 
400 	pool->dont_reclaim = true;
401 	for (i = 0; i < nr_to_grow; i++) {
402 		if (pool->dying) {
403 			pool->dont_reclaim = false;
404 			kbase_mem_pool_shrink_locked(pool, nr_to_grow);
405 			kbase_mem_pool_unlock(pool);
406 
407 			return -ENOMEM;
408 		}
409 		kbase_mem_pool_unlock(pool);
410 
411 		if (unlikely(!can_alloc_page(pool, page_owner, alloc_from_kthread)))
412 			return -ENOMEM;
413 
414 		p = kbase_mem_alloc_page(pool);
415 		if (!p) {
416 			kbase_mem_pool_lock(pool);
417 			pool->dont_reclaim = false;
418 			kbase_mem_pool_unlock(pool);
419 
420 			return -ENOMEM;
421 		}
422 
423 		kbase_mem_pool_lock(pool);
424 		kbase_mem_pool_add_locked(pool, p);
425 	}
426 	pool->dont_reclaim = false;
427 	kbase_mem_pool_unlock(pool);
428 
429 	return 0;
430 }
431 KBASE_EXPORT_TEST_API(kbase_mem_pool_grow);
432 
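/* kbase_mem_pool_trim() - Grow or shrink the pool so that it holds @new_size
 * pages, capped at the pool's maximum size. A warning is logged if the pool
 * could not be grown by the requested amount.
 */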
433 void kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size)
434 {
435 	size_t cur_size;
436 	int err = 0;
437 
438 	cur_size = kbase_mem_pool_size(pool);
439 
440 	if (new_size > pool->max_size)
441 		new_size = pool->max_size;
442 
443 	if (new_size < cur_size)
444 		kbase_mem_pool_shrink(pool, cur_size - new_size);
445 	else if (new_size > cur_size)
446 		err = kbase_mem_pool_grow(pool, new_size - cur_size, NULL);
447 
448 	if (err) {
449 		size_t grown_size = kbase_mem_pool_size(pool);
450 
451 		dev_warn(pool->kbdev->dev,
452 			 "Mem pool not grown by the requested %zu pages, grown by %zu pages instead!\n",
453 			 (new_size - cur_size), (grown_size - cur_size));
454 	}
455 }
456 
457 void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size)
458 {
459 	size_t cur_size;
460 	size_t nr_to_shrink;
461 
462 	kbase_mem_pool_lock(pool);
463 
464 	pool->max_size = max_size;
465 
466 	cur_size = kbase_mem_pool_size(pool);
467 	if (max_size < cur_size) {
468 		nr_to_shrink = cur_size - max_size;
469 		kbase_mem_pool_shrink_locked(pool, nr_to_shrink);
470 	}
471 
472 	kbase_mem_pool_unlock(pool);
473 }
474 KBASE_EXPORT_TEST_API(kbase_mem_pool_set_max_size);
475 
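/* Shrinker callback: report how many pool pages could be reclaimed. Returns 0
 * while a grow is in progress (dont_reclaim set) and the pool is not dying,
 * so the shrinker skips this pool.
 */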
476 static unsigned long kbase_mem_pool_reclaim_count_objects(struct shrinker *s,
477 		struct shrink_control *sc)
478 {
479 	struct kbase_mem_pool *pool;
480 	size_t pool_size;
481 
482 	pool = container_of(s, struct kbase_mem_pool, reclaim);
483 
484 	kbase_mem_pool_lock(pool);
485 	if (pool->dont_reclaim && !pool->dying) {
486 		kbase_mem_pool_unlock(pool);
487 		/* Tell shrinker to skip reclaim
488 		 * even though freeable pages are available
489 		 */
490 		return 0;
491 	}
492 	pool_size = kbase_mem_pool_size(pool);
493 	kbase_mem_pool_unlock(pool);
494 
495 	return pool_size;
496 }
497 
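/* Shrinker callback: free up to sc->nr_to_scan pages from the pool, or return
 * SHRINK_STOP if reclaim is currently not allowed for this pool.
 */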
498 static unsigned long kbase_mem_pool_reclaim_scan_objects(struct shrinker *s,
499 		struct shrink_control *sc)
500 {
501 	struct kbase_mem_pool *pool;
502 	unsigned long freed;
503 
504 	pool = container_of(s, struct kbase_mem_pool, reclaim);
505 
506 	kbase_mem_pool_lock(pool);
507 	if (pool->dont_reclaim && !pool->dying) {
508 		kbase_mem_pool_unlock(pool);
509 		/* Tell shrinker that reclaim can't be made and
510 		 * do not attempt again for this reclaim context.
511 		 */
512 		return SHRINK_STOP;
513 	}
514 
515 	pool_dbg(pool, "reclaim scan %ld:\n", sc->nr_to_scan);
516 
517 	freed = kbase_mem_pool_shrink_locked(pool, sc->nr_to_scan);
518 
519 	kbase_mem_pool_unlock(pool);
520 
521 	pool_dbg(pool, "reclaim freed %ld pages\n", freed);
522 
523 	return freed;
524 }
525 
526 int kbase_mem_pool_init(struct kbase_mem_pool *pool, const struct kbase_mem_pool_config *config,
527 			unsigned int order, int group_id, struct kbase_device *kbdev,
528 			struct kbase_mem_pool *next_pool)
529 {
530 	if (WARN_ON(group_id < 0) ||
531 		WARN_ON(group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS)) {
532 		return -EINVAL;
533 	}
534 
535 	pool->cur_size = 0;
536 	pool->max_size = kbase_mem_pool_config_get_max_size(config);
537 	pool->order = order;
538 	pool->group_id = group_id;
539 	pool->kbdev = kbdev;
540 	pool->next_pool = next_pool;
541 	pool->dying = false;
542 	atomic_set(&pool->isolation_in_progress_cnt, 0);
543 
544 	spin_lock_init(&pool->pool_lock);
545 	INIT_LIST_HEAD(&pool->page_list);
546 
547 	pool->reclaim.count_objects = kbase_mem_pool_reclaim_count_objects;
548 	pool->reclaim.scan_objects = kbase_mem_pool_reclaim_scan_objects;
549 	pool->reclaim.seeks = DEFAULT_SEEKS;
550 	/* Kernel versions prior to 3.1:
551 	 * struct shrinker does not define batch
552 	 */
553 	pool->reclaim.batch = 0;
554 #if KERNEL_VERSION(6, 0, 0) > LINUX_VERSION_CODE
555 	register_shrinker(&pool->reclaim);
556 #else
557 	register_shrinker(&pool->reclaim, "mali-mem-pool");
558 #endif
559 
560 	pool_dbg(pool, "initialized\n");
561 
562 	return 0;
563 }
564 KBASE_EXPORT_TEST_API(kbase_mem_pool_init);
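
/*
 * Typical pool lifecycle, as exercised by the functions in this file. This is
 * an illustrative sketch only; the config, group_id, kbdev and next_pool
 * values are placeholders that real callers set up elsewhere in the driver:
 *
 *	struct kbase_mem_pool pool;
 *	struct page *p;
 *
 *	if (kbase_mem_pool_init(&pool, &config, 0, group_id, kbdev, NULL))
 *		return -EINVAL;
 *
 *	kbase_mem_pool_grow(&pool, 16, NULL);
 *	p = kbase_mem_pool_alloc(&pool);
 *	if (p)
 *		kbase_mem_pool_free(&pool, p, false);
 *
 *	kbase_mem_pool_mark_dying(&pool);
 *	kbase_mem_pool_term(&pool);
 *
 * kbase_mem_pool_grow() pre-fills the pool from the kernel,
 * kbase_mem_pool_alloc() only takes pages already held by the pool (or its
 * next_pool), and kbase_mem_pool_term() spills or frees whatever is left
 * before the pool goes away.
 */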
565 
566 void kbase_mem_pool_mark_dying(struct kbase_mem_pool *pool)
567 {
568 	kbase_mem_pool_lock(pool);
569 	pool->dying = true;
570 	kbase_mem_pool_unlock(pool);
571 }
572 
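/* kbase_mem_pool_term() - Tear the pool down: unregister its shrinker, spill
 * as many pages as fit into next_pool (zeroing them first), free the rest to
 * the kernel, and finally wait for any in-flight page isolation that still
 * references this pool.
 */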
573 void kbase_mem_pool_term(struct kbase_mem_pool *pool)
574 {
575 	struct kbase_mem_pool *next_pool = pool->next_pool;
576 	struct page *p, *tmp;
577 	size_t nr_to_spill = 0;
578 	LIST_HEAD(spill_list);
579 	LIST_HEAD(free_list);
580 	int i;
581 
582 	pool_dbg(pool, "terminate()\n");
583 
584 	unregister_shrinker(&pool->reclaim);
585 
586 	kbase_mem_pool_lock(pool);
587 	pool->max_size = 0;
588 
589 	if (next_pool && !kbase_mem_pool_is_full(next_pool)) {
590 		/* Spill to next pool (may overspill) */
591 		nr_to_spill = kbase_mem_pool_capacity(next_pool);
592 		nr_to_spill = min(kbase_mem_pool_size(pool), nr_to_spill);
593 
594 		/* Zero pages first without holding the next_pool lock */
595 		for (i = 0; i < nr_to_spill; i++) {
596 			p = kbase_mem_pool_remove_locked(pool, SPILL_IN_PROGRESS);
597 			if (p)
598 				list_add(&p->lru, &spill_list);
599 		}
600 	}
601 
602 	while (!kbase_mem_pool_is_empty(pool)) {
603 		/* Free remaining pages to kernel */
604 		p = kbase_mem_pool_remove_locked(pool, FREE_IN_PROGRESS);
605 		if (p)
606 			list_add(&p->lru, &free_list);
607 	}
608 
609 	kbase_mem_pool_unlock(pool);
610 
611 	if (next_pool && nr_to_spill) {
612 		list_for_each_entry(p, &spill_list, lru)
613 			kbase_mem_pool_zero_page(pool, p);
614 
615 		/* Add new page list to next_pool */
616 		kbase_mem_pool_add_list(next_pool, &spill_list, nr_to_spill);
617 
618 		pool_dbg(pool, "terminate() spilled %zu pages\n", nr_to_spill);
619 	}
620 
621 	list_for_each_entry_safe(p, tmp, &free_list, lru) {
622 		list_del_init(&p->lru);
623 		kbase_mem_pool_free_page(pool, p);
624 	}
625 
626 	/* Freeing of pages will be deferred when page migration is enabled. */
627 	enqueue_free_pool_pages_work(pool);
628 
629 	/* Before returning, wait to make sure there are no pages undergoing page
630 	 * isolation that would still require a reference to this pool.
631 	 */
632 	while (atomic_read(&pool->isolation_in_progress_cnt))
633 		cpu_relax();
634 
635 	pool_dbg(pool, "terminated\n");
636 }
637 KBASE_EXPORT_TEST_API(kbase_mem_pool_term);
638 
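/* kbase_mem_pool_alloc() - Take a page from this pool, or from the next pool
 * in the chain if this one is empty. No pages are allocated from the kernel
 * here; NULL is returned if every pool in the chain is empty.
 */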
639 struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool)
640 {
641 	struct page *p;
642 
643 	do {
644 		pool_dbg(pool, "alloc()\n");
645 		p = kbase_mem_pool_remove(pool, ALLOCATE_IN_PROGRESS);
646 
647 		if (p)
648 			return p;
649 
650 		pool = pool->next_pool;
651 	} while (pool);
652 
653 	return NULL;
654 }
655 
656 struct page *kbase_mem_pool_alloc_locked(struct kbase_mem_pool *pool)
657 {
658 	lockdep_assert_held(&pool->pool_lock);
659 
660 	pool_dbg(pool, "alloc_locked()\n");
661 	return kbase_mem_pool_remove_locked(pool, ALLOCATE_IN_PROGRESS);
662 }
663 
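/* kbase_mem_pool_free() - Return a page to the pool. If this pool is full,
 * the page is zeroed and spilled to next_pool; if that is also full (or there
 * is no next pool), the page is freed back to the kernel.
 */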
664 void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *p,
665 		bool dirty)
666 {
667 	struct kbase_mem_pool *next_pool = pool->next_pool;
668 
669 	pool_dbg(pool, "free()\n");
670 
671 	if (!kbase_mem_pool_is_full(pool)) {
672 		/* Add to our own pool */
673 		if (dirty)
674 			kbase_mem_pool_sync_page(pool, p);
675 
676 		kbase_mem_pool_add(pool, p);
677 	} else if (next_pool && !kbase_mem_pool_is_full(next_pool)) {
678 		/* Spill to next pool */
679 		kbase_mem_pool_spill(next_pool, p);
680 	} else {
681 		/* Free page */
682 		kbase_mem_pool_free_page(pool, p);
683 		/* Freeing of pages will be deferred when page migration is enabled. */
684 		enqueue_free_pool_pages_work(pool);
685 	}
686 }
687 
688 void kbase_mem_pool_free_locked(struct kbase_mem_pool *pool, struct page *p,
689 		bool dirty)
690 {
691 	pool_dbg(pool, "free_locked()\n");
692 
693 	lockdep_assert_held(&pool->pool_lock);
694 
695 	if (!kbase_mem_pool_is_full(pool)) {
696 		/* Add to our own pool */
697 		if (dirty)
698 			kbase_mem_pool_sync_page(pool, p);
699 
700 		kbase_mem_pool_add_locked(pool, p);
701 	} else {
702 		/* Free page */
703 		kbase_mem_pool_free_page(pool, p);
704 		/* Freeing of pages will be deferred when page migration is enabled. */
705 		enqueue_free_pool_pages_work(pool);
706 	}
707 }
708 
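/* kbase_mem_pool_alloc_pages() - Allocate @nr_4k_pages worth of pages, taking
 * them from this pool first, then from next_pool, and finally from the kernel.
 *
 * Return: the number of 4KB pages allocated (possibly fewer than requested
 * when @partial_allowed), or a negative error code; on error, all pages
 * obtained so far are rolled back into the pool.
 */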
709 int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages,
710 			       struct tagged_addr *pages, bool partial_allowed,
711 			       struct task_struct *page_owner)
712 {
713 	struct page *p;
714 	size_t nr_from_pool;
715 	size_t i = 0;
716 	int err = -ENOMEM;
717 	size_t nr_pages_internal;
718 	const bool alloc_from_kthread = !!(current->flags & PF_KTHREAD);
719 
720 	nr_pages_internal = nr_4k_pages / (1u << (pool->order));
721 
722 	if (nr_pages_internal * (1u << pool->order) != nr_4k_pages)
723 		return -EINVAL;
724 
725 	pool_dbg(pool, "alloc_pages(4k=%zu):\n", nr_4k_pages);
726 	pool_dbg(pool, "alloc_pages(internal=%zu):\n", nr_pages_internal);
727 
728 	/* Get pages from this pool */
729 	kbase_mem_pool_lock(pool);
730 	nr_from_pool = min(nr_pages_internal, kbase_mem_pool_size(pool));
731 
732 	while (nr_from_pool--) {
733 		int j;
734 
735 		p = kbase_mem_pool_remove_locked(pool, ALLOCATE_IN_PROGRESS);
736 
737 		if (pool->order) {
738 			pages[i++] = as_tagged_tag(page_to_phys(p),
739 						   HUGE_HEAD | HUGE_PAGE);
740 			for (j = 1; j < (1u << pool->order); j++)
741 				pages[i++] = as_tagged_tag(page_to_phys(p) +
742 							   PAGE_SIZE * j,
743 							   HUGE_PAGE);
744 		} else {
745 			pages[i++] = as_tagged(page_to_phys(p));
746 		}
747 	}
748 	kbase_mem_pool_unlock(pool);
749 
750 	if (i != nr_4k_pages && pool->next_pool) {
751 		/* Allocate via next pool */
752 		err = kbase_mem_pool_alloc_pages(pool->next_pool, nr_4k_pages - i, pages + i,
753 						 partial_allowed, page_owner);
754 
755 		if (err < 0)
756 			goto err_rollback;
757 
758 		i += err;
759 	} else {
760 		/* Get any remaining pages from kernel */
761 		while (i != nr_4k_pages) {
762 			if (unlikely(!can_alloc_page(pool, page_owner, alloc_from_kthread)))
763 				goto err_rollback;
764 
765 			p = kbase_mem_alloc_page(pool);
766 			if (!p) {
767 				if (partial_allowed)
768 					goto done;
769 				else
770 					goto err_rollback;
771 			}
772 
773 			if (pool->order) {
774 				int j;
775 
776 				pages[i++] = as_tagged_tag(page_to_phys(p),
777 							   HUGE_PAGE |
778 							   HUGE_HEAD);
779 				for (j = 1; j < (1u << pool->order); j++) {
780 					phys_addr_t phys;
781 
782 					phys = page_to_phys(p) + PAGE_SIZE * j;
783 					pages[i++] = as_tagged_tag(phys,
784 								   HUGE_PAGE);
785 				}
786 			} else {
787 				pages[i++] = as_tagged(page_to_phys(p));
788 			}
789 		}
790 	}
791 
792 done:
793 	pool_dbg(pool, "alloc_pages(%zu) done\n", i);
794 	return i;
795 
796 err_rollback:
797 	kbase_mem_pool_free_pages(pool, i, pages, NOT_DIRTY, NOT_RECLAIMED);
798 	return err;
799 }
800 
801 int kbase_mem_pool_alloc_pages_locked(struct kbase_mem_pool *pool,
802 		size_t nr_4k_pages, struct tagged_addr *pages)
803 {
804 	struct page *p;
805 	size_t i;
806 	size_t nr_pages_internal;
807 
808 	lockdep_assert_held(&pool->pool_lock);
809 
810 	nr_pages_internal = nr_4k_pages / (1u << (pool->order));
811 
812 	if (nr_pages_internal * (1u << pool->order) != nr_4k_pages)
813 		return -EINVAL;
814 
815 	pool_dbg(pool, "alloc_pages_locked(4k=%zu):\n", nr_4k_pages);
816 	pool_dbg(pool, "alloc_pages_locked(internal=%zu):\n",
817 			nr_pages_internal);
818 
819 	if (kbase_mem_pool_size(pool) < nr_pages_internal) {
820 		pool_dbg(pool, "Failed alloc\n");
821 		return -ENOMEM;
822 	}
823 
824 	for (i = 0; i < nr_pages_internal; i++) {
825 		int j;
826 
827 		p = kbase_mem_pool_remove_locked(pool, ALLOCATE_IN_PROGRESS);
828 		if (pool->order) {
829 			*pages++ = as_tagged_tag(page_to_phys(p),
830 						   HUGE_HEAD | HUGE_PAGE);
831 			for (j = 1; j < (1u << pool->order); j++) {
832 				*pages++ = as_tagged_tag(page_to_phys(p) +
833 							   PAGE_SIZE * j,
834 							   HUGE_PAGE);
835 			}
836 		} else {
837 			*pages++ = as_tagged(page_to_phys(p));
838 		}
839 	}
840 
841 	return nr_4k_pages;
842 }
843 
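/* kbase_mem_pool_add_array() - Move the pages referenced by @pages into the
 * pool, zeroing or syncing them first as requested. Only non-huge pages and
 * huge-page heads are linked into the pool; every non-NULL entry in @pages is
 * cleared afterwards.
 */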
844 static void kbase_mem_pool_add_array(struct kbase_mem_pool *pool,
845 				     size_t nr_pages, struct tagged_addr *pages,
846 				     bool zero, bool sync)
847 {
848 	struct page *p;
849 	size_t nr_to_pool = 0;
850 	LIST_HEAD(new_page_list);
851 	size_t i;
852 
853 	if (!nr_pages)
854 		return;
855 
856 	pool_dbg(pool, "add_array(%zu, zero=%d, sync=%d):\n",
857 			nr_pages, zero, sync);
858 
859 	/* Zero/sync pages first without holding the pool lock */
860 	for (i = 0; i < nr_pages; i++) {
861 		if (unlikely(!as_phys_addr_t(pages[i])))
862 			continue;
863 
864 		if (is_huge_head(pages[i]) || !is_huge(pages[i])) {
865 			p = as_page(pages[i]);
866 			if (zero)
867 				kbase_mem_pool_zero_page(pool, p);
868 			else if (sync)
869 				kbase_mem_pool_sync_page(pool, p);
870 
871 			list_add(&p->lru, &new_page_list);
872 			nr_to_pool++;
873 		}
874 		pages[i] = as_tagged(0);
875 	}
876 
877 	/* Add new page list to pool */
878 	kbase_mem_pool_add_list(pool, &new_page_list, nr_to_pool);
879 
880 	pool_dbg(pool, "add_array(%zu) added %zu pages\n",
881 			nr_pages, nr_to_pool);
882 }
883 
884 static void kbase_mem_pool_add_array_locked(struct kbase_mem_pool *pool,
885 		size_t nr_pages, struct tagged_addr *pages,
886 		bool zero, bool sync)
887 {
888 	struct page *p;
889 	size_t nr_to_pool = 0;
890 	LIST_HEAD(new_page_list);
891 	size_t i;
892 
893 	lockdep_assert_held(&pool->pool_lock);
894 
895 	if (!nr_pages)
896 		return;
897 
898 	pool_dbg(pool, "add_array_locked(%zu, zero=%d, sync=%d):\n",
899 			nr_pages, zero, sync);
900 
901 	/* Zero/sync pages first */
902 	for (i = 0; i < nr_pages; i++) {
903 		if (unlikely(!as_phys_addr_t(pages[i])))
904 			continue;
905 
906 		if (is_huge_head(pages[i]) || !is_huge(pages[i])) {
907 			p = as_page(pages[i]);
908 			if (zero)
909 				kbase_mem_pool_zero_page(pool, p);
910 			else if (sync)
911 				kbase_mem_pool_sync_page(pool, p);
912 
913 			list_add(&p->lru, &new_page_list);
914 			nr_to_pool++;
915 		}
916 		pages[i] = as_tagged(0);
917 	}
918 
919 	/* Add new page list to pool */
920 	kbase_mem_pool_add_list_locked(pool, &new_page_list, nr_to_pool);
921 
922 	pool_dbg(pool, "add_array_locked(%zu) added %zu pages\n",
923 			nr_pages, nr_to_pool);
924 }
925 
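/* kbase_mem_pool_free_pages() - Give an array of pages back, filling this
 * pool up to its capacity, then spilling into next_pool, and freeing whatever
 * remains to the kernel. Reclaimed pages bypass the pools entirely and are
 * always freed to the kernel.
 */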
926 void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
927 		struct tagged_addr *pages, bool dirty, bool reclaimed)
928 {
929 	struct kbase_mem_pool *next_pool = pool->next_pool;
930 	struct page *p;
931 	size_t nr_to_pool;
932 	LIST_HEAD(to_pool_list);
933 	size_t i = 0;
934 	bool pages_released = false;
935 
936 	pool_dbg(pool, "free_pages(%zu):\n", nr_pages);
937 
938 	if (!reclaimed) {
939 		/* Add to this pool */
940 		nr_to_pool = kbase_mem_pool_capacity(pool);
941 		nr_to_pool = min(nr_pages, nr_to_pool);
942 
943 		kbase_mem_pool_add_array(pool, nr_to_pool, pages, false, dirty);
944 
945 		i += nr_to_pool;
946 
947 		if (i != nr_pages && next_pool) {
948 			/* Spill to next pool (may overspill) */
949 			nr_to_pool = kbase_mem_pool_capacity(next_pool);
950 			nr_to_pool = min(nr_pages - i, nr_to_pool);
951 
952 			kbase_mem_pool_add_array(next_pool, nr_to_pool,
953 					pages + i, true, dirty);
954 			i += nr_to_pool;
955 		}
956 	}
957 
958 	/* Free any remaining pages to kernel */
959 	for (; i < nr_pages; i++) {
960 		if (unlikely(!as_phys_addr_t(pages[i])))
961 			continue;
962 
963 		if (is_huge(pages[i]) && !is_huge_head(pages[i])) {
964 			pages[i] = as_tagged(0);
965 			continue;
966 		}
967 		p = as_page(pages[i]);
968 
969 		kbase_mem_pool_free_page(pool, p);
970 		pages[i] = as_tagged(0);
971 		pages_released = true;
972 	}
973 
974 	/* Freeing of pages will be deferred when page migration is enabled. */
975 	if (pages_released)
976 		enqueue_free_pool_pages_work(pool);
977 
978 	pool_dbg(pool, "free_pages(%zu) done\n", nr_pages);
979 }
980 
981 
982 void kbase_mem_pool_free_pages_locked(struct kbase_mem_pool *pool,
983 		size_t nr_pages, struct tagged_addr *pages, bool dirty,
984 		bool reclaimed)
985 {
986 	struct page *p;
987 	size_t nr_to_pool;
988 	LIST_HEAD(to_pool_list);
989 	size_t i = 0;
990 	bool pages_released = false;
991 
992 	lockdep_assert_held(&pool->pool_lock);
993 
994 	pool_dbg(pool, "free_pages_locked(%zu):\n", nr_pages);
995 
996 	if (!reclaimed) {
997 		/* Add to this pool */
998 		nr_to_pool = kbase_mem_pool_capacity(pool);
999 		nr_to_pool = min(nr_pages, nr_to_pool);
1000 
1001 		kbase_mem_pool_add_array_locked(pool, nr_to_pool, pages, false,
1002 						dirty);
1003 
1004 		i += nr_to_pool;
1005 	}
1006 
1007 	/* Free any remaining pages to kernel */
1008 	for (; i < nr_pages; i++) {
1009 		if (unlikely(!as_phys_addr_t(pages[i])))
1010 			continue;
1011 
1012 		if (is_huge(pages[i]) && !is_huge_head(pages[i])) {
1013 			pages[i] = as_tagged(0);
1014 			continue;
1015 		}
1016 
1017 		p = as_page(pages[i]);
1018 
1019 		kbase_mem_pool_free_page(pool, p);
1020 		pages[i] = as_tagged(0);
1021 		pages_released = true;
1022 	}
1023 
1024 	/* Freeing of pages will be deferred when page migration is enabled. */
1025 	if (pages_released)
1026 		enqueue_free_pool_pages_work(pool);
1027 
1028 	pool_dbg(pool, "free_pages_locked(%zu) done\n", nr_pages);
1029 }
1030