// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2017-2023 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include <mali_kbase.h>
#include <mali_kbase_defs.h>
#include "mali_kbase_ctx_sched.h"
#include "tl/mali_kbase_tracepoints.h"
#if MALI_USE_CSF
#include "mali_kbase_reset_gpu.h"
#else
#include <mali_kbase_hwaccess_jm.h>
#endif

/* Helper for ktrace */
#if KBASE_KTRACE_ENABLE
static int kbase_ktrace_get_ctx_refcnt(struct kbase_context *kctx)
{
	return atomic_read(&kctx->refcount);
}
#else /* KBASE_KTRACE_ENABLE */
static int kbase_ktrace_get_ctx_refcnt(struct kbase_context *kctx)
{
	CSTD_UNUSED(kctx);
	return 0;
}
#endif /* KBASE_KTRACE_ENABLE */

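/* kbase_ctx_sched_init - Initialise per-device context scheduling state
 *
 * Marks every hardware address space as free and clears the AS-to-context
 * mapping. As noted below, the derived values must be recalculated if
 * nr_hw_address_spaces changes.
 */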
int kbase_ctx_sched_init(struct kbase_device *kbdev)
{
	int as_present = (1U << kbdev->nr_hw_address_spaces) - 1;

	/* These two must be recalculated if nr_hw_address_spaces changes
	 * (e.g. for HW workarounds)
	 */
	kbdev->nr_user_address_spaces = kbdev->nr_hw_address_spaces;
	kbdev->as_free = as_present; /* All ASs initially free */

	memset(kbdev->as_to_kctx, 0, sizeof(kbdev->as_to_kctx));

	return 0;
}

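/* kbase_ctx_sched_term - Terminate per-device context scheduling state
 *
 * Sanity-checks that every address space is free and no longer has a
 * context assigned to it.
 */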
void kbase_ctx_sched_term(struct kbase_device *kbdev)
{
	s8 i;

	/* Sanity checks */
	for (i = 0; i != kbdev->nr_hw_address_spaces; ++i) {
		WARN_ON(kbdev->as_to_kctx[i] != NULL);
		WARN_ON(!(kbdev->as_free & (1u << i)));
	}
}

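/* kbase_ctx_sched_init_ctx - Initialise the scheduling state of a context
 *
 * The context starts with no address space assigned and a refcount of zero.
 */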
void kbase_ctx_sched_init_ctx(struct kbase_context *kctx)
{
	kctx->as_nr = KBASEP_AS_NR_INVALID;
	atomic_set(&kctx->refcount, 0);
}

/* kbasep_ctx_sched_find_as_for_ctx - Find a free address space
 *
 * @kctx: The context for which to find a free address space
 *
 * Return: A valid AS if successful, otherwise KBASEP_AS_NR_INVALID
 *
 * This function returns an address space available for use. It prefers
 * the AS previously assigned to the context, if it is still free, to
 * avoid having to reprogram the MMU.
 */
static int kbasep_ctx_sched_find_as_for_ctx(struct kbase_context *kctx)
{
	struct kbase_device *const kbdev = kctx->kbdev;
	int free_as;

	lockdep_assert_held(&kbdev->hwaccess_lock);

	/* First check if the previously assigned AS is available */
	if ((kctx->as_nr != KBASEP_AS_NR_INVALID) &&
			(kbdev->as_free & (1u << kctx->as_nr)))
		return kctx->as_nr;

	/* The previously assigned AS was taken, we'll be returning any free
	 * AS at this point.
	 */
	free_as = ffs(kbdev->as_free) - 1;
	if (free_as >= 0 && free_as < kbdev->nr_hw_address_spaces)
		return free_as;

	return KBASEP_AS_NR_INVALID;
}

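/* kbase_ctx_sched_retain_ctx - Take a reference on a context and, on the
 * first reference, assign it a GPU address space
 *
 * If the chosen AS was previously used by a now-inactive context, that
 * context is evicted and the MMU reprogrammed for the new owner.
 *
 * Requires the caller to hold kbdev->mmu_hw_mutex and kbdev->hwaccess_lock,
 * with the GPU powered.
 *
 * Return: The assigned AS number, or KBASEP_AS_NR_INVALID if no free AS
 * could be found.
 */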
int kbase_ctx_sched_retain_ctx(struct kbase_context *kctx)
{
	struct kbase_device *const kbdev = kctx->kbdev;

	lockdep_assert_held(&kbdev->mmu_hw_mutex);
	lockdep_assert_held(&kbdev->hwaccess_lock);

	WARN_ON(!kbdev->pm.backend.gpu_powered);

	if (atomic_inc_return(&kctx->refcount) == 1) {
		int const free_as = kbasep_ctx_sched_find_as_for_ctx(kctx);

		if (free_as >= 0) {
			kbdev->as_free &= ~(1u << free_as);
			/* Only program the MMU if the context has not been
			 * assigned the same address space before.
			 */
			if (free_as != kctx->as_nr) {
				struct kbase_context *const prev_kctx =
					kbdev->as_to_kctx[free_as];

				if (prev_kctx) {
					WARN_ON(atomic_read(&prev_kctx->refcount) != 0);
					kbase_mmu_disable(prev_kctx);
					KBASE_TLSTREAM_TL_KBASE_CTX_UNASSIGN_AS(
						kbdev, prev_kctx->id);
					prev_kctx->as_nr = KBASEP_AS_NR_INVALID;
				}
				kctx->as_nr = free_as;
				kbdev->as_to_kctx[free_as] = kctx;
				KBASE_TLSTREAM_TL_KBASE_CTX_ASSIGN_AS(
					kbdev, kctx->id, free_as);
				kbase_mmu_update(kbdev, &kctx->mmu,
					kctx->as_nr);
			}
		} else {
			atomic_dec(&kctx->refcount);

			/* Failed to find an available address space, we must
			 * be returning an error at this point.
			 */
			WARN_ON(kctx->as_nr != KBASEP_AS_NR_INVALID);
		}
	}

	return kctx->as_nr;
}

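/* kbase_ctx_sched_retain_ctx_refcount - Take an additional reference on a
 * context that is expected to already hold an address space
 *
 * Requires the caller to hold kbdev->hwaccess_lock.
 */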
void kbase_ctx_sched_retain_ctx_refcount(struct kbase_context *kctx)
{
	struct kbase_device *const kbdev = kctx->kbdev;

	lockdep_assert_held(&kbdev->hwaccess_lock);
#if MALI_USE_CSF
	/* We expect the context to be active when this function is called,
	 * except for the case where a page fault is reported for it during
	 * the GPU reset sequence, in which case we can expect the refcount
	 * to be 0.
	 */
	WARN_ON(!atomic_read(&kctx->refcount) && !kbase_reset_gpu_is_active(kbdev));
#else
	/* We expect the context to be active (and thus refcount should be
	 * non-zero) when this function is called.
	 */
	WARN_ON(!atomic_read(&kctx->refcount));
#endif
	if (likely((kctx->as_nr >= 0) && (kctx->as_nr < BASE_MAX_NR_AS)))
		WARN_ON(kbdev->as_to_kctx[kctx->as_nr] != kctx);
	else
		WARN(true, "Invalid as_nr(%d)", kctx->as_nr);

	atomic_inc(&kctx->refcount);
}

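/* kbase_ctx_sched_release_ctx - Drop a reference on a context
 *
 * When the refcount reaches zero, the context's address space is marked as
 * free again; the AS-to-context mapping is only torn down here if the AS
 * was disabled on a fault.
 *
 * Requires the caller to hold kbdev->hwaccess_lock.
 */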
void kbase_ctx_sched_release_ctx(struct kbase_context *kctx)
{
	struct kbase_device *const kbdev = kctx->kbdev;
	int new_ref_count;

	lockdep_assert_held(&kbdev->hwaccess_lock);

	new_ref_count = atomic_dec_return(&kctx->refcount);
	if (new_ref_count == 0) {
		if (likely((kctx->as_nr >= 0) && (kctx->as_nr < BASE_MAX_NR_AS))) {
			kbdev->as_free |= (1u << kctx->as_nr);
			if (kbase_ctx_flag(kctx, KCTX_AS_DISABLED_ON_FAULT)) {
				KBASE_TLSTREAM_TL_KBASE_CTX_UNASSIGN_AS(kbdev, kctx->id);
				kbdev->as_to_kctx[kctx->as_nr] = NULL;
				kctx->as_nr = KBASEP_AS_NR_INVALID;
				kbase_ctx_flag_clear(kctx, KCTX_AS_DISABLED_ON_FAULT);
#if !MALI_USE_CSF
				kbase_backend_slot_kctx_purge_locked(kbdev, kctx);
#endif
			}
		}
	}

	KBASE_KTRACE_ADD(kbdev, SCHED_RELEASE_CTX, kctx, new_ref_count);
}

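/* kbase_ctx_sched_remove_ctx - Remove a context from the scheduler entirely
 *
 * The context must no longer be referenced. Its MMU is disabled (if the GPU
 * is powered) and any AS assignment is released.
 *
 * Takes kbdev->mmu_hw_mutex and kbdev->hwaccess_lock itself.
 */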
void kbase_ctx_sched_remove_ctx(struct kbase_context *kctx)
{
	struct kbase_device *const kbdev = kctx->kbdev;
	unsigned long flags;

	mutex_lock(&kbdev->mmu_hw_mutex);
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	WARN_ON(atomic_read(&kctx->refcount) != 0);

	if ((kctx->as_nr >= 0) && (kctx->as_nr < BASE_MAX_NR_AS)) {
		if (kbdev->pm.backend.gpu_powered)
			kbase_mmu_disable(kctx);

		KBASE_TLSTREAM_TL_KBASE_CTX_UNASSIGN_AS(kbdev, kctx->id);
		kbdev->as_to_kctx[kctx->as_nr] = NULL;
		kctx->as_nr = KBASEP_AS_NR_INVALID;
	}

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
	mutex_unlock(&kbdev->mmu_hw_mutex);
}

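/* kbase_ctx_sched_restore_all_as - Reprogram every GPU address space
 *
 * Active contexts have their MMU set-up restored; inactive contexts lose any
 * stale AS assignment and unused address spaces are disabled. On CSF GPUs the
 * MCU address space is also reprogrammed once the firmware is initialised.
 *
 * Requires the caller to hold kbdev->mmu_hw_mutex and kbdev->hwaccess_lock,
 * with the GPU powered.
 */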
void kbase_ctx_sched_restore_all_as(struct kbase_device *kbdev)
{
	s8 i;

	lockdep_assert_held(&kbdev->mmu_hw_mutex);
	lockdep_assert_held(&kbdev->hwaccess_lock);

	WARN_ON(!kbdev->pm.backend.gpu_powered);

	for (i = 0; i != kbdev->nr_hw_address_spaces; ++i) {
		struct kbase_context *kctx;

		kbdev->as[i].is_unresponsive = false;
#if MALI_USE_CSF
		if ((i == MCU_AS_NR) && kbdev->csf.firmware_inited) {
			kbase_mmu_update(kbdev, &kbdev->csf.mcu_mmu,
					 MCU_AS_NR);
			continue;
		}
#endif
		kctx = kbdev->as_to_kctx[i];
		if (kctx) {
			if (atomic_read(&kctx->refcount)) {
				WARN_ON(kctx->as_nr != i);

				kbase_mmu_update(kbdev, &kctx->mmu,
					kctx->as_nr);
				kbase_ctx_flag_clear(kctx,
					KCTX_AS_DISABLED_ON_FAULT);
			} else {
				/* This context might have been assigned an
				 * AS before, clear it.
				 */
				if (kctx->as_nr != KBASEP_AS_NR_INVALID) {
					KBASE_TLSTREAM_TL_KBASE_CTX_UNASSIGN_AS(
						kbdev, kctx->id);
					kbdev->as_to_kctx[kctx->as_nr] = NULL;
					kctx->as_nr = KBASEP_AS_NR_INVALID;
				}
			}
		} else {
			kbase_mmu_disable_as(kbdev, i);
		}
	}
}

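/* kbase_ctx_sched_as_to_ctx_refcount - Look up the context assigned to an
 * address space and take a reference on it
 *
 * Takes kbdev->hwaccess_lock itself.
 *
 * Return: The context using the given AS (with its refcount incremented),
 * or NULL if the arguments are invalid or no context is assigned.
 */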
struct kbase_context *kbase_ctx_sched_as_to_ctx_refcount(
		struct kbase_device *kbdev, size_t as_nr)
{
	unsigned long flags;
	struct kbase_context *found_kctx = NULL;

	if (WARN_ON(kbdev == NULL))
		return NULL;

	if (WARN_ON(as_nr >= BASE_MAX_NR_AS))
		return NULL;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	found_kctx = kbdev->as_to_kctx[as_nr];

	if (!WARN_ON(found_kctx == NULL))
		kbase_ctx_sched_retain_ctx_refcount(found_kctx);

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	return found_kctx;
}

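/* kbase_ctx_sched_as_to_ctx - Look up the active context assigned to an
 * address space, without taking a reference
 *
 * Locking wrapper around kbase_ctx_sched_as_to_ctx_nolock(); takes
 * kbdev->hwaccess_lock itself.
 */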
struct kbase_context *kbase_ctx_sched_as_to_ctx(struct kbase_device *kbdev,
		size_t as_nr)
{
	unsigned long flags;
	struct kbase_context *found_kctx;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	found_kctx = kbase_ctx_sched_as_to_ctx_nolock(kbdev, as_nr);

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	return found_kctx;
}

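/* kbase_ctx_sched_as_to_ctx_nolock - Look up the active context assigned to
 * an address space
 *
 * Requires the caller to hold kbdev->hwaccess_lock.
 *
 * Return: The context using the given AS, or NULL if the AS is unused or the
 * context's refcount has dropped to zero.
 */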
struct kbase_context *kbase_ctx_sched_as_to_ctx_nolock(
		struct kbase_device *kbdev, size_t as_nr)
{
	struct kbase_context *found_kctx;

	if (WARN_ON(kbdev == NULL))
		return NULL;

	if (WARN_ON(as_nr >= BASE_MAX_NR_AS))
		return NULL;

	lockdep_assert_held(&kbdev->hwaccess_lock);

	found_kctx = kbdev->as_to_kctx[as_nr];

	if (found_kctx) {
		if (atomic_read(&found_kctx->refcount) <= 0)
			found_kctx = NULL;
	}

	return found_kctx;
}

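/* kbase_ctx_sched_inc_refcount_nolock - Take a reference on a context, but
 * only if it is already active (refcount > 0)
 *
 * Requires the caller to hold kbdev->hwaccess_lock.
 *
 * Return: true if a reference was taken, false otherwise.
 */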
bool kbase_ctx_sched_inc_refcount_nolock(struct kbase_context *kctx)
{
	bool result = false;

	if (WARN_ON(kctx == NULL))
		return result;

	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);

	if (atomic_read(&kctx->refcount) > 0) {
		KBASE_DEBUG_ASSERT(kctx->as_nr >= 0);

		kbase_ctx_sched_retain_ctx_refcount(kctx);
		KBASE_KTRACE_ADD(kctx->kbdev, SCHED_RETAIN_CTX_NOLOCK, kctx,
				kbase_ktrace_get_ctx_refcnt(kctx));
		result = true;
	}

	return result;
}

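/* kbase_ctx_sched_inc_refcount - Locking wrapper around
 * kbase_ctx_sched_inc_refcount_nolock()
 *
 * Takes kbdev->mmu_hw_mutex and kbdev->hwaccess_lock itself.
 *
 * Return: true if a reference was taken, false otherwise.
 */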
bool kbase_ctx_sched_inc_refcount(struct kbase_context *kctx)
{
	unsigned long flags;
	bool result = false;

	if (WARN_ON(kctx == NULL))
		return result;

	if (WARN_ON(kctx->kbdev == NULL))
		return result;

	mutex_lock(&kctx->kbdev->mmu_hw_mutex);
	spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, flags);
	result = kbase_ctx_sched_inc_refcount_nolock(kctx);
	spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, flags);
	mutex_unlock(&kctx->kbdev->mmu_hw_mutex);

	return result;
}

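/* kbase_ctx_sched_release_ctx_lock - Locking wrapper around
 * kbase_ctx_sched_release_ctx()
 *
 * Takes kbdev->hwaccess_lock itself, and only drops the reference if the
 * context currently holds an address space and a non-zero refcount.
 */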
void kbase_ctx_sched_release_ctx_lock(struct kbase_context *kctx)
{
	unsigned long flags;

	if (WARN_ON(!kctx))
		return;

	spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, flags);

	if (!WARN_ON(kctx->as_nr == KBASEP_AS_NR_INVALID) &&
			!WARN_ON(atomic_read(&kctx->refcount) <= 0))
		kbase_ctx_sched_release_ctx(kctx);

	spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, flags);
}

#if MALI_USE_CSF
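/* kbase_ctx_sched_inc_refcount_if_as_valid - Take a reference on a context,
 * but only if it still owns a valid address space
 *
 * Unlike kbase_ctx_sched_inc_refcount(), this also accepts a context whose
 * refcount has dropped to zero, as long as its AS assignment is intact; the
 * AS is re-marked as busy in that case.
 *
 * Return: true if a reference was taken, false otherwise.
 */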
bool kbase_ctx_sched_inc_refcount_if_as_valid(struct kbase_context *kctx)
{
	struct kbase_device *kbdev;
	bool added_ref = false;
	unsigned long flags;

	if (WARN_ON(kctx == NULL))
		return added_ref;

	kbdev = kctx->kbdev;

	if (WARN_ON(kbdev == NULL))
		return added_ref;

	mutex_lock(&kbdev->mmu_hw_mutex);
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	if ((kctx->as_nr != KBASEP_AS_NR_INVALID) &&
	    (kctx == kbdev->as_to_kctx[kctx->as_nr])) {
		atomic_inc(&kctx->refcount);

		if (kbdev->as_free & (1u << kctx->as_nr))
			kbdev->as_free &= ~(1u << kctx->as_nr);

		KBASE_KTRACE_ADD(kbdev, SCHED_RETAIN_CTX_NOLOCK, kctx,
				 kbase_ktrace_get_ctx_refcnt(kctx));
		added_ref = true;
	}

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
	mutex_unlock(&kbdev->mmu_hw_mutex);

	return added_ref;
}
#endif