// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/**
 * DOC: Base kernel MMU management specific for CSF GPU.
 */

#include <mali_kbase.h>
#include <gpu/mali_kbase_gpu_fault.h>
#include <mali_kbase_ctx_sched.h>
#include <mali_kbase_reset_gpu.h>
#include <mali_kbase_as_fault_debugfs.h>
#include <mmu/mali_kbase_mmu_internal.h>

void kbase_mmu_get_as_setup(struct kbase_mmu_table *mmut,
		struct kbase_mmu_setup * const setup)
{
	/* Set up the required caching policies at the correct indices
	 * in the memattr register.
	 */
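	/* Each AS_MEMATTR_* value below is an 8-bit attribute encoding placed
	 * at byte position AS_MEMATTR_INDEX_* within the 64-bit MEMATTR
	 * value, hence the multiplication of each index by 8 in the shifts.
	 */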
	setup->memattr =
		(AS_MEMATTR_IMPL_DEF_CACHE_POLICY <<
			(AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY * 8)) |
		(AS_MEMATTR_FORCE_TO_CACHE_ALL <<
			(AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL * 8)) |
		(AS_MEMATTR_WRITE_ALLOC <<
			(AS_MEMATTR_INDEX_WRITE_ALLOC * 8)) |
		(AS_MEMATTR_AARCH64_OUTER_IMPL_DEF <<
			(AS_MEMATTR_INDEX_OUTER_IMPL_DEF * 8)) |
		(AS_MEMATTR_AARCH64_OUTER_WA <<
			(AS_MEMATTR_INDEX_OUTER_WA * 8)) |
		(AS_MEMATTR_AARCH64_NON_CACHEABLE <<
			(AS_MEMATTR_INDEX_NON_CACHEABLE * 8)) |
		(AS_MEMATTR_AARCH64_SHARED <<
			(AS_MEMATTR_INDEX_SHARED * 8));

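	/* TRANSTAB holds the physical address of the top-level page table
	 * (mmut->pgd), masked to the architecturally valid base address bits,
	 * while TRANSCFG selects the AArch64 4KB page-table format.
	 */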
	setup->transtab = (u64)mmut->pgd & AS_TRANSTAB_BASE_MASK;
	setup->transcfg = AS_TRANSCFG_ADRMODE_AARCH64_4K;
}

/**
 * submit_work_pagefault() - Submit a work item for an MMU page fault.
 *
 * @kbdev:    Kbase device pointer
 * @as_nr:    Faulty address space
 * @fault:    Data relating to the fault
 *
 * This function queues a work item to report the details of the MMU page fault.
 */
static void submit_work_pagefault(struct kbase_device *kbdev, u32 as_nr,
		struct kbase_fault *fault)
{
	unsigned long flags;
	struct kbase_as *const as = &kbdev->as[as_nr];
	struct kbase_context *kctx;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kctx = kbase_ctx_sched_as_to_ctx_nolock(kbdev, as_nr);

	if (kctx) {
		kbase_ctx_sched_retain_ctx_refcount(kctx);

		as->pf_data = (struct kbase_fault) {
			.status = fault->status,
			.addr = fault->addr,
		};

		/*
		 * A page fault work item could already be pending for the
		 * context's address space when the page fault occurs for
		 * the MCU's address space.
		 */
		if (!queue_work(as->pf_wq, &as->work_pagefault)) {
			dev_dbg(kbdev->dev,
				"Page fault is already pending for as %u", as_nr);
			kbase_ctx_sched_release_ctx(kctx);
		} else {
			atomic_inc(&kbdev->faults_pending);
		}
	}
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}

void kbase_mmu_report_mcu_as_fault_and_reset(struct kbase_device *kbdev,
		struct kbase_fault *fault)
{
	/* decode the fault status */
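	/* Fault status layout as decoded here: exception type in bits [7:0],
	 * access type in bits [9:8] and source id in the upper bits from
	 * bit 16 onwards.
	 */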
	u32 exception_type = fault->status & 0xFF;
	u32 access_type = (fault->status >> 8) & 0x3;
	u32 source_id = (fault->status >> 16);
	int as_no;

	/* terminal fault, print info about the fault */
	dev_err(kbdev->dev,
		"Unexpected Page fault in firmware address space at VA 0x%016llX\n"
		"raw fault status: 0x%X\n"
		"exception type 0x%X: %s\n"
		"access type 0x%X: %s\n"
		"source id 0x%X\n",
		fault->addr,
		fault->status,
		exception_type, kbase_gpu_exception_name(exception_type),
		access_type, kbase_gpu_access_type_name(fault->status),
		source_id);

	kbase_debug_csf_fault_notify(kbdev, NULL, DF_GPU_PAGE_FAULT);

	/* Report MMU fault for all address spaces (except MCU_AS_NR) */
	for (as_no = 1; as_no < kbdev->nr_hw_address_spaces; as_no++)
		submit_work_pagefault(kbdev, as_no, fault);

	/* GPU reset is required to recover */
	if (kbase_prepare_to_reset_gpu(kbdev,
				       RESET_FLAGS_HWC_UNRECOVERABLE_ERROR))
		kbase_reset_gpu(kbdev);
}
KBASE_EXPORT_TEST_API(kbase_mmu_report_mcu_as_fault_and_reset);

void kbase_gpu_report_bus_fault_and_kill(struct kbase_context *kctx,
		struct kbase_as *as, struct kbase_fault *fault)
{
	struct kbase_device *kbdev = kctx->kbdev;
	u32 const status = fault->status;
	int exception_type = (status & GPU_FAULTSTATUS_EXCEPTION_TYPE_MASK) >>
				GPU_FAULTSTATUS_EXCEPTION_TYPE_SHIFT;
	int access_type = (status & GPU_FAULTSTATUS_ACCESS_TYPE_MASK) >>
				GPU_FAULTSTATUS_ACCESS_TYPE_SHIFT;
	int source_id = (status & GPU_FAULTSTATUS_SOURCE_ID_MASK) >>
				GPU_FAULTSTATUS_SOURCE_ID_SHIFT;
	const char *addr_valid = (status & GPU_FAULTSTATUS_ADDR_VALID_FLAG) ?
					"true" : "false";
	int as_no = as->number;
	unsigned long flags;
	const uintptr_t fault_addr = fault->addr;

	/* terminal fault, print info about the fault */
	dev_err(kbdev->dev,
		"GPU bus fault in AS%d at PA %pK\n"
		"PA_VALID: %s\n"
		"raw fault status: 0x%X\n"
		"exception type 0x%X: %s\n"
		"access type 0x%X: %s\n"
		"source id 0x%X\n"
		"pid: %d\n",
		as_no, (void *)fault_addr,
		addr_valid,
		status,
		exception_type, kbase_gpu_exception_name(exception_type),
		access_type, kbase_gpu_access_type_name(access_type),
		source_id,
		kctx->pid);

	/* AS transaction begin */
	mutex_lock(&kbdev->mmu_hw_mutex);
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbase_mmu_disable(kctx);
	kbase_ctx_flag_set(kctx, KCTX_AS_DISABLED_ON_FAULT);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
	mutex_unlock(&kbdev->mmu_hw_mutex);

	/* Switching to UNMAPPED mode above would have enabled the firmware to
	 * recover from the fault (if the memory access was made by firmware)
	 * and it can then respond to CSG termination requests to be sent now.
	 * All GPU command queue groups associated with the context would be
	 * affected as they use the same GPU address space.
	 */
	kbase_csf_ctx_handle_fault(kctx, fault);

	/* Now clear the GPU fault */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
			GPU_COMMAND_CLEAR_FAULT);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}

/*
 * The caller must ensure it's retained the ctx to prevent it from being
 * scheduled out whilst it's being worked on.
 */
void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
		struct kbase_as *as, const char *reason_str,
		struct kbase_fault *fault)
{
	unsigned long flags;
	unsigned int exception_type;
	unsigned int access_type;
	unsigned int source_id;
	int as_no;
	struct kbase_device *kbdev;
	const u32 status = fault->status;

	as_no = as->number;
	kbdev = kctx->kbdev;

	/* Make sure the context was active */
	if (WARN_ON(atomic_read(&kctx->refcount) <= 0))
		return;

	/* decode the fault status */
	exception_type = AS_FAULTSTATUS_EXCEPTION_TYPE_GET(status);
	access_type = AS_FAULTSTATUS_ACCESS_TYPE_GET(status);
	source_id = AS_FAULTSTATUS_SOURCE_ID_GET(status);

	/* terminal fault, print info about the fault */
	dev_err(kbdev->dev,
		"Unhandled Page fault in AS%d at VA 0x%016llX\n"
		"Reason: %s\n"
		"raw fault status: 0x%X\n"
		"exception type 0x%X: %s\n"
		"access type 0x%X: %s\n"
		"source id 0x%X\n"
		"pid: %d\n",
		as_no, fault->addr,
		reason_str,
		status,
		exception_type, kbase_gpu_exception_name(exception_type),
		access_type, kbase_gpu_access_type_name(status),
		source_id,
		kctx->pid);

	/* AS transaction begin */
	mutex_lock(&kbdev->mmu_hw_mutex);

	/* switch to UNMAPPED mode,
	 * will abort all jobs and stop any hw counter dumping
	 */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbase_mmu_disable(kctx);
	kbase_ctx_flag_set(kctx, KCTX_AS_DISABLED_ON_FAULT);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	mutex_unlock(&kbdev->mmu_hw_mutex);
	/* AS transaction end */

	kbase_debug_csf_fault_notify(kbdev, kctx, DF_GPU_PAGE_FAULT);
	/* Switching to UNMAPPED mode above would have enabled the firmware to
	 * recover from the fault (if the memory access was made by firmware)
	 * and it can then respond to CSG termination requests to be sent now.
	 * All GPU command queue groups associated with the context would be
	 * affected as they use the same GPU address space.
	 */
	kbase_csf_ctx_handle_fault(kctx, fault);

	/* Clear down the fault */
	kbase_mmu_hw_clear_fault(kbdev, as,
			KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
	kbase_mmu_hw_enable_fault(kbdev, as,
			KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
}

/**
 * kbase_mmu_interrupt_process() - Process a bus or page fault.
 * @kbdev:	The kbase_device the fault happened on
 * @kctx:	The kbase_context for the faulting address space if one was
 *		found.
 * @as:		The address space that has the fault
 * @fault:	Data relating to the fault
 *
 * This function will process a fault on a specific address space.
 */
static void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
		struct kbase_context *kctx, struct kbase_as *as,
		struct kbase_fault *fault)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);

	if (!kctx) {
		dev_warn(kbdev->dev, "%s in AS%d at 0x%016llx with no context present! Spurious IRQ or SW Design Error?\n",
				kbase_as_has_bus_fault(as, fault) ?
						"Bus error" : "Page fault",
				as->number, fault->addr);

		/* Since no ctx was found, the MMU must be disabled. */
		WARN_ON(as->current_setup.transtab);

		if (kbase_as_has_bus_fault(as, fault)) {
			kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
				GPU_COMMAND_CLEAR_FAULT);
		} else if (kbase_as_has_page_fault(as, fault)) {
			kbase_mmu_hw_clear_fault(kbdev, as,
					KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
			kbase_mmu_hw_enable_fault(kbdev, as,
					KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
		}

		return;
	}

	if (kbase_as_has_bus_fault(as, fault)) {
		/*
		 * We need to switch to UNMAPPED mode - but we do this in a
		 * worker so that we can sleep
		 */
		WARN_ON(!queue_work(as->pf_wq, &as->work_busfault));
		atomic_inc(&kbdev->faults_pending);
	} else {
		WARN_ON(!queue_work(as->pf_wq, &as->work_pagefault));
		atomic_inc(&kbdev->faults_pending);
	}
}

int kbase_mmu_bus_fault_interrupt(struct kbase_device *kbdev,
		u32 status, u32 as_nr)
{
	struct kbase_context *kctx;
	unsigned long flags;
	struct kbase_as *as;
	struct kbase_fault *fault;

	if (WARN_ON(as_nr == MCU_AS_NR))
		return -EINVAL;

	if (WARN_ON(as_nr >= BASE_MAX_NR_AS))
		return -EINVAL;

	as = &kbdev->as[as_nr];
	fault = &as->bf_data;
	fault->status = status;
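	/* Assemble the 64-bit bus fault address from the HI/LO halves of the
	 * GPU_FAULTADDRESS register pair.
	 */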
	fault->addr = (u64) kbase_reg_read(kbdev,
		GPU_CONTROL_REG(GPU_FAULTADDRESS_HI)) << 32;
	fault->addr |= kbase_reg_read(kbdev,
		GPU_CONTROL_REG(GPU_FAULTADDRESS_LO));
	fault->protected_mode = false;

	/* report the fault to debugfs */
	kbase_as_fault_debugfs_new(kbdev, as_nr);

	kctx = kbase_ctx_sched_as_to_ctx_refcount(kbdev, as_nr);

	/* Process the bus fault interrupt for this address space */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbase_mmu_interrupt_process(kbdev, kctx, as, fault);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	return 0;
}

void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
{
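	/* As decoded here, MMU_IRQ_STATUS carries one page fault bit per
	 * address space in its low 16 bits. Interrupts are masked while the
	 * pending bits are walked below, and the mask is restored once every
	 * reported fault has been queued for bottom-half handling.
	 */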
	const int num_as = 16;
	const int pf_shift = 0;
	const unsigned long as_bit_mask = (1UL << num_as) - 1;
	unsigned long flags;
	u32 new_mask;
	u32 tmp;
	u32 pf_bits = ((irq_stat >> pf_shift) & as_bit_mask);

	/* remember current mask */
	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
	new_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK));
	/* mask interrupts for now */
	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0);
	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);

	while (pf_bits) {
		struct kbase_context *kctx;
		int as_no = ffs(pf_bits) - 1;
		struct kbase_as *as = &kbdev->as[as_no];
		struct kbase_fault *fault = &as->pf_data;

		/* find faulting address */
		fault->addr = kbase_reg_read(kbdev, MMU_AS_REG(as_no,
				AS_FAULTADDRESS_HI));
		fault->addr <<= 32;
		fault->addr |= kbase_reg_read(kbdev, MMU_AS_REG(as_no,
				AS_FAULTADDRESS_LO));

		/* Mark the fault protected or not */
		fault->protected_mode = false;

		/* report the fault to debugfs */
		kbase_as_fault_debugfs_new(kbdev, as_no);

		/* record the fault status */
		fault->status = kbase_reg_read(kbdev, MMU_AS_REG(as_no,
				AS_FAULTSTATUS));

		fault->extra_addr = kbase_reg_read(kbdev,
					MMU_AS_REG(as_no, AS_FAULTEXTRA_HI));
		fault->extra_addr <<= 32;
		fault->extra_addr |= kbase_reg_read(kbdev,
					MMU_AS_REG(as_no, AS_FAULTEXTRA_LO));

		/* Mark page fault as handled */
		pf_bits &= ~(1UL << as_no);

		/* remove the queued PF from the mask */
		new_mask &= ~MMU_PAGE_FAULT(as_no);

		if (as_no == MCU_AS_NR) {
			kbase_mmu_report_mcu_as_fault_and_reset(kbdev, fault);
			/* Pointless to handle remaining faults */
			break;
		}

		/*
		 * Refcount the kctx - it shouldn't disappear anyway, since
		 * Page faults _should_ only occur whilst GPU commands are
		 * executing, and a command causing the Page fault shouldn't
		 * complete until the MMU is updated.
		 * Reference is released at the end of bottom half of page
		 * fault handling.
		 */
		kctx = kbase_ctx_sched_as_to_ctx_refcount(kbdev, as_no);

		/* Process the interrupt for this address space */
		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
		kbase_mmu_interrupt_process(kbdev, kctx, as, fault);
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
	}

	/* reenable interrupts */
	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
	tmp = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK));
	new_mask |= tmp;
	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), new_mask);
	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
}

int kbase_mmu_switch_to_ir(struct kbase_context *const kctx,
	struct kbase_va_region *const reg)
{
	/* Can't soft-stop the provoking job */
	return -EPERM;
}

/**
 * kbase_mmu_gpu_fault_worker() - Process a GPU fault for the device.
 *
 * @data:  work_struct passed by queue_work()
 *
 * Report a GPU fatal error for all GPU command queue groups that are
 * using the address space and terminate them.
 */
static void kbase_mmu_gpu_fault_worker(struct work_struct *data)
{
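	/* Recover the faulting address space from the work item, and then the
	 * owning kbase_device from that address space's slot in kbdev->as[].
	 */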
	struct kbase_as *const faulting_as = container_of(data, struct kbase_as,
			work_gpufault);
	const u32 as_nr = faulting_as->number;
	struct kbase_device *const kbdev = container_of(faulting_as, struct
			kbase_device, as[as_nr]);
	struct kbase_fault *fault;
	struct kbase_context *kctx;
	u32 status;
	u64 address;
	u32 as_valid;
	unsigned long flags;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	fault = &faulting_as->gf_data;
	status = fault->status;
	as_valid = status & GPU_FAULTSTATUS_JASID_VALID_FLAG;
	address = fault->addr;
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	dev_warn(kbdev->dev,
		 "GPU Fault 0x%08x (%s) in AS%u at 0x%016llx\n"
		 "ASID_VALID: %s,  ADDRESS_VALID: %s\n",
		 status,
		 kbase_gpu_exception_name(
			GPU_FAULTSTATUS_EXCEPTION_TYPE_GET(status)),
		 as_nr, address,
		 as_valid ? "true" : "false",
		 status & GPU_FAULTSTATUS_ADDR_VALID_FLAG ? "true" : "false");

	kctx = kbase_ctx_sched_as_to_ctx(kbdev, as_nr);
	kbase_csf_ctx_handle_fault(kctx, fault);
	kbase_ctx_sched_release_ctx_lock(kctx);

	/* The work for the GPU fault is complete. Until this point, no further
	 * GPU fault could be reported. Now clear the GPU fault to allow the
	 * next GPU fault interrupt to be reported.
	 */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
			GPU_COMMAND_CLEAR_FAULT);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	atomic_dec(&kbdev->faults_pending);
}

/**
 * submit_work_gpufault() - Submit a work item for a GPU fault.
 *
 * @kbdev:    Kbase device pointer
 * @status:   GPU fault status
 * @as_nr:    Faulty address space
 * @address:  GPU fault address
 *
 * This function queues a work item to report the details of the GPU fault.
 */
static void submit_work_gpufault(struct kbase_device *kbdev, u32 status,
		u32 as_nr, u64 address)
{
	unsigned long flags;
	struct kbase_as *const as = &kbdev->as[as_nr];
	struct kbase_context *kctx;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kctx = kbase_ctx_sched_as_to_ctx_nolock(kbdev, as_nr);

	if (kctx) {
		kbase_ctx_sched_retain_ctx_refcount(kctx);

		as->gf_data = (struct kbase_fault) {
			.status = status,
			.addr = address,
		};

		if (WARN_ON(!queue_work(as->pf_wq, &as->work_gpufault)))
			kbase_ctx_sched_release_ctx(kctx);
		else
			atomic_inc(&kbdev->faults_pending);
	}
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}

void kbase_mmu_gpu_fault_interrupt(struct kbase_device *kbdev, u32 status,
		u32 as_nr, u64 address, bool as_valid)
{
	if (!as_valid || (as_nr == MCU_AS_NR)) {
		int as;

		/* Report the GPU fault for all contexts (except MCU_AS_NR)
		 * when either the reported address space is invalid or it is
		 * the MCU's address space.
		 */
		for (as = 1; as < kbdev->nr_hw_address_spaces; as++)
			submit_work_gpufault(kbdev, status, as, address);
	} else {
		submit_work_gpufault(kbdev, status, as_nr, address);
	}
}
KBASE_EXPORT_TEST_API(kbase_mmu_gpu_fault_interrupt);

int kbase_mmu_as_init(struct kbase_device *kbdev, unsigned int i)
{
	kbdev->as[i].number = i;
	kbdev->as[i].bf_data.addr = 0ULL;
	kbdev->as[i].pf_data.addr = 0ULL;
	kbdev->as[i].gf_data.addr = 0ULL;
	kbdev->as[i].is_unresponsive = false;

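	/* A dedicated workqueue per address space, with max_active limited to
	 * 1, so the page fault, bus fault and GPU fault bottom halves for a
	 * given AS are serialised with respect to each other.
	 */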
	kbdev->as[i].pf_wq = alloc_workqueue("mali_mmu%d", WQ_UNBOUND, 1, i);
	if (!kbdev->as[i].pf_wq)
		return -ENOMEM;

	INIT_WORK(&kbdev->as[i].work_pagefault, kbase_mmu_page_fault_worker);
	INIT_WORK(&kbdev->as[i].work_busfault, kbase_mmu_bus_fault_worker);
	INIT_WORK(&kbdev->as[i].work_gpufault, kbase_mmu_gpu_fault_worker);

	return 0;
}
573