// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2014-2023 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include <device/mali_kbase_device.h>
#include <linux/bitops.h>
#include <mali_kbase.h>
#include <mali_kbase_ctx_sched.h>
#include <mali_kbase_mem.h>
#include <mali_kbase_reset_gpu.h>
#include <mmu/mali_kbase_mmu_hw.h>
#include <tl/mali_kbase_tracepoints.h>
#include <linux/delay.h>

#if MALI_USE_CSF
/**
 * mmu_has_flush_skip_pgd_levels() - Check if the GPU has the feature
 *                                   AS_LOCKADDR_FLUSH_SKIP_LEVELS
 *
 * @gpu_props: GPU properties for the GPU instance.
 *
 * This function returns whether a cache flush can apply the skip flags of
 * AS_LOCKADDR_FLUSH_SKIP_LEVELS.
 *
 * Return: True if the cache flush supports the said feature.
 */
static bool mmu_has_flush_skip_pgd_levels(struct kbase_gpu_props const *gpu_props)
{
        u32 const signature =
                gpu_props->props.raw_props.gpu_id & (GPU_ID2_ARCH_MAJOR | GPU_ID2_ARCH_REV);

        return signature >= (u32)GPU_ID2_PRODUCT_MAKE(12, 0, 4, 0);
}
#endif

/**
 * lock_region() - Generate lockaddr to lock memory region in MMU
 *
 * @gpu_props: GPU properties for finding the MMU lock region size.
 * @lockaddr:  Address and size of memory region to lock.
 * @op_param:  Pointer to a struct containing the starting page frame number of
 *             the region to lock, the number of pages to lock and page table
 *             levels to skip when flushing (if supported).
 *
 * The lockaddr value is a combination of the starting address and
 * the size of the region that encompasses all the memory pages to lock.
 *
 * Bits 5:0 are used to represent the size, which must be a power of 2.
 * The smallest amount of memory to be locked corresponds to 32 kB,
 * i.e. 8 memory pages, because an MMU cache line is made of 64 bytes
 * and every page table entry is 8 bytes. Therefore it is not possible
 * to lock less than 8 memory pages at a time.
 *
 * The size is expressed as a logarithm minus one:
 * - A value of 14 is thus interpreted as log(32 kB) = 15, where 32 kB
 *   is the smallest possible size.
 * - Likewise, a value of 47 is interpreted as log(256 TB) = 48, where 256 TB
 *   is the largest possible size (implementation defined value according
 *   to the HW spec).
 *
 * Bits 11:6 are reserved.
 *
 * Bits 63:12 are used to represent the base address of the region to lock.
 * Only the upper bits of the address are used; the lowest bits are cleared
 * to avoid confusion.
 *
 * The address is aligned to a multiple of the region size. This has profound
 * implications on the region size itself: often the MMU will lock a region
 * larger than the given number of pages, because the lock region cannot start
 * from any arbitrary address.
 *
 * Return: 0 on success, or an error code on failure.
 */
static int lock_region(struct kbase_gpu_props const *gpu_props, u64 *lockaddr,
                       const struct kbase_mmu_hw_op_param *op_param)
{
        const u64 lockaddr_base = op_param->vpfn << PAGE_SHIFT;
        const u64 lockaddr_end = ((op_param->vpfn + op_param->nr) << PAGE_SHIFT) - 1;
        u64 lockaddr_size_log2;

        if (op_param->nr == 0)
                return -EINVAL;

        /* The MMU lock region is a self-aligned region whose size
         * is a power of 2 and that contains both start and end
         * of the address range determined by pfn and num_pages.
         * The size of the MMU lock region can be defined as the
         * largest divisor that yields the same result when both
         * start and end addresses are divided by it.
         *
         * For instance: pfn=0x4F000 num_pages=2 describe the
         * address range between 0x4F000 and 0x50FFF. It is only
         * 2 memory pages. However there isn't a single lock region
         * of 8 kB that encompasses both addresses because 0x4F000
         * would fall into the [0x4E000, 0x4FFFF] region while
         * 0x50000 would fall into the [0x50000, 0x51FFF] region.
         * The minimum lock region size that includes the entire
         * address range is 128 kB, and the region would be
         * [0x40000, 0x5FFFF].
         *
         * The region size can be found by comparing the desired
         * start and end addresses and finding the highest bit
         * that differs. The smallest naturally aligned region
         * must include this bit change, hence the desired region
         * starts with this bit (and subsequent bits) set to 0
         * and ends with the bit (and subsequent bits) set to 1.
         *
         * In the example above: 0x4F000 ^ 0x50FFF = 0x1FFFF
         * therefore the highest bit that differs is bit #16
         * and the region size (as a logarithm) is 16 + 1 = 17, i.e. 128 kB.
         */
        lockaddr_size_log2 = fls64(lockaddr_base ^ lockaddr_end);

        /* Cap the size against minimum and maximum values allowed. */
        if (lockaddr_size_log2 > KBASE_LOCK_REGION_MAX_SIZE_LOG2)
                return -EINVAL;

        lockaddr_size_log2 =
                MAX(lockaddr_size_log2, kbase_get_lock_region_min_size_log2(gpu_props));

        /* Represent the result in a way that is compatible with HW spec.
         *
         * Upper bits are used for the base address, whose lower bits
         * are cleared to avoid confusion because they are going to be ignored
         * by the MMU anyway, since lock regions shall be aligned with
         * a multiple of their size and cannot start from any address.
         *
         * Lower bits are used for the size, which is represented as
         * logarithm minus one of the actual size.
         */
        *lockaddr = lockaddr_base & ~((1ull << lockaddr_size_log2) - 1);
        *lockaddr |= lockaddr_size_log2 - 1;
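        /* Worked example, following the address range used in the comment
         * above and assuming the 32 kB minimum does not apply: for
         * [0x4F000, 0x50FFF] the region size log2 is 17, so the encoded
         * value is (0x4F000 & ~((1ull << 17) - 1)) | (17 - 1) = 0x40010,
         * i.e. lock region base 0x40000 with a size field of 16 (128 kB).
         */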

#if MALI_USE_CSF
        if (mmu_has_flush_skip_pgd_levels(gpu_props))
                *lockaddr =
                        AS_LOCKADDR_FLUSH_SKIP_LEVELS_SET(*lockaddr, op_param->flush_skip_levels);
#endif

        return 0;
}

/**
 * wait_ready() - Wait for previously issued MMU command to complete.
 *
 * @kbdev: Kbase device to wait for a MMU command to complete.
 * @as_nr: Address space to wait for a MMU command to complete.
 *
 * Reset GPU if the wait for previously issued command fails.
 *
 * Return: 0 on successful completion, or a negative error code on failure.
 */
static int wait_ready(struct kbase_device *kbdev, unsigned int as_nr)
{
        const ktime_t wait_loop_start = ktime_get_raw();
        const u32 mmu_as_inactive_wait_time_ms = kbdev->mmu_as_inactive_wait_time_ms;
        s64 diff;

        if (unlikely(kbdev->as[as_nr].is_unresponsive))
                return -EBUSY;

        do {
                unsigned int i;

                for (i = 0; i < 1000; i++) {
                        /* Wait for the MMU status to indicate there is no active command */
                        if (!(kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS)) &
                              AS_STATUS_AS_ACTIVE))
                                return 0;
                }

                diff = ktime_to_ms(ktime_sub(ktime_get_raw(), wait_loop_start));
        } while (diff < mmu_as_inactive_wait_time_ms);

        dev_err(kbdev->dev,
                "AS_ACTIVE bit stuck for as %u. Might be caused by unstable GPU clk/pwr or faulty system",
                as_nr);
        kbdev->as[as_nr].is_unresponsive = true;
        if (kbase_prepare_to_reset_gpu_locked(kbdev, RESET_FLAGS_HWC_UNRECOVERABLE_ERROR))
                kbase_reset_gpu_locked(kbdev);

        return -ETIMEDOUT;
}

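/**
 * write_cmd() - Write an MMU command to the AS_COMMAND register of an
 *               address space, once the MMU is ready to accept it.
 *
 * @kbdev: Kbase device to issue the MMU command on.
 * @as_nr: Address space the MMU command is issued on.
 * @cmd:   MMU command to write to the AS_COMMAND register.
 *
 * Return: 0 if the command was written, otherwise the error code returned
 *         by wait_ready().
 */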
static int write_cmd(struct kbase_device *kbdev, int as_nr, u32 cmd)
{
        /* write AS_COMMAND when MMU is ready to accept another command */
        const int status = wait_ready(kbdev, as_nr);

        if (likely(status == 0))
                kbase_reg_write(kbdev, MMU_AS_REG(as_nr, AS_COMMAND), cmd);
        else if (status == -EBUSY) {
                dev_dbg(kbdev->dev,
                        "Skipped the wait for AS_ACTIVE bit for as %u, before sending MMU command %u",
                        as_nr, cmd);
        } else {
                dev_err(kbdev->dev,
                        "Wait for AS_ACTIVE bit failed for as %u, before sending MMU command %u",
                        as_nr, cmd);
        }

        return status;
}

#if MALI_USE_CSF && !IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI)
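/**
 * wait_cores_power_trans_complete() - Wait for the completion of any in-flight
 *                                     shader core power transitions.
 *
 * @kbdev: Kbase device whose SHADER_PWRTRANS registers are polled.
 *
 * The SHADER_PWRTRANS_LO/HI registers are polled until they read zero, with a
 * 1 us delay between reads and an overall 1 ms timeout.
 *
 * Return: 0 when no power transition is in progress, -ETIMEDOUT otherwise.
 */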
static int wait_cores_power_trans_complete(struct kbase_device *kbdev)
{
#define WAIT_TIMEOUT 1000 /* 1ms timeout */
#define DELAY_TIME_IN_US 1
        const int max_iterations = WAIT_TIMEOUT;
        int loop;

        lockdep_assert_held(&kbdev->hwaccess_lock);

        for (loop = 0; loop < max_iterations; loop++) {
                u32 lo =
                        kbase_reg_read(kbdev, GPU_CONTROL_REG(SHADER_PWRTRANS_LO));
                u32 hi =
                        kbase_reg_read(kbdev, GPU_CONTROL_REG(SHADER_PWRTRANS_HI));

                if (!lo && !hi)
                        break;

                udelay(DELAY_TIME_IN_US);
        }

        if (loop == max_iterations) {
                dev_warn(kbdev->dev, "SHADER_PWRTRANS set for too long");
                return -ETIMEDOUT;
        }

        return 0;
}

/**
 * apply_hw_issue_GPU2019_3901_wa - Apply WA for the HW issue GPU2019_3901
 *
 * @kbdev:   Kbase device to issue the MMU operation on.
 * @mmu_cmd: Pointer to the variable containing the value of the MMU command
 *           that needs to be sent to flush the L2 cache and do an
 *           implicit unlock.
 * @as_nr:   Address space number for which the MMU command needs to be
 *           sent.
 *
 * This function ensures that the flush of the LSC is not missed for the pages
 * that were unmapped from the GPU, due to the power down transition of shader
 * cores.
 *
 * Return: 0 if the WA was successfully applied, non-zero otherwise.
 */
static int apply_hw_issue_GPU2019_3901_wa(struct kbase_device *kbdev, u32 *mmu_cmd,
                                          unsigned int as_nr)
{
        int ret = 0;

        lockdep_assert_held(&kbdev->hwaccess_lock);

        /* Check if L2 is OFF. The cores also must be OFF if L2 is not up, so
         * the workaround can be safely skipped.
         */
        if (kbdev->pm.backend.l2_state != KBASE_L2_OFF) {
                if (*mmu_cmd != AS_COMMAND_FLUSH_MEM) {
                        dev_warn(kbdev->dev,
                                 "Unexpected mmu command received");
                        return -EINVAL;
                }

                /* Wait for the LOCK MMU command to complete, issued by the caller */
                ret = wait_ready(kbdev, as_nr);
                if (unlikely(ret))
                        return ret;

                ret = kbase_gpu_cache_flush_and_busy_wait(kbdev,
                                                          GPU_COMMAND_CACHE_CLN_INV_LSC);
                if (unlikely(ret))
                        return ret;

                ret = wait_cores_power_trans_complete(kbdev);
                if (unlikely(ret)) {
                        if (kbase_prepare_to_reset_gpu_locked(kbdev,
                                                              RESET_FLAGS_HWC_UNRECOVERABLE_ERROR))
                                kbase_reset_gpu_locked(kbdev);
                        return ret;
                }

                /* As LSC is guaranteed to have been flushed we can use FLUSH_PT
                 * MMU command to only flush the L2.
                 */
                *mmu_cmd = AS_COMMAND_FLUSH_PT;
        }

        return ret;
}
#endif

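/* Program the address space's TRANSCFG, TRANSTAB and MEMATTR registers from
 * the current setup, and make the new configuration take effect with an
 * UPDATE command.
 */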
void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as)
{
        struct kbase_mmu_setup *current_setup = &as->current_setup;
        u64 transcfg = 0;

        lockdep_assert_held(&kbdev->hwaccess_lock);
        lockdep_assert_held(&kbdev->mmu_hw_mutex);

        transcfg = current_setup->transcfg;

        /* Set flag AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK
         * Clear PTW_MEMATTR bits
         */
        transcfg &= ~AS_TRANSCFG_PTW_MEMATTR_MASK;
        /* Enable correct PTW_MEMATTR bits */
        transcfg |= AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK;
        /* Ensure page-table reads use read-allocate cache-policy in
         * the L2
         */
        transcfg |= AS_TRANSCFG_R_ALLOCATE;

        if (kbdev->system_coherency != COHERENCY_NONE) {
                /* Set flag AS_TRANSCFG_PTW_SH_OS (outer shareable)
                 * Clear PTW_SH bits
                 */
                transcfg = (transcfg & ~AS_TRANSCFG_PTW_SH_MASK);
                /* Enable correct PTW_SH bits */
                transcfg = (transcfg | AS_TRANSCFG_PTW_SH_OS);
        }

        kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_LO),
                        transcfg);
        kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_HI),
                        (transcfg >> 32) & 0xFFFFFFFFUL);

        kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_LO),
                        current_setup->transtab & 0xFFFFFFFFUL);
        kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_HI),
                        (current_setup->transtab >> 32) & 0xFFFFFFFFUL);

        kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_LO),
                        current_setup->memattr & 0xFFFFFFFFUL);
        kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_HI),
                        (current_setup->memattr >> 32) & 0xFFFFFFFFUL);

        KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG(kbdev, as,
                                           current_setup->transtab,
                                           current_setup->memattr,
                                           transcfg);

        write_cmd(kbdev, as->number, AS_COMMAND_UPDATE);
#if MALI_USE_CSF
        /* Wait for UPDATE command to complete */
        wait_ready(kbdev, as->number);
#endif
}

/**
 * mmu_command_instr - Record an MMU command for instrumentation purposes.
 *
 * @kbdev: Kbase device the MMU command was issued on.
 * @kctx_id: Kernel context ID for the MMU command tracepoint.
 * @cmd: Command issued to the MMU.
 * @lock_addr: Address of memory region locked for the operation.
 * @mmu_sync_info: Indicates whether this call is synchronous wrt MMU ops.
 */
static void mmu_command_instr(struct kbase_device *kbdev, u32 kctx_id, u32 cmd, u64 lock_addr,
                              enum kbase_caller_mmu_sync_info mmu_sync_info)
{
        u64 lock_addr_base = AS_LOCKADDR_LOCKADDR_BASE_GET(lock_addr);
        u32 lock_addr_size = AS_LOCKADDR_LOCKADDR_SIZE_GET(lock_addr);

        bool is_mmu_synchronous = (mmu_sync_info == CALLER_MMU_SYNC);

        KBASE_TLSTREAM_AUX_MMU_COMMAND(kbdev, kctx_id, cmd, is_mmu_synchronous, lock_addr_base,
                                       lock_addr_size);
}

/* Helper function to program the LOCKADDR register before a LOCK/UNLOCK
 * command is issued.
 */
static int mmu_hw_set_lock_addr(struct kbase_device *kbdev, int as_nr, u64 *lock_addr,
                                const struct kbase_mmu_hw_op_param *op_param)
{
        int ret;

        ret = lock_region(&kbdev->gpu_props, lock_addr, op_param);

        if (!ret) {
                /* Set the region that needs to be updated */
                kbase_reg_write(kbdev, MMU_AS_REG(as_nr, AS_LOCKADDR_LO),
                                *lock_addr & 0xFFFFFFFFUL);
                kbase_reg_write(kbdev, MMU_AS_REG(as_nr, AS_LOCKADDR_HI),
                                (*lock_addr >> 32) & 0xFFFFFFFFUL);
        }
        return ret;
}

/**
 * mmu_hw_do_lock_no_wait - Issue LOCK command to the MMU and return without
 *                          waiting for its completion.
 *
 * @kbdev: Kbase device to issue the MMU operation on.
 * @as: Address space to issue the MMU operation on.
 * @lock_addr: Address of memory region locked for this operation.
 * @op_param: Pointer to a struct containing information about the MMU operation.
 *
 * Return: 0 if issuing the command was successful, otherwise an error code.
 */
static int mmu_hw_do_lock_no_wait(struct kbase_device *kbdev, struct kbase_as *as, u64 *lock_addr,
                                  const struct kbase_mmu_hw_op_param *op_param)
{
        int ret;

        ret = mmu_hw_set_lock_addr(kbdev, as->number, lock_addr, op_param);

        if (likely(!ret))
                ret = write_cmd(kbdev, as->number, AS_COMMAND_LOCK);

        return ret;
}

/**
 * mmu_hw_do_lock - Issue LOCK command to the MMU and wait for its completion.
 *
 * @kbdev: Kbase device to issue the MMU operation on.
 * @as: Address space to issue the MMU operation on.
 * @op_param: Pointer to a struct containing information about the MMU operation.
 *
 * Return: 0 if issuing the LOCK command was successful, otherwise an error code.
 */
static int mmu_hw_do_lock(struct kbase_device *kbdev, struct kbase_as *as,
                          const struct kbase_mmu_hw_op_param *op_param)
{
        int ret;
        u64 lock_addr = 0x0;

        if (WARN_ON(kbdev == NULL) || WARN_ON(as == NULL))
                return -EINVAL;

        ret = mmu_hw_do_lock_no_wait(kbdev, as, &lock_addr, op_param);

        if (!ret)
                ret = wait_ready(kbdev, as->number);

        if (!ret)
                mmu_command_instr(kbdev, op_param->kctx_id, AS_COMMAND_LOCK, lock_addr,
                                  op_param->mmu_sync_info);

        return ret;
}

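/* Non-static wrapper around mmu_hw_do_lock(); the caller must hold
 * hwaccess_lock.
 */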
int kbase_mmu_hw_do_lock(struct kbase_device *kbdev, struct kbase_as *as,
                         const struct kbase_mmu_hw_op_param *op_param)
{
        lockdep_assert_held(&kbdev->hwaccess_lock);

        return mmu_hw_do_lock(kbdev, as, op_param);
}

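/* Issue an UNLOCK command without reprogramming AS_LOCKADDR, wait for it to
 * complete, then read the locked region back from the AS_LOCKADDR register
 * for instrumentation.
 */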
int kbase_mmu_hw_do_unlock_no_addr(struct kbase_device *kbdev, struct kbase_as *as,
                                   const struct kbase_mmu_hw_op_param *op_param)
{
        int ret = 0;

        if (WARN_ON(kbdev == NULL) || WARN_ON(as == NULL))
                return -EINVAL;

        ret = write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK);

        /* Wait for UNLOCK command to complete */
        if (likely(!ret))
                ret = wait_ready(kbdev, as->number);

        if (likely(!ret)) {
                u64 lock_addr = 0x0;
                /* read MMU_AS_CONTROL.LOCKADDR register */
                lock_addr |= (u64)kbase_reg_read(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_HI))
                             << 32;
                lock_addr |= (u64)kbase_reg_read(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_LO));

                mmu_command_instr(kbdev, op_param->kctx_id, AS_COMMAND_UNLOCK,
                                  lock_addr, op_param->mmu_sync_info);
        }

        return ret;
}

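/* Program AS_LOCKADDR for the region described by op_param, then issue the
 * UNLOCK command via kbase_mmu_hw_do_unlock_no_addr().
 */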
int kbase_mmu_hw_do_unlock(struct kbase_device *kbdev, struct kbase_as *as,
                           const struct kbase_mmu_hw_op_param *op_param)
{
        int ret = 0;
        u64 lock_addr = 0x0;

        if (WARN_ON(kbdev == NULL) || WARN_ON(as == NULL))
                return -EINVAL;

        ret = mmu_hw_set_lock_addr(kbdev, as->number, &lock_addr, op_param);

        if (!ret)
                ret = kbase_mmu_hw_do_unlock_no_addr(kbdev, as,
                                                     op_param);

        return ret;
}

/**
 * mmu_hw_do_flush - Flush MMU and wait for its completion.
 *
 * @kbdev: Kbase device to issue the MMU operation on.
 * @as: Address space to issue the MMU operation on.
 * @op_param: Pointer to a struct containing information about the MMU operation.
 * @hwaccess_locked: Flag to indicate if the hwaccess_lock is held by the caller.
 *
 * Return: 0 if flushing MMU was successful, otherwise an error code.
 */
static int mmu_hw_do_flush(struct kbase_device *kbdev, struct kbase_as *as,
                           const struct kbase_mmu_hw_op_param *op_param, bool hwaccess_locked)
{
        int ret;
        u64 lock_addr = 0x0;
        u32 mmu_cmd = AS_COMMAND_FLUSH_MEM;

        if (WARN_ON(kbdev == NULL) || WARN_ON(as == NULL))
                return -EINVAL;

        /* MMU operations can be either FLUSH_PT or FLUSH_MEM, anything else at
         * this point would be unexpected.
         */
        if (op_param->op != KBASE_MMU_OP_FLUSH_PT &&
            op_param->op != KBASE_MMU_OP_FLUSH_MEM) {
                dev_err(kbdev->dev, "Unexpected flush operation received");
                return -EINVAL;
        }

        lockdep_assert_held(&kbdev->mmu_hw_mutex);

        if (op_param->op == KBASE_MMU_OP_FLUSH_PT)
                mmu_cmd = AS_COMMAND_FLUSH_PT;

        /* Lock the region that needs to be updated */
        ret = mmu_hw_do_lock_no_wait(kbdev, as, &lock_addr, op_param);
        if (ret)
                return ret;

#if MALI_USE_CSF && !IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI)
        /* WA for the BASE_HW_ISSUE_GPU2019_3901. */
        if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_GPU2019_3901) &&
            mmu_cmd == AS_COMMAND_FLUSH_MEM) {
                if (!hwaccess_locked) {
                        unsigned long flags = 0;

                        spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
                        ret = apply_hw_issue_GPU2019_3901_wa(kbdev, &mmu_cmd, as->number);
                        spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
                } else {
                        ret = apply_hw_issue_GPU2019_3901_wa(kbdev, &mmu_cmd, as->number);
                }

                if (ret)
                        return ret;
        }
#endif

        ret = write_cmd(kbdev, as->number, mmu_cmd);

        /* Wait for the command to complete */
        if (likely(!ret))
                ret = wait_ready(kbdev, as->number);

        if (likely(!ret))
                mmu_command_instr(kbdev, op_param->kctx_id, mmu_cmd, lock_addr,
                                  op_param->mmu_sync_info);

        return ret;
}

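/* Variant of kbase_mmu_hw_do_flush() for callers that already hold
 * hwaccess_lock.
 */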
int kbase_mmu_hw_do_flush_locked(struct kbase_device *kbdev, struct kbase_as *as,
                                 const struct kbase_mmu_hw_op_param *op_param)
{
        lockdep_assert_held(&kbdev->hwaccess_lock);

        return mmu_hw_do_flush(kbdev, as, op_param, true);
}

int kbase_mmu_hw_do_flush(struct kbase_device *kbdev, struct kbase_as *as,
                          const struct kbase_mmu_hw_op_param *op_param)
{
        return mmu_hw_do_flush(kbdev, as, op_param, false);
}
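
/*
 * Illustrative usage sketch (not compiled; the field values are hypothetical):
 * a caller that has just updated the page tables for a region would typically
 * do something like
 *
 *      struct kbase_mmu_hw_op_param op_param = {
 *              .vpfn = vpfn,
 *              .nr = nr_pages,
 *              .op = KBASE_MMU_OP_FLUSH_PT,
 *              .kctx_id = kctx_id,
 *              .mmu_sync_info = CALLER_MMU_SYNC,
 *      };
 *      err = kbase_mmu_hw_do_flush(kbdev, as, &op_param);
 *
 * using the _locked variant instead when hwaccess_lock is already held.
 */

/* Flush by driving the GPU control cache-flush command between an MMU LOCK
 * and UNLOCK, instead of a single FLUSH_MEM/FLUSH_PT MMU command.
 */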
int kbase_mmu_hw_do_flush_on_gpu_ctrl(struct kbase_device *kbdev, struct kbase_as *as,
                                      const struct kbase_mmu_hw_op_param *op_param)
{
        int ret, ret2;
        u32 gpu_cmd = GPU_COMMAND_CACHE_CLN_INV_L2_LSC;

        if (WARN_ON(kbdev == NULL) || WARN_ON(as == NULL))
                return -EINVAL;

        /* MMU operations can be either FLUSH_PT or FLUSH_MEM, anything else at
         * this point would be unexpected.
         */
        if (op_param->op != KBASE_MMU_OP_FLUSH_PT &&
            op_param->op != KBASE_MMU_OP_FLUSH_MEM) {
                dev_err(kbdev->dev, "Unexpected flush operation received");
                return -EINVAL;
        }

        lockdep_assert_held(&kbdev->hwaccess_lock);
        lockdep_assert_held(&kbdev->mmu_hw_mutex);

        if (op_param->op == KBASE_MMU_OP_FLUSH_PT)
                gpu_cmd = GPU_COMMAND_CACHE_CLN_INV_L2;

        /* 1. Issue MMU_AS_CONTROL.COMMAND.LOCK operation. */
        ret = mmu_hw_do_lock(kbdev, as, op_param);
        if (ret)
                return ret;

        /* 2. Issue GPU_CONTROL.COMMAND.FLUSH_CACHES operation */
        ret = kbase_gpu_cache_flush_and_busy_wait(kbdev, gpu_cmd);

        /* 3. Issue MMU_AS_CONTROL.COMMAND.UNLOCK operation. */
        ret2 = kbase_mmu_hw_do_unlock_no_addr(kbdev, as, op_param);

        return ret ?: ret2;
}

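/* Clear the page fault IRQ for the address space (and the bus fault IRQ as
 * well when not building for CSF, in case one occurred), unless an IRQ flush
 * for a GPU reset is in progress.
 */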
void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
                              enum kbase_mmu_fault_type type)
{
        unsigned long flags;
        u32 pf_bf_mask;

        spin_lock_irqsave(&kbdev->mmu_mask_change, flags);

        /*
         * A reset is in-flight and we're flushing the IRQ + bottom half
         * so don't update anything as it could race with the reset code.
         */
        if (kbdev->irq_reset_flush)
                goto unlock;

        /* Clear the page (and bus fault IRQ as well in case one occurred) */
        pf_bf_mask = MMU_PAGE_FAULT(as->number);
#if !MALI_USE_CSF
        if (type == KBASE_MMU_FAULT_TYPE_BUS ||
            type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
                pf_bf_mask |= MMU_BUS_ERROR(as->number);
#endif
        kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), pf_bf_mask);

unlock:
        spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
}

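/* Unmask the fault IRQ sources for this address space again so that new
 * faults can be reported, unless an IRQ flush for a GPU reset is in progress.
 */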
void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
                               enum kbase_mmu_fault_type type)
{
        unsigned long flags;
        u32 irq_mask;

        /* Enable the page fault IRQ
         * (and bus fault IRQ as well in case one occurred)
         */
        spin_lock_irqsave(&kbdev->mmu_mask_change, flags);

        /*
         * A reset is in-flight and we're flushing the IRQ + bottom half
         * so don't update anything as it could race with the reset code.
         */
        if (kbdev->irq_reset_flush)
                goto unlock;

        irq_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK)) |
                   MMU_PAGE_FAULT(as->number);

#if !MALI_USE_CSF
        if (type == KBASE_MMU_FAULT_TYPE_BUS ||
            type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
                irq_mask |= MMU_BUS_ERROR(as->number);
#endif
        kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), irq_mask);

unlock:
        spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
}