xref: /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/csf/mali_kbase_csf_mcu_shared_reg.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
2 /*
3  *
4  * (C) COPYRIGHT 2022-2023 ARM Limited. All rights reserved.
5  *
6  * This program is free software and is provided to you under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation, and any use by you of this program is subject to the terms
9  * of such GNU license.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, you can access it online at
18  * http://www.gnu.org/licenses/gpl-2.0.html.
19  *
20  */
21 
22 #include <linux/protected_memory_allocator.h>
23 #include <mali_kbase.h>
24 #include "mali_kbase_csf.h"
25 #include "mali_kbase_csf_mcu_shared_reg.h"
26 #include <mali_kbase_mem_migrate.h>
27 
28 /* Scaling factor for pre-allocating shared regions for suspend buffers and userio pages */
29 #define MCU_SHARED_REGS_PREALLOCATE_SCALE (8)
30 
31 /* MCU shared region map attempt limit */
32 #define MCU_SHARED_REGS_BIND_ATTEMPT_LIMIT (4)
33 
34 /* Convert a VPFN to its start addr */
35 #define GET_VPFN_VA(vpfn) ((vpfn) << PAGE_SHIFT)
36 
37 /* Macros for extracting the corresponding VPFNs from a CSG_REG */
38 #define CSG_REG_SUSP_BUF_VPFN(reg, nr_susp_pages) (reg->start_pfn)
39 #define CSG_REG_PMOD_BUF_VPFN(reg, nr_susp_pages) (reg->start_pfn + nr_susp_pages)
40 #define CSG_REG_USERIO_VPFN(reg, csi, nr_susp_pages) (reg->start_pfn + 2 * (nr_susp_pages + csi))
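/* Per-CSG region layout implied by the macros above (derived from
 * shared_mcu_csg_reg_init(), which allocates 2 * (nr_susp_pages + nr_csis) pages):
 * the first nr_susp_pages hold the normal mode suspend buffer, the next
 * nr_susp_pages hold the protected mode suspend buffer, followed by 2 userio
 * pages (input page, then output page) per CSI.
 */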
41 
42 /* MCU shared segment dummy page mapping flags */
43 #define DUMMY_PAGE_MAP_FLAGS (KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_DEFAULT) | KBASE_REG_GPU_NX)
44 
45 /* MCU shared segment suspend buffer mapping flags */
46 #define SUSP_PAGE_MAP_FLAGS                                                                        \
47 	(KBASE_REG_GPU_RD | KBASE_REG_GPU_WR | KBASE_REG_GPU_NX |                                  \
48 	 KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_DEFAULT))
49 
50 /**
51  * struct kbase_csg_shared_region - Wrapper object for a CSG's runtime resources:
52  *                                  suspend buffer pages, userio pages and their
53  *                                  corresponding GPU VA mappings from the MCU shared
54  *                                  interface segment
55  *
56  * @link:       Link to the managing list for the wrapper object.
57  * @reg:        Pointer to the region allocated from the shared interface segment, which
58  *              covers the normal/P-mode suspend buffers and the userio pages of the queues.
59  * @grp:        Pointer to the bound kbase_queue_group, or NULL if no binding (free).
60  * @pmode_mapped: Boolean indicating whether the region has been MMU-mapped with the bound
61  *              group's protected mode suspend buffer pages.
62  */
63 struct kbase_csg_shared_region {
64 	struct list_head link;
65 	struct kbase_va_region *reg;
66 	struct kbase_queue_group *grp;
67 	bool pmode_mapped;
68 };
69 
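/* Derive the MMU flags for mapping userio pages: GPU read-only and non-executable,
 * using non-cacheable memory attributes when the system is not coherent, otherwise
 * shared (coherent) memory attributes.
 */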
70 static unsigned long get_userio_mmu_flags(struct kbase_device *kbdev)
71 {
72 	unsigned long userio_map_flags;
73 
74 	if (kbdev->system_coherency == COHERENCY_NONE)
75 		userio_map_flags =
76 			KBASE_REG_GPU_RD | KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_NON_CACHEABLE);
77 	else
78 		userio_map_flags = KBASE_REG_GPU_RD | KBASE_REG_SHARE_BOTH |
79 				   KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_SHARED);
80 
81 	return (userio_map_flags | KBASE_REG_GPU_NX);
82 }
83 
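/* If page migration is enabled, mark the given physical page NOT_MOVABLE so that the
 * page migration logic never moves it (used here for the shared dummy page).
 */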
84 static void set_page_meta_status_not_movable(struct tagged_addr phy)
85 {
86 	if (kbase_page_migration_enabled) {
87 		struct kbase_page_metadata *page_md = kbase_page_private(as_page(phy));
88 
89 		if (page_md) {
90 			spin_lock(&page_md->migrate_lock);
91 			page_md->status = PAGE_STATUS_SET(page_md->status, (u8)NOT_MOVABLE);
92 			spin_unlock(&page_md->migrate_lock);
93 		}
94 	}
95 }
96 
97 static struct kbase_csg_shared_region *get_group_bound_csg_reg(struct kbase_queue_group *group)
98 {
99 	return (struct kbase_csg_shared_region *)group->csg_reg;
100 }
101 
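/* Re-point nr_pages of an already inserted MCU shared mapping, starting at vpfn,
 * back to the dummy pages.
 */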
102 static inline int update_mapping_with_dummy_pages(struct kbase_device *kbdev, u64 vpfn,
103 						  u32 nr_pages)
104 {
105 	struct kbase_csf_mcu_shared_regions *shared_regs = &kbdev->csf.scheduler.mcu_regs_data;
106 	const unsigned long mem_flags = DUMMY_PAGE_MAP_FLAGS;
107 
108 	return kbase_mmu_update_csf_mcu_pages(kbdev, vpfn, shared_regs->dummy_phys, nr_pages,
109 					      mem_flags, KBASE_MEM_GROUP_CSF_FW);
110 }
111 
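/* Insert a new MCU shared mapping of nr_pages at vpfn, initially backed by the dummy pages */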
112 static inline int insert_dummy_pages(struct kbase_device *kbdev, u64 vpfn, u32 nr_pages)
113 {
114 	struct kbase_csf_mcu_shared_regions *shared_regs = &kbdev->csf.scheduler.mcu_regs_data;
115 	const unsigned long mem_flags = DUMMY_PAGE_MAP_FLAGS;
116 	const enum kbase_caller_mmu_sync_info mmu_sync_info = CALLER_MMU_ASYNC;
117 
118 	return kbase_mmu_insert_pages(kbdev, &kbdev->csf.mcu_mmu, vpfn, shared_regs->dummy_phys,
119 				      nr_pages, mem_flags, MCU_AS_NR, KBASE_MEM_GROUP_CSF_FW,
120 				      mmu_sync_info, NULL, false);
121 }
122 
123 /* Reset consecutive retry count to zero */
124 static void notify_group_csg_reg_map_done(struct kbase_queue_group *group)
125 {
126 	lockdep_assert_held(&group->kctx->kbdev->csf.scheduler.lock);
127 
128 	/* Just clear the internal map retry count */
129 	group->csg_reg_bind_retries = 0;
130 }
131 
132 /* Return true if a fatal group error has already been triggered */
133 static bool notify_group_csg_reg_map_error(struct kbase_queue_group *group)
134 {
135 	struct kbase_device *kbdev = group->kctx->kbdev;
136 
137 	lockdep_assert_held(&kbdev->csf.scheduler.lock);
138 
139 	if (group->csg_reg_bind_retries < U8_MAX)
140 		group->csg_reg_bind_retries++;
141 
142 	/* Allow only one fatal error notification */
143 	if (group->csg_reg_bind_retries == MCU_SHARED_REGS_BIND_ATTEMPT_LIMIT) {
144 		struct base_gpu_queue_group_error const err_payload = {
145 			.error_type = BASE_GPU_QUEUE_GROUP_ERROR_FATAL,
146 			.payload = { .fatal_group = { .status = GPU_EXCEPTION_TYPE_SW_FAULT_0 } }
147 		};
148 
149 		dev_err(kbdev->dev, "Fatal: group_%d_%d_%d exceeded shared region map retry limit",
150 			group->kctx->tgid, group->kctx->id, group->handle);
151 		kbase_csf_add_group_fatal_error(group, &err_payload);
152 		kbase_event_wakeup(group->kctx);
153 	}
154 
155 	return group->csg_reg_bind_retries >= MCU_SHARED_REGS_BIND_ATTEMPT_LIMIT;
156 }
157 
158 /* Replace the mapping at vpfn (reflecting a queue's userio_pages) with the given phys.
159  * If phys is NULL, the internal dummy_phys is used instead, which effectively
160  * restores the given queue's userio_pages to their initialized state
161  * (i.e. mapped to the default dummy page).
162  * On a CSF MMU update error for a queue, the dummy phys is likewise used to restore
163  * the default 'unbound' (i.e. mapped to dummy) condition.
164  *
165  * It's the caller's responsibility to ensure that the given vpfn is extracted
166  * correctly from a CSG_REG object, for example, using CSG_REG_USERIO_VPFN().
167  */
168 static int userio_pages_replace_phys(struct kbase_device *kbdev, u64 vpfn, struct tagged_addr *phys)
169 {
170 	struct kbase_csf_mcu_shared_regions *shared_regs = &kbdev->csf.scheduler.mcu_regs_data;
171 	int err = 0, err1;
172 
173 	lockdep_assert_held(&kbdev->csf.scheduler.lock);
174 
175 	if (phys) {
176 		unsigned long mem_flags_input = shared_regs->userio_mem_rd_flags;
177 		unsigned long mem_flags_output = mem_flags_input | KBASE_REG_GPU_WR;
178 
179 		/* Dealing with a queue's INPUT page */
180 		err = kbase_mmu_update_csf_mcu_pages(kbdev, vpfn, &phys[0], 1, mem_flags_input,
181 						     KBASE_MEM_GROUP_CSF_IO);
182 		/* Dealing with a queue's OUTPUT page */
183 		err1 = kbase_mmu_update_csf_mcu_pages(kbdev, vpfn + 1, &phys[1], 1,
184 						      mem_flags_output, KBASE_MEM_GROUP_CSF_IO);
185 		if (unlikely(err1))
186 			err = err1;
187 	}
188 
189 	if (unlikely(err) || !phys) {
190 		/* Restore back to dummy_userio_phy */
191 		update_mapping_with_dummy_pages(kbdev, vpfn, KBASEP_NUM_CS_USER_IO_PAGES);
192 	}
193 
194 	return err;
195 }
196 
197 /* Update the userio page mappings of a group's queues with its runtime-bound CSG region */
198 static int csg_reg_update_on_csis(struct kbase_device *kbdev, struct kbase_queue_group *group,
199 				  struct kbase_queue_group *prev_grp)
200 {
201 	struct kbase_csg_shared_region *csg_reg = get_group_bound_csg_reg(group);
202 	const u32 nr_susp_pages = PFN_UP(kbdev->csf.global_iface.groups[0].suspend_size);
203 	const u32 nr_csis = kbdev->csf.global_iface.groups[0].stream_num;
204 	struct tagged_addr *phy;
205 	int err = 0, err1;
206 	u32 i;
207 
208 	lockdep_assert_held(&kbdev->csf.scheduler.lock);
209 
210 	if (WARN_ONCE(!csg_reg, "Update_userio pages: group has no bound csg_reg"))
211 		return -EINVAL;
212 
213 	for (i = 0; i < nr_csis; i++) {
214 		struct kbase_queue *queue = group->bound_queues[i];
215 		struct kbase_queue *prev_queue = prev_grp ? prev_grp->bound_queues[i] : NULL;
216 
217 		/* Set the phy if the group's queue[i] needs mapping, otherwise NULL */
218 		phy = (queue && queue->enabled && !queue->user_io_gpu_va) ? queue->phys : NULL;
219 
220 		/* Either phy is valid, or this update is for a transition from prev_grp
221 		 * and the prev_queue was mapped, so an update is required.
222 		 */
223 		if (phy || (prev_queue && prev_queue->user_io_gpu_va)) {
224 			u64 vpfn = CSG_REG_USERIO_VPFN(csg_reg->reg, i, nr_susp_pages);
225 
226 			err1 = userio_pages_replace_phys(kbdev, vpfn, phy);
227 
228 			if (unlikely(err1)) {
229 				dev_warn(kbdev->dev,
230 					 "%s: Error in update queue-%d mapping for csg_%d_%d_%d",
231 					 __func__, i, group->kctx->tgid, group->kctx->id,
232 					 group->handle);
233 				err = err1;
234 			} else if (phy)
235 				queue->user_io_gpu_va = GET_VPFN_VA(vpfn);
236 
237 			/* Mark that prev_grp's queue has lost its mapping */
238 			if (prev_queue)
239 				prev_queue->user_io_gpu_va = 0;
240 		}
241 	}
242 
243 	return err;
244 }
245 
246 /* Bind a group to a given csg_reg. Any previous mappings with the csg_reg are replaced
247  * with the given group's phy pages or, where there is no replacement, the default dummy
248  * pages. Note that the csg_reg's fields are transitioned step-by-step from the prev_grp
249  * to its new binding owner in this function. At the end, the prev_grp is completely
250  * detached from the previously bound csg_reg.
251  */
252 static int group_bind_csg_reg(struct kbase_device *kbdev, struct kbase_queue_group *group,
253 			      struct kbase_csg_shared_region *csg_reg)
254 {
255 	const unsigned long mem_flags = SUSP_PAGE_MAP_FLAGS;
256 	const u32 nr_susp_pages = PFN_UP(kbdev->csf.global_iface.groups[0].suspend_size);
257 	struct kbase_queue_group *prev_grp = csg_reg->grp;
258 	struct kbase_va_region *reg = csg_reg->reg;
259 	struct tagged_addr *phy;
260 	int err = 0, err1;
261 
262 	lockdep_assert_held(&kbdev->csf.scheduler.lock);
263 
264 	/* The csg_reg is expected to still be on the unused list, so its link is not empty */
265 	if (WARN_ON_ONCE(list_empty(&csg_reg->link))) {
266 		dev_dbg(kbdev->dev, "csg_reg is marked in active use");
267 		return -EINVAL;
268 	}
269 
270 	if (WARN_ON_ONCE(prev_grp && prev_grp->csg_reg != csg_reg)) {
271 		dev_dbg(kbdev->dev, "Unexpected bound lost on prev_group");
272 		prev_grp->csg_reg = NULL;
273 		return -EINVAL;
274 	}
275 
276 	/* Replace the csg_reg's bound group with the newly given one */
277 	csg_reg->grp = group;
278 	group->csg_reg = csg_reg;
279 
280 	/* Resolving mappings, deal with protected mode first */
281 	if (group->protected_suspend_buf.pma) {
282 		/* We are binding a new group with P-mode ready, so the prev_grp's P-mode mapping
283 		 * status is now stale during this transition of ownership. For the new owner,
284 		 * its mapping would have been removed when it previously lost its binding,
285 		 * so the pma map needs an update. Clearing the mapped flag here ensures the
286 		 * flag reflects the new owner's condition.
287 		 */
288 		csg_reg->pmode_mapped = false;
289 		err = kbase_csf_mcu_shared_group_update_pmode_map(kbdev, group);
290 	} else if (csg_reg->pmode_mapped) {
291 		/* Need to unmap the previous one, use the dummy pages */
292 		err = update_mapping_with_dummy_pages(
293 			kbdev, CSG_REG_PMOD_BUF_VPFN(reg, nr_susp_pages), nr_susp_pages);
294 
295 		if (unlikely(err))
296 			dev_warn(kbdev->dev, "%s: Failed to update P-mode dummy for csg_%d_%d_%d",
297 				 __func__, group->kctx->tgid, group->kctx->id, group->handle);
298 
299 		csg_reg->pmode_mapped = false;
300 	}
301 
302 	/* Unlike the normal suspend buffer, the mapping of the protected mode suspend buffer
303 	 * is reflected by a dedicated mapped flag (because its phys[] is only allocated on an
304 	 * as-needed basis). So the GPU_VA is always updated to the bound region's corresponding
305 	 * VA, as a reflection of the binding to the csg_reg.
306 	 */
307 	group->protected_suspend_buf.gpu_va =
308 		GET_VPFN_VA(CSG_REG_PMOD_BUF_VPFN(reg, nr_susp_pages));
309 
310 	/* Deal with normal mode suspend buffer */
311 	phy = group->normal_suspend_buf.phy;
312 	err1 = kbase_mmu_update_csf_mcu_pages(kbdev, CSG_REG_SUSP_BUF_VPFN(reg, nr_susp_pages), phy,
313 					      nr_susp_pages, mem_flags, KBASE_MEM_GROUP_CSF_FW);
314 
315 	if (unlikely(err1)) {
316 		dev_warn(kbdev->dev, "%s: Failed to update suspend buffer for csg_%d_%d_%d",
317 			 __func__, group->kctx->tgid, group->kctx->id, group->handle);
318 
319 		/* Attempt a restore to default dummy for removing previous mapping */
320 		if (prev_grp)
321 			update_mapping_with_dummy_pages(
322 				kbdev, CSG_REG_SUSP_BUF_VPFN(reg, nr_susp_pages), nr_susp_pages);
323 		err = err1;
324 		/* Mark the normal suspend buffer as not mapped (due to error) */
325 		group->normal_suspend_buf.gpu_va = 0;
326 	} else {
327 		/* Mark the normal suspend buffer as actually mapped */
328 		group->normal_suspend_buf.gpu_va =
329 			GET_VPFN_VA(CSG_REG_SUSP_BUF_VPFN(reg, nr_susp_pages));
330 	}
331 
332 	/* Deal with queue userio_pages */
333 	err1 = csg_reg_update_on_csis(kbdev, group, prev_grp);
334 	if (likely(!err))
335 		err = err1;
336 
337 	/* Reset the previous group's suspend buffers' GPU_VAs as it has lost its binding */
338 	if (prev_grp) {
339 		prev_grp->normal_suspend_buf.gpu_va = 0;
340 		prev_grp->protected_suspend_buf.gpu_va = 0;
341 		prev_grp->csg_reg = NULL;
342 	}
343 
344 	return err;
345 }
346 
347 /* Notify that the group is placed on-slot, hence the bound csg_reg is in active use */
348 void kbase_csf_mcu_shared_set_group_csg_reg_active(struct kbase_device *kbdev,
349 						   struct kbase_queue_group *group)
350 {
351 	struct kbase_csg_shared_region *csg_reg = get_group_bound_csg_reg(group);
352 
353 	lockdep_assert_held(&kbdev->csf.scheduler.lock);
354 
355 	if (WARN_ONCE(!csg_reg || csg_reg->grp != group, "Group_%d_%d_%d has no csg_reg bounding",
356 		      group->kctx->tgid, group->kctx->id, group->handle))
357 		return;
358 
359 	/* By removing the csg_reg from the unused list, it becomes active and is tracked
360 	 * by its bound group that is on-slot. The design is that, when this on-slot group is
361 	 * moved to off-slot, the scheduler slot clean-up will add it back to the tail of the
362 	 * unused list.
363 	 */
364 	if (!WARN_ON_ONCE(list_empty(&csg_reg->link)))
365 		list_del_init(&csg_reg->link);
366 }
367 
368 /* Notify that the group is placed off-slot, hence the bound csg_reg is no longer in
369  * active use. Existing bindings/mappings are left untouched. These would only be dealt
370  * with if the bound csg_reg is to be reused with another group.
371  */
372 void kbase_csf_mcu_shared_set_group_csg_reg_unused(struct kbase_device *kbdev,
373 						   struct kbase_queue_group *group)
374 {
375 	struct kbase_csg_shared_region *csg_reg = get_group_bound_csg_reg(group);
376 	struct kbase_csf_mcu_shared_regions *shared_regs = &kbdev->csf.scheduler.mcu_regs_data;
377 
378 	lockdep_assert_held(&kbdev->csf.scheduler.lock);
379 
380 	if (WARN_ONCE(!csg_reg || csg_reg->grp != group, "Group_%d_%d_%d has no csg_reg bound",
381 		      group->kctx->tgid, group->kctx->id, group->handle))
382 		return;
383 
384 	/* By adding back the csg_reg to the unused list, it becomes available for another
385 	 * group to break its existing binding and set up a new one.
386 	 */
387 	if (!list_empty(&csg_reg->link)) {
388 		WARN_ONCE(group->csg_nr >= 0, "Group is assumed vacated from slot");
389 		list_move_tail(&csg_reg->link, &shared_regs->unused_csg_regs);
390 	} else
391 		list_add_tail(&csg_reg->link, &shared_regs->unused_csg_regs);
392 }
393 
394 /* Adding a new queue to an existing on-slot group */
395 int kbase_csf_mcu_shared_add_queue(struct kbase_device *kbdev, struct kbase_queue *queue)
396 {
397 	struct kbase_queue_group *group = queue->group;
398 	struct kbase_csg_shared_region *csg_reg;
399 	const u32 nr_susp_pages = PFN_UP(kbdev->csf.global_iface.groups[0].suspend_size);
400 	u64 vpfn;
401 	int err;
402 
403 	lockdep_assert_held(&kbdev->csf.scheduler.lock);
404 
405 	if (WARN_ONCE(!group || group->csg_nr < 0, "No bound group, or group is not on-slot"))
406 		return -EIO;
407 
408 	csg_reg = get_group_bound_csg_reg(group);
409 	if (WARN_ONCE(!csg_reg || !list_empty(&csg_reg->link),
410 		      "No bound csg_reg, or in wrong state"))
411 		return -EIO;
412 
413 	vpfn = CSG_REG_USERIO_VPFN(csg_reg->reg, queue->csi_index, nr_susp_pages);
414 	err = userio_pages_replace_phys(kbdev, vpfn, queue->phys);
415 	if (likely(!err)) {
416 		/* Mark that the queue has been successfully mapped */
417 		queue->user_io_gpu_va = GET_VPFN_VA(vpfn);
418 	} else {
419 		/* Mark that the queue has no mapping on its phys[] */
420 		queue->user_io_gpu_va = 0;
421 		dev_dbg(kbdev->dev,
422 			"%s: Error in mapping userio pages for queue-%d of csg_%d_%d_%d", __func__,
423 			queue->csi_index, group->kctx->tgid, group->kctx->id, group->handle);
424 
425 		/* notify the error for the bound group */
426 		if (notify_group_csg_reg_map_error(group))
427 			err = -EIO;
428 	}
429 
430 	return err;
431 }
432 
433 /* Unmap a given queue's userio pages when the queue is deleted */
434 void kbase_csf_mcu_shared_drop_stopped_queue(struct kbase_device *kbdev, struct kbase_queue *queue)
435 {
436 	struct kbase_queue_group *group;
437 	struct kbase_csg_shared_region *csg_reg;
438 	const u32 nr_susp_pages = PFN_UP(kbdev->csf.global_iface.groups[0].suspend_size);
439 	u64 vpfn;
440 
441 	lockdep_assert_held(&kbdev->csf.scheduler.lock);
442 
443 	/* The queue has no existing mapping, nothing to do */
444 	if (!queue || !queue->user_io_gpu_va)
445 		return;
446 
447 	group = queue->group;
448 	if (WARN_ONCE(!group || !group->csg_reg, "Queue/Group has no bound region"))
449 		return;
450 
451 	csg_reg = get_group_bound_csg_reg(group);
452 
453 	vpfn = CSG_REG_USERIO_VPFN(csg_reg->reg, queue->csi_index, nr_susp_pages);
454 
455 	WARN_ONCE(userio_pages_replace_phys(kbdev, vpfn, NULL),
456 		  "Unexpected restoring to dummy map update error");
457 	queue->user_io_gpu_va = 0;
458 }
459 
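/* Map the bound group's protected mode suspend buffer pages (if allocated) into the
 * P-mode slot of its csg_reg. A no-op if the P-mode mapping is already in place; on a
 * mapping failure the slot is restored to the dummy pages.
 */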
460 int kbase_csf_mcu_shared_group_update_pmode_map(struct kbase_device *kbdev,
461 						struct kbase_queue_group *group)
462 {
463 	struct kbase_csf_mcu_shared_regions *shared_regs = &kbdev->csf.scheduler.mcu_regs_data;
464 	struct kbase_csg_shared_region *csg_reg = get_group_bound_csg_reg(group);
465 	const u32 nr_susp_pages = PFN_UP(kbdev->csf.global_iface.groups[0].suspend_size);
466 	int err = 0, err1;
467 
468 	lockdep_assert_held(&kbdev->csf.scheduler.lock);
469 
470 	if (WARN_ONCE(!csg_reg, "Update_pmode_map: the bound csg_reg can't be NULL"))
471 		return -EINVAL;
472 
473 	/* If the pmode is already mapped, nothing to do */
474 	if (csg_reg->pmode_mapped)
475 		return 0;
476 
477 	/* P-mode map not in place and the group has allocated P-mode pages, map it */
478 	if (group->protected_suspend_buf.pma) {
479 		unsigned long mem_flags = SUSP_PAGE_MAP_FLAGS;
480 		struct tagged_addr *phy = shared_regs->pma_phys;
481 		struct kbase_va_region *reg = csg_reg->reg;
482 		u64 vpfn = CSG_REG_PMOD_BUF_VPFN(reg, nr_susp_pages);
483 		u32 i;
484 
485 		/* Populate the protected phys from pma to phy[] */
486 		for (i = 0; i < nr_susp_pages; i++)
487 			phy[i] = as_tagged(group->protected_suspend_buf.pma[i]->pa);
488 
489 		/* Add the P-mode suspend buffer mapping */
490 		err = kbase_mmu_update_csf_mcu_pages(kbdev, vpfn, phy, nr_susp_pages, mem_flags,
491 						     KBASE_MEM_GROUP_CSF_FW);
492 
493 		/* If error, restore to the default dummy */
494 		if (unlikely(err)) {
495 			err1 = update_mapping_with_dummy_pages(kbdev, vpfn, nr_susp_pages);
496 			if (unlikely(err1))
497 				dev_warn(
498 					kbdev->dev,
499 					"%s: Failed in recovering to P-mode dummy for csg_%d_%d_%d",
500 					__func__, group->kctx->tgid, group->kctx->id,
501 					group->handle);
502 
503 			csg_reg->pmode_mapped = false;
504 		} else
505 			csg_reg->pmode_mapped = true;
506 	}
507 
508 	return err;
509 }
510 
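/* Clear up an evicted group's csg_reg: restore its suspend buffer and userio mappings
 * to the dummy pages, break the group/csg_reg binding and return the csg_reg to the
 * front of the unused list for early reuse.
 */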
511 void kbase_csf_mcu_shared_clear_evicted_group_csg_reg(struct kbase_device *kbdev,
512 						      struct kbase_queue_group *group)
513 {
514 	struct kbase_csf_mcu_shared_regions *shared_regs = &kbdev->csf.scheduler.mcu_regs_data;
515 	struct kbase_csg_shared_region *csg_reg = get_group_bound_csg_reg(group);
516 	struct kbase_va_region *reg;
517 	const u32 nr_susp_pages = PFN_UP(kbdev->csf.global_iface.groups[0].suspend_size);
518 	u32 nr_csis = kbdev->csf.global_iface.groups[0].stream_num;
519 	int err = 0;
520 	u32 i;
521 
522 	lockdep_assert_held(&kbdev->csf.scheduler.lock);
523 
524 	/* Nothing to clean up if there is no bound csg_reg */
525 	if (!csg_reg)
526 		return;
527 
528 	reg = csg_reg->reg;
529 	/* Restore mappings to the default dummy pages for any mapped pages */
530 	if (csg_reg->pmode_mapped) {
531 		err = update_mapping_with_dummy_pages(
532 			kbdev, CSG_REG_PMOD_BUF_VPFN(reg, nr_susp_pages), nr_susp_pages);
533 		WARN_ONCE(unlikely(err), "Restore dummy failed for clearing pmod buffer mapping");
534 
535 		csg_reg->pmode_mapped = false;
536 	}
537 
538 	if (group->normal_suspend_buf.gpu_va) {
539 		err = update_mapping_with_dummy_pages(
540 			kbdev, CSG_REG_SUSP_BUF_VPFN(reg, nr_susp_pages), nr_susp_pages);
541 		WARN_ONCE(err, "Restore dummy failed for clearing suspend buffer mapping");
542 	}
543 
544 	/* Deal with queue userio pages */
545 	for (i = 0; i < nr_csis; i++)
546 		kbase_csf_mcu_shared_drop_stopped_queue(kbdev, group->bound_queues[i]);
547 
548 	group->normal_suspend_buf.gpu_va = 0;
549 	group->protected_suspend_buf.gpu_va = 0;
550 
551 	/* Break the binding */
552 	group->csg_reg = NULL;
553 	csg_reg->grp = NULL;
554 
555 	/* Put the csg_reg to the front of the unused list */
556 	if (WARN_ON_ONCE(list_empty(&csg_reg->link)))
557 		list_add(&csg_reg->link, &shared_regs->unused_csg_regs);
558 	else
559 		list_move(&csg_reg->link, &shared_regs->unused_csg_regs);
560 }
561 
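/* Ensure the group has a csg_reg bound with up-to-date mappings: reuse an existing
 * binding if present, otherwise take the first entry from the unused list and rebind
 * it to this group. Map success/failure is notified for the retry accounting.
 */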
562 int kbase_csf_mcu_shared_group_bind_csg_reg(struct kbase_device *kbdev,
563 					    struct kbase_queue_group *group)
564 {
565 	struct kbase_csf_mcu_shared_regions *shared_regs = &kbdev->csf.scheduler.mcu_regs_data;
566 	struct kbase_csg_shared_region *csg_reg;
567 	int err;
568 
569 	lockdep_assert_held(&kbdev->csf.scheduler.lock);
570 
571 	csg_reg = get_group_bound_csg_reg(group);
572 	if (!csg_reg)
573 		csg_reg = list_first_entry_or_null(&shared_regs->unused_csg_regs,
574 						   struct kbase_csg_shared_region, link);
575 
576 	if (!WARN_ON_ONCE(!csg_reg)) {
577 		struct kbase_queue_group *prev_grp = csg_reg->grp;
578 
579 		/* Deal with the previous binding and lazy unmap, i.e. if the previous mapping is not
580 		 * the required one, unmap it.
581 		 */
582 		if (prev_grp == group) {
583 			/* Update existing bindings, if there have been some changes */
584 			err = kbase_csf_mcu_shared_group_update_pmode_map(kbdev, group);
585 			if (likely(!err))
586 				err = csg_reg_update_on_csis(kbdev, group, NULL);
587 		} else
588 			err = group_bind_csg_reg(kbdev, group, csg_reg);
589 	} else {
590 		/* This should not be possible if the code operates correctly */
591 		dev_err(kbdev->dev, "%s: Unexpected NULL csg_reg for group %d of context %d_%d",
592 			__func__, group->handle, group->kctx->tgid, group->kctx->id);
593 		return -EIO;
594 	}
595 
596 	if (likely(!err))
597 		notify_group_csg_reg_map_done(group);
598 	else
599 		notify_group_csg_reg_map_error(group);
600 
601 	return err;
602 }
603 
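/* Allocate one csg_reg's VA region of 2 * (nr_susp_pages + nr_csis) pages in the MCU
 * shared segment and map every page to the dummy page, ready for runtime rebinding.
 */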
604 static int shared_mcu_csg_reg_init(struct kbase_device *kbdev,
605 				   struct kbase_csg_shared_region *csg_reg)
606 {
607 	struct kbase_csf_mcu_shared_regions *shared_regs = &kbdev->csf.scheduler.mcu_regs_data;
608 	const u32 nr_susp_pages = PFN_UP(kbdev->csf.global_iface.groups[0].suspend_size);
609 	u32 nr_csis = kbdev->csf.global_iface.groups[0].stream_num;
610 	const size_t nr_csg_reg_pages = 2 * (nr_susp_pages + nr_csis);
611 	struct kbase_va_region *reg;
612 	u64 vpfn;
613 	int err, i;
614 
615 	INIT_LIST_HEAD(&csg_reg->link);
616 	reg = kbase_alloc_free_region(kbdev, &kbdev->csf.shared_reg_rbtree, 0, nr_csg_reg_pages,
617 				      KBASE_REG_ZONE_MCU_SHARED);
618 
619 	if (!reg) {
620 		dev_err(kbdev->dev, "%s: Failed to allocate a MCU shared region for %zu pages\n",
621 			__func__, nr_csg_reg_pages);
622 		return -ENOMEM;
623 	}
624 
625 	/* Insert the region into the rbtree, so it becomes ready to use */
626 	mutex_lock(&kbdev->csf.reg_lock);
627 	err = kbase_add_va_region_rbtree(kbdev, reg, 0, nr_csg_reg_pages, 1);
628 	reg->flags &= ~KBASE_REG_FREE;
629 	mutex_unlock(&kbdev->csf.reg_lock);
630 	if (err) {
631 		kfree(reg);
632 		dev_err(kbdev->dev, "%s: Failed to add a region of %zu pages into rbtree", __func__,
633 			nr_csg_reg_pages);
634 		return err;
635 	}
636 
637 	/* Initialize the mappings so the MMU only needs to update the corresponding
638 	 * mapped phy-pages at runtime.
639 	 * Map the normal suspend buffer pages to the prepared dummy phys[].
640 	 */
641 	vpfn = CSG_REG_SUSP_BUF_VPFN(reg, nr_susp_pages);
642 	err = insert_dummy_pages(kbdev, vpfn, nr_susp_pages);
643 
644 	if (unlikely(err))
645 		goto fail_susp_map_fail;
646 
647 	/* Map the protected suspend buffer pages to the prepared dummy phys[] */
648 	vpfn = CSG_REG_PMOD_BUF_VPFN(reg, nr_susp_pages);
649 	err = insert_dummy_pages(kbdev, vpfn, nr_susp_pages);
650 
651 	if (unlikely(err))
652 		goto fail_pmod_map_fail;
653 
654 	for (i = 0; i < nr_csis; i++) {
655 		vpfn = CSG_REG_USERIO_VPFN(reg, i, nr_susp_pages);
656 		err = insert_dummy_pages(kbdev, vpfn, KBASEP_NUM_CS_USER_IO_PAGES);
657 
658 		if (unlikely(err))
659 			goto fail_userio_pages_map_fail;
660 	}
661 
662 	/* Replace the previous NULL-valued field with the successfully initialized reg */
663 	csg_reg->reg = reg;
664 
665 	return 0;
666 
667 fail_userio_pages_map_fail:
668 	while (i-- > 0) {
669 		vpfn = CSG_REG_USERIO_VPFN(reg, i, nr_susp_pages);
670 		kbase_mmu_teardown_pages(kbdev, &kbdev->csf.mcu_mmu, vpfn, shared_regs->dummy_phys,
671 					 KBASEP_NUM_CS_USER_IO_PAGES, KBASEP_NUM_CS_USER_IO_PAGES,
672 					 MCU_AS_NR, true);
673 	}
674 
675 	vpfn = CSG_REG_PMOD_BUF_VPFN(reg, nr_susp_pages);
676 	kbase_mmu_teardown_pages(kbdev, &kbdev->csf.mcu_mmu, vpfn, shared_regs->dummy_phys,
677 				 nr_susp_pages, nr_susp_pages, MCU_AS_NR, true);
678 fail_pmod_map_fail:
679 	vpfn = CSG_REG_SUSP_BUF_VPFN(reg, nr_susp_pages);
680 	kbase_mmu_teardown_pages(kbdev, &kbdev->csf.mcu_mmu, vpfn, shared_regs->dummy_phys,
681 				 nr_susp_pages, nr_susp_pages, MCU_AS_NR, true);
682 fail_susp_map_fail:
683 	mutex_lock(&kbdev->csf.reg_lock);
684 	kbase_remove_va_region(kbdev, reg);
685 	mutex_unlock(&kbdev->csf.reg_lock);
686 	kfree(reg);
687 
688 	return err;
689 }
690 
691 /* Note, this helper can only be called on scheduler shutdown */
692 static void shared_mcu_csg_reg_term(struct kbase_device *kbdev,
693 				    struct kbase_csg_shared_region *csg_reg)
694 {
695 	struct kbase_csf_mcu_shared_regions *shared_regs = &kbdev->csf.scheduler.mcu_regs_data;
696 	struct kbase_va_region *reg = csg_reg->reg;
697 	const u32 nr_susp_pages = PFN_UP(kbdev->csf.global_iface.groups[0].suspend_size);
698 	const u32 nr_csis = kbdev->csf.global_iface.groups[0].stream_num;
699 	u64 vpfn;
700 	int i;
701 
702 	for (i = 0; i < nr_csis; i++) {
703 		vpfn = CSG_REG_USERIO_VPFN(reg, i, nr_susp_pages);
704 		kbase_mmu_teardown_pages(kbdev, &kbdev->csf.mcu_mmu, vpfn, shared_regs->dummy_phys,
705 					 KBASEP_NUM_CS_USER_IO_PAGES, KBASEP_NUM_CS_USER_IO_PAGES,
706 					 MCU_AS_NR, true);
707 	}
708 
709 	vpfn = CSG_REG_PMOD_BUF_VPFN(reg, nr_susp_pages);
710 	kbase_mmu_teardown_pages(kbdev, &kbdev->csf.mcu_mmu, vpfn, shared_regs->dummy_phys,
711 				 nr_susp_pages, nr_susp_pages, MCU_AS_NR, true);
712 	vpfn = CSG_REG_SUSP_BUF_VPFN(reg, nr_susp_pages);
713 	kbase_mmu_teardown_pages(kbdev, &kbdev->csf.mcu_mmu, vpfn, shared_regs->dummy_phys,
714 				 nr_susp_pages, nr_susp_pages, MCU_AS_NR, true);
715 
716 	mutex_lock(&kbdev->csf.reg_lock);
717 	kbase_remove_va_region(kbdev, reg);
718 	mutex_unlock(&kbdev->csf.reg_lock);
719 	kfree(reg);
720 }
721 
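/* Initialize the scheduler's MCU shared regions data: allocate the shared dummy page
 * (replicated across the dummy_phys array) and the pma_phys scratch array, then
 * pre-allocate and dummy-map MCU_SHARED_REGS_PREALLOCATE_SCALE * nr_groups csg_regs
 * onto the unused list.
 */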
722 int kbase_csf_mcu_shared_regs_data_init(struct kbase_device *kbdev)
723 {
724 	struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler;
725 	struct kbase_csf_mcu_shared_regions *shared_regs = &scheduler->mcu_regs_data;
726 	struct kbase_csg_shared_region *array_csg_regs;
727 	const size_t nr_susp_pages = PFN_UP(kbdev->csf.global_iface.groups[0].suspend_size);
728 	const u32 nr_groups = kbdev->csf.global_iface.group_num;
729 	const u32 nr_csg_regs = MCU_SHARED_REGS_PREALLOCATE_SCALE * nr_groups;
730 	const u32 nr_dummy_phys = MAX(nr_susp_pages, KBASEP_NUM_CS_USER_IO_PAGES);
731 	u32 i;
732 	int err;
733 
734 	shared_regs->userio_mem_rd_flags = get_userio_mmu_flags(kbdev);
735 	INIT_LIST_HEAD(&shared_regs->unused_csg_regs);
736 
737 	shared_regs->dummy_phys =
738 		kcalloc(nr_dummy_phys, sizeof(*shared_regs->dummy_phys), GFP_KERNEL);
739 	if (!shared_regs->dummy_phys)
740 		return -ENOMEM;
741 
742 	if (kbase_mem_pool_alloc_pages(&kbdev->mem_pools.small[KBASE_MEM_GROUP_CSF_FW], 1,
743 				       &shared_regs->dummy_phys[0], false, NULL) <= 0)
744 		return -ENOMEM;
745 
746 	shared_regs->dummy_phys_allocated = true;
747 	set_page_meta_status_not_movable(shared_regs->dummy_phys[0]);
748 
749 	/* Replicate the single allocated shared_regs->dummy_phys[0] across the full array */
750 	for (i = 1; i < nr_dummy_phys; i++)
751 		shared_regs->dummy_phys[i] = shared_regs->dummy_phys[0];
752 
753 	shared_regs->pma_phys = kcalloc(nr_susp_pages, sizeof(*shared_regs->pma_phys), GFP_KERNEL);
754 	if (!shared_regs->pma_phys)
755 		return -ENOMEM;
756 
757 	array_csg_regs = kcalloc(nr_csg_regs, sizeof(*array_csg_regs), GFP_KERNEL);
758 	if (!array_csg_regs)
759 		return -ENOMEM;
760 	shared_regs->array_csg_regs = array_csg_regs;
761 
762 	/* All fields in scheduler->mcu_regs_data except the shared_regs->array_csg_regs
763 	 * are properly populated and ready to use. Now initialize the items in
764 	 * shared_regs->array_csg_regs[]
765 	 */
766 	for (i = 0; i < nr_csg_regs; i++) {
767 		err = shared_mcu_csg_reg_init(kbdev, &array_csg_regs[i]);
768 		if (err)
769 			return err;
770 
771 		list_add_tail(&array_csg_regs[i].link, &shared_regs->unused_csg_regs);
772 	}
773 
774 	return 0;
775 }
776 
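/* Tear down all pre-allocated csg_regs and release the shared dummy page and the
 * pma_phys array. Only to be called on scheduler termination.
 */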
777 void kbase_csf_mcu_shared_regs_data_term(struct kbase_device *kbdev)
778 {
779 	struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler;
780 	struct kbase_csf_mcu_shared_regions *shared_regs = &scheduler->mcu_regs_data;
781 	struct kbase_csg_shared_region *array_csg_regs =
782 		(struct kbase_csg_shared_region *)shared_regs->array_csg_regs;
783 	const u32 nr_groups = kbdev->csf.global_iface.group_num;
784 	const u32 nr_csg_regs = MCU_SHARED_REGS_PREALLOCATE_SCALE * nr_groups;
785 
786 	if (array_csg_regs) {
787 		struct kbase_csg_shared_region *csg_reg;
788 		u32 i, cnt_csg_regs = 0;
789 
790 		for (i = 0; i < nr_csg_regs; i++) {
791 			csg_reg = &array_csg_regs[i];
792 			/* There should not be any group mapping bindings */
793 			WARN_ONCE(csg_reg->grp, "csg_reg has a bound group");
794 
795 			if (csg_reg->reg) {
796 				shared_mcu_csg_reg_term(kbdev, csg_reg);
797 				cnt_csg_regs++;
798 			}
799 		}
800 
801 		/* The count of initialized csg_regs should match the number on the unused list */
802 		list_for_each_entry(csg_reg, &shared_regs->unused_csg_regs, link)
803 			cnt_csg_regs--;
804 
805 		WARN_ONCE(cnt_csg_regs, "Unmatched counts of susp_regs");
806 		kfree(shared_regs->array_csg_regs);
807 	}
808 
809 	if (shared_regs->dummy_phys_allocated) {
810 		struct page *page = as_page(shared_regs->dummy_phys[0]);
811 
812 		kbase_mem_pool_free(&kbdev->mem_pools.small[KBASE_MEM_GROUP_CSF_FW], page, false);
813 	}
814 
815 	kfree(shared_regs->dummy_phys);
816 	kfree(shared_regs->pma_phys);
817 }
818