// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2019-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include "mali_kbase_csf_csg_debugfs.h"
#include <mali_kbase.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <backend/gpu/mali_kbase_pm_internal.h>

#if IS_ENABLED(CONFIG_DEBUG_FS)
#include "mali_kbase_csf_tl_reader.h"

/* Wait time, in milliseconds, used cumulatively for all the CSG slots.
 * Since the scheduler lock is held when the STATUS_UPDATE request is sent,
 * there won't be any other Host request pending on the FW side. The FW is
 * usually responsive to the Doorbell IRQs, as it won't be polling for a long
 * time and won't have to wait for any HW state transition to complete before
 * publishing the status. So it is reasonable to expect the handling of the
 * STATUS_UPDATE request to be relatively quick.
 */
#define STATUS_UPDATE_WAIT_TIMEOUT 500

/* Bitmask of CSG slots for which the STATUS_UPDATE request has completed.
 * Access to it is serialized by the scheduler lock, so at any given time it
 * is used either for the "active_groups" or for a per-context "groups"
 * debugfs file.
 */
static DECLARE_BITMAP(csg_slots_status_updated, MAX_SUPPORTED_CSGS);

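/**
 * csg_slot_status_update_finish() - Check if the STATUS_UPDATE request has
 *                                   been acknowledged for a CSG slot
 *
 * @kbdev:  Pointer to the GPU device.
 * @csg_nr: CSG slot number to check.
 *
 * Return: true if the STATUS_UPDATE bit of CSG_REQ matches that of CSG_ACK,
 *         i.e. the firmware has completed the request for the given slot.
 */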
static bool csg_slot_status_update_finish(struct kbase_device *kbdev, u32 csg_nr)
{
	struct kbase_csf_cmd_stream_group_info const *const ginfo =
		&kbdev->csf.global_iface.groups[csg_nr];

	return !((kbase_csf_firmware_csg_input_read(ginfo, CSG_REQ) ^
		  kbase_csf_firmware_csg_output(ginfo, CSG_ACK)) &
		 CSG_REQ_STATUS_UPDATE_MASK);
}

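/**
 * csg_slots_status_update_finish() - Record the CSG slots that have completed
 *                                    the STATUS_UPDATE request
 *
 * @kbdev:      Pointer to the GPU device.
 * @slots_mask: Bitmask of the CSG slots to check.
 *
 * Slots whose request has been acknowledged are marked in the
 * csg_slots_status_updated bitmap.
 *
 * Return: true if at least one of the checked slots has completed the request.
 */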
static bool csg_slots_status_update_finish(struct kbase_device *kbdev,
					   const unsigned long *slots_mask)
{
	const u32 max_csg_slots = kbdev->csf.global_iface.group_num;
	bool changed = false;
	u32 csg_nr;

	lockdep_assert_held(&kbdev->csf.scheduler.lock);

	for_each_set_bit(csg_nr, slots_mask, max_csg_slots) {
		if (csg_slot_status_update_finish(kbdev, csg_nr)) {
			set_bit(csg_nr, csg_slots_status_updated);
			changed = true;
		}
	}

	return changed;
}

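/**
 * wait_csg_slots_status_update_finish() - Wait for the STATUS_UPDATE requests
 *                                         to complete for a set of CSG slots
 *
 * @kbdev:      Pointer to the GPU device.
 * @slots_mask: Bitmask of the CSG slots to wait for. Completed slots are
 *              cleared from the mask.
 *
 * The wait is bounded by STATUS_UPDATE_WAIT_TIMEOUT, applied cumulatively
 * across all the slots; a warning is printed for slots that timed out.
 */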
static void wait_csg_slots_status_update_finish(struct kbase_device *kbdev,
						unsigned long *slots_mask)
{
	const u32 max_csg_slots = kbdev->csf.global_iface.group_num;
	long remaining = kbase_csf_timeout_in_jiffies(STATUS_UPDATE_WAIT_TIMEOUT);

	lockdep_assert_held(&kbdev->csf.scheduler.lock);

	bitmap_zero(csg_slots_status_updated, max_csg_slots);

	while (!bitmap_empty(slots_mask, max_csg_slots) && remaining) {
		remaining = wait_event_timeout(kbdev->csf.event_wait,
				csg_slots_status_update_finish(kbdev, slots_mask),
				remaining);
		if (likely(remaining)) {
			bitmap_andnot(slots_mask, slots_mask,
				      csg_slots_status_updated, max_csg_slots);
		} else {
			dev_warn(kbdev->dev,
				 "STATUS_UPDATE request timed out for slots 0x%lx",
				 slots_mask[0]);
		}
	}
}

void kbase_csf_debugfs_update_active_groups_status(struct kbase_device *kbdev)
{
	u32 max_csg_slots = kbdev->csf.global_iface.group_num;
	DECLARE_BITMAP(used_csgs, MAX_SUPPORTED_CSGS) = { 0 };
	u32 csg_nr;
	unsigned long flags;

	lockdep_assert_held(&kbdev->csf.scheduler.lock);

	/* Neither the global doorbell ring for the CSG STATUS_UPDATE request
	 * nor the User doorbell ring for the Extract offset update shall be
	 * made when the MCU has been put to sleep, otherwise it would
	 * undesirably make the MCU exit the sleep state. It isn't really
	 * needed anyway, as the FW will implicitly update the status of all
	 * on-slot groups when the MCU sleep request is sent to it.
	 */
	if (kbdev->csf.scheduler.state == SCHED_SLEEPING) {
		/* Wait for the MCU sleep request to complete. */
		kbase_pm_wait_for_desired_state(kbdev);
		bitmap_copy(csg_slots_status_updated,
			    kbdev->csf.scheduler.csg_inuse_bitmap, max_csg_slots);
		return;
	}

	for (csg_nr = 0; csg_nr < max_csg_slots; csg_nr++) {
		struct kbase_queue_group *const group =
			kbdev->csf.scheduler.csg_slots[csg_nr].resident_group;
		if (!group)
			continue;
		/* Ring the User doorbell for FW to update the Extract offset */
		kbase_csf_ring_doorbell(kbdev, group->doorbell_nr);
		set_bit(csg_nr, used_csgs);
	}

	/* Return early if there are no on-slot groups */
	if (bitmap_empty(used_csgs, max_csg_slots))
		return;

	kbase_csf_scheduler_spin_lock(kbdev, &flags);
	for_each_set_bit(csg_nr, used_csgs, max_csg_slots) {
		struct kbase_csf_cmd_stream_group_info const *const ginfo =
			&kbdev->csf.global_iface.groups[csg_nr];
		kbase_csf_firmware_csg_input_mask(ginfo, CSG_REQ,
				~kbase_csf_firmware_csg_output(ginfo, CSG_ACK),
				CSG_REQ_STATUS_UPDATE_MASK);
	}

	BUILD_BUG_ON(MAX_SUPPORTED_CSGS > (sizeof(used_csgs[0]) * BITS_PER_BYTE));
	kbase_csf_ring_csg_slots_doorbell(kbdev, used_csgs[0]);
	kbase_csf_scheduler_spin_unlock(kbdev, flags);
	wait_csg_slots_status_update_finish(kbdev, used_csgs);
	/* Wait for the User doorbell ring to take effect */
	msleep(100);
}

#define MAX_SCHED_STATE_STRING_LEN (16)
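/**
 * scheduler_state_to_string() - Get the name of a Scheduler state
 *
 * @kbdev:       Pointer to the GPU device.
 * @sched_state: Scheduler state to convert.
 *
 * Return: Name of the state, or NULL (after printing a warning) if the state
 *         is not recognized.
 */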
static const char *scheduler_state_to_string(struct kbase_device *kbdev,
					     enum kbase_csf_scheduler_state sched_state)
{
	switch (sched_state) {
	case SCHED_BUSY:
		return "BUSY";
	case SCHED_INACTIVE:
		return "INACTIVE";
	case SCHED_SUSPENDED:
		return "SUSPENDED";
#ifdef KBASE_PM_RUNTIME
	case SCHED_SLEEPING:
		return "SLEEPING";
#endif
	default:
		dev_warn(kbdev->dev, "Unknown Scheduler state %d", sched_state);
		return NULL;
	}
}

/**
 * blocked_reason_to_string() - Convert blocking reason id to a string
 *
 * @reason_id: blocked_reason
 *
 * Return: Suitable string
 */
static const char *blocked_reason_to_string(u32 reason_id)
{
	/* possible blocking reasons of a cs */
	static const char *const cs_blocked_reason[] = {
		[CS_STATUS_BLOCKED_REASON_REASON_UNBLOCKED] = "UNBLOCKED",
		[CS_STATUS_BLOCKED_REASON_REASON_WAIT] = "WAIT",
		[CS_STATUS_BLOCKED_REASON_REASON_PROGRESS_WAIT] =
			"PROGRESS_WAIT",
		[CS_STATUS_BLOCKED_REASON_REASON_SYNC_WAIT] = "SYNC_WAIT",
		[CS_STATUS_BLOCKED_REASON_REASON_DEFERRED] = "DEFERRED",
		[CS_STATUS_BLOCKED_REASON_REASON_RESOURCE] = "RESOURCE",
		[CS_STATUS_BLOCKED_REASON_REASON_FLUSH] = "FLUSH"
	};

	if (WARN_ON(reason_id >= ARRAY_SIZE(cs_blocked_reason)))
		return "UNKNOWN_BLOCKED_REASON_ID";

	return cs_blocked_reason[reason_id];
}

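/**
 * sb_source_supported() - Check if the firmware global interface is recent
 *                         enough to report the SB_SOURCE field
 *
 * @glb_version: Version of the firmware global interface.
 *
 * Return: true if the SB_SOURCE field of CS_STATUS_WAIT can be dumped.
 */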
static bool sb_source_supported(u32 glb_version)
{
	bool supported = false;

	if (((GLB_VERSION_MAJOR_GET(glb_version) == 3) &&
	     (GLB_VERSION_MINOR_GET(glb_version) >= 5)) ||
	    ((GLB_VERSION_MAJOR_GET(glb_version) == 2) &&
	     (GLB_VERSION_MINOR_GET(glb_version) >= 6)) ||
	    ((GLB_VERSION_MAJOR_GET(glb_version) == 1) &&
	     (GLB_VERSION_MINOR_GET(glb_version) >= 3)))
		supported = true;

	return supported;
}

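/**
 * kbasep_csf_scheduler_dump_active_queue_cs_status_wait() - Print the WAIT
 *                                 status information of a GPU command queue
 *
 * @file:                 The seq_file for printing to.
 * @glb_version:          Version of the firmware global interface.
 * @wait_status:          Value of CS_STATUS_WAIT for the queue.
 * @wait_sync_value:      Value that the queue is waiting for the sync object
 *                        to reach.
 * @wait_sync_live_value: Current value of the sync object.
 * @wait_sync_pointer:    GPU address of the sync object.
 * @sb_status:            Value of CS_STATUS_SCOREBOARDS for the queue.
 * @blocked_reason:       Value of CS_STATUS_BLOCKED_REASON for the queue.
 */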
static void kbasep_csf_scheduler_dump_active_queue_cs_status_wait(
	struct seq_file *file, u32 glb_version, u32 wait_status, u32 wait_sync_value,
	u64 wait_sync_live_value, u64 wait_sync_pointer, u32 sb_status, u32 blocked_reason)
{
#define WAITING "Waiting"
#define NOT_WAITING "Not waiting"

	seq_printf(file, "SB_MASK: %d\n",
		   CS_STATUS_WAIT_SB_MASK_GET(wait_status));
	if (sb_source_supported(glb_version))
		seq_printf(file, "SB_SOURCE: %d\n", CS_STATUS_WAIT_SB_SOURCE_GET(wait_status));
	seq_printf(file, "PROGRESS_WAIT: %s\n",
		   CS_STATUS_WAIT_PROGRESS_WAIT_GET(wait_status) ?
		   WAITING : NOT_WAITING);
	seq_printf(file, "PROTM_PEND: %s\n",
		   CS_STATUS_WAIT_PROTM_PEND_GET(wait_status) ?
		   WAITING : NOT_WAITING);
	seq_printf(file, "SYNC_WAIT: %s\n",
		   CS_STATUS_WAIT_SYNC_WAIT_GET(wait_status) ?
		   WAITING : NOT_WAITING);
	seq_printf(file, "WAIT_CONDITION: %s\n",
		   CS_STATUS_WAIT_SYNC_WAIT_CONDITION_GET(wait_status) ?
		   "greater than" : "less or equal");
	seq_printf(file, "SYNC_POINTER: 0x%llx\n", wait_sync_pointer);
	seq_printf(file, "SYNC_VALUE: %d\n", wait_sync_value);
	seq_printf(file, "SYNC_LIVE_VALUE: 0x%016llx\n", wait_sync_live_value);
	seq_printf(file, "SB_STATUS: %u\n",
		   CS_STATUS_SCOREBOARDS_NONZERO_GET(sb_status));
	seq_printf(file, "BLOCKED_REASON: %s\n",
		   blocked_reason_to_string(CS_STATUS_BLOCKED_REASON_REASON_GET(
			   blocked_reason)));
}

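/**
 * kbasep_csf_scheduler_dump_active_cs_trace() - Print the cs_trace
 *                                               configuration of a CS interface
 *
 * @file:   The seq_file for printing to.
 * @stream: The CS interface whose cs_trace configuration is dumped.
 */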
static void kbasep_csf_scheduler_dump_active_cs_trace(struct seq_file *file,
		struct kbase_csf_cmd_stream_info const *const stream)
{
	u32 val = kbase_csf_firmware_cs_input_read(stream,
			CS_INSTR_BUFFER_BASE_LO);
	u64 addr = ((u64)kbase_csf_firmware_cs_input_read(stream,
			CS_INSTR_BUFFER_BASE_HI) << 32) | val;
	val = kbase_csf_firmware_cs_input_read(stream,
			CS_INSTR_BUFFER_SIZE);

	seq_printf(file, "CS_TRACE_BUF_ADDR: 0x%16llx, SIZE: %u\n", addr, val);

	/* Write offset variable address (pointer) */
	val = kbase_csf_firmware_cs_input_read(stream,
			CS_INSTR_BUFFER_OFFSET_POINTER_LO);
	addr = ((u64)kbase_csf_firmware_cs_input_read(stream,
			CS_INSTR_BUFFER_OFFSET_POINTER_HI) << 32) | val;
	seq_printf(file, "CS_TRACE_BUF_OFFSET_PTR: 0x%16llx\n", addr);

	/* EVENT_SIZE and EVENT_STATEs */
	val = kbase_csf_firmware_cs_input_read(stream, CS_INSTR_CONFIG);
	seq_printf(file, "TRACE_EVENT_SIZE: 0x%x, TRACE_EVENT_STATES 0x%x\n",
		   CS_INSTR_CONFIG_EVENT_SIZE_GET(val),
		   CS_INSTR_CONFIG_EVENT_STATE_GET(val));
}

/**
 * kbasep_csf_scheduler_dump_active_queue() - Print GPU command queue
 *                                            debug information
 *
 * @file:  seq_file for printing to
 * @queue: Address of a GPU command queue to examine
 */
static void kbasep_csf_scheduler_dump_active_queue(struct seq_file *file,
						   struct kbase_queue *queue)
{
	u32 *addr;
	u64 cs_extract;
	u64 cs_insert;
	u32 cs_active;
	u64 wait_sync_pointer;
	u32 wait_status, wait_sync_value;
	u32 sb_status;
	u32 blocked_reason;
	struct kbase_vmap_struct *mapping;
	u64 *evt;
	u64 wait_sync_live_value;
	u32 glb_version;

	if (!queue)
		return;

	glb_version = queue->kctx->kbdev->csf.global_iface.version;

	if (WARN_ON(queue->csi_index == KBASEP_IF_NR_INVALID ||
		    !queue->group))
		return;

	addr = (u32 *)queue->user_io_addr;
	cs_insert = addr[CS_INSERT_LO/4] | ((u64)addr[CS_INSERT_HI/4] << 32);

	addr = (u32 *)(queue->user_io_addr + PAGE_SIZE);
	cs_extract = addr[CS_EXTRACT_LO/4] | ((u64)addr[CS_EXTRACT_HI/4] << 32);
	cs_active = addr[CS_ACTIVE/4];

#define KBASEP_CSF_DEBUGFS_CS_HEADER_USER_IO \
	"Bind Idx, Ringbuf addr, Size, Prio, Insert offset, Extract offset, Active, Doorbell\n"

	seq_printf(file, KBASEP_CSF_DEBUGFS_CS_HEADER_USER_IO "%8d, %16llx, %8x, %4u, %16llx, %16llx, %6u, %8d\n",
		   queue->csi_index, queue->base_addr,
		   queue->size,
		   queue->priority, cs_insert, cs_extract, cs_active, queue->doorbell_nr);

	/* Print status information for blocked group waiting for sync object. For on-slot queues,
	 * if cs_trace is enabled, dump the interface's cs_trace configuration.
	 */
	if (kbase_csf_scheduler_group_get_slot(queue->group) < 0) {
		seq_printf(file, "SAVED_CMD_PTR: 0x%llx\n", queue->saved_cmd_ptr);
		if (CS_STATUS_WAIT_SYNC_WAIT_GET(queue->status_wait)) {
			wait_status = queue->status_wait;
			wait_sync_value = queue->sync_value;
			wait_sync_pointer = queue->sync_ptr;
			sb_status = queue->sb_status;
			blocked_reason = queue->blocked_reason;

			evt = (u64 *)kbase_phy_alloc_mapping_get(queue->kctx, wait_sync_pointer, &mapping);
			if (evt) {
				wait_sync_live_value = evt[0];
				kbase_phy_alloc_mapping_put(queue->kctx, mapping);
			} else {
				wait_sync_live_value = U64_MAX;
			}

			kbasep_csf_scheduler_dump_active_queue_cs_status_wait(
				file, glb_version, wait_status, wait_sync_value,
				wait_sync_live_value, wait_sync_pointer, sb_status, blocked_reason);
		}
	} else {
		struct kbase_device const *const kbdev =
			queue->group->kctx->kbdev;
		struct kbase_csf_cmd_stream_group_info const *const ginfo =
			&kbdev->csf.global_iface.groups[queue->group->csg_nr];
		struct kbase_csf_cmd_stream_info const *const stream =
			&ginfo->streams[queue->csi_index];
		u64 cmd_ptr;
		u32 req_res;

		if (WARN_ON(!stream))
			return;

		cmd_ptr = kbase_csf_firmware_cs_output(stream,
				CS_STATUS_CMD_PTR_LO);
		cmd_ptr |= (u64)kbase_csf_firmware_cs_output(stream,
				CS_STATUS_CMD_PTR_HI) << 32;
		req_res = kbase_csf_firmware_cs_output(stream,
				CS_STATUS_REQ_RESOURCE);

		seq_printf(file, "CMD_PTR: 0x%llx\n", cmd_ptr);
		seq_printf(file, "REQ_RESOURCE [COMPUTE]: %d\n",
			   CS_STATUS_REQ_RESOURCE_COMPUTE_RESOURCES_GET(req_res));
		seq_printf(file, "REQ_RESOURCE [FRAGMENT]: %d\n",
			   CS_STATUS_REQ_RESOURCE_FRAGMENT_RESOURCES_GET(req_res));
		seq_printf(file, "REQ_RESOURCE [TILER]: %d\n",
			   CS_STATUS_REQ_RESOURCE_TILER_RESOURCES_GET(req_res));
		seq_printf(file, "REQ_RESOURCE [IDVS]: %d\n",
			   CS_STATUS_REQ_RESOURCE_IDVS_RESOURCES_GET(req_res));

		wait_status = kbase_csf_firmware_cs_output(stream,
				CS_STATUS_WAIT);
		wait_sync_value = kbase_csf_firmware_cs_output(stream,
				CS_STATUS_WAIT_SYNC_VALUE);
		wait_sync_pointer = kbase_csf_firmware_cs_output(stream,
				CS_STATUS_WAIT_SYNC_POINTER_LO);
		wait_sync_pointer |= (u64)kbase_csf_firmware_cs_output(stream,
				CS_STATUS_WAIT_SYNC_POINTER_HI) << 32;

		sb_status = kbase_csf_firmware_cs_output(stream,
				CS_STATUS_SCOREBOARDS);
		blocked_reason = kbase_csf_firmware_cs_output(
			stream, CS_STATUS_BLOCKED_REASON);

		evt = (u64 *)kbase_phy_alloc_mapping_get(queue->kctx, wait_sync_pointer, &mapping);
		if (evt) {
			wait_sync_live_value = evt[0];
			kbase_phy_alloc_mapping_put(queue->kctx, mapping);
		} else {
			wait_sync_live_value = U64_MAX;
		}

		kbasep_csf_scheduler_dump_active_queue_cs_status_wait(
			file, glb_version, wait_status, wait_sync_value, wait_sync_live_value,
			wait_sync_pointer, sb_status, blocked_reason);
		/* Dealing with cs_trace */
		if (kbase_csf_scheduler_queue_has_trace(queue))
			kbasep_csf_scheduler_dump_active_cs_trace(file, stream);
		else
			seq_puts(file, "NO CS_TRACE\n");
	}

	seq_puts(file, "\n");
}

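/**
 * kbasep_csf_scheduler_dump_active_group() - Print GPU command queue group
 *                                            debug information
 *
 * @file:  seq_file for printing to
 * @group: Address of a GPU command queue group to examine
 */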
static void kbasep_csf_scheduler_dump_active_group(struct seq_file *file,
		struct kbase_queue_group *const group)
{
	if (kbase_csf_scheduler_group_get_slot(group) >= 0) {
		struct kbase_device *const kbdev = group->kctx->kbdev;
		u32 ep_c, ep_r;
		char exclusive;
		char idle = 'N';
		struct kbase_csf_cmd_stream_group_info const *const ginfo =
			&kbdev->csf.global_iface.groups[group->csg_nr];
		u8 slot_priority =
			kbdev->csf.scheduler.csg_slots[group->csg_nr].priority;

		ep_c = kbase_csf_firmware_csg_output(ginfo,
				CSG_STATUS_EP_CURRENT);
		ep_r = kbase_csf_firmware_csg_output(ginfo, CSG_STATUS_EP_REQ);

		if (CSG_STATUS_EP_REQ_EXCLUSIVE_COMPUTE_GET(ep_r))
			exclusive = 'C';
		else if (CSG_STATUS_EP_REQ_EXCLUSIVE_FRAGMENT_GET(ep_r))
			exclusive = 'F';
		else
			exclusive = '0';

		if (kbase_csf_firmware_csg_output(ginfo, CSG_STATUS_STATE) &
		    CSG_STATUS_STATE_IDLE_MASK)
			idle = 'Y';

		if (!test_bit(group->csg_nr, csg_slots_status_updated)) {
			seq_printf(file, "*** Warn: Timed out for STATUS_UPDATE on slot %d\n",
				   group->csg_nr);
			seq_puts(file, "*** The following group-record is likely stale\n");
		}

		seq_puts(file, "GroupID, CSG NR, CSG Prio, Run State, Priority, C_EP(Alloc/Req), F_EP(Alloc/Req), T_EP(Alloc/Req), Exclusive, Idle\n");
		seq_printf(file, "%7d, %6d, %8d, %9d, %8d, %11d/%3d, %11d/%3d, %11d/%3d, %9c, %4c\n",
			   group->handle,
			   group->csg_nr,
			   slot_priority,
			   group->run_state,
			   group->priority,
			   CSG_STATUS_EP_CURRENT_COMPUTE_EP_GET(ep_c),
			   CSG_STATUS_EP_REQ_COMPUTE_EP_GET(ep_r),
			   CSG_STATUS_EP_CURRENT_FRAGMENT_EP_GET(ep_c),
			   CSG_STATUS_EP_REQ_FRAGMENT_EP_GET(ep_r),
			   CSG_STATUS_EP_CURRENT_TILER_EP_GET(ep_c),
			   CSG_STATUS_EP_REQ_TILER_EP_GET(ep_r),
			   exclusive,
			   idle);
	} else {
		seq_puts(file, "GroupID, CSG NR, Run State, Priority\n");
		seq_printf(file, "%7d, %6d, %9d, %8d\n",
			   group->handle,
			   group->csg_nr,
			   group->run_state,
			   group->priority);
	}

	if (group->run_state != KBASE_CSF_GROUP_TERMINATED) {
		unsigned int i;

		seq_puts(file, "Bound queues:\n");

		for (i = 0; i < MAX_SUPPORTED_STREAMS_PER_GROUP; i++) {
			kbasep_csf_scheduler_dump_active_queue(file,
					group->bound_queues[i]);
		}
	}

	seq_puts(file, "\n");
}

/**
 * kbasep_csf_queue_group_debugfs_show() - Print per-context GPU command queue
 *                                         group debug information
 *
 * @file: The seq_file for printing to
 * @data: The debugfs dentry private data, a pointer to kbase context
 *
 * Return: Negative error code or 0 on success.
 */
static int kbasep_csf_queue_group_debugfs_show(struct seq_file *file,
		void *data)
{
	u32 gr;
	struct kbase_context *const kctx = file->private;
	struct kbase_device *kbdev;

	if (WARN_ON(!kctx))
		return -EINVAL;

	kbdev = kctx->kbdev;

	seq_printf(file, "MALI_CSF_CSG_DEBUGFS_VERSION: v%u\n",
		   MALI_CSF_CSG_DEBUGFS_VERSION);

	mutex_lock(&kctx->csf.lock);
	kbase_csf_scheduler_lock(kbdev);
	kbase_csf_debugfs_update_active_groups_status(kbdev);
	for (gr = 0; gr < MAX_QUEUE_GROUP_NUM; gr++) {
		struct kbase_queue_group *const group =
			kctx->csf.queue_groups[gr];

		if (group)
			kbasep_csf_scheduler_dump_active_group(file, group);
	}
	kbase_csf_scheduler_unlock(kbdev);
	mutex_unlock(&kctx->csf.lock);

	return 0;
}

/**
 * kbasep_csf_scheduler_dump_active_groups() - Print debug info for active
 *                                             GPU command queue groups
 *
 * @file: The seq_file for printing to
 * @data: The debugfs dentry private data, a pointer to kbase_device
 *
 * Return: Negative error code or 0 on success.
 */
static int kbasep_csf_scheduler_dump_active_groups(struct seq_file *file,
		void *data)
{
	u32 csg_nr;
	struct kbase_device *kbdev = file->private;
	u32 num_groups = kbdev->csf.global_iface.group_num;

	seq_printf(file, "MALI_CSF_CSG_DEBUGFS_VERSION: v%u\n",
		   MALI_CSF_CSG_DEBUGFS_VERSION);

	kbase_csf_scheduler_lock(kbdev);
	kbase_csf_debugfs_update_active_groups_status(kbdev);
	for (csg_nr = 0; csg_nr < num_groups; csg_nr++) {
		struct kbase_queue_group *const group =
			kbdev->csf.scheduler.csg_slots[csg_nr].resident_group;

		if (!group)
			continue;

		seq_printf(file, "\nCtx %d_%d\n", group->kctx->tgid,
			   group->kctx->id);

		kbasep_csf_scheduler_dump_active_group(file, group);
	}
	kbase_csf_scheduler_unlock(kbdev);

	return 0;
}

static int kbasep_csf_queue_group_debugfs_open(struct inode *in,
		struct file *file)
{
	return single_open(file, kbasep_csf_queue_group_debugfs_show,
			   in->i_private);
}

static int kbasep_csf_active_queue_groups_debugfs_open(struct inode *in,
		struct file *file)
{
	return single_open(file, kbasep_csf_scheduler_dump_active_groups,
			   in->i_private);
}

static const struct file_operations kbasep_csf_queue_group_debugfs_fops = {
	.open = kbasep_csf_queue_group_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

void kbase_csf_queue_group_debugfs_init(struct kbase_context *kctx)
{
	struct dentry *file;
	const mode_t mode = 0444;

	if (WARN_ON(!kctx || IS_ERR_OR_NULL(kctx->kctx_dentry)))
		return;

	file = debugfs_create_file("groups", mode,
			kctx->kctx_dentry, kctx, &kbasep_csf_queue_group_debugfs_fops);

	if (IS_ERR_OR_NULL(file)) {
		dev_warn(kctx->kbdev->dev,
			 "Unable to create per context queue groups debugfs entry");
	}
}

static const struct file_operations
	kbasep_csf_active_queue_groups_debugfs_fops = {
	.open = kbasep_csf_active_queue_groups_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

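/**
 * kbasep_csf_debugfs_scheduling_timer_enabled_get() - Get the state of the
 *                                                     scheduling timer
 *
 * @data: The debugfs dentry private data, a pointer to kbase_device.
 * @val:  The debugfs output value, 1 if the scheduling timer is enabled,
 *        0 otherwise.
 *
 * Return: 0
 */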
static int kbasep_csf_debugfs_scheduling_timer_enabled_get(
		void *data, u64 *val)
{
	struct kbase_device *const kbdev = data;

	*val = kbase_csf_scheduler_timer_is_enabled(kbdev);

	return 0;
}

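/**
 * kbasep_csf_debugfs_scheduling_timer_enabled_set() - Enable or disable the
 *                                                     scheduling timer
 *
 * @data: The debugfs dentry private data, a pointer to kbase_device.
 * @val:  The debugfs input value, non-zero to enable, zero to disable.
 *
 * Return: 0
 */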
static int kbasep_csf_debugfs_scheduling_timer_enabled_set(
		void *data, u64 val)
{
	struct kbase_device *const kbdev = data;

	kbase_csf_scheduler_timer_set_enabled(kbdev, val != 0);

	return 0;
}

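/**
 * kbasep_csf_debugfs_scheduling_timer_kick_set() - Kick the scheduler to run
 *                                                  immediately
 *
 * @data: The debugfs dentry private data, a pointer to kbase_device.
 * @val:  The debugfs input value, unused.
 *
 * Return: 0
 */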
static int kbasep_csf_debugfs_scheduling_timer_kick_set(
		void *data, u64 val)
{
	struct kbase_device *const kbdev = data;

	kbase_csf_scheduler_kick(kbdev);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(kbasep_csf_debugfs_scheduling_timer_enabled_fops,
			 &kbasep_csf_debugfs_scheduling_timer_enabled_get,
			 &kbasep_csf_debugfs_scheduling_timer_enabled_set, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(kbasep_csf_debugfs_scheduling_timer_kick_fops, NULL,
			 &kbasep_csf_debugfs_scheduling_timer_kick_set, "%llu\n");

/**
 * kbase_csf_debugfs_scheduler_state_get() - Get the state of scheduler.
 *
 * @file:     Object of the file that is being read.
 * @user_buf: User buffer that contains the string.
 * @count:    Length of user buffer
 * @ppos:     Offset within file object
 *
 * This function will return the current Scheduler state to Userspace.
 * The Scheduler may exit that state by the time the state string is received
 * by Userspace.
 *
 * Return: 0 if the Scheduler was found in an unexpected state, or the
 *         size of the state string if it was copied successfully to the
 *         User buffer, or a negative value in case of an error.
 */
static ssize_t kbase_csf_debugfs_scheduler_state_get(struct file *file,
		char __user *user_buf, size_t count, loff_t *ppos)
{
	struct kbase_device *kbdev = file->private_data;
	struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler;
	const char *state_string;

	kbase_csf_scheduler_lock(kbdev);
	state_string = scheduler_state_to_string(kbdev, scheduler->state);
	kbase_csf_scheduler_unlock(kbdev);

	/* Return 0 rather than dereferencing a NULL string if the Scheduler
	 * was found in an unrecognized state.
	 */
	if (!state_string)
		return 0;

	return simple_read_from_buffer(user_buf, count, ppos,
				       state_string, strlen(state_string));
}

/**
 * kbase_csf_debugfs_scheduler_state_set() - Set the state of scheduler.
 *
 * @file:  Object of the file that is being written to.
 * @ubuf:  User buffer that contains the string.
 * @count: Length of user buffer
 * @ppos:  Offset within file object
 *
 * This function will update the Scheduler state as per the state string
 * passed by Userspace. The Scheduler may or may not remain in the new state
 * for long.
 *
 * Return: Negative value if the string doesn't correspond to a valid Scheduler
 *         state or if copy from user buffer failed, otherwise the length of
 *         the User buffer.
 */
static ssize_t kbase_csf_debugfs_scheduler_state_set(struct file *file,
		const char __user *ubuf, size_t count, loff_t *ppos)
{
	struct kbase_device *kbdev = file->private_data;
	char buf[MAX_SCHED_STATE_STRING_LEN];
	ssize_t ret = count;

	CSTD_UNUSED(ppos);

	count = min_t(size_t, sizeof(buf) - 1, count);
	if (copy_from_user(buf, ubuf, count))
		return -EFAULT;

	buf[count] = 0;

	if (sysfs_streq(buf, "SUSPENDED"))
		kbase_csf_scheduler_pm_suspend(kbdev);
#ifdef KBASE_PM_RUNTIME
	else if (sysfs_streq(buf, "SLEEPING"))
		kbase_csf_scheduler_force_sleep(kbdev);
#endif
	else if (sysfs_streq(buf, "INACTIVE"))
		kbase_csf_scheduler_force_wakeup(kbdev);
	else {
		dev_dbg(kbdev->dev, "Bad scheduler state %s", buf);
		ret = -EINVAL;
	}

	return ret;
}

static const struct file_operations kbasep_csf_debugfs_scheduler_state_fops = {
	.owner = THIS_MODULE,
	.read = kbase_csf_debugfs_scheduler_state_get,
	.write = kbase_csf_debugfs_scheduler_state_set,
	.open = simple_open,
	.llseek = default_llseek,
};

void kbase_csf_debugfs_init(struct kbase_device *kbdev)
{
	debugfs_create_file("active_groups", 0444,
			kbdev->mali_debugfs_directory, kbdev,
			&kbasep_csf_active_queue_groups_debugfs_fops);

	debugfs_create_file("scheduling_timer_enabled", 0644,
			kbdev->mali_debugfs_directory, kbdev,
			&kbasep_csf_debugfs_scheduling_timer_enabled_fops);
	debugfs_create_file("scheduling_timer_kick", 0200,
			kbdev->mali_debugfs_directory, kbdev,
			&kbasep_csf_debugfs_scheduling_timer_kick_fops);
	debugfs_create_file("scheduler_state", 0644,
			kbdev->mali_debugfs_directory, kbdev,
			&kbasep_csf_debugfs_scheduler_state_fops);

	kbase_csf_tl_reader_debugfs_init(kbdev);
}

#else
/*
 * Stub functions for when debugfs is disabled
 */
void kbase_csf_queue_group_debugfs_init(struct kbase_context *kctx)
{
}

void kbase_csf_debugfs_init(struct kbase_device *kbdev)
{
}

#endif /* CONFIG_DEBUG_FS */