// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2020-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include <mali_kbase.h>
#include "debug/mali_kbase_debug_ktrace_internal.h"
#include "debug/backend/mali_kbase_debug_ktrace_csf.h"

#if KBASE_KTRACE_TARGET_RBUF

/*
 * Append the CSF backend-specific column headers to a ktrace dump line.
 *
 * @buffer:  destination buffer for the header text
 * @sz:      total size of @buffer in bytes
 * @written: in/out count of bytes already written to @buffer; advanced by
 *           the number of characters this call would have produced
 *           (snprintf semantics), clamped to be non-negative
 */
void kbasep_ktrace_backend_format_header(char *buffer, int sz, s32 *written)
{
	int ret;

	ret = snprintf(buffer + *written, MAX(sz - *written, 0),
		       "group,slot,prio,csi,kcpu");
	*written += MAX(ret, 0);
}

/*
 * Format the CSF backend-specific fields of one ktrace message into
 * "group,slot,prio,csi,kcpu" column order. Fields whose flag is not set
 * are left empty, but the comma delimiters between columns are always
 * emitted so the columns stay aligned.
 *
 * @trace_msg: message whose backend part is to be formatted
 * @buffer:    destination buffer
 * @sz:        total size of @buffer in bytes
 * @written:   in/out count of bytes already written; advanced as above
 */
void kbasep_ktrace_backend_format_msg(struct kbase_ktrace_msg *trace_msg,
		char *buffer, int sz, s32 *written)
{
	const union kbase_ktrace_backend * const be = &trace_msg->backend;
	/* At present, no need to check for KBASE_KTRACE_FLAG_BACKEND, as the
	 * other backend-specific flags currently imply this anyway
	 */

	/* group,slot,prio columns */
	if (be->gpu.flags & KBASE_KTRACE_FLAG_CSF_GROUP) {
		const s8 csg_nr = be->gpu.csg_nr;

		*written += MAX(snprintf(buffer + *written,
				MAX(sz - *written, 0),
				"%u,%d,", be->gpu.group_handle, csg_nr), 0);

		/* slot_prio is only meaningful while the group is on a
		 * slot (csg_nr >= 0), so leave the column empty otherwise
		 */
		if (csg_nr >= 0)
			*written += MAX(snprintf(buffer + *written,
					MAX(sz - *written, 0),
					"%u", be->gpu.slot_prio), 0);

		*written += MAX(snprintf(buffer + *written,
				MAX(sz - *written, 0),
				","), 0);
	} else {
		/* No group,slot,prio values; emit the delimiters only */
		*written += MAX(snprintf(buffer + *written,
				MAX(sz - *written, 0),
				",,,"), 0);
	}

	/* csi column */
	if (be->gpu.flags & KBASE_KTRACE_FLAG_CSF_QUEUE)
		*written += MAX(snprintf(buffer + *written,
				MAX(sz - *written, 0),
				"%d", be->gpu.csi_index), 0);

	*written += MAX(snprintf(buffer + *written,
			MAX(sz - *written, 0),
			","), 0);

	/* kcpu column */
	if (be->gpu.flags & KBASE_KTRACE_FLAG_CSF_KCPU)
		*written += MAX(snprintf(buffer + *written,
				MAX(sz - *written, 0),
				"kcpu %d (0x%llx)",
				be->kcpu.id,
				be->kcpu.extra_info_val), 0);

	/* Don't end with a trailing "," - this is a 'standalone' formatted
	 * msg, caller will handle the delimiters
	 */
}

/*
 * Add a CSF GPU-side trace message to the device's ktrace ringbuffer.
 *
 * @kbdev:    device owning the ktrace ringbuffer
 * @code:     trace code identifying the event
 * @group:    queue group the event relates to, or NULL
 * @queue:    queue the event relates to, or NULL
 * @flags:    common ktrace flags for the message
 * @info_val: event-specific value recorded with the message
 *
 * Silently does nothing if the ktrace ringbuffer is not yet initialized.
 * Takes kbdev->ktrace.lock with IRQs disabled while reserving and filling
 * the message slot.
 */
void kbasep_ktrace_add_csf(struct kbase_device *kbdev,
		enum kbase_ktrace_code code, struct kbase_queue_group *group,
		struct kbase_queue *queue, kbase_ktrace_flag_t flags,
		u64 info_val)
{
	unsigned long irqflags;
	struct kbase_ktrace_msg *trace_msg;
	struct kbase_context *kctx;
	union kbase_ktrace_backend *be;

	if (unlikely(!kbasep_ktrace_initialized(&kbdev->ktrace)))
		return;

	/* The context comes from the group if present, else from the queue */
	kctx = group ? group->kctx : (queue ? queue->kctx : NULL);

	spin_lock_irqsave(&kbdev->ktrace.lock, irqflags);

	/* Reserve and update indices */
	trace_msg = kbasep_ktrace_reserve(&kbdev->ktrace);
	be = &trace_msg->backend;

	/* Fill the common part of the message (including backend.gpu.flags) */
	kbasep_ktrace_msg_init(&kbdev->ktrace, trace_msg, code, kctx, flags,
			info_val);

	/* Indicate to the common code that backend-specific parts will be
	 * valid
	 */
	be->gpu.flags |= KBASE_KTRACE_FLAG_BACKEND;

	/* Fill the CSF-specific parts of the message
	 *
	 * Generally, no need to use default initializers when queue/group not
	 * present - can usually check the flags instead.
	 */
	if (queue) {
		be->gpu.flags |= KBASE_KTRACE_FLAG_CSF_QUEUE;
		be->gpu.csi_index = queue->csi_index;
	}

	if (group) {
		const s8 csg_nr = group->csg_nr;

		be->gpu.flags |= KBASE_KTRACE_FLAG_CSF_GROUP;
		be->gpu.csg_nr = csg_nr;
		be->gpu.group_handle = group->handle;

		/* csg_nr >= 0 indicates whether slot_prio is valid, so no
		 * need to initialize it in the case where it's invalid
		 */
		if (csg_nr >= 0)
			be->gpu.slot_prio =
				kbdev->csf.scheduler.csg_slots[csg_nr].priority;
	}

	WARN_ON((be->gpu.flags & ~KBASE_KTRACE_FLAG_ALL));

	/* Done */
	spin_unlock_irqrestore(&kbdev->ktrace.lock, irqflags);
}

/*
 * Add a KCPU-queue trace message to the device's ktrace ringbuffer.
 *
 * @kbdev:     device owning the ktrace ringbuffer
 * @code:      trace code identifying the event
 * @queue:     KCPU command queue the event relates to (must be non-NULL,
 *             its kctx is recorded with the message)
 * @info_val1: event-specific value stored in the common message part
 * @info_val2: event-specific value stored in the KCPU backend part
 *
 * Silently does nothing if the ktrace ringbuffer is not yet initialized.
 * Takes kbdev->ktrace.lock with IRQs disabled while reserving and filling
 * the message slot.
 */
void kbasep_ktrace_add_csf_kcpu(struct kbase_device *kbdev,
		enum kbase_ktrace_code code,
		struct kbase_kcpu_command_queue *queue,
		u64 info_val1, u64 info_val2)
{
	unsigned long irqflags;
	struct kbase_ktrace_msg *trace_msg;
	struct kbase_context *kctx = queue->kctx;
	union kbase_ktrace_backend *be;

	if (unlikely(!kbasep_ktrace_initialized(&kbdev->ktrace)))
		return;

	spin_lock_irqsave(&kbdev->ktrace.lock, irqflags);

	/* Reserve and update indices */
	trace_msg = kbasep_ktrace_reserve(&kbdev->ktrace);
	be = &trace_msg->backend;

	/* Fill the common part of the message */
	kbasep_ktrace_msg_init(&kbdev->ktrace, trace_msg, code, kctx, 0,
			info_val1);

	/* Indicate to the common code that backend-specific parts will be
	 * valid
	 */
	be->gpu.flags |= KBASE_KTRACE_FLAG_BACKEND;

	/* Fill the KCPU-specific parts of the message */
	be->kcpu.id = queue->id;
	be->kcpu.extra_info_val = info_val2;
	be->gpu.flags |= KBASE_KTRACE_FLAG_CSF_KCPU;

	WARN_ON((be->gpu.flags & ~KBASE_KTRACE_FLAG_ALL));

	/* Done */
	spin_unlock_irqrestore(&kbdev->ktrace.lock, irqflags);
}

#endif /* KBASE_KTRACE_TARGET_RBUF */