// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include <mali_kbase.h>
#include "backend/gpu/mali_kbase_pm_internal.h"
#include <csf/mali_kbase_csf_firmware_log.h>
#include <csf/mali_kbase_csf_trace_buffer.h>
#include <linux/debugfs.h>
#include <linux/string.h>
#include <linux/workqueue.h>

/*
 * ARMv7 instruction: Branch with Link calls a subroutine at a PC-relative address.
 */
#define ARMV7_T1_BL_IMM_INSTR 0xd800f000

/*
 * ARMv7 instruction: Branch with Link calls a subroutine at a PC-relative address, maximum
 * negative jump offset.
 */
#define ARMV7_T1_BL_IMM_RANGE_MIN -16777216

/*
 * ARMv7 instruction: Branch with Link calls a subroutine at a PC-relative address, maximum
 * positive jump offset.
 */
#define ARMV7_T1_BL_IMM_RANGE_MAX 16777214

/*
 * ARMv7 instruction: Double NOP instructions.
 */
#define ARMV7_DOUBLE_NOP_INSTR 0xbf00bf00
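
/*
 * Note on the encodings above: these are Thumb instructions, i.e. pairs of
 * 16-bit halfwords stored consecutively in firmware memory. When such a pair
 * is accessed as a single little-endian 32-bit word, the first halfword lands
 * in bits [15:0] and the second halfword in bits [31:16], which is why the
 * constants look "swapped" (e.g. the BL template carries its 0xf000 first
 * halfword in the low 16 bits). The BL patching code below composes the
 * sign/imm10 fields into the lower halfword and the j1/j2/imm11 fields into
 * the upper halfword accordingly.
 */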

#if defined(CONFIG_DEBUG_FS)

static int kbase_csf_firmware_log_enable_mask_read(void *data, u64 *val)
{
	struct kbase_device *kbdev = (struct kbase_device *)data;
	struct firmware_trace_buffer *tb =
		kbase_csf_firmware_get_trace_buffer(kbdev, FIRMWARE_LOG_BUF_NAME);

	if (tb == NULL) {
		dev_err(kbdev->dev, "Couldn't get the firmware trace buffer");
		return -EIO;
	}
	/* The enabled-traces mask is limited to 64 bits here, which is regarded as sufficient in practice */
	*val = kbase_csf_firmware_trace_buffer_get_active_mask64(tb);
	return 0;
}

static int kbase_csf_firmware_log_enable_mask_write(void *data, u64 val)
{
	struct kbase_device *kbdev = (struct kbase_device *)data;
	struct firmware_trace_buffer *tb =
		kbase_csf_firmware_get_trace_buffer(kbdev, FIRMWARE_LOG_BUF_NAME);
	u64 new_mask;
	unsigned int enable_bits_count;

	if (tb == NULL) {
		dev_err(kbdev->dev, "Couldn't get the firmware trace buffer");
		return -EIO;
	}

	/* Ignore unsupported types */
	enable_bits_count = kbase_csf_firmware_trace_buffer_get_trace_enable_bits_count(tb);
	if (enable_bits_count > 64) {
		dev_dbg(kbdev->dev, "Limit enabled bits count from %u to 64", enable_bits_count);
		enable_bits_count = 64;
	}
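	/*
	 * Keep only the bits that correspond to supported trace-enable bits.
	 * Purely illustrative example: with 40 enable bits the shift below
	 * yields a mask of 0xff_ffff_ffff, so only bits [39:0] of the written
	 * value are retained.
	 */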
	new_mask = val & (UINT64_MAX >> (64 - enable_bits_count));

	if (new_mask != kbase_csf_firmware_trace_buffer_get_active_mask64(tb))
		return kbase_csf_firmware_trace_buffer_set_active_mask64(tb, new_mask);
	else
		return 0;
}

static int kbasep_csf_firmware_log_debugfs_open(struct inode *in, struct file *file)
{
	struct kbase_device *kbdev = in->i_private;

	file->private_data = kbdev;
	dev_dbg(kbdev->dev, "Opened firmware trace buffer dump debugfs file");

	return 0;
}

static ssize_t kbasep_csf_firmware_log_debugfs_read(struct file *file, char __user *buf,
						    size_t size, loff_t *ppos)
{
	struct kbase_device *kbdev = file->private_data;
	struct kbase_csf_firmware_log *fw_log = &kbdev->csf.fw_log;
	unsigned int n_read;
	unsigned long not_copied;
	/* Limit reads to the kernel dump buffer size */
	size_t mem = MIN(size, FIRMWARE_LOG_DUMP_BUF_SIZE);
	int ret;

	struct firmware_trace_buffer *tb =
		kbase_csf_firmware_get_trace_buffer(kbdev, FIRMWARE_LOG_BUF_NAME);

	if (tb == NULL) {
		dev_err(kbdev->dev, "Couldn't get the firmware trace buffer");
		return -EIO;
	}

	if (atomic_cmpxchg(&fw_log->busy, 0, 1) != 0)
		return -EBUSY;

	/* Reading from userspace is only allowed in manual mode */
	if (fw_log->mode != KBASE_CSF_FIRMWARE_LOG_MODE_MANUAL) {
		ret = -EINVAL;
		goto out;
	}

	n_read = kbase_csf_firmware_trace_buffer_read_data(tb, fw_log->dump_buf, mem);

	/* Do the copy, if we have obtained some trace data */
	not_copied = (n_read) ? copy_to_user(buf, fw_log->dump_buf, n_read) : 0;

	if (not_copied) {
		dev_err(kbdev->dev, "Couldn't copy trace buffer data to user space buffer");
		ret = -EFAULT;
		goto out;
	}

	*ppos += n_read;
	ret = n_read;

out:
	atomic_set(&fw_log->busy, 0);
	return ret;
}

static int kbase_csf_firmware_log_mode_read(void *data, u64 *val)
{
	struct kbase_device *kbdev = (struct kbase_device *)data;
	struct kbase_csf_firmware_log *fw_log = &kbdev->csf.fw_log;

	*val = fw_log->mode;
	return 0;
}

static int kbase_csf_firmware_log_mode_write(void *data, u64 val)
{
	struct kbase_device *kbdev = (struct kbase_device *)data;
	struct kbase_csf_firmware_log *fw_log = &kbdev->csf.fw_log;
	int ret = 0;

	if (atomic_cmpxchg(&fw_log->busy, 0, 1) != 0)
		return -EBUSY;

	if (val == fw_log->mode)
		goto out;

	switch (val) {
	case KBASE_CSF_FIRMWARE_LOG_MODE_MANUAL:
		cancel_delayed_work_sync(&fw_log->poll_work);
		break;
	case KBASE_CSF_FIRMWARE_LOG_MODE_AUTO_PRINT:
		schedule_delayed_work(&fw_log->poll_work,
				      msecs_to_jiffies(KBASE_CSF_FIRMWARE_LOG_POLL_PERIOD_MS));
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	fw_log->mode = val;

out:
	atomic_set(&fw_log->busy, 0);
	return ret;
}

DEFINE_DEBUGFS_ATTRIBUTE(kbase_csf_firmware_log_enable_mask_fops,
			 kbase_csf_firmware_log_enable_mask_read,
			 kbase_csf_firmware_log_enable_mask_write, "%llx\n");

static const struct file_operations kbasep_csf_firmware_log_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = kbasep_csf_firmware_log_debugfs_open,
	.read = kbasep_csf_firmware_log_debugfs_read,
	.llseek = no_llseek,
};

DEFINE_DEBUGFS_ATTRIBUTE(kbase_csf_firmware_log_mode_fops, kbase_csf_firmware_log_mode_read,
			 kbase_csf_firmware_log_mode_write, "%llu\n");

#endif /* CONFIG_DEBUG_FS */

static void kbase_csf_firmware_log_poll(struct work_struct *work)
{
	struct kbase_device *kbdev =
		container_of(work, struct kbase_device, csf.fw_log.poll_work.work);
	struct kbase_csf_firmware_log *fw_log = &kbdev->csf.fw_log;

	schedule_delayed_work(&fw_log->poll_work,
			      msecs_to_jiffies(KBASE_CSF_FIRMWARE_LOG_POLL_PERIOD_MS));

	kbase_csf_firmware_log_dump_buffer(kbdev);
}

int kbase_csf_firmware_log_init(struct kbase_device *kbdev)
{
	struct kbase_csf_firmware_log *fw_log = &kbdev->csf.fw_log;

	/* Add one byte for null-termination */
	fw_log->dump_buf = kmalloc(FIRMWARE_LOG_DUMP_BUF_SIZE + 1, GFP_KERNEL);
	if (fw_log->dump_buf == NULL)
		return -ENOMEM;

	/* Ensure null-termination for all strings */
	fw_log->dump_buf[FIRMWARE_LOG_DUMP_BUF_SIZE] = 0;

	fw_log->mode = KBASE_CSF_FIRMWARE_LOG_MODE_MANUAL;

	atomic_set(&fw_log->busy, 0);
	INIT_DEFERRABLE_WORK(&fw_log->poll_work, kbase_csf_firmware_log_poll);

#if defined(CONFIG_DEBUG_FS)
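	/*
	 * The files created below live in the kbase debugfs directory
	 * (typically something like /sys/kernel/debug/mali0/, depending on
	 * the device instance). Illustrative usage:
	 *   echo 0x3 > fw_trace_enable_mask   - enable the first two trace types
	 *   echo <mode> > fw_trace_mode       - a KBASE_CSF_FIRMWARE_LOG_MODE_* value
	 *   cat fw_traces                     - dump traces (manual mode only)
	 */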
	debugfs_create_file("fw_trace_enable_mask", 0644, kbdev->mali_debugfs_directory, kbdev,
			    &kbase_csf_firmware_log_enable_mask_fops);
	debugfs_create_file("fw_traces", 0444, kbdev->mali_debugfs_directory, kbdev,
			    &kbasep_csf_firmware_log_debugfs_fops);
	debugfs_create_file("fw_trace_mode", 0644, kbdev->mali_debugfs_directory, kbdev,
			    &kbase_csf_firmware_log_mode_fops);
#endif /* CONFIG_DEBUG_FS */

	return 0;
}

void kbase_csf_firmware_log_term(struct kbase_device *kbdev)
{
	struct kbase_csf_firmware_log *fw_log = &kbdev->csf.fw_log;

	if (fw_log->dump_buf) {
		cancel_delayed_work_sync(&fw_log->poll_work);
		kfree(fw_log->dump_buf);
		fw_log->dump_buf = NULL;
	}
}

void kbase_csf_firmware_log_dump_buffer(struct kbase_device *kbdev)
{
	struct kbase_csf_firmware_log *fw_log = &kbdev->csf.fw_log;
	u8 *buf = fw_log->dump_buf, *p, *pnewline, *pend, *pendbuf;
	unsigned int read_size, remaining_size;
	struct firmware_trace_buffer *tb =
		kbase_csf_firmware_get_trace_buffer(kbdev, FIRMWARE_LOG_BUF_NAME);

	if (tb == NULL) {
		dev_dbg(kbdev->dev, "Can't get the trace buffer, firmware trace dump skipped");
		return;
	}

	if (atomic_cmpxchg(&fw_log->busy, 0, 1) != 0)
		return;

	/* FW should only print complete messages, so there's no need to handle
	 * partial messages over multiple invocations of this function
	 */

	p = buf;
	pendbuf = &buf[FIRMWARE_LOG_DUMP_BUF_SIZE];

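	/*
	 * Read the trace buffer in chunks into the dump buffer: split each
	 * chunk on '\n', print every complete line with a "FW> " prefix, and
	 * carry any trailing partial line over to the start of the buffer so
	 * the next chunk can complete it.
	 */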
	while ((read_size = kbase_csf_firmware_trace_buffer_read_data(tb, p, pendbuf - p))) {
		pend = p + read_size;
		p = buf;

		while (p < pend && (pnewline = memchr(p, '\n', pend - p))) {
			/* Null-terminate the string */
			*pnewline = 0;

			dev_err(kbdev->dev, "FW> %s", p);

			p = pnewline + 1;
		}

		remaining_size = pend - p;

		if (!remaining_size) {
			p = buf;
		} else if (remaining_size < FIRMWARE_LOG_DUMP_BUF_SIZE) {
			/* Copy unfinished string to the start of the buffer */
			memmove(buf, p, remaining_size);
			p = &buf[remaining_size];
		} else {
			/* Print abnormally long string without newlines */
			dev_err(kbdev->dev, "FW> %s", buf);
			p = buf;
		}
	}

	if (p != buf) {
		/* Null-terminate and print last unfinished string */
		*p = 0;
		dev_err(kbdev->dev, "FW> %s", buf);
	}

	atomic_set(&fw_log->busy, 0);
}

void kbase_csf_firmware_log_parse_logging_call_list_entry(struct kbase_device *kbdev,
							   const uint32_t *entry)
{
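	/* entry[0] and entry[1] are expected to hold the firmware virtual
	 * addresses of the start and (exclusive) end of the list of logging
	 * function call sites, as consumed by the patching code below.
	 */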
	kbdev->csf.fw_log.func_call_list_va_start = entry[0];
	kbdev->csf.fw_log.func_call_list_va_end = entry[1];
}

/**
 * toggle_logging_calls_in_loaded_image - Toggles FW log func calls in loaded FW image.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 * @enable: Whether to enable or disable the function calls.
 */
static void toggle_logging_calls_in_loaded_image(struct kbase_device *kbdev, bool enable)
{
	uint32_t bl_instruction, diff;
	uint32_t imm11, imm10, i1, i2, j1, j2, sign;
	uint32_t calling_address = 0, callee_address = 0;
	uint32_t list_entry = kbdev->csf.fw_log.func_call_list_va_start;
	const uint32_t list_va_end = kbdev->csf.fw_log.func_call_list_va_end;

	if (list_entry == 0 || list_va_end == 0)
		return;

	if (enable) {
		for (; list_entry < list_va_end; list_entry += 2 * sizeof(uint32_t)) {
			/* Read calling address */
			kbase_csf_read_firmware_memory(kbdev, list_entry, &calling_address);
			/* Read callee address */
			kbase_csf_read_firmware_memory(kbdev, list_entry + sizeof(uint32_t),
						       &callee_address);

			diff = callee_address - calling_address - 4;
			sign = !!(diff & 0x80000000);
			if (ARMV7_T1_BL_IMM_RANGE_MIN > (int32_t)diff ||
			    ARMV7_T1_BL_IMM_RANGE_MAX < (int32_t)diff) {
				dev_warn(kbdev->dev, "FW log patch 0x%x out of range, skipping",
					 calling_address);
				continue;
			}

			i1 = (diff & 0x00800000) >> 23;
			j1 = !i1 ^ sign;
			i2 = (diff & 0x00400000) >> 22;
			j2 = !i2 ^ sign;
			imm11 = (diff & 0xffe) >> 1;
			imm10 = (diff & 0x3ff000) >> 12;

			/* Compose BL instruction */
			bl_instruction = ARMV7_T1_BL_IMM_INSTR;
			bl_instruction |= j1 << 29;
			bl_instruction |= j2 << 27;
			bl_instruction |= imm11 << 16;
			bl_instruction |= sign << 10;
			bl_instruction |= imm10;

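			/*
			 * Worked example (illustrative values, not from a real
			 * image): calling_address 0x1000 and callee_address
			 * 0x2000 give diff = 0xffc, sign = 0, j1 = j2 = 1,
			 * imm11 = 0x7fe, imm10 = 0, so bl_instruction becomes
			 * 0xfffef000, i.e. halfwords 0xf000/0xfffe: a Thumb BL
			 * branching forward by 0xffc bytes from the PC.
			 */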
			/* Patch logging func calls in their load location */
			dev_dbg(kbdev->dev, "FW log patch 0x%x: 0x%x\n", calling_address,
				bl_instruction);
			kbase_csf_update_firmware_memory_exe(kbdev, calling_address,
							     bl_instruction);
		}
	} else {
		for (; list_entry < list_va_end; list_entry += 2 * sizeof(uint32_t)) {
			/* Read calling address */
			kbase_csf_read_firmware_memory(kbdev, list_entry, &calling_address);

			/* Overwrite logging func calls with 2 NOP instructions */
			kbase_csf_update_firmware_memory_exe(kbdev, calling_address,
							     ARMV7_DOUBLE_NOP_INSTR);
		}
	}
}

int kbase_csf_firmware_log_toggle_logging_calls(struct kbase_device *kbdev, u32 val)
{
	unsigned long flags;
	struct kbase_csf_firmware_log *fw_log = &kbdev->csf.fw_log;
	bool mcu_inactive;
	bool resume_needed = false;
	int ret = 0;
	struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler;

	if (atomic_cmpxchg(&fw_log->busy, 0, 1) != 0)
		return -EBUSY;

	/* Suspend all the active CS groups */
	dev_dbg(kbdev->dev, "Suspend all the active CS groups");

	kbase_csf_scheduler_lock(kbdev);
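	/*
	 * Repeatedly drop the scheduler lock, request a scheduler suspend,
	 * and re-check the state with the lock held, until the scheduler is
	 * observed in the suspended state.
	 */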
	while (scheduler->state != SCHED_SUSPENDED) {
		kbase_csf_scheduler_unlock(kbdev);
		kbase_csf_scheduler_pm_suspend(kbdev);
		kbase_csf_scheduler_lock(kbdev);
		resume_needed = true;
	}

	/* Wait for the MCU to get disabled */
	dev_info(kbdev->dev, "Wait for the MCU to get disabled");
	ret = kbase_pm_wait_for_desired_state(kbdev);
	if (ret) {
		dev_err(kbdev->dev,
			"wait for PM state failed when toggling FW logging calls");
		ret = -EAGAIN;
		goto out;
	}

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	mcu_inactive =
		kbase_pm_is_mcu_inactive(kbdev, kbdev->pm.backend.mcu_state);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
	if (!mcu_inactive) {
		dev_err(kbdev->dev,
			"MCU not inactive after PM state wait when toggling FW logging calls");
		ret = -EAGAIN;
		goto out;
	}

	/* Toggle FW logging call in the loaded FW image */
	toggle_logging_calls_in_loaded_image(kbdev, val);
	dev_dbg(kbdev->dev, "FW logging: %s", val ? "enabled" : "disabled");

out:
	kbase_csf_scheduler_unlock(kbdev);
	if (resume_needed)
		/* Resume queue groups and start mcu */
		kbase_csf_scheduler_pm_resume(kbdev);
	atomic_set(&fw_log->busy, 0);
	return ret;
}