// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include <mali_kbase.h>
#include <linux/slab.h>
#include <csf/mali_kbase_csf_registers.h>
#include <csf/mali_kbase_csf_firmware.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <linux/mali_kbase_debug_coresight_csf.h>
#include <debug/backend/mali_kbase_debug_coresight_internal_csf.h>

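/*
 * Implementation of the kbase CoreSight CSF interface.
 *
 * An external CoreSight driver registers as a client with the MCU address
 * ranges it owns, then creates configurations made up of an enable and a
 * disable operation sequence. Active configurations are replayed against
 * the MCU as it moves between power states.
 *
 * A minimal usage sketch (hypothetical client code; the range values are
 * placeholders and error handling is omitted):
 *
 *	struct kbase_debug_coresight_csf_address_range range = {
 *		.start = 0xE0000000, .end = 0xE0000FFC,
 *	};
 *	void *client = kbase_debug_coresight_csf_register(kbdev, &range, 1);
 *	void *config = kbase_debug_coresight_csf_config_create(client,
 *			&enable_seq, &disable_seq);
 *
 *	kbase_debug_coresight_csf_config_enable(config);
 *	// ... trace session ...
 *	kbase_debug_coresight_csf_config_disable(config);
 *	kbase_debug_coresight_csf_config_free(config);
 *	kbase_debug_coresight_csf_unregister(client);
 */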
static const char *coresight_state_to_string(enum kbase_debug_coresight_csf_state state)
{
	switch (state) {
	case KBASE_DEBUG_CORESIGHT_CSF_DISABLED:
		return "DISABLED";
	case KBASE_DEBUG_CORESIGHT_CSF_ENABLED:
		return "ENABLED";
	default:
		break;
	}

	return "UNKNOWN";
}

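/*
 * validate_reg_addr() - Validate a register address for an operation.
 *
 * Return: true if @reg_addr is 32-bit aligned and lies within one of the
 * address ranges registered by @client, false otherwise.
 */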
static bool validate_reg_addr(struct kbase_debug_coresight_csf_client *client,
			      struct kbase_device *kbdev, u32 reg_addr, u8 op_type)
{
	int i;

	if (reg_addr & 0x3) {
		dev_err(kbdev->dev, "Invalid operation %d: reg_addr (0x%x) not 32bit aligned",
			op_type, reg_addr);
		return false;
	}

	for (i = 0; i < client->nr_ranges; i++) {
		struct kbase_debug_coresight_csf_address_range *range = &client->addr_ranges[i];

		if ((range->start <= reg_addr) && (reg_addr <= range->end))
			return true;
	}

	dev_err(kbdev->dev, "Invalid operation %d: reg_addr (0x%x) not in client range", op_type,
		reg_addr);

	return false;
}

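/*
 * validate_op() - Validate a single CoreSight operation.
 *
 * Checks that the operation type is known, that any register address it
 * touches passes validate_reg_addr(), and that pointer-based operations
 * carry a non-NULL pointer.
 */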
static bool validate_op(struct kbase_debug_coresight_csf_client *client,
			struct kbase_debug_coresight_csf_op *op)
{
	struct kbase_device *kbdev;
	u32 reg;

	if (!op)
		return false;

	if (!client)
		return false;

	kbdev = (struct kbase_device *)client->drv_data;

	switch (op->type) {
	case KBASE_DEBUG_CORESIGHT_CSF_OP_TYPE_NOP:
		return true;
	case KBASE_DEBUG_CORESIGHT_CSF_OP_TYPE_WRITE_IMM:
		if (validate_reg_addr(client, kbdev, op->op.write_imm.reg_addr, op->type))
			return true;

		break;
	case KBASE_DEBUG_CORESIGHT_CSF_OP_TYPE_WRITE_IMM_RANGE:
		for (reg = op->op.write_imm_range.reg_start; reg <= op->op.write_imm_range.reg_end;
		     reg += sizeof(u32)) {
			if (!validate_reg_addr(client, kbdev, reg, op->type))
				return false;
		}

		return true;
	case KBASE_DEBUG_CORESIGHT_CSF_OP_TYPE_WRITE:
		if (!op->op.write.ptr) {
			dev_err(kbdev->dev, "Invalid operation %d: ptr not set", op->type);
			break;
		}

		if (validate_reg_addr(client, kbdev, op->op.write.reg_addr, op->type))
			return true;

		break;
	case KBASE_DEBUG_CORESIGHT_CSF_OP_TYPE_READ:
		if (!op->op.read.ptr) {
			dev_err(kbdev->dev, "Invalid operation %d: ptr not set", op->type);
			break;
		}

		if (validate_reg_addr(client, kbdev, op->op.read.reg_addr, op->type))
			return true;

		break;
	case KBASE_DEBUG_CORESIGHT_CSF_OP_TYPE_POLL:
		if (validate_reg_addr(client, kbdev, op->op.poll.reg_addr, op->type))
			return true;

		break;
	case KBASE_DEBUG_CORESIGHT_CSF_OP_TYPE_BIT_AND:
		fallthrough;
	case KBASE_DEBUG_CORESIGHT_CSF_OP_TYPE_BIT_OR:
		fallthrough;
	case KBASE_DEBUG_CORESIGHT_CSF_OP_TYPE_BIT_XOR:
		fallthrough;
	case KBASE_DEBUG_CORESIGHT_CSF_OP_TYPE_BIT_NOT:
		if (op->op.bitw.ptr != NULL)
			return true;

		dev_err(kbdev->dev, "Invalid bitwise operation pointer");

		break;
	default:
		dev_err(kbdev->dev, "Invalid operation %d", op->type);
		break;
	}

	return false;
}

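/*
 * validate_seq() - Validate every operation in a sequence.
 */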
static bool validate_seq(struct kbase_debug_coresight_csf_client *client,
			 struct kbase_debug_coresight_csf_sequence *seq)
{
	struct kbase_debug_coresight_csf_op *ops = seq->ops;
	int nr_ops = seq->nr_ops;
	int i;

	for (i = 0; i < nr_ops; i++) {
		if (!validate_op(client, &ops[i]))
			return false;
	}

	return true;
}

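/*
 * execute_op() - Execute a single CoreSight operation against the MCU.
 *
 * Register accesses go through the CSF firmware MCU register interface;
 * bitwise operations act on the client-supplied pointer.
 *
 * Return: 0 on success, or a negative error code.
 */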
static int execute_op(struct kbase_device *kbdev, struct kbase_debug_coresight_csf_op *op)
{
	int result = -EINVAL;
	u32 reg;

	dev_dbg(kbdev->dev, "Execute operation %d", op->type);

	switch (op->type) {
	case KBASE_DEBUG_CORESIGHT_CSF_OP_TYPE_NOP:
		result = 0;
		break;
	case KBASE_DEBUG_CORESIGHT_CSF_OP_TYPE_WRITE_IMM:
		result = kbase_csf_firmware_mcu_register_write(kbdev, op->op.write_imm.reg_addr,
							       op->op.write_imm.val);
		break;
	case KBASE_DEBUG_CORESIGHT_CSF_OP_TYPE_WRITE_IMM_RANGE:
		for (reg = op->op.write_imm_range.reg_start; reg <= op->op.write_imm_range.reg_end;
		     reg += sizeof(u32)) {
			result = kbase_csf_firmware_mcu_register_write(kbdev, reg,
								       op->op.write_imm_range.val);
			/* Stop on the first failed write */
			if (result)
				break;
		}
		break;
	case KBASE_DEBUG_CORESIGHT_CSF_OP_TYPE_WRITE:
		result = kbase_csf_firmware_mcu_register_write(kbdev, op->op.write.reg_addr,
							       *op->op.write.ptr);
		break;
	case KBASE_DEBUG_CORESIGHT_CSF_OP_TYPE_READ:
		result = kbase_csf_firmware_mcu_register_read(kbdev, op->op.read.reg_addr,
							      op->op.read.ptr);
		break;
	case KBASE_DEBUG_CORESIGHT_CSF_OP_TYPE_POLL:
		result = kbase_csf_firmware_mcu_register_poll(kbdev, op->op.poll.reg_addr,
							      op->op.poll.mask, op->op.poll.val);
		break;
	case KBASE_DEBUG_CORESIGHT_CSF_OP_TYPE_BIT_AND:
		*op->op.bitw.ptr &= op->op.bitw.val;
		result = 0;
		break;
	case KBASE_DEBUG_CORESIGHT_CSF_OP_TYPE_BIT_OR:
		*op->op.bitw.ptr |= op->op.bitw.val;
		result = 0;
		break;
	case KBASE_DEBUG_CORESIGHT_CSF_OP_TYPE_BIT_XOR:
		*op->op.bitw.ptr ^= op->op.bitw.val;
		result = 0;
		break;
	case KBASE_DEBUG_CORESIGHT_CSF_OP_TYPE_BIT_NOT:
		*op->op.bitw.ptr = ~(*op->op.bitw.ptr);
		result = 0;
		break;
	default:
		dev_err(kbdev->dev, "Invalid operation %d", op->type);
		break;
	}

	return result;
}

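/*
 * coresight_config_enable() - Run a config's enable sequence.
 *
 * Executes the operations of the enable sequence in order, stopping at the
 * first failure, and moves the config to the ENABLED state on success. The
 * result is also stored in config->error so it can be reported back when
 * the config is disabled.
 */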
static int coresight_config_enable(struct kbase_device *kbdev,
				   struct kbase_debug_coresight_csf_config *config)
{
	int ret = 0;
	int i;

	if (!config)
		return -EINVAL;

	if (config->state == KBASE_DEBUG_CORESIGHT_CSF_ENABLED)
		return ret;

	for (i = 0; config->enable_seq && !ret && i < config->enable_seq->nr_ops; i++)
		ret = execute_op(kbdev, &config->enable_seq->ops[i]);

	if (!ret) {
		dev_dbg(kbdev->dev, "Coresight config (0x%pK) state transition: %s to %s", config,
			coresight_state_to_string(config->state),
			coresight_state_to_string(KBASE_DEBUG_CORESIGHT_CSF_ENABLED));
		config->state = KBASE_DEBUG_CORESIGHT_CSF_ENABLED;
	}

	/* Always assign the return code during config enable.
	 * It gets propagated when calling config disable.
	 */
	config->error = ret;

	return ret;
}

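/*
 * coresight_config_disable() - Run a config's disable sequence.
 *
 * Executes the operations of the disable sequence in order and moves the
 * config to the DISABLED state on success. On failure, the error is stored
 * in config->error unless an earlier enable error is already recorded.
 */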
static int coresight_config_disable(struct kbase_device *kbdev,
				    struct kbase_debug_coresight_csf_config *config)
{
	int ret = 0;
	int i;

	if (!config)
		return -EINVAL;

	if (config->state == KBASE_DEBUG_CORESIGHT_CSF_DISABLED)
		return ret;

	for (i = 0; config->disable_seq && !ret && i < config->disable_seq->nr_ops; i++)
		ret = execute_op(kbdev, &config->disable_seq->ops[i]);

	if (!ret) {
		dev_dbg(kbdev->dev, "Coresight config (0x%pK) state transition: %s to %s", config,
			coresight_state_to_string(config->state),
			coresight_state_to_string(KBASE_DEBUG_CORESIGHT_CSF_DISABLED));
		config->state = KBASE_DEBUG_CORESIGHT_CSF_DISABLED;
	} else {
		/* Only record the error if none has been recorded yet, so an
		 * error from the enable sequence is not overwritten.
		 */
		if (!config->error)
			config->error = ret;
	}

	return ret;
}

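/*
 * Register a CoreSight client. The requested address ranges must be valid
 * and must not overlap with any range held by an already registered client.
 * Note that @ranges is referenced, not copied, so it must outlive the client.
 */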
void *kbase_debug_coresight_csf_register(void *drv_data,
					 struct kbase_debug_coresight_csf_address_range *ranges,
					 int nr_ranges)
{
	struct kbase_debug_coresight_csf_client *client, *client_entry;
	struct kbase_device *kbdev;
	unsigned long flags;
	int k;

	if (unlikely(!drv_data)) {
		pr_err("NULL drv_data");
		return NULL;
	}

	kbdev = (struct kbase_device *)drv_data;

	if (unlikely(!ranges)) {
		dev_err(kbdev->dev, "NULL ranges");
		return NULL;
	}

	if (unlikely(!nr_ranges)) {
		dev_err(kbdev->dev, "nr_ranges is 0");
		return NULL;
	}

	for (k = 0; k < nr_ranges; k++) {
		if (ranges[k].end < ranges[k].start) {
			dev_err(kbdev->dev, "Invalid address range 0x%08x - 0x%08x",
				ranges[k].start, ranges[k].end);
			return NULL;
		}
	}

	client = kzalloc(sizeof(struct kbase_debug_coresight_csf_client), GFP_KERNEL);

	if (!client)
		return NULL;

	spin_lock_irqsave(&kbdev->csf.coresight.lock, flags);
	list_for_each_entry(client_entry, &kbdev->csf.coresight.clients, link) {
		struct kbase_debug_coresight_csf_address_range *client_ranges =
			client_entry->addr_ranges;
		int i;

		for (i = 0; i < client_entry->nr_ranges; i++) {
			int j;

			for (j = 0; j < nr_ranges; j++) {
				if ((ranges[j].start < client_ranges[i].end) &&
				    (client_ranges[i].start < ranges[j].end)) {
					spin_unlock_irqrestore(&kbdev->csf.coresight.lock, flags);
					kfree(client);
					dev_err(kbdev->dev,
						"Requested range 0x%08x - 0x%08x overlaps registered client range 0x%08x - 0x%08x",
						ranges[j].start, ranges[j].end,
						client_ranges[i].start, client_ranges[i].end);

					return NULL;
				}
			}
		}
	}

	client->drv_data = drv_data;
	client->addr_ranges = ranges;
	client->nr_ranges = nr_ranges;
	list_add(&client->link, &kbdev->csf.coresight.clients);
	spin_unlock_irqrestore(&kbdev->csf.coresight.lock, flags);

	return client;
}
EXPORT_SYMBOL(kbase_debug_coresight_csf_register);

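/*
 * Unregister a CoreSight client, freeing any of its configs that are still
 * active. Config freeing is done outside the coresight lock, so the config
 * list walk is restarted after each removal.
 */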
void kbase_debug_coresight_csf_unregister(void *client_data)
{
	struct kbase_debug_coresight_csf_client *client;
	struct kbase_debug_coresight_csf_config *config_entry;
	struct kbase_device *kbdev;
	unsigned long flags;
	bool retry = true;

	if (unlikely(!client_data)) {
		pr_err("NULL client");
		return;
	}

	client = (struct kbase_debug_coresight_csf_client *)client_data;

	kbdev = (struct kbase_device *)client->drv_data;
	if (unlikely(!kbdev)) {
		pr_err("NULL drv_data in client");
		return;
	}

	/* Free any active configs still owned by this client */
	spin_lock_irqsave(&kbdev->csf.coresight.lock, flags);
	list_del_init(&client->link);

	while (retry && !list_empty(&kbdev->csf.coresight.configs)) {
		retry = false;
		list_for_each_entry(config_entry, &kbdev->csf.coresight.configs, link) {
			if (config_entry->client == client) {
				spin_unlock_irqrestore(&kbdev->csf.coresight.lock, flags);
				kbase_debug_coresight_csf_config_free(config_entry);
				spin_lock_irqsave(&kbdev->csf.coresight.lock, flags);
				retry = true;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&kbdev->csf.coresight.lock, flags);

	kfree(client);
}
EXPORT_SYMBOL(kbase_debug_coresight_csf_unregister);

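/*
 * Create a config from optional enable and disable sequences. Both sequences
 * are validated against the client's registered address ranges before any
 * memory is allocated.
 */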
void *
kbase_debug_coresight_csf_config_create(void *client_data,
					struct kbase_debug_coresight_csf_sequence *enable_seq,
					struct kbase_debug_coresight_csf_sequence *disable_seq)
{
	struct kbase_debug_coresight_csf_client *client;
	struct kbase_debug_coresight_csf_config *config;
	struct kbase_device *kbdev;

	if (unlikely(!client_data)) {
		pr_err("NULL client");
		return NULL;
	}

	client = (struct kbase_debug_coresight_csf_client *)client_data;

	kbdev = (struct kbase_device *)client->drv_data;
	if (unlikely(!kbdev)) {
		pr_err("NULL drv_data in client");
		return NULL;
	}

	if (enable_seq) {
		if (!validate_seq(client, enable_seq)) {
			dev_err(kbdev->dev, "Invalid enable_seq");
			return NULL;
		}
	}

	if (disable_seq) {
		if (!validate_seq(client, disable_seq)) {
			dev_err(kbdev->dev, "Invalid disable_seq");
			return NULL;
		}
	}

	config = kzalloc(sizeof(struct kbase_debug_coresight_csf_config), GFP_KERNEL);
	if (WARN_ON(!config))
		return NULL;

	config->client = client;
	config->enable_seq = enable_seq;
	config->disable_seq = disable_seq;
	config->error = 0;
	config->state = KBASE_DEBUG_CORESIGHT_CSF_DISABLED;

	INIT_LIST_HEAD(&config->link);

	return config;
}
EXPORT_SYMBOL(kbase_debug_coresight_csf_config_create);

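/*
 * Free a config, disabling it first if it is still enabled.
 */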
void kbase_debug_coresight_csf_config_free(void *config_data)
{
	struct kbase_debug_coresight_csf_config *config;

	if (unlikely(!config_data)) {
		pr_err("NULL config");
		return;
	}

	config = (struct kbase_debug_coresight_csf_config *)config_data;

	kbase_debug_coresight_csf_config_disable(config);

	kfree(config);
}
EXPORT_SYMBOL(kbase_debug_coresight_csf_config_free);

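/*
 * Enable a config. If the MCU is currently powered (the scheduler is neither
 * suspended nor sleeping and not in protected mode, or the always-on power
 * policy is active), the enable sequence is executed immediately; otherwise
 * it runs when the MCU is next powered up. On success the config is added to
 * the list of active configs.
 */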
int kbase_debug_coresight_csf_config_enable(void *config_data)
{
	struct kbase_debug_coresight_csf_config *config;
	struct kbase_debug_coresight_csf_client *client;
	struct kbase_device *kbdev;
	struct kbase_debug_coresight_csf_config *config_entry;
	unsigned long flags;
	int ret = 0;

	if (unlikely(!config_data)) {
		pr_err("NULL config");
		return -EINVAL;
	}

	config = (struct kbase_debug_coresight_csf_config *)config_data;
	client = (struct kbase_debug_coresight_csf_client *)config->client;

	if (unlikely(!client)) {
		pr_err("NULL client in config");
		return -EINVAL;
	}

	kbdev = (struct kbase_device *)client->drv_data;
	if (unlikely(!kbdev)) {
		pr_err("NULL drv_data in client");
		return -EINVAL;
	}

	/* Check to prevent double enabling of the same config */
	spin_lock_irqsave(&kbdev->csf.coresight.lock, flags);
	list_for_each_entry(config_entry, &kbdev->csf.coresight.configs, link) {
		if (config_entry == config) {
			spin_unlock_irqrestore(&kbdev->csf.coresight.lock, flags);
			dev_err(kbdev->dev, "Config already enabled");
			return -EINVAL;
		}
	}
	spin_unlock_irqrestore(&kbdev->csf.coresight.lock, flags);

	kbase_csf_scheduler_lock(kbdev);
	kbase_csf_scheduler_spin_lock(kbdev, &flags);

	/* Check the state of the Scheduler to infer the desired state of the MCU */
	if (((kbdev->csf.scheduler.state != SCHED_SUSPENDED) &&
	     (kbdev->csf.scheduler.state != SCHED_SLEEPING) &&
	     !kbase_csf_scheduler_protected_mode_in_use(kbdev)) ||
	    kbase_pm_get_policy(kbdev) == &kbase_pm_always_on_policy_ops) {
		kbase_csf_scheduler_spin_unlock(kbdev, flags);
		/* Wait for the MCU to reach a stable ON state */
		ret = kbase_pm_wait_for_desired_state(kbdev);

		if (ret)
			dev_err(kbdev->dev,
				"Wait for PM state failed when enabling coresight config");
		else
			ret = coresight_config_enable(kbdev, config);

		kbase_csf_scheduler_spin_lock(kbdev, &flags);
	}

	/* Add the config to the list replayed on the next enable sequence */
	if (!ret) {
		spin_lock(&kbdev->csf.coresight.lock);
		list_add(&config->link, &kbdev->csf.coresight.configs);
		spin_unlock(&kbdev->csf.coresight.lock);
	}

	kbase_csf_scheduler_spin_unlock(kbdev, flags);
	kbase_csf_scheduler_unlock(kbdev);

	return ret;
}
EXPORT_SYMBOL(kbase_debug_coresight_csf_config_enable);

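/*
 * Disable a config. If the MCU is currently powered, the disable sequence is
 * executed immediately; if the MCU is already off, the disable sequence has
 * already run as part of the MCU halt, and only the recorded error is
 * propagated. The config is removed from the list of active configs.
 */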
int kbase_debug_coresight_csf_config_disable(void *config_data)
{
	struct kbase_debug_coresight_csf_config *config;
	struct kbase_debug_coresight_csf_client *client;
	struct kbase_device *kbdev;
	struct kbase_debug_coresight_csf_config *config_entry;
	bool found_in_list = false;
	unsigned long flags;
	int ret = 0;

	if (unlikely(!config_data)) {
		pr_err("NULL config");
		return -EINVAL;
	}

	config = (struct kbase_debug_coresight_csf_config *)config_data;

	/* Exit early if the config was never enabled */
	if (list_empty(&config->link))
		return ret;

	client = (struct kbase_debug_coresight_csf_client *)config->client;

	if (unlikely(!client)) {
		pr_err("NULL client in config");
		return -EINVAL;
	}

	kbdev = (struct kbase_device *)client->drv_data;
	if (unlikely(!kbdev)) {
		pr_err("NULL drv_data in client");
		return -EINVAL;
	}

	/* Check that the config is on the list of active configs */
	spin_lock_irqsave(&kbdev->csf.coresight.lock, flags);
	list_for_each_entry(config_entry, &kbdev->csf.coresight.configs, link) {
		if (config_entry == config) {
			found_in_list = true;
			break;
		}
	}
	spin_unlock_irqrestore(&kbdev->csf.coresight.lock, flags);

	if (!found_in_list) {
		dev_err(kbdev->dev, "Config looks corrupted");
		return -EINVAL;
	}

	kbase_csf_scheduler_lock(kbdev);
	kbase_csf_scheduler_spin_lock(kbdev, &flags);

	/* Check the state of the Scheduler to infer the desired state of the MCU */
	if (((kbdev->csf.scheduler.state != SCHED_SUSPENDED) &&
	     (kbdev->csf.scheduler.state != SCHED_SLEEPING) &&
	     !kbase_csf_scheduler_protected_mode_in_use(kbdev)) ||
	    kbase_pm_get_policy(kbdev) == &kbase_pm_always_on_policy_ops) {
		kbase_csf_scheduler_spin_unlock(kbdev, flags);
		/* Wait for the MCU to reach a stable ON state */
		ret = kbase_pm_wait_for_desired_state(kbdev);

		if (ret)
			dev_err(kbdev->dev,
				"Wait for PM state failed when disabling coresight config");
		else
			ret = coresight_config_disable(kbdev, config);

		kbase_csf_scheduler_spin_lock(kbdev, &flags);
	} else if (kbdev->pm.backend.mcu_state == KBASE_MCU_OFF) {
		/* The MCU is OFF, so the disable sequence was already executed.
		 *
		 * Propagate any error that occurred during the enable or
		 * disable sequence.
		 *
		 * This is done as part of the disable call, since the call
		 * from the client is synchronous.
		 */
		ret = config->error;
	}

	/* Remove the config from the list replayed on the next disable sequence */
	spin_lock(&kbdev->csf.coresight.lock);
	list_del_init(&config->link);
	spin_unlock(&kbdev->csf.coresight.lock);

	kbase_csf_scheduler_spin_unlock(kbdev, flags);
	kbase_csf_scheduler_unlock(kbdev);

	return ret;
}
EXPORT_SYMBOL(kbase_debug_coresight_csf_config_disable);

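/*
 * Workqueue handler that runs the enable sequence of every active config,
 * then kicks the PM state machine and wakes any state waiters.
 */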
static void coresight_config_enable_all(struct work_struct *data)
{
	struct kbase_device *kbdev =
		container_of(data, struct kbase_device, csf.coresight.enable_work);
	struct kbase_debug_coresight_csf_config *config_entry;
	unsigned long flags;

	spin_lock_irqsave(&kbdev->csf.coresight.lock, flags);

	list_for_each_entry(config_entry, &kbdev->csf.coresight.configs, link) {
		spin_unlock_irqrestore(&kbdev->csf.coresight.lock, flags);
		if (coresight_config_enable(kbdev, config_entry))
			dev_err(kbdev->dev, "enable config (0x%pK) failed", config_entry);
		spin_lock_irqsave(&kbdev->csf.coresight.lock, flags);
	}

	spin_unlock_irqrestore(&kbdev->csf.coresight.lock, flags);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbase_pm_update_state(kbdev);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	wake_up_all(&kbdev->csf.coresight.event_wait);
}

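/*
 * Workqueue handler that runs the disable sequence of every active config,
 * then kicks the PM state machine and wakes any state waiters.
 */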
static void coresight_config_disable_all(struct work_struct *data)
{
	struct kbase_device *kbdev =
		container_of(data, struct kbase_device, csf.coresight.disable_work);
	struct kbase_debug_coresight_csf_config *config_entry;
	unsigned long flags;

	spin_lock_irqsave(&kbdev->csf.coresight.lock, flags);

	list_for_each_entry(config_entry, &kbdev->csf.coresight.configs, link) {
		spin_unlock_irqrestore(&kbdev->csf.coresight.lock, flags);
		if (coresight_config_disable(kbdev, config_entry))
			dev_err(kbdev->dev, "disable config (0x%pK) failed", config_entry);
		spin_lock_irqsave(&kbdev->csf.coresight.lock, flags);
	}

	spin_unlock_irqrestore(&kbdev->csf.coresight.lock, flags);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbase_pm_update_state(kbdev);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	wake_up_all(&kbdev->csf.coresight.event_wait);
}

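/*
 * Request that CoreSight be disabled before the GPU enters protected mode,
 * and wait for the resulting power state transition to complete.
 */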
void kbase_debug_coresight_csf_disable_pmode_enter(struct kbase_device *kbdev)
{
	unsigned long flags;

	dev_dbg(kbdev->dev, "Coresight state %s before protected mode enter",
		coresight_state_to_string(KBASE_DEBUG_CORESIGHT_CSF_ENABLED));

	lockdep_assert_held(&kbdev->csf.scheduler.lock);

	kbase_pm_lock(kbdev);
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	kbdev->csf.coresight.disable_on_pmode_enter = true;
	kbdev->csf.coresight.enable_on_pmode_exit = false;
	kbase_pm_update_state(kbdev);

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	kbase_pm_wait_for_desired_state(kbdev);

	kbase_pm_unlock(kbdev);
}

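/*
 * Request that CoreSight be re-enabled once the GPU has exited protected
 * mode. Called with the hwaccess lock held; the actual enable happens from
 * the PM state machine.
 */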
void kbase_debug_coresight_csf_enable_pmode_exit(struct kbase_device *kbdev)
{
	dev_dbg(kbdev->dev, "Coresight state %s after protected mode exit",
		coresight_state_to_string(KBASE_DEBUG_CORESIGHT_CSF_DISABLED));

	lockdep_assert_held(&kbdev->hwaccess_lock);

	WARN_ON(kbdev->csf.coresight.disable_on_pmode_enter);

	kbdev->csf.coresight.enable_on_pmode_exit = true;
	kbase_pm_update_state(kbdev);
}

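/*
 * Queue work to move all active configs to the requested state.
 */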
void kbase_debug_coresight_csf_state_request(struct kbase_device *kbdev,
					     enum kbase_debug_coresight_csf_state state)
{
	if (unlikely(!kbdev))
		return;

	if (unlikely(!kbdev->csf.coresight.workq))
		return;

	dev_dbg(kbdev->dev, "Coresight state %s requested", coresight_state_to_string(state));

	switch (state) {
	case KBASE_DEBUG_CORESIGHT_CSF_DISABLED:
		queue_work(kbdev->csf.coresight.workq, &kbdev->csf.coresight.disable_work);
		break;
	case KBASE_DEBUG_CORESIGHT_CSF_ENABLED:
		queue_work(kbdev->csf.coresight.workq, &kbdev->csf.coresight.enable_work);
		break;
	default:
		dev_err(kbdev->dev, "Invalid Coresight state %d", state);
		break;
	}
}

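/*
 * Check whether every active config is in the given state.
 */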
bool kbase_debug_coresight_csf_state_check(struct kbase_device *kbdev,
					   enum kbase_debug_coresight_csf_state state)
{
	struct kbase_debug_coresight_csf_config *config_entry;
	unsigned long flags;
	bool success = true;

	dev_dbg(kbdev->dev, "Coresight check for state: %s", coresight_state_to_string(state));

	spin_lock_irqsave(&kbdev->csf.coresight.lock, flags);

	list_for_each_entry(config_entry, &kbdev->csf.coresight.configs, link) {
		if (state != config_entry->state) {
			success = false;
			break;
		}
	}

	spin_unlock_irqrestore(&kbdev->csf.coresight.lock, flags);

	return success;
}
KBASE_EXPORT_TEST_API(kbase_debug_coresight_csf_state_check);

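/*
 * Wait, with a firmware timeout per config, for every active config to
 * reach the given state.
 */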
bool kbase_debug_coresight_csf_state_wait(struct kbase_device *kbdev,
					  enum kbase_debug_coresight_csf_state state)
{
	const long wait_timeout = kbase_csf_timeout_in_jiffies(kbdev->csf.fw_timeout_ms);
	struct kbase_debug_coresight_csf_config *config_entry, *next_config_entry;
	unsigned long flags;
	bool success = true;

	dev_dbg(kbdev->dev, "Coresight wait for state: %s", coresight_state_to_string(state));

	spin_lock_irqsave(&kbdev->csf.coresight.lock, flags);

	list_for_each_entry_safe(config_entry, next_config_entry, &kbdev->csf.coresight.configs,
				 link) {
		const enum kbase_debug_coresight_csf_state prev_state = config_entry->state;
		long remaining;

		spin_unlock_irqrestore(&kbdev->csf.coresight.lock, flags);
		remaining = wait_event_timeout(kbdev->csf.coresight.event_wait,
					       state == config_entry->state, wait_timeout);
		spin_lock_irqsave(&kbdev->csf.coresight.lock, flags);

		if (!remaining) {
			success = false;
			dev_err(kbdev->dev,
				"Timeout waiting for Coresight state transition %s to %s",
				coresight_state_to_string(prev_state),
				coresight_state_to_string(state));
		}
	}

	spin_unlock_irqrestore(&kbdev->csf.coresight.lock, flags);

	return success;
}
KBASE_EXPORT_TEST_API(kbase_debug_coresight_csf_state_wait);

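/*
 * Initialise the CoreSight backend: allocate the ordered workqueue and set
 * up the client/config lists, work items, wait queue and lock.
 */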
int kbase_debug_coresight_csf_init(struct kbase_device *kbdev)
{
	kbdev->csf.coresight.workq = alloc_ordered_workqueue("Mali CoreSight workqueue", 0);
	if (kbdev->csf.coresight.workq == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&kbdev->csf.coresight.clients);
	INIT_LIST_HEAD(&kbdev->csf.coresight.configs);
	INIT_WORK(&kbdev->csf.coresight.enable_work, coresight_config_enable_all);
	INIT_WORK(&kbdev->csf.coresight.disable_work, coresight_config_disable_all);
	init_waitqueue_head(&kbdev->csf.coresight.event_wait);
	spin_lock_init(&kbdev->csf.coresight.lock);

	kbdev->csf.coresight.disable_on_pmode_enter = false;
	kbdev->csf.coresight.enable_on_pmode_exit = false;

	return 0;
}

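/*
 * Tear down the CoreSight backend, cancelling pending work and freeing any
 * configs and clients that were not cleaned up by their owners.
 */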
void kbase_debug_coresight_csf_term(struct kbase_device *kbdev)
{
	struct kbase_debug_coresight_csf_client *client_entry, *next_client_entry;
	struct kbase_debug_coresight_csf_config *config_entry, *next_config_entry;
	unsigned long flags;

	kbdev->csf.coresight.disable_on_pmode_enter = false;
	kbdev->csf.coresight.enable_on_pmode_exit = false;

	cancel_work_sync(&kbdev->csf.coresight.enable_work);
	cancel_work_sync(&kbdev->csf.coresight.disable_work);
	destroy_workqueue(kbdev->csf.coresight.workq);
	kbdev->csf.coresight.workq = NULL;

	spin_lock_irqsave(&kbdev->csf.coresight.lock, flags);

	list_for_each_entry_safe(config_entry, next_config_entry, &kbdev->csf.coresight.configs,
				 link) {
		list_del_init(&config_entry->link);
		kfree(config_entry);
	}

	list_for_each_entry_safe(client_entry, next_client_entry, &kbdev->csf.coresight.clients,
				 link) {
		list_del_init(&client_entry->link);
		kfree(client_entry);
	}

	spin_unlock_irqrestore(&kbdev->csf.coresight.lock, flags);
}