1 /*
2 * Copyright (C) 2011-2017 ARM Limited. All rights reserved.
3 *
4 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
5 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
6 *
7 * A copy of the licence is included with the program, and can also be obtained from Free Software
8 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
9 */
10 #include "mali_kernel_common.h"
11 #include "mali_group.h"
12 #include "mali_osk.h"
13 #include "mali_l2_cache.h"
14 #include "mali_gp.h"
15 #include "mali_pp.h"
16 #include "mali_mmu.h"
17 #include "mali_dlbu.h"
18 #include "mali_broadcast.h"
19 #include "mali_scheduler.h"
20 #include "mali_osk_profiling.h"
21 #include "mali_osk_mali.h"
22 #include "mali_pm_domain.h"
23 #include "mali_pm.h"
24 #include "mali_executor.h"
25
26 #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
27 #include <linux/sched.h>
28 #include <trace/events/gpu.h>
29 #endif
30
31 #define MALI_MAX_NUM_DOMAIN_REFS (MALI_MAX_NUMBER_OF_GROUPS * 2)
32
33 #if defined(CONFIG_MALI400_PROFILING)
34 static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num);
35 #endif /* #if defined(CONFIG_MALI400_PROFILING) */
36
37 static struct mali_group *mali_global_groups[MALI_MAX_NUMBER_OF_GROUPS] = { NULL, };
38 static u32 mali_global_num_groups = 0;
39
40 /* SW timer for job execution */
41 int mali_max_job_runtime = MALI_MAX_JOB_RUNTIME_DEFAULT;
42
43 /* local helper functions */
44 static void mali_group_bottom_half_mmu(void *data);
45 static void mali_group_bottom_half_gp(void *data);
46 static void mali_group_bottom_half_pp(void *data);
47 static void mali_group_timeout(void *data);
48 static void mali_group_reset_pp(struct mali_group *group);
49 static void mali_group_reset_mmu(struct mali_group *group);
50
51 static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session, mali_bool is_reload);
52 static void mali_group_recovery_reset(struct mali_group *group);
53
/**
 * @brief Allocate and initialise a new group object.
 *
 * The group is zero-initialised, given a SW timeout timer, registered with
 * its power domain (when it carries an L2 cache, DLBU or broadcast unit)
 * and appended to the global group array.
 *
 * @return The new group, or NULL when allocation or timer setup fails, or
 *         when the maximum number of groups has already been created.
 */
struct mali_group *mali_group_create(struct mali_l2_cache_core *core,
				     struct mali_dlbu_core *dlbu,
				     struct mali_bcast_unit *bcast,
				     u32 domain_index)
{
	struct mali_group *group;

	/* Refuse to exceed the static group table. */
	if (mali_global_num_groups >= MALI_MAX_NUMBER_OF_GROUPS) {
		MALI_PRINT_ERROR(("Mali group: Too many group objects created\n"));
		return NULL;
	}

	group = _mali_osk_calloc(1, sizeof(struct mali_group));
	if (NULL == group) {
		return NULL;
	}

	group->timeout_timer = _mali_osk_timer_init(mali_group_timeout);
	if (NULL == group->timeout_timer) {
		_mali_osk_free(group);
		return NULL;
	}

	_mali_osk_timer_setcallback(group->timeout_timer, mali_group_timeout, (void *)group);

	group->l2_cache_core[0] = core;
	_mali_osk_list_init(&group->group_list);
	_mali_osk_list_init(&group->executor_list);
	_mali_osk_list_init(&group->pm_domain_list);
	group->bcast_core = bcast;
	group->dlbu_core = dlbu;

	/* register this object as a part of the correct power domain */
	if ((NULL != core) || (NULL != dlbu) || (NULL != bcast)) {
		group->pm_domain = mali_pm_register_group(domain_index, group);
	}

	mali_global_groups[mali_global_num_groups] = group;
	mali_global_num_groups++;

	return group;
}
93
/**
 * @brief Delete a group and free every resource it owns.
 *
 * Deletes the attached GP, PP and MMU cores, recursively deletes all child
 * groups of a virtual group (plus its DLBU and broadcast unit), removes the
 * group from the global group array, tears down the timeout timer and the
 * deferred bottom-half work objects, and finally frees the group itself.
 *
 * Preconditions: the group must not be attached to a parent (virtual) group
 * and must be inactive or at most activation-pending.
 */
void mali_group_delete(struct mali_group *group)
{
	u32 i;

	MALI_DEBUG_PRINT(4, ("Deleting group %s\n",
			     mali_group_core_description(group)));

	MALI_DEBUG_ASSERT(NULL == group->parent_group);
	MALI_DEBUG_ASSERT((MALI_GROUP_STATE_INACTIVE == group->state) || ((MALI_GROUP_STATE_ACTIVATION_PENDING == group->state)));

	/* Delete the resources that this group owns */
	if (NULL != group->gp_core) {
		mali_gp_delete(group->gp_core);
	}

	if (NULL != group->pp_core) {
		mali_pp_delete(group->pp_core);
	}

	if (NULL != group->mmu) {
		mali_mmu_delete(group->mmu);
	}

	if (mali_group_is_virtual(group)) {
		/* Remove all groups from virtual group */
		struct mali_group *child;
		struct mali_group *temp;

		_MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
			child->parent_group = NULL;
			mali_group_delete(child);
		}

		mali_dlbu_delete(group->dlbu_core);

		if (NULL != group->bcast_core) {
			mali_bcast_unit_delete(group->bcast_core);
		}
	}

	/* Unregister from the global group array. Order is not preserved:
	 * the last entry is moved into the vacated slot. */
	for (i = 0; i < mali_global_num_groups; i++) {
		if (mali_global_groups[i] == group) {
			mali_global_groups[i] = NULL;
			mali_global_num_groups--;

			if (i != mali_global_num_groups) {
				/* We removed a group from the middle of the array -- move the last
				 * group to the current position to close the gap */
				mali_global_groups[i] = mali_global_groups[mali_global_num_groups];
				mali_global_groups[mali_global_num_groups] = NULL;
			}

			break;
		}
	}

	/* Stop and release the SW job timeout timer. */
	if (NULL != group->timeout_timer) {
		_mali_osk_timer_del(group->timeout_timer);
		_mali_osk_timer_term(group->timeout_timer);
	}

	if (NULL != group->bottom_half_work_mmu) {
		_mali_osk_wq_delete_work(group->bottom_half_work_mmu);
	}

	if (NULL != group->bottom_half_work_gp) {
		_mali_osk_wq_delete_work(group->bottom_half_work_gp);
	}

	if (NULL != group->bottom_half_work_pp) {
		_mali_osk_wq_delete_work(group->bottom_half_work_pp);
	}

	_mali_osk_free(group);
}
169
mali_group_add_mmu_core(struct mali_group * group,struct mali_mmu_core * mmu_core)170 _mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core *mmu_core)
171 {
172 /* This group object now owns the MMU core object */
173 group->mmu = mmu_core;
174 group->bottom_half_work_mmu = _mali_osk_wq_create_work(mali_group_bottom_half_mmu, group);
175 if (NULL == group->bottom_half_work_mmu) {
176 return _MALI_OSK_ERR_FAULT;
177 }
178 return _MALI_OSK_ERR_OK;
179 }
180
mali_group_remove_mmu_core(struct mali_group * group)181 void mali_group_remove_mmu_core(struct mali_group *group)
182 {
183 /* This group object no longer owns the MMU core object */
184 group->mmu = NULL;
185 if (NULL != group->bottom_half_work_mmu) {
186 _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
187 }
188 }
189
mali_group_add_gp_core(struct mali_group * group,struct mali_gp_core * gp_core)190 _mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core *gp_core)
191 {
192 /* This group object now owns the GP core object */
193 group->gp_core = gp_core;
194 group->bottom_half_work_gp = _mali_osk_wq_create_work(mali_group_bottom_half_gp, group);
195 if (NULL == group->bottom_half_work_gp) {
196 return _MALI_OSK_ERR_FAULT;
197 }
198
199 return _MALI_OSK_ERR_OK;
200 }
201
mali_group_remove_gp_core(struct mali_group * group)202 void mali_group_remove_gp_core(struct mali_group *group)
203 {
204 /* This group object no longer owns the GP core object */
205 group->gp_core = NULL;
206 if (NULL != group->bottom_half_work_gp) {
207 _mali_osk_wq_delete_work(group->bottom_half_work_gp);
208 }
209 }
210
mali_group_add_pp_core(struct mali_group * group,struct mali_pp_core * pp_core)211 _mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core *pp_core)
212 {
213 /* This group object now owns the PP core object */
214 group->pp_core = pp_core;
215 group->bottom_half_work_pp = _mali_osk_wq_create_work(mali_group_bottom_half_pp, group);
216 if (NULL == group->bottom_half_work_pp) {
217 return _MALI_OSK_ERR_FAULT;
218 }
219 return _MALI_OSK_ERR_OK;
220 }
221
mali_group_remove_pp_core(struct mali_group * group)222 void mali_group_remove_pp_core(struct mali_group *group)
223 {
224 /* This group object no longer owns the PP core object */
225 group->pp_core = NULL;
226 if (NULL != group->bottom_half_work_pp) {
227 _mali_osk_wq_delete_work(group->bottom_half_work_pp);
228 }
229 }
230
/**
 * @brief Request activation (power up) of a group.
 *
 * Takes PM domain references for the group, its L2 cache and -- for a
 * virtual group -- every child group and each child's L2 cache, then moves
 * the group to ACTIVE if all referenced domains were already powered, or
 * leaves it in ACTIVATION_PENDING until the power-up notification arrives.
 *
 * Must be called with the executor lock held.
 *
 * @return The resulting group state (ACTIVE or ACTIVATION_PENDING, or the
 *         unchanged state if the group was already active/pending).
 */
enum mali_group_state mali_group_activate(struct mali_group *group)
{
	MALI_DEBUG_ASSERT_POINTER(group);
	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();

	MALI_DEBUG_PRINT(4, ("Group: Activating group %s\n",
			     mali_group_core_description(group)));

	if (MALI_GROUP_STATE_INACTIVE == group->state) {
		/* Group is inactive, get PM refs in order to power up */

		/*
		 * We'll take a maximum of 2 power domain references pr group,
		 * one for the group itself, and one for it's L2 cache.
		 */
		struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS];
		struct mali_group *groups[MALI_MAX_NUM_DOMAIN_REFS];
		u32 num_domains = 0;
		mali_bool all_groups_on;

		/* Deal with child groups first */
		if (mali_group_is_virtual(group)) {
			/*
			 * The virtual group might have 0, 1 or 2 L2s in
			 * its l2_cache_core array, but we ignore these and
			 * let the child groups take the needed L2 cache ref
			 * on behalf of the virtual group.
			 * In other words; The L2 refs are taken in pair with
			 * the physical group which the L2 is attached to.
			 */
			struct mali_group *child;
			struct mali_group *temp;

			/*
			 * Child group is inactive, get PM
			 * refs in order to power up.
			 */
			_MALI_OSK_LIST_FOREACHENTRY(child, temp,
						    &group->group_list,
						    struct mali_group, group_list) {
				MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE
						  == child->state);

				child->state = MALI_GROUP_STATE_ACTIVATION_PENDING;

				MALI_DEBUG_ASSERT_POINTER(
					child->pm_domain);
				domains[num_domains] = child->pm_domain;
				groups[num_domains] = child;
				num_domains++;

				/*
				 * Take L2 domain ref for child group.
				 */
				MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS
						  > num_domains);
				domains[num_domains] = mali_l2_cache_get_pm_domain(
							       child->l2_cache_core[0]);
				groups[num_domains] = NULL;
				MALI_DEBUG_ASSERT(NULL ==
						  child->l2_cache_core[1]);
				num_domains++;
			}
		} else {
			/* Take L2 domain ref for physical groups. */
			MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
					  num_domains);

			domains[num_domains] = mali_l2_cache_get_pm_domain(
						       group->l2_cache_core[0]);
			groups[num_domains] = NULL;
			MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
			num_domains++;
		}

		/* Do the group itself last (it's dependencies first) */

		group->state = MALI_GROUP_STATE_ACTIVATION_PENDING;

		MALI_DEBUG_ASSERT_POINTER(group->pm_domain);
		domains[num_domains] = group->pm_domain;
		groups[num_domains] = group;
		num_domains++;

		all_groups_on = mali_pm_get_domain_refs(domains, groups,
							num_domains);

		/*
		 * Complete activation for group, include
		 * virtual group or physical group.
		 */
		if (MALI_TRUE == all_groups_on) {

			mali_group_set_active(group);
		}
	} else if (MALI_GROUP_STATE_ACTIVE == group->state) {
		/* Already active */
		MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
	} else {
		/*
		 * Activation already pending, group->power_is_on could
		 * be both true or false. We need to wait for power up
		 * notification anyway.
		 */
		MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING
				  == group->state);
	}

	MALI_DEBUG_PRINT(4, ("Group: group %s activation result: %s\n",
			     mali_group_core_description(group),
			     MALI_GROUP_STATE_ACTIVE == group->state ?
			     "ACTIVE" : "PENDING"));

	return group->state;
}
346
mali_group_set_active(struct mali_group * group)347 mali_bool mali_group_set_active(struct mali_group *group)
348 {
349 MALI_DEBUG_ASSERT_POINTER(group);
350 MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
351 MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING == group->state);
352 MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
353
354 MALI_DEBUG_PRINT(4, ("Group: Activation completed for %s\n",
355 mali_group_core_description(group)));
356
357 if (mali_group_is_virtual(group)) {
358 struct mali_group *child;
359 struct mali_group *temp;
360
361 _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list,
362 struct mali_group, group_list) {
363 if (MALI_TRUE != child->power_is_on) {
364 return MALI_FALSE;
365 }
366
367 child->state = MALI_GROUP_STATE_ACTIVE;
368 }
369
370 mali_group_reset(group);
371 }
372
373 /* Go to ACTIVE state */
374 group->state = MALI_GROUP_STATE_ACTIVE;
375
376 return MALI_TRUE;
377 }
378
/**
 * @brief Deactivate a group and release its PM domain references.
 *
 * Marks the group (and all children of a virtual group) INACTIVE and drops
 * the PM references taken by mali_group_activate(). For a virtual group the
 * session is cleared and the broadcast unit disabled here, since the child
 * groups may lose power even though the virtual group object itself stays
 * powered.
 *
 * Must be called with the executor lock held; the group must not already
 * be inactive.
 *
 * @return MALI_TRUE if dropping the references allows a power down,
 *         MALI_FALSE otherwise.
 */
mali_bool mali_group_deactivate(struct mali_group *group)
{
	struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS];
	u32 num_domains = 0;
	mali_bool power_down = MALI_FALSE;

	MALI_DEBUG_ASSERT_POINTER(group);
	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
	MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE != group->state);

	MALI_DEBUG_PRINT(3, ("Group: Deactivating group %s\n",
			     mali_group_core_description(group)));

	group->state = MALI_GROUP_STATE_INACTIVE;

	MALI_DEBUG_ASSERT_POINTER(group->pm_domain);
	domains[num_domains] = group->pm_domain;
	num_domains++;

	if (mali_group_is_virtual(group)) {
		/* Release refs for all child groups */
		struct mali_group *child;
		struct mali_group *temp;

		_MALI_OSK_LIST_FOREACHENTRY(child, temp,
					    &group->group_list,
					    struct mali_group, group_list) {
			child->state = MALI_GROUP_STATE_INACTIVE;

			MALI_DEBUG_ASSERT_POINTER(child->pm_domain);
			domains[num_domains] = child->pm_domain;
			num_domains++;

			/* Release L2 cache domain for child groups */
			MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
					  num_domains);
			domains[num_domains] = mali_l2_cache_get_pm_domain(
						       child->l2_cache_core[0]);
			MALI_DEBUG_ASSERT(NULL == child->l2_cache_core[1]);
			num_domains++;
		}

		/*
		 * Must do mali_group_power_down() steps right here for
		 * virtual group, because virtual group itself is likely to
		 * stay powered on, however child groups are now very likely
		 * to be powered off (and thus lose their state).
		 */

		mali_group_clear_session(group);
		/*
		 * Disable the broadcast unit (clear it's mask).
		 * This is needed in case the GPU isn't actually
		 * powered down at this point and groups are
		 * removed from an inactive virtual group.
		 * If not, then the broadcast unit will intercept
		 * their interrupts!
		 */
		mali_bcast_disable(group->bcast_core);
	} else {
		/* Release L2 cache domain for physical groups */
		MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
				  num_domains);
		domains[num_domains] = mali_l2_cache_get_pm_domain(
					       group->l2_cache_core[0]);
		MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
		num_domains++;
	}

	power_down = mali_pm_put_domain_refs(domains, num_domains);

	return power_down;
}
452
mali_group_power_up(struct mali_group * group)453 void mali_group_power_up(struct mali_group *group)
454 {
455 MALI_DEBUG_ASSERT_POINTER(group);
456 MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
457
458 MALI_DEBUG_PRINT(3, ("Group: Power up for %s\n",
459 mali_group_core_description(group)));
460
461 group->power_is_on = MALI_TRUE;
462
463 if (MALI_FALSE == mali_group_is_virtual(group)
464 && MALI_FALSE == mali_group_is_in_virtual(group)) {
465 mali_group_reset(group);
466 }
467
468 /*
469 * When we just acquire only one physical group form virt group,
470 * we should remove the bcast&dlbu mask from virt group and
471 * reset bcast and dlbu core, although part of pp cores in virt
472 * group maybe not be powered on.
473 */
474 if (MALI_TRUE == mali_group_is_virtual(group)) {
475 mali_bcast_reset(group->bcast_core);
476 mali_dlbu_update_mask(group->dlbu_core);
477 }
478 }
479
mali_group_power_down(struct mali_group * group)480 void mali_group_power_down(struct mali_group *group)
481 {
482 MALI_DEBUG_ASSERT_POINTER(group);
483 MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
484 MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
485
486 MALI_DEBUG_PRINT(3, ("Group: Power down for %s\n",
487 mali_group_core_description(group)));
488
489 group->power_is_on = MALI_FALSE;
490
491 if (mali_group_is_virtual(group)) {
492 /*
493 * What we do for physical jobs in this function should
494 * already have been done in mali_group_deactivate()
495 * for virtual group.
496 */
497 MALI_DEBUG_ASSERT(NULL == group->session);
498 } else {
499 mali_group_clear_session(group);
500 }
501 }
502
/* Debug-only helper: print the composition of a virtual group -- its two
 * possible L2 cache cores with their reference counts, followed by each
 * physical child group. Emitted only when MALI_DEBUG_CODE expands to its
 * argument (debug builds). */
MALI_DEBUG_CODE(static void mali_group_print_virtual(struct mali_group *vgroup)
{
	u32 i;
	struct mali_group *group;
	struct mali_group *temp;

	MALI_DEBUG_PRINT(4, ("Virtual group %s (%p)\n",
			     mali_group_core_description(vgroup),
			     vgroup));
	MALI_DEBUG_PRINT(4, ("l2_cache_core[0] = %p, ref = %d\n", vgroup->l2_cache_core[0], vgroup->l2_cache_core_ref_count[0]));
	MALI_DEBUG_PRINT(4, ("l2_cache_core[1] = %p, ref = %d\n", vgroup->l2_cache_core[1], vgroup->l2_cache_core_ref_count[1]));

	i = 0;
	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &vgroup->group_list, struct mali_group, group_list) {
		MALI_DEBUG_PRINT(4, ("[%d] %s (%p), l2_cache_core[0] = %p\n",
				     i, mali_group_core_description(group),
				     group, group->l2_cache_core[0]));
		i++;
	}
})
523
524 static void mali_group_dump_core_status(struct mali_group *group)
525 {
526 u32 i;
527
528 MALI_DEBUG_ASSERT_POINTER(group);
529 MALI_DEBUG_ASSERT(NULL != group->gp_core || (NULL != group->pp_core && !mali_group_is_virtual(group)));
530
531 if (NULL != group->gp_core) {
532 MALI_PRINT(("Dump Group %s\n", group->gp_core->hw_core.description));
533
534 for (i = 0; i < 0xA8; i += 0x10) {
535 MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->gp_core->hw_core, i),
536 mali_hw_core_register_read(&group->gp_core->hw_core, i + 4),
537 mali_hw_core_register_read(&group->gp_core->hw_core, i + 8),
538 mali_hw_core_register_read(&group->gp_core->hw_core, i + 12)));
539 }
540
541
542 } else {
543 MALI_PRINT(("Dump Group %s\n", group->pp_core->hw_core.description));
544
545 for (i = 0; i < 0x5c; i += 0x10) {
546 MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->pp_core->hw_core, i),
547 mali_hw_core_register_read(&group->pp_core->hw_core, i + 4),
548 mali_hw_core_register_read(&group->pp_core->hw_core, i + 8),
549 mali_hw_core_register_read(&group->pp_core->hw_core, i + 12)));
550 }
551
552 /* Ignore some minor registers */
553 for (i = 0x1000; i < 0x1068; i += 0x10) {
554 MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->pp_core->hw_core, i),
555 mali_hw_core_register_read(&group->pp_core->hw_core, i + 4),
556 mali_hw_core_register_read(&group->pp_core->hw_core, i + 8),
557 mali_hw_core_register_read(&group->pp_core->hw_core, i + 12)));
558 }
559 }
560
561 MALI_PRINT(("Dump Group MMU\n"));
562 for (i = 0; i < 0x24; i += 0x10) {
563 MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->mmu->hw_core, i),
564 mali_hw_core_register_read(&group->mmu->hw_core, i + 4),
565 mali_hw_core_register_read(&group->mmu->hw_core, i + 8),
566 mali_hw_core_register_read(&group->mmu->hw_core, i + 12)));
567 }
568 }
569
570
/**
 * @brief Dump the status of a group (each physical child for a virtual group)
 */
mali_group_dump_status(struct mali_group * group)574 void mali_group_dump_status(struct mali_group *group)
575 {
576 MALI_DEBUG_ASSERT_POINTER(group);
577
578 if (mali_group_is_virtual(group)) {
579 struct mali_group *group_c;
580 struct mali_group *temp;
581 _MALI_OSK_LIST_FOREACHENTRY(group_c, temp, &group->group_list, struct mali_group, group_list) {
582 mali_group_dump_core_status(group_c);
583 }
584 } else {
585 mali_group_dump_core_status(group);
586 }
587 }
588
589 /**
590 * @brief Add child group to virtual group parent
591 */
/**
 * @brief Add a physical child group to a virtual group parent.
 *
 * Links the child into the parent's group list, merges L2 cache bookkeeping,
 * updates the broadcast unit and DLBU, migrates the child's MMU session to
 * match the parent, and -- if the parent is currently running a PP job --
 * starts that job on the child core as well.
 *
 * Must be called with the executor lock held; the parent must be virtual,
 * the child physical and not already attached to a parent.
 */
void mali_group_add_group(struct mali_group *parent, struct mali_group *child)
{
	mali_bool found;
	u32 i;

	MALI_DEBUG_PRINT(3, ("Adding group %s to virtual group %s\n",
			     mali_group_core_description(child),
			     mali_group_core_description(parent)));

	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
	MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
	MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
	MALI_DEBUG_ASSERT(NULL == child->parent_group);

	_mali_osk_list_addtail(&child->group_list, &parent->group_list);

	child->parent_group = parent;

	MALI_DEBUG_ASSERT_POINTER(child->l2_cache_core[0]);

	MALI_DEBUG_PRINT(4, ("parent->l2_cache_core: [0] = %p, [1] = %p\n", parent->l2_cache_core[0], parent->l2_cache_core[1]));
	MALI_DEBUG_PRINT(4, ("child->l2_cache_core: [0] = %p, [1] = %p\n", child->l2_cache_core[0], child->l2_cache_core[1]));

	/* Keep track of the L2 cache cores of child groups */
	found = MALI_FALSE;
	for (i = 0; i < 2; i++) {
		if (parent->l2_cache_core[i] == child->l2_cache_core[0]) {
			MALI_DEBUG_ASSERT(parent->l2_cache_core_ref_count[i] > 0);
			parent->l2_cache_core_ref_count[i]++;
			found = MALI_TRUE;
		}
	}

	if (!found) {
		/* First time we see this L2 cache, add it to our list */
		i = (NULL == parent->l2_cache_core[0]) ? 0 : 1;

		MALI_DEBUG_PRINT(4, ("First time we see l2_cache %p. Adding to [%d] = %p\n", child->l2_cache_core[0], i, parent->l2_cache_core[i]));

		MALI_DEBUG_ASSERT(NULL == parent->l2_cache_core[i]);

		parent->l2_cache_core[i] = child->l2_cache_core[0];
		parent->l2_cache_core_ref_count[i]++;
	}

	/* Update Broadcast Unit and DLBU */
	mali_bcast_add_group(parent->bcast_core, child);
	mali_dlbu_add_group(parent->dlbu_core, child);

	if (MALI_TRUE == parent->power_is_on) {
		mali_bcast_reset(parent->bcast_core);
		mali_dlbu_update_mask(parent->dlbu_core);
	}

	/* Bring the child's MMU in line with the parent's session. */
	if (MALI_TRUE == child->power_is_on) {
		if (NULL == parent->session) {
			if (NULL != child->session) {
				/*
				 * Parent has no session, so clear
				 * child session as well.
				 */
				mali_mmu_activate_empty_page_directory(child->mmu);
			}
		} else {
			if (parent->session == child->session) {
				/* We already have same session as parent,
				 * so a simple zap should be enough.
				 */
				mali_mmu_zap_tlb(child->mmu);
			} else {
				/*
				 * Parent has a different session, so we must
				 * switch to that sessions page table
				 */
				mali_mmu_activate_page_directory(child->mmu, mali_session_get_page_directory(parent->session));
			}

			/* It is the parent which keeps the session from now on */
			child->session = NULL;
		}
	} else {
		/* should have been cleared when child was powered down */
		MALI_DEBUG_ASSERT(NULL == child->session);
	}

	/* Start job on child when parent is active */
	if (NULL != parent->pp_running_job) {
		struct mali_pp_job *job = parent->pp_running_job;

		MALI_DEBUG_PRINT(3, ("Group %x joining running job %d on virtual group %x\n",
				     child, mali_pp_job_get_id(job), parent));

		/* Only allowed to add active child to an active parent */
		MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == parent->state);
		MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == child->state);

		mali_pp_job_start(child->pp_core, job, mali_pp_core_get_id(child->pp_core), MALI_TRUE);

		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
					      MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
					      MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
					      mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);

		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
					      MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
					      MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
					      mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
		if (child->pp_core) {
			trace_gpu_sched_switch(
				mali_pp_core_description(child->pp_core),
				sched_clock(), mali_pp_job_get_tid(job),
				0, mali_pp_job_get_id(job));
		}
#endif

#if defined(CONFIG_MALI400_PROFILING)
		trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
				       mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
#endif
	}

	MALI_DEBUG_CODE(mali_group_print_virtual(parent);)
}
716
717 /**
718 * @brief Remove child group from virtual group parent
719 */
/**
 * @brief Remove a physical child group from its virtual group parent.
 *
 * Detaches the child from the broadcast unit and DLBU, hands the parent's
 * session over to the child, unlinks it from the group list and releases
 * one L2 cache reference (clearing the parent's L2 slot when the last
 * reference is dropped).
 *
 * Must be called with the executor lock held; the child must currently be
 * attached to this parent.
 */
void mali_group_remove_group(struct mali_group *parent, struct mali_group *child)
{
	u32 i;

	MALI_DEBUG_PRINT(3, ("Removing group %s from virtual group %s\n",
			     mali_group_core_description(child),
			     mali_group_core_description(parent)));

	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
	MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
	MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
	MALI_DEBUG_ASSERT(parent == child->parent_group);

	/* Update Broadcast Unit and DLBU */
	mali_bcast_remove_group(parent->bcast_core, child);
	mali_dlbu_remove_group(parent->dlbu_core, child);

	if (MALI_TRUE == parent->power_is_on) {
		mali_bcast_reset(parent->bcast_core);
		mali_dlbu_update_mask(parent->dlbu_core);
	}

	/* The departing child takes over the parent's session. */
	child->session = parent->session;
	child->parent_group = NULL;

	_mali_osk_list_delinit(&child->group_list);
	if (_mali_osk_list_empty(&parent->group_list)) {
		parent->session = NULL;
	}

	/* Keep track of the L2 cache cores of child groups */
	i = (child->l2_cache_core[0] == parent->l2_cache_core[0]) ? 0 : 1;

	MALI_DEBUG_ASSERT(child->l2_cache_core[0] == parent->l2_cache_core[i]);

	parent->l2_cache_core_ref_count[i]--;
	if (parent->l2_cache_core_ref_count[i] == 0) {
		parent->l2_cache_core[i] = NULL;
	}

	MALI_DEBUG_CODE(mali_group_print_virtual(parent));
}
762
mali_group_acquire_group(struct mali_group * parent)763 struct mali_group *mali_group_acquire_group(struct mali_group *parent)
764 {
765 struct mali_group *child = NULL;
766
767 MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
768 MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
769
770 if (!_mali_osk_list_empty(&parent->group_list)) {
771 child = _MALI_OSK_LIST_ENTRY(parent->group_list.prev, struct mali_group, group_list);
772 mali_group_remove_group(parent, child);
773 }
774
775 if (NULL != child) {
776 if (MALI_GROUP_STATE_ACTIVE != parent->state
777 && MALI_TRUE == child->power_is_on) {
778 mali_group_reset(child);
779 }
780 }
781
782 return child;
783 }
784
mali_group_reset(struct mali_group * group)785 void mali_group_reset(struct mali_group *group)
786 {
787 MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
788 MALI_DEBUG_ASSERT(NULL == group->gp_running_job);
789 MALI_DEBUG_ASSERT(NULL == group->pp_running_job);
790
791 MALI_DEBUG_PRINT(3, ("Group: reset of %s\n",
792 mali_group_core_description(group)));
793
794 if (NULL != group->dlbu_core) {
795 mali_dlbu_reset(group->dlbu_core);
796 }
797
798 if (NULL != group->bcast_core) {
799 mali_bcast_reset(group->bcast_core);
800 }
801
802 MALI_DEBUG_ASSERT(NULL != group->mmu);
803 mali_group_reset_mmu(group);
804
805 if (NULL != group->gp_core) {
806 MALI_DEBUG_ASSERT(NULL == group->pp_core);
807 mali_gp_reset(group->gp_core);
808 } else {
809 MALI_DEBUG_ASSERT(NULL != group->pp_core);
810 mali_group_reset_pp(group);
811 }
812 }
813
/**
 * @brief Start a GP job on a group.
 *
 * Invalidates the L2 cache when required by the job's cache order, leaves
 * GPU secure mode if it is enabled (GP jobs are not started in secure
 * mode), activates the job session's page directory -- with a full group
 * reset and page-table reload when secure mode was previously enabled --
 * then kicks the GP core, emits profiling/trace events and arms the SW
 * timeout timer.
 *
 * Must be called with the executor lock held.
 *
 * @param gpu_secure_mode_pre_enabled MALI_TRUE if the GPU was in secure
 *        mode before this call, forcing a reset + page-table reload.
 */
void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job, mali_bool gpu_secure_mode_pre_enabled)
{
	struct mali_session_data *session;

	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();

	MALI_DEBUG_PRINT(3, ("Group: Starting GP job 0x%08X on group %s\n",
			     job,
			     mali_group_core_description(group)));

	session = mali_gp_job_get_session(job);

	MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]);
	mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_gp_job_get_cache_order(job));

	/* Reset GPU and disable gpu secure mode if needed. */
	if (MALI_TRUE == _mali_osk_gpu_secure_mode_is_enabled()) {
		struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
		_mali_osk_gpu_reset_and_secure_mode_disable();
		/* Need to disable the pmu interrupt mask register */
		if (NULL != pmu) {
			mali_pmu_reset(pmu);
		}
	}

	/* Reload mmu page table if needed */
	if (MALI_TRUE == gpu_secure_mode_pre_enabled) {
		mali_group_reset(group);
		mali_group_activate_page_directory(group, session, MALI_TRUE);
	} else {
		mali_group_activate_page_directory(group, session, MALI_FALSE);
	}

	mali_gp_job_start(group->gp_core, job);

	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
				      MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) |
				      MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
				      mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job), 0, 0, 0);
	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
				      MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
				      mali_gp_job_get_pid(job), mali_gp_job_get_tid(job), 0, 0, 0);

#if defined(CONFIG_MALI400_PROFILING)
	trace_mali_core_active(mali_gp_job_get_pid(job), 1 /* active */, 1 /* GP */, 0 /* core */,
			       mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job));
#endif

#if defined(CONFIG_MALI400_PROFILING)
	if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
	    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
		mali_group_report_l2_cache_counters_per_core(group, 0);
	}
#endif /* #if defined(CONFIG_MALI400_PROFILING) */

#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
	if (group->gp_core) {
		trace_gpu_sched_switch(mali_gp_core_description(group->gp_core),
				       sched_clock(), mali_gp_job_get_tid(job),
				       0, mali_gp_job_get_id(job));
	}
#endif

	group->gp_running_job = job;
	group->is_working = MALI_TRUE;

	/* Setup SW timer and record start time */
	group->start_time = _mali_osk_time_tickcount();
	_mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));

	MALI_DEBUG_PRINT(4, ("Group: Started GP job 0x%08X on group %s at %u\n",
			     job,
			     mali_group_core_description(group),
			     group->start_time));
}
889
890 /* Used to set all the registers except frame renderer list address and fragment shader stack address
891 * It means the caller must set these two registers properly before calling this function
892 */
mali_group_start_pp_job(struct mali_group * group,struct mali_pp_job * job,u32 sub_job,mali_bool gpu_secure_mode_pre_enabled)893 void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool gpu_secure_mode_pre_enabled)
894 {
895 struct mali_session_data *session;
896
897 MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
898
899 MALI_DEBUG_PRINT(3, ("Group: Starting PP job 0x%08X part %u/%u on group %s\n",
900 job, sub_job + 1,
901 mali_pp_job_get_sub_job_count(job),
902 mali_group_core_description(group)));
903
904 session = mali_pp_job_get_session(job);
905
906 if (NULL != group->l2_cache_core[0]) {
907 mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_pp_job_get_cache_order(job));
908 }
909
910 if (NULL != group->l2_cache_core[1]) {
911 mali_l2_cache_invalidate_conditional(group->l2_cache_core[1], mali_pp_job_get_cache_order(job));
912 }
913
914 /* Reset GPU and change gpu secure mode if needed. */
915 if (MALI_TRUE == mali_pp_job_is_protected_job(job) && MALI_FALSE == _mali_osk_gpu_secure_mode_is_enabled()) {
916 struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
917 _mali_osk_gpu_reset_and_secure_mode_enable();
918 /* Need to disable the pmu interrupt mask register */
919 if (NULL != pmu) {
920 mali_pmu_reset(pmu);
921 }
922 } else if (MALI_FALSE == mali_pp_job_is_protected_job(job) && MALI_TRUE == _mali_osk_gpu_secure_mode_is_enabled()) {
923 struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
924 _mali_osk_gpu_reset_and_secure_mode_disable();
925 /* Need to disable the pmu interrupt mask register */
926 if (NULL != pmu) {
927 mali_pmu_reset(pmu);
928 }
929 }
930
931 /* Reload the mmu page table if needed */
932 if ((MALI_TRUE == mali_pp_job_is_protected_job(job) && MALI_FALSE == gpu_secure_mode_pre_enabled)
933 || (MALI_FALSE == mali_pp_job_is_protected_job(job) && MALI_TRUE == gpu_secure_mode_pre_enabled)) {
934 mali_group_reset(group);
935 mali_group_activate_page_directory(group, session, MALI_TRUE);
936 } else {
937 mali_group_activate_page_directory(group, session, MALI_FALSE);
938 }
939
940 if (mali_group_is_virtual(group)) {
941 struct mali_group *child;
942 struct mali_group *temp;
943 u32 core_num = 0;
944
945 MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job));
946
947 /* Configure DLBU for the job */
948 mali_dlbu_config_job(group->dlbu_core, job);
949
950 /* Write stack address for each child group */
951 _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
952 mali_pp_write_addr_stack(child->pp_core, job);
953 core_num++;
954 }
955
956 mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
957 } else {
958 mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
959 }
960
961 /* if the group is virtual, loop through physical groups which belong to this group
962 * and call profiling events for its cores as virtual */
963 if (MALI_TRUE == mali_group_is_virtual(group)) {
964 struct mali_group *child;
965 struct mali_group *temp;
966
967 _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
968 _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
969 MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
970 MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
971 mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
972
973 _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
974 MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
975 MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
976 mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
977
978 #if defined(CONFIG_MALI400_PROFILING)
979 trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
980 mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
981 #endif
982 }
983
984 #if defined(CONFIG_MALI400_PROFILING)
985 if (0 != group->l2_cache_core_ref_count[0]) {
986 if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
987 (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
988 mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
989 }
990 }
991 if (0 != group->l2_cache_core_ref_count[1]) {
992 if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
993 (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
994 mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
995 }
996 }
997 #endif /* #if defined(CONFIG_MALI400_PROFILING) */
998
999 } else { /* group is physical - call profiling events for physical cores */
1000 _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
1001 MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
1002 MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
1003 mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
1004
1005 _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
1006 MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
1007 MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
1008 mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
1009
1010 #if defined(CONFIG_MALI400_PROFILING)
1011 trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core),
1012 mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
1013 #endif
1014
1015 #if defined(CONFIG_MALI400_PROFILING)
1016 if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
1017 (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
1018 mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
1019 }
1020 #endif /* #if defined(CONFIG_MALI400_PROFILING) */
1021 }
1022
1023 #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
1024 if (group->pp_core) {
1025 trace_gpu_sched_switch(mali_pp_core_description(group->pp_core),
1026 sched_clock(), mali_pp_job_get_tid(job),
1027 0, mali_pp_job_get_id(job));
1028 }
1029 #endif
1030
1031 group->pp_running_job = job;
1032 group->pp_running_sub_job = sub_job;
1033 group->is_working = MALI_TRUE;
1034
1035 /* Setup SW timer and record start time */
1036 group->start_time = _mali_osk_time_tickcount();
1037 _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
1038
1039 MALI_DEBUG_PRINT(4, ("Group: Started PP job 0x%08X part %u/%u on group %s at %u\n",
1040 job, sub_job + 1,
1041 mali_pp_job_get_sub_job_count(job),
1042 mali_group_core_description(group),
1043 group->start_time));
1044
1045 }
1046
/*
 * Resume a suspended GP job on this group after user space has supplied a
 * new heap range [start_addr, end_addr).
 *
 * Invalidates the L2 cache and the MMU TLB before restarting the GP core,
 * so the new heap mappings are visible to the hardware.
 *
 * Caller must hold the executor lock.
 *
 * NOTE(review): the job_id parameter is unused here — presumably kept for
 * interface symmetry with the suspend path; confirm before removing.
 */
void mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr)
{
	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();

	/* Drop stale L2 contents before the core continues. */
	MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]);
	mali_l2_cache_invalidate(group->l2_cache_core[0]);

	/* Zap the TLB so the new heap mappings are picked up. */
	mali_mmu_zap_tlb_without_stall(group->mmu);

	/* Hand the new heap range to the GP core and restart it. */
	mali_gp_resume_with_new_heap(group->gp_core, start_addr, end_addr);

	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
				      MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
				      0, 0, 0, 0, 0);

#if defined(CONFIG_MALI400_PROFILING)
	trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 1 /* active */, 1 /* GP */, 0 /* core */,
			       mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job));
#endif
}
1067
mali_group_reset_mmu(struct mali_group * group)1068 static void mali_group_reset_mmu(struct mali_group *group)
1069 {
1070 struct mali_group *child;
1071 struct mali_group *temp;
1072 _mali_osk_errcode_t err;
1073
1074 MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
1075
1076 if (!mali_group_is_virtual(group)) {
1077 /* This is a physical group or an idle virtual group -- simply wait for
1078 * the reset to complete. */
1079 err = mali_mmu_reset(group->mmu);
1080 MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
1081 } else { /* virtual group */
1082 /* Loop through all members of this virtual group and wait
1083 * until they are done resetting.
1084 */
1085 _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
1086 err = mali_mmu_reset(child->mmu);
1087 MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
1088 }
1089 }
1090 }
1091
mali_group_reset_pp(struct mali_group * group)1092 static void mali_group_reset_pp(struct mali_group *group)
1093 {
1094 struct mali_group *child;
1095 struct mali_group *temp;
1096
1097 MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
1098
1099 mali_pp_reset_async(group->pp_core);
1100
1101 if (!mali_group_is_virtual(group) || NULL == group->pp_running_job) {
1102 /* This is a physical group or an idle virtual group -- simply wait for
1103 * the reset to complete. */
1104 mali_pp_reset_wait(group->pp_core);
1105 } else {
1106 /* Loop through all members of this virtual group and wait until they
1107 * are done resetting.
1108 */
1109 _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
1110 mali_pp_reset_wait(child->pp_core);
1111 }
1112 }
1113 }
1114
/*
 * Collect the result of a finished (or failed) PP job on this group.
 *
 * Stops the SW timeout timer, reads back performance counters, emits the
 * corresponding profiling events, marks the group idle and resets the PP
 * core(s): a soft reset on success, a full recovery reset on failure or
 * when the soft reset itself fails.
 *
 * @param group    Group whose PP job has completed; must be working.
 * @param success  MALI_TRUE if the job completed without error.
 * @param sub_job  Out: the sub job index that was running.
 * @return The job that was running on the group.
 *
 * Caller must hold the executor lock.
 */
struct mali_pp_job *mali_group_complete_pp(struct mali_group *group, mali_bool success, u32 *sub_job)
{
	struct mali_pp_job *pp_job_to_return;

	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
	MALI_DEBUG_ASSERT_POINTER(group);
	MALI_DEBUG_ASSERT_POINTER(group->pp_core);
	MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);
	MALI_DEBUG_ASSERT_POINTER(sub_job);
	MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working);

	/* Stop/clear the timeout timer. */
	_mali_osk_timer_del_async(group->timeout_timer);

	if (NULL != group->pp_running_job) {

		/* Deal with HW counters and profiling */

		if (MALI_TRUE == mali_group_is_virtual(group)) {
			struct mali_group *child;
			struct mali_group *temp;

			/* update performance counters from each physical pp core within this virtual group */
			_MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
				mali_pp_update_performance_counters(group->pp_core, child->pp_core, group->pp_running_job, mali_pp_core_get_id(child->pp_core));
			}

#if defined(CONFIG_MALI400_PROFILING)
			/* send profiling data per physical core */
			_MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
				_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
							      MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
							      MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
							      mali_pp_job_get_perf_counter_value0(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
							      mali_pp_job_get_perf_counter_value1(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
							      mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
							      0, 0);

				trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job),
						       0 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
						       mali_pp_job_get_frame_builder_id(group->pp_running_job),
						       mali_pp_job_get_flush_id(group->pp_running_job));
			}
			/* Report L2 counters for each referenced L2 cache, but only
			 * when both counter sources are enabled. */
			if (0 != group->l2_cache_core_ref_count[0]) {
				if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
				    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
					mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
				}
			}
			if (0 != group->l2_cache_core_ref_count[1]) {
				if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
				    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
					mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
				}
			}

#endif
		} else {
			/* update performance counters for a physical group's pp core */
			mali_pp_update_performance_counters(group->pp_core, group->pp_core, group->pp_running_job, group->pp_running_sub_job);

#if defined(CONFIG_MALI400_PROFILING)
			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
						      MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
						      MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
						      mali_pp_job_get_perf_counter_value0(group->pp_running_job, group->pp_running_sub_job),
						      mali_pp_job_get_perf_counter_value1(group->pp_running_job, group->pp_running_sub_job),
						      mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
						      0, 0);

			trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job),
					       0 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core),
					       mali_pp_job_get_frame_builder_id(group->pp_running_job),
					       mali_pp_job_get_flush_id(group->pp_running_job));

			if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
			    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
				mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
			}
#endif
		}

#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
		/* NOTE(review): this traces the GP core on PP-job completion while
		 * mali_group_complete_gp traces the PP core — looks swapped but is
		 * kept byte-identical here; confirm against upstream intent. */
		if (group->gp_core) {
			trace_gpu_sched_switch(
				mali_gp_core_description(group->gp_core),
				sched_clock(), 0, 0, 0);
		}
#endif

	}

	if (success) {
		/* Only do soft reset for successful jobs, a full recovery
		 * reset will be done for failed jobs. */
		mali_pp_reset_async(group->pp_core);
	}

	/* Hand the job back to the caller and mark the group idle. */
	pp_job_to_return = group->pp_running_job;
	group->pp_running_job = NULL;
	group->is_working = MALI_FALSE;
	*sub_job = group->pp_running_sub_job;

	if (!success) {
		MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
		mali_group_recovery_reset(group);
	} else if (_MALI_OSK_ERR_OK != mali_pp_reset_wait(group->pp_core)) {
		MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n"));
		mali_group_recovery_reset(group);
	}

	return pp_job_to_return;
}
1228
/*
 * Collect the result of a finished (or failed) GP job on this group.
 *
 * Stops the SW timeout timer, reads back performance counters, emits the
 * corresponding profiling events, records the current PLBU heap address on
 * the job, marks the group idle and resets the GP core: a soft reset on
 * success, a full recovery reset on failure or when the soft reset fails.
 *
 * @param group    Group whose GP job has completed; must be working.
 * @param success  MALI_TRUE if the job completed without error.
 * @return The job that was running on the group.
 *
 * Caller must hold the executor lock.
 */
struct mali_gp_job *mali_group_complete_gp(struct mali_group *group, mali_bool success)
{
	struct mali_gp_job *gp_job_to_return;

	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
	MALI_DEBUG_ASSERT_POINTER(group);
	MALI_DEBUG_ASSERT_POINTER(group->gp_core);
	MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
	MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working);

	/* Stop/clear the timeout timer. */
	_mali_osk_timer_del_async(group->timeout_timer);

	if (NULL != group->gp_running_job) {
		mali_gp_update_performance_counters(group->gp_core, group->gp_running_job);

#if defined(CONFIG_MALI400_PROFILING)
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
					      mali_gp_job_get_perf_counter_value0(group->gp_running_job),
					      mali_gp_job_get_perf_counter_value1(group->gp_running_job),
					      mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
					      0, 0);

		/* Report L2 counters only when both counter sources are enabled. */
		if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
		    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
			mali_group_report_l2_cache_counters_per_core(group, 0);
#endif

#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
		/* NOTE(review): traces the PP core on GP-job completion (mirror of
		 * mali_group_complete_pp which traces the GP core) — confirm intent. */
		if (group->pp_core) {
			trace_gpu_sched_switch(
				mali_pp_core_description(group->pp_core),
				sched_clock(), 0, 0, 0);
		}
#endif

#if defined(CONFIG_MALI400_PROFILING)
		trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 0 /* active */, 1 /* GP */, 0 /* core */,
				       mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job));
#endif

		/* Record how far the PLBU heap allocation got — presumably so a
		 * continued/resumed job starts from the right address (confirm). */
		mali_gp_job_set_current_heap_addr(group->gp_running_job,
						  mali_gp_read_plbu_alloc_start_addr(group->gp_core));
	}

	if (success) {
		/* Only do soft reset for successful jobs, a full recovery
		 * reset will be done for failed jobs. */
		mali_gp_reset_async(group->gp_core);
	}

	/* Hand the job back to the caller and mark the group idle. */
	gp_job_to_return = group->gp_running_job;
	group->gp_running_job = NULL;
	group->is_working = MALI_FALSE;

	if (!success) {
		MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
		mali_group_recovery_reset(group);
	} else if (_MALI_OSK_ERR_OK != mali_gp_reset_wait(group->gp_core)) {
		MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n"));
		mali_group_recovery_reset(group);
	}

	return gp_job_to_return;
}
1294
/*
 * Look up a group in the global group table.
 *
 * @param index  Index into the global group table.
 * @return The registered group, or NULL when index is out of range.
 */
struct mali_group *mali_group_get_glob_group(u32 index)
{
	return (index < mali_global_num_groups) ? mali_global_groups[index] : NULL;
}
1303
/* Return the number of groups registered in the global group table. */
u32 mali_group_get_glob_num_groups(void)
{
	return mali_global_num_groups;
}
1308
/*
 * Point the group's MMU at the page directory of the given session.
 *
 * A full page-directory switch is only performed when the session differs
 * from the one last active on this group, or when an explicit reload is
 * requested; otherwise a TLB zap is sufficient.
 *
 * Caller must hold the executor lock.
 */
static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session, mali_bool is_reload)
{
	mali_bool same_session;

	MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group %s\n",
			     mali_session_get_page_directory(session), session,
			     mali_group_core_description(group)));

	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();

	same_session = (group->session == session) ? MALI_TRUE : MALI_FALSE;

	if (MALI_TRUE == same_session && MALI_TRUE != is_reload) {
		/* Same session as last time and no forced reload: zap the TLB only. */
		MALI_DEBUG_PRINT(4, ("Mali group: Activate existing session 0x%08X on group %s\n",
				     session->page_directory,
				     mali_group_core_description(group)));
		mali_mmu_zap_tlb_without_stall(group->mmu);
	} else {
		/* Different session (or forced reload): switch page directory. */
		MALI_DEBUG_PRINT(5, ("Mali group: Activate session: %08x previous: %08x on group %s\n",
				     session, group->session,
				     mali_group_core_description(group)));
		mali_mmu_activate_page_directory(group->mmu, mali_session_get_page_directory(session));
		group->session = session;
	}
}
1332
/*
 * Full recovery reset of a group after a failed job or failed soft reset.
 *
 * Stops the core's bus, flushes the MMU and clears any pending page fault,
 * then hard-resets the core(s) and finally resets the MMU.  For a virtual
 * group the broadcast unit is temporarily disabled so each member core can
 * be reset individually.  The cached session is dropped so the next job
 * re-activates its page directory.
 *
 * Caller must hold the executor lock.
 */
static void mali_group_recovery_reset(struct mali_group *group)
{
	_mali_osk_errcode_t err;

	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();

	/* Stop cores, bus stop (a group drives either a PP or a GP core) */
	if (NULL != group->pp_core) {
		mali_pp_stop_bus(group->pp_core);
	} else {
		mali_gp_stop_bus(group->gp_core);
	}

	/* Flush MMU and clear page fault (if any) */
	mali_mmu_activate_fault_flush_page_directory(group->mmu);
	mali_mmu_page_fault_done(group->mmu);

	/* Wait for cores to stop bus, then do a hard reset on them */
	if (NULL != group->pp_core) {
		if (mali_group_is_virtual(group)) {
			struct mali_group *child, *temp;

			/* Disable the broadcast unit while we do reset directly on the member cores. */
			mali_bcast_disable(group->bcast_core);

			_MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
				mali_pp_stop_bus_wait(child->pp_core);
				mali_pp_hard_reset(child->pp_core);
			}

			mali_bcast_enable(group->bcast_core);
		} else {
			mali_pp_stop_bus_wait(group->pp_core);
			mali_pp_hard_reset(group->pp_core);
		}
	} else {
		mali_gp_stop_bus_wait(group->gp_core);
		mali_gp_hard_reset(group->gp_core);
	}

	/* Reset MMU */
	err = mali_mmu_reset(group->mmu);
	MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
	MALI_IGNORE(err);

	/* Forget the session so the next job forces a page directory switch. */
	group->session = NULL;
}
1380
1381 #if MALI_STATE_TRACKING
/*
 * Append a human-readable dump of the group's state to buf: group type,
 * state machine state, SW power state, PM domain state, per-core state and
 * the currently running job(s).  Members of a virtual group are dumped
 * recursively.
 *
 * @param group  Group to dump.
 * @param buf    Destination buffer.
 * @param size   Size of buf in bytes.
 * @return Number of characters appended to buf.
 *
 * NOTE(review): n is never clamped against size, so "size - n" relies on
 * _mali_osk_snprintf tolerating a wrapped size once the buffer fills up —
 * confirm that assumption.
 */
u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size)
{
	int n = 0;
	int i;
	struct mali_group *child;
	struct mali_group *temp;

	/* Identify the group flavour first. */
	if (mali_group_is_virtual(group)) {
		n += _mali_osk_snprintf(buf + n, size - n,
					"Virtual PP Group: %p\n", group);
	} else if (mali_group_is_in_virtual(group)) {
		n += _mali_osk_snprintf(buf + n, size - n,
					"Child PP Group: %p\n", group);
	} else if (NULL != group->pp_core) {
		n += _mali_osk_snprintf(buf + n, size - n,
					"Physical PP Group: %p\n", group);
	} else {
		MALI_DEBUG_ASSERT_POINTER(group->gp_core);
		n += _mali_osk_snprintf(buf + n, size - n,
					"GP Group: %p\n", group);
	}

	/* State machine state. */
	switch (group->state) {
	case MALI_GROUP_STATE_INACTIVE:
		n += _mali_osk_snprintf(buf + n, size - n,
					"\tstate: INACTIVE\n");
		break;
	case MALI_GROUP_STATE_ACTIVATION_PENDING:
		n += _mali_osk_snprintf(buf + n, size - n,
					"\tstate: ACTIVATION_PENDING\n");
		break;
	case MALI_GROUP_STATE_ACTIVE:
		n += _mali_osk_snprintf(buf + n, size - n,
					"\tstate: MALI_GROUP_STATE_ACTIVE\n");
		break;
	default:
		n += _mali_osk_snprintf(buf + n, size - n,
					"\tstate: UNKNOWN (%d)\n", group->state);
		MALI_DEBUG_ASSERT(0);
		break;
	}

	/* SW power state and PM domains (group's own, plus any L2 domains). */
	n += _mali_osk_snprintf(buf + n, size - n,
				"\tSW power: %s\n",
				group->power_is_on ? "On" : "Off");

	n += mali_pm_dump_state_domain(group->pm_domain, buf + n, size - n);

	for (i = 0; i < 2; i++) {
		if (NULL != group->l2_cache_core[i]) {
			struct mali_pm_domain *domain;
			domain = mali_l2_cache_get_pm_domain(
					 group->l2_cache_core[i]);
			n += mali_pm_dump_state_domain(domain,
						       buf + n, size - n);
		}
	}

	/* Core state and currently running jobs. */
	if (group->gp_core) {
		n += mali_gp_dump_state(group->gp_core, buf + n, size - n);
		n += _mali_osk_snprintf(buf + n, size - n,
					"\tGP running job: %p\n", group->gp_running_job);
	}

	if (group->pp_core) {
		n += mali_pp_dump_state(group->pp_core, buf + n, size - n);
		n += _mali_osk_snprintf(buf + n, size - n,
					"\tPP running job: %p, subjob %d \n",
					group->pp_running_job,
					group->pp_running_sub_job);
	}

	/* Recurse into members of a virtual group (empty list otherwise). */
	_MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list,
				    struct mali_group, group_list) {
		n += mali_group_dump_state(child, buf + n, size - n);
	}

	return n;
}
1461 #endif
1462
/*
 * MMU interrupt upper half (runs in interrupt context).
 *
 * Wraps the executor's MMU interrupt handling with SW upper-half profiling
 * events whose payload identifies the GP or PP core behind this MMU and
 * carries the MMU raw interrupt status.  With CONFIG_MALI_SHARED_INTERRUPTS
 * the executor lock guards the is-working/power checks around the register
 * reads.
 *
 * @param data  The mali_group registered as IRQ data for this MMU.
 * @return Result of mali_executor_interrupt_mmu().
 */
_mali_osk_errcode_t mali_group_upper_half_mmu(void *data)
{
	struct mali_group *group = (struct mali_group *)data;
	_mali_osk_errcode_t ret;

	MALI_DEBUG_ASSERT_POINTER(group);
	MALI_DEBUG_ASSERT_POINTER(group->mmu);

#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
	mali_executor_lock();
	if (!mali_group_is_working(group)) {
		/* Not working, so nothing to do */
		mali_executor_unlock();
		return _MALI_OSK_ERR_FAULT;
	}
#endif
	/* Emit the start event, tagged with the core type behind this MMU. */
	if (NULL != group->gp_core) {
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
					      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
					      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
					      0, 0, /* No pid and tid for interrupt handler */
					      MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
					      mali_mmu_get_rawstat(group->mmu), 0);
	} else {
		MALI_DEBUG_ASSERT_POINTER(group->pp_core);
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
					      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
					      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
					      0, 0, /* No pid and tid for interrupt handler */
					      MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
						      mali_pp_core_get_id(group->pp_core)),
					      mali_mmu_get_rawstat(group->mmu), 0);
	}
#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
	mali_executor_unlock();
#endif
#endif

	ret = mali_executor_interrupt_mmu(group, MALI_TRUE);

#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
	mali_executor_lock();
	if (!mali_group_is_working(group) && (!mali_group_power_is_on(group))) {
		/* The group completed and nothing else was scheduled on it, so it
		 * has already been powered off: the MMU rawstat register can no
		 * longer be read — report 0xFFFFFFFF instead. */
		if (NULL != group->gp_core) {
			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
						      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
						      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
						      0, 0, /* No pid and tid for interrupt handler */
						      MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
						      0xFFFFFFFF, 0);
		} else {
			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
						      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
						      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
						      0, 0, /* No pid and tid for interrupt handler */
						      MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
							      mali_pp_core_get_id(group->pp_core)),
						      0xFFFFFFFF, 0);
		}

		mali_executor_unlock();
		return ret;
	}
#endif

	/* Emit the matching stop event with the (possibly updated) rawstat. */
	if (NULL != group->gp_core) {
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
					      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
					      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
					      0, 0, /* No pid and tid for interrupt handler */
					      MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
					      mali_mmu_get_rawstat(group->mmu), 0);
	} else {
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
					      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
					      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
					      0, 0, /* No pid and tid for interrupt handler */
					      MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
						      mali_pp_core_get_id(group->pp_core)),
					      mali_mmu_get_rawstat(group->mmu), 0);
	}
#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
	mali_executor_unlock();
#endif
#endif

	return ret;
}
1554
mali_group_bottom_half_mmu(void * data)1555 static void mali_group_bottom_half_mmu(void *data)
1556 {
1557 struct mali_group *group = (struct mali_group *)data;
1558
1559 MALI_DEBUG_ASSERT_POINTER(group);
1560 MALI_DEBUG_ASSERT_POINTER(group->mmu);
1561
1562 if (NULL != group->gp_core) {
1563 _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
1564 MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
1565 MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
1566 0, _mali_osk_get_tid(), /* pid and tid */
1567 MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
1568 mali_mmu_get_rawstat(group->mmu), 0);
1569 } else {
1570 MALI_DEBUG_ASSERT_POINTER(group->pp_core);
1571 _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
1572 MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
1573 MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
1574 0, _mali_osk_get_tid(), /* pid and tid */
1575 MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
1576 mali_pp_core_get_id(group->pp_core)),
1577 mali_mmu_get_rawstat(group->mmu), 0);
1578 }
1579
1580 mali_executor_interrupt_mmu(group, MALI_FALSE);
1581
1582 if (NULL != group->gp_core) {
1583 _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
1584 MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
1585 MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
1586 0, _mali_osk_get_tid(), /* pid and tid */
1587 MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
1588 mali_mmu_get_rawstat(group->mmu), 0);
1589 } else {
1590 _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
1591 MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
1592 MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
1593 0, _mali_osk_get_tid(), /* pid and tid */
1594 MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
1595 mali_pp_core_get_id(group->pp_core)),
1596 mali_mmu_get_rawstat(group->mmu), 0);
1597 }
1598 }
1599
/*
 * GP interrupt upper half (runs in interrupt context).
 *
 * Wraps the executor's GP interrupt handling with SW upper-half profiling
 * events carrying the GP raw interrupt status.  With
 * CONFIG_MALI_SHARED_INTERRUPTS the executor lock guards the
 * is-working/power checks around the register reads.
 *
 * @param data  The mali_group registered as IRQ data for the GP core.
 * @return Result of mali_executor_interrupt_gp().
 */
_mali_osk_errcode_t mali_group_upper_half_gp(void *data)
{
	struct mali_group *group = (struct mali_group *)data;
	_mali_osk_errcode_t ret;

	MALI_DEBUG_ASSERT_POINTER(group);
	MALI_DEBUG_ASSERT_POINTER(group->gp_core);
	MALI_DEBUG_ASSERT_POINTER(group->mmu);

#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
	mali_executor_lock();
	if (!mali_group_is_working(group)) {
		/* Not working, so nothing to do */
		mali_executor_unlock();
		return _MALI_OSK_ERR_FAULT;
	}
#endif
	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
				      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
				      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
				      0, 0, /* No pid and tid for interrupt handler */
				      MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
				      mali_gp_get_rawstat(group->gp_core), 0);

	MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n",
			     mali_gp_get_rawstat(group->gp_core),
			     mali_group_core_description(group)));
#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
	mali_executor_unlock();
#endif
#endif
	ret = mali_executor_interrupt_gp(group, MALI_TRUE);

#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
	mali_executor_lock();
	if (!mali_group_is_working(group) && (!mali_group_power_is_on(group))) {
		/* The group completed and nothing else was scheduled on it, so it
		 * has already been powered off: the GP rawstat register can no
		 * longer be read — report 0xFFFFFFFF instead. */
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
					      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
					      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
					      0, 0, /* No pid and tid for interrupt handler */
					      MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
					      0xFFFFFFFF, 0);
		mali_executor_unlock();
		return ret;
	}
#endif
	/* Emit the matching stop event with the (possibly updated) rawstat. */
	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
				      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
				      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
				      0, 0, /* No pid and tid for interrupt handler */
				      MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
				      mali_gp_get_rawstat(group->gp_core), 0);
#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
	mali_executor_unlock();
#endif
#endif
	return ret;
}
1661
mali_group_bottom_half_gp(void * data)1662 static void mali_group_bottom_half_gp(void *data)
1663 {
1664 struct mali_group *group = (struct mali_group *)data;
1665
1666 MALI_DEBUG_ASSERT_POINTER(group);
1667 MALI_DEBUG_ASSERT_POINTER(group->gp_core);
1668 MALI_DEBUG_ASSERT_POINTER(group->mmu);
1669
1670 _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
1671 MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
1672 MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
1673 0, _mali_osk_get_tid(), /* pid and tid */
1674 MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
1675 mali_gp_get_rawstat(group->gp_core), 0);
1676
1677 mali_executor_interrupt_gp(group, MALI_FALSE);
1678
1679 _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
1680 MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
1681 MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
1682 0, _mali_osk_get_tid(), /* pid and tid */
1683 MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
1684 mali_gp_get_rawstat(group->gp_core), 0);
1685 }
1686
/*
 * Upper half (interrupt context) handler for PP core interrupts.
 *
 * Delegates actual interrupt processing to mali_executor_interrupt_pp()
 * with in_upper_half == MALI_TRUE.  When profiling is enabled, the call is
 * bracketed by SW upper-half START/STOP events carrying the PP RAWSTAT
 * value; pid/tid are 0 because this runs in interrupt context.
 *
 * With CONFIG_MALI_SHARED_INTERRUPTS the interrupt line may fire for a
 * group that is not actually working, so the executor lock is taken to
 * check the group state before touching hardware registers for profiling.
 *
 * @param data Opaque pointer to the struct mali_group that owns the core.
 * @return Result of mali_executor_interrupt_pp(), or _MALI_OSK_ERR_FAULT
 *         if the group was not working (shared-interrupt build only).
 */
_mali_osk_errcode_t mali_group_upper_half_pp(void *data)
{
	struct mali_group *group = (struct mali_group *)data;
	_mali_osk_errcode_t ret;

	MALI_DEBUG_ASSERT_POINTER(group);
	MALI_DEBUG_ASSERT_POINTER(group->pp_core);
	MALI_DEBUG_ASSERT_POINTER(group->mmu);

#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
	mali_executor_lock();
	if (!mali_group_is_working(group)) {
		/* Not working, so nothing to do */
		mali_executor_unlock();
		return _MALI_OSK_ERR_FAULT;
	}
#endif

	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
				      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
				      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
				      0, 0, /* No pid and tid for interrupt handler */
				      MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
					      mali_pp_core_get_id(group->pp_core)),
				      mali_pp_get_rawstat(group->pp_core), 0);

	MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n",
			     mali_pp_get_rawstat(group->pp_core),
			     mali_group_core_description(group)));
#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
	mali_executor_unlock();
#endif
#endif

	ret = mali_executor_interrupt_pp(group, MALI_TRUE);

#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
	mali_executor_lock();
	if (!mali_group_is_working(group) && (!mali_group_power_is_on(group))) {
		/* Group finished its job and was powered off while we were in
		 * the executor call; RAWSTAT cannot be read any more, so emit
		 * the STOP event with a sentinel value instead. */
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
					      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
					      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
					      0, 0, /* No pid and tid for interrupt handler */
					      MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
						      mali_pp_core_get_id(group->pp_core)),
					      0xFFFFFFFF, 0);
		mali_executor_unlock();
		return ret;
	}
#endif
	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
				      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
				      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
				      0, 0, /* No pid and tid for interrupt handler */
				      MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
					      mali_pp_core_get_id(group->pp_core)),
				      mali_pp_get_rawstat(group->pp_core), 0);
#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
	mali_executor_unlock();
#endif
#endif
	return ret;
}
1753
mali_group_bottom_half_pp(void * data)1754 static void mali_group_bottom_half_pp(void *data)
1755 {
1756 struct mali_group *group = (struct mali_group *)data;
1757
1758 MALI_DEBUG_ASSERT_POINTER(group);
1759 MALI_DEBUG_ASSERT_POINTER(group->pp_core);
1760 MALI_DEBUG_ASSERT_POINTER(group->mmu);
1761
1762 _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
1763 MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
1764 MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
1765 0, _mali_osk_get_tid(), /* pid and tid */
1766 MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
1767 mali_pp_core_get_id(group->pp_core)),
1768 mali_pp_get_rawstat(group->pp_core), 0);
1769
1770 mali_executor_interrupt_pp(group, MALI_FALSE);
1771
1772 _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
1773 MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
1774 MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
1775 0, _mali_osk_get_tid(), /* pid and tid */
1776 MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
1777 mali_pp_core_get_id(group->pp_core)),
1778 mali_pp_get_rawstat(group->pp_core), 0);
1779 }
1780
mali_group_timeout(void * data)1781 static void mali_group_timeout(void *data)
1782 {
1783 struct mali_group *group = (struct mali_group *)data;
1784 MALI_DEBUG_ASSERT_POINTER(group);
1785
1786 MALI_DEBUG_PRINT(2, ("Group: timeout handler for %s at %u\n",
1787 mali_group_core_description(group),
1788 _mali_osk_time_tickcount()));
1789
1790 if (NULL != group->gp_core) {
1791 mali_group_schedule_bottom_half_gp(group);
1792 } else {
1793 MALI_DEBUG_ASSERT_POINTER(group->pp_core);
1794 mali_group_schedule_bottom_half_pp(group);
1795 }
1796 }
1797
mali_group_zap_session(struct mali_group * group,struct mali_session_data * session)1798 mali_bool mali_group_zap_session(struct mali_group *group,
1799 struct mali_session_data *session)
1800 {
1801 MALI_DEBUG_ASSERT_POINTER(group);
1802 MALI_DEBUG_ASSERT_POINTER(session);
1803 MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
1804
1805 if (group->session != session) {
1806 /* not running from this session */
1807 return MALI_TRUE; /* success */
1808 }
1809
1810 if (group->is_working) {
1811 /* The Zap also does the stall and disable_stall */
1812 mali_bool zap_success = mali_mmu_zap_tlb(group->mmu);
1813 return zap_success;
1814 } else {
1815 /* Just remove the session instead of zapping */
1816 mali_group_clear_session(group);
1817 return MALI_TRUE; /* success */
1818 }
1819 }
1820
1821 #if defined(CONFIG_MALI400_PROFILING)
/*
 * Emit a profiling event with the two L2 cache performance counters that
 * belong to L2 cache @core_num (0, 1 or 2).
 *
 * Must be called with the executor lock held.  L2 core 0 is always the
 * group's first cache; cores 1 and 2 are located by matching the cache id
 * against the group's two cache slots.  An unknown core number is reported
 * on the L20 channel with zeroed counters.
 */
static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num)
{
	u32 source0 = 0;
	u32 value0 = 0;
	u32 source1 = 0;
	u32 value1 = 0;
	u32 profiling_channel;

	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();

	/* Pick the GPU profiling channel for this L2 core. */
	switch (core_num) {
	case 1:
		profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
				    MALI_PROFILING_EVENT_CHANNEL_GPU |
				    MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS;
		break;
	case 2:
		profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
				    MALI_PROFILING_EVENT_CHANNEL_GPU |
				    MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS;
		break;
	case 0:
	default:
		profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
				    MALI_PROFILING_EVENT_CHANNEL_GPU |
				    MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
		break;
	}

	if (0 == core_num) {
		/* L2 core 0 is always the first cache in the group. */
		mali_l2_cache_core_get_counter_values(group->l2_cache_core[0],
						      &source0, &value0,
						      &source1, &value1);
	} else if (1 == core_num || 2 == core_num) {
		/* Cores 1 and 2 may sit in either cache slot; match by id. */
		u32 i;
		for (i = 0; i < 2; i++) {
			if (core_num == mali_l2_cache_get_id(group->l2_cache_core[i])) {
				mali_l2_cache_core_get_counter_values(group->l2_cache_core[i],
								      &source0, &value0,
								      &source1, &value1);
				break;
			}
		}
	}

	_mali_osk_profiling_add_event(profiling_channel,
				      (source1 << 8) | source0,
				      value0, value1, 0, 0);
}
1875 #endif /* #if defined(CONFIG_MALI400_PROFILING) */
1876