xref: /OK3568_Linux_fs/kernel/drivers/gpu/arm/mali400/mali/common/mali_executor.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /*
2  * Copyright (C) 2012-2017 ARM Limited. All rights reserved.
3  *
4  * This program is free software and is provided to you under the terms of the GNU General Public License version 2
5  * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
6  *
7  * A copy of the licence is included with the program, and can also be obtained from Free Software
8  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
9  */
10 
11 #include "mali_executor.h"
12 #include "mali_scheduler.h"
13 #include "mali_kernel_common.h"
14 #include "mali_kernel_core.h"
15 #include "mali_osk.h"
16 #include "mali_osk_list.h"
17 #include "mali_pp.h"
18 #include "mali_pp_job.h"
19 #include "mali_group.h"
20 #include "mali_pm.h"
21 #include "mali_timeline.h"
22 #include "mali_osk_profiling.h"
23 #include "mali_session.h"
24 #include "mali_osk_mali.h"
25 
26 /*Add for voltage scan function*/
27 extern u32 mali_group_error;
28 
29 /*
30  * If dma_buf with map on demand is used, we defer job deletion and job queue
31  * if in atomic context, since both might sleep.
32  */
33 #if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
34 #define MALI_EXECUTOR_USE_DEFERRED_PP_JOB_DELETE 1
35 #define MALI_EXECUTOR_USE_DEFERRED_PP_JOB_QUEUE 1
#endif /* defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) */
37 
38 /*
39  * ---------- static type definitions (structs, enums, etc) ----------
40  */
41 
/* Scheduling state for a group (GP group, physical PP group or virtual group). */
enum mali_executor_state_t {
	EXEC_STATE_NOT_PRESENT, /* Virtual group on Mali-300/400 (do not use) */
	EXEC_STATE_DISABLED,    /* Disabled by core scaling (do not use) */
	EXEC_STATE_EMPTY,       /* No child groups for virtual group (do not use) */
	EXEC_STATE_INACTIVE,    /* Can be used, but must be activated first */
	EXEC_STATE_IDLE,        /* Active and ready to be used */
	EXEC_STATE_WORKING,     /* Executing a job */
};
50 
51 /*
52  * ---------- global variables (exported due to inline functions) ----------
53  */
54 
55 /* Lock for this module (protecting all HW access except L2 caches) */
56 _mali_osk_spinlock_irq_t *mali_executor_lock_obj = NULL;
57 
58 mali_bool mali_executor_hints[MALI_EXECUTOR_HINT_MAX];
59 
60 /*
61  * ---------- static variables ----------
62  */
63 
64 /* Used to defer job scheduling */
65 static _mali_osk_wq_work_t *executor_wq_high_pri = NULL;
66 
67 /* Store version from GP and PP (user space wants to know this) */
68 static u32 pp_version = 0;
69 static u32 gp_version = 0;
70 
71 /* List of physical PP groups which are disabled by some external source */
72 static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_disabled);
73 static u32 group_list_disabled_count = 0;
74 
/* List of groups which can be used, but must be activated first */
76 static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_inactive);
77 static u32 group_list_inactive_count = 0;
78 
79 /* List of groups which are active and ready to be used */
80 static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_idle);
81 static u32 group_list_idle_count = 0;
82 
83 /* List of groups which are executing a job */
84 static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_working);
85 static u32 group_list_working_count = 0;
86 
87 /* Virtual group (if any) */
88 static struct mali_group *virtual_group = NULL;
89 
90 /* Virtual group state is tracked with a state variable instead of 4 lists */
91 static enum mali_executor_state_t virtual_group_state = EXEC_STATE_NOT_PRESENT;
92 
93 /* GP group */
94 static struct mali_group *gp_group = NULL;
95 
96 /* GP group state is tracked with a state variable instead of 4 lists */
97 static enum mali_executor_state_t gp_group_state = EXEC_STATE_NOT_PRESENT;
98 
99 static u32 gp_returned_cookie = 0;
100 
101 /* Total number of physical PP cores present */
102 static u32 num_physical_pp_cores_total = 0;
103 
104 /* Number of physical cores which are enabled */
105 static u32 num_physical_pp_cores_enabled = 0;
106 
107 /* Enable or disable core scaling */
108 static mali_bool core_scaling_enabled = MALI_TRUE;
109 
110 /* Variables to allow safe pausing of the scheduler */
111 static _mali_osk_wait_queue_t *executor_working_wait_queue = NULL;
112 static u32 pause_count = 0;
113 
/* PP cores that could not be enabled yet, because some other PP cores have not finished disabling. */
115 static int core_scaling_delay_up_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
116 
117 /* Variables used to implement notify pp core changes to userspace when core scaling
118  * is finished in mali_executor_complete_group() function. */
119 static _mali_osk_wq_work_t *executor_wq_notify_core_change = NULL;
120 static _mali_osk_wait_queue_t *executor_notify_core_change_wait_queue = NULL;
121 
122 /*
123  * ---------- Forward declaration of static functions ----------
124  */
125 static mali_bool mali_executor_is_suspended(void *data);
126 static mali_bool mali_executor_is_working(void);
127 static void mali_executor_disable_empty_virtual(void);
128 static mali_bool mali_executor_physical_rejoin_virtual(struct mali_group *group);
129 static mali_bool mali_executor_has_virtual_group(void);
130 static mali_bool mali_executor_virtual_group_is_usable(void);
131 static void mali_executor_schedule(void);
132 static void mali_executor_wq_schedule(void *arg);
133 static void mali_executor_send_gp_oom_to_user(struct mali_gp_job *job);
134 static void mali_executor_complete_group(struct mali_group *group,
135 		mali_bool success,
136 		struct mali_gp_job **gp_job_done,
137 		struct mali_pp_job **pp_job_done);
138 static void mali_executor_change_state_pp_physical(struct mali_group *group,
139 		_mali_osk_list_t *old_list,
140 		u32 *old_count,
141 		_mali_osk_list_t *new_list,
142 		u32 *new_count);
143 static mali_bool mali_executor_group_is_in_state(struct mali_group *group,
144 		enum mali_executor_state_t state);
145 
146 static void mali_executor_group_enable_internal(struct mali_group *group);
147 static void mali_executor_group_disable_internal(struct mali_group *group);
148 static void mali_executor_core_scale(unsigned int target_core_nr);
149 static void mali_executor_core_scale_in_group_complete(struct mali_group *group);
150 static void mali_executor_notify_core_change(u32 num_cores);
151 static void mali_executor_wq_notify_core_change(void *arg);
152 static void mali_executor_change_group_status_disabled(struct mali_group *group);
153 static mali_bool mali_executor_deactivate_list_idle(mali_bool deactivate_idle_group);
154 static void mali_executor_set_state_pp_physical(struct mali_group *group,
155 		_mali_osk_list_t *new_list,
156 		u32 *new_count);
157 
158 /*
159  * ---------- Actual implementation ----------
160  */
161 
mali_executor_initialize(void)162 _mali_osk_errcode_t mali_executor_initialize(void)
163 {
164 	mali_executor_lock_obj = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_EXECUTOR);
165 	if (NULL == mali_executor_lock_obj) {
166 		mali_executor_terminate();
167 		return _MALI_OSK_ERR_NOMEM;
168 	}
169 
170 	executor_wq_high_pri = _mali_osk_wq_create_work_high_pri(mali_executor_wq_schedule, NULL);
171 	if (NULL == executor_wq_high_pri) {
172 		mali_executor_terminate();
173 		return _MALI_OSK_ERR_NOMEM;
174 	}
175 
176 	executor_working_wait_queue = _mali_osk_wait_queue_init();
177 	if (NULL == executor_working_wait_queue) {
178 		mali_executor_terminate();
179 		return _MALI_OSK_ERR_NOMEM;
180 	}
181 
182 	executor_wq_notify_core_change = _mali_osk_wq_create_work(mali_executor_wq_notify_core_change, NULL);
183 	if (NULL == executor_wq_notify_core_change) {
184 		mali_executor_terminate();
185 		return _MALI_OSK_ERR_NOMEM;
186 	}
187 
188 	executor_notify_core_change_wait_queue = _mali_osk_wait_queue_init();
189 	if (NULL == executor_notify_core_change_wait_queue) {
190 		mali_executor_terminate();
191 		return _MALI_OSK_ERR_NOMEM;
192 	}
193 
194 	return _MALI_OSK_ERR_OK;
195 }
196 
/*
 * Release all executor resources created by mali_executor_initialize(),
 * in reverse order of creation. Safe to call with a partially
 * initialized module: every resource is NULL-checked before release
 * and reset to NULL afterwards.
 */
void mali_executor_terminate(void)
{
	if (NULL != executor_notify_core_change_wait_queue) {
		_mali_osk_wait_queue_term(executor_notify_core_change_wait_queue);
		executor_notify_core_change_wait_queue = NULL;
	}

	if (NULL != executor_wq_notify_core_change) {
		_mali_osk_wq_delete_work(executor_wq_notify_core_change);
		executor_wq_notify_core_change = NULL;
	}

	if (NULL != executor_working_wait_queue) {
		_mali_osk_wait_queue_term(executor_working_wait_queue);
		executor_working_wait_queue = NULL;
	}

	if (NULL != executor_wq_high_pri) {
		_mali_osk_wq_delete_work(executor_wq_high_pri);
		executor_wq_high_pri = NULL;
	}

	if (NULL != mali_executor_lock_obj) {
		_mali_osk_spinlock_irq_term(mali_executor_lock_obj);
		mali_executor_lock_obj = NULL;
	}
}
224 
/*
 * Register all globally created groups with the executor.
 *
 * Detects the (optional) virtual group, the GP group and all physical
 * PP groups. Records the GP and PP core versions from the first core
 * of each kind (user space queries these later). Physical PP groups
 * join the virtual group when one exists, otherwise they go on the
 * inactive list. Called once during driver initialization.
 */
void mali_executor_populate(void)
{
	u32 num_groups;
	u32 i;

	num_groups = mali_group_get_glob_num_groups();

	/* Do we have a virtual group? */
	for (i = 0; i < num_groups; i++) {
		struct mali_group *group = mali_group_get_glob_group(i);

		if (mali_group_is_virtual(group)) {
			virtual_group = group;
			virtual_group_state = EXEC_STATE_INACTIVE;
			break;
		}
	}

	/* Find all the available physical GP and PP cores */
	for (i = 0; i < num_groups; i++) {
		struct mali_group *group = mali_group_get_glob_group(i);

		if (NULL != group) {
			struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
			struct mali_gp_core *gp_core = mali_group_get_gp_core(group);

			if (!mali_group_is_virtual(group)) {
				if (NULL != pp_core) {
					if (0 == pp_version) {
						/* Retrieve PP version from the first available PP core */
						pp_version = mali_pp_core_get_version(pp_core);
					}

					if (NULL != virtual_group) {
						/* Physical PP groups are owned by the virtual group when one exists */
						mali_executor_lock();
						mali_group_add_group(virtual_group, group);
						mali_executor_unlock();
					} else {
						_mali_osk_list_add(&group->executor_list, &group_list_inactive);
						group_list_inactive_count++;
					}

					num_physical_pp_cores_total++;
				} else {
					/* A non-virtual group without a PP core must be the GP group */
					MALI_DEBUG_ASSERT_POINTER(gp_core);

					if (0 == gp_version) {
						/* Retrieve GP version */
						gp_version = mali_gp_core_get_version(gp_core);
					}

					gp_group = group;
					gp_group_state = EXEC_STATE_INACTIVE;
				}

			}
		}
	}

	/* All discovered physical PP cores start out enabled */
	num_physical_pp_cores_enabled = num_physical_pp_cores_total;
}
286 
/*
 * Delete every group owned by the executor (GP, virtual and all
 * physical PP groups on the idle/inactive/disabled lists).
 * Must only be called during driver termination, when no group is
 * executing a job — asserted below.
 */
void mali_executor_depopulate(void)
{
	struct mali_group *group;
	struct mali_group *temp;

	MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != gp_group_state);

	if (NULL != gp_group) {
		mali_group_delete(gp_group);
		gp_group = NULL;
	}

	MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != virtual_group_state);

	if (NULL != virtual_group) {
		mali_group_delete(virtual_group);
		virtual_group = NULL;
	}

	/* No group may be working at this point */
	MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));

	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) {
		mali_group_delete(group);
	}

	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) {
		mali_group_delete(group);
	}

	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) {
		mali_group_delete(group);
	}
}
320 
/*
 * Pause job scheduling and wait until no group is working, then
 * deactivate all groups so the SW state is consistent with the HW
 * state before the GPU is powered down (OS suspend).
 * Must be balanced by a later call to mali_executor_resume().
 */
void mali_executor_suspend(void)
{
	mali_executor_lock();

	/* Increment the pause_count so that no more jobs will be scheduled */
	pause_count++;

	mali_executor_unlock();

	/* Block until every group has finished its current job */
	_mali_osk_wait_queue_wait_event(executor_working_wait_queue,
					mali_executor_is_suspended, NULL);

	/*
	 * mali_executor_complete_XX() leaves jobs in idle state.
	 * deactivate option is used when we are going to power down
	 * the entire GPU (OS suspend) and want a consistent SW vs HW
	 * state.
	 */
	mali_executor_lock();

	mali_executor_deactivate_list_idle(MALI_TRUE);

	/*
	 * The following steps are used to deactivate all activated
	 * (MALI_GROUP_STATE_ACTIVE) and activating (MALI_GROUP
	 * _STATE_ACTIVATION_PENDING) groups, to make sure the variable
	 * pd_mask_wanted is equal with 0. */
	if (MALI_GROUP_STATE_INACTIVE != mali_group_get_state(gp_group)) {
		gp_group_state = EXEC_STATE_INACTIVE;
		mali_group_deactivate(gp_group);
	}

	if (mali_executor_has_virtual_group()) {
		if (MALI_GROUP_STATE_INACTIVE
		    != mali_group_get_state(virtual_group)) {
			virtual_group_state = EXEC_STATE_INACTIVE;
			mali_group_deactivate(virtual_group);
		}
	}

	if (0 < group_list_inactive_count) {
		struct mali_group *group;
		struct mali_group *temp;

		_MALI_OSK_LIST_FOREACHENTRY(group, temp,
					    &group_list_inactive,
					    struct mali_group, executor_list) {
			if (MALI_GROUP_STATE_ACTIVATION_PENDING
			    == mali_group_get_state(group)) {
				mali_group_deactivate(group);
			}

			/*
			 * On mali-450 platform, we may have physical group in the group inactive
			 * list, and its state is MALI_GROUP_STATE_ACTIVATION_PENDING, so we only
			 * deactivate it is not enough, we still also need add it back to virtual group.
			 * And now, virtual group must be in INACTIVE state, so it's safe to add
			 * physical group to virtual group at this point.
			 */
			if (NULL != virtual_group) {
				_mali_osk_list_delinit(&group->executor_list);
				group_list_inactive_count--;

				mali_group_add_group(virtual_group, group);
			}
		}
	}

	mali_executor_unlock();
}
391 
mali_executor_resume(void)392 void mali_executor_resume(void)
393 {
394 	mali_executor_lock();
395 
396 	/* Decrement pause_count to allow scheduling again (if it reaches 0) */
397 	pause_count--;
398 	if (0 == pause_count) {
399 		mali_executor_schedule();
400 	}
401 
402 	mali_executor_unlock();
403 }
404 
/* Return the total number of physical PP cores found by mali_executor_populate(). */
u32 mali_executor_get_num_cores_total(void)
{
	return num_physical_pp_cores_total;
}
409 
/* Return the number of physical PP cores currently enabled (core scaling may disable some). */
u32 mali_executor_get_num_cores_enabled(void)
{
	return num_physical_pp_cores_enabled;
}
414 
/*
 * Return the PP core of the virtual group.
 * Must only be called when a virtual group exists (debug-asserted).
 */
struct mali_pp_core *mali_executor_get_virtual_pp(void)
{
	MALI_DEBUG_ASSERT_POINTER(virtual_group);
	MALI_DEBUG_ASSERT_POINTER(virtual_group->pp_core);
	return virtual_group->pp_core;
}
421 
/* Return the virtual group, or NULL if the GPU has none. */
struct mali_group *mali_executor_get_virtual_group(void)
{
	return virtual_group;
}
426 
/*
 * Zap (invalidate) the MMU mappings of the given session on every
 * active group (GP, virtual and all physically working groups).
 * If a group cannot be zapped because it sits in an unhandled page
 * fault, its running job is force-completed (as failed) instead, so
 * that all outstanding MMU transactions are finished before return.
 * The caller may then safely remove the session's physical pages.
 */
void mali_executor_zap_all_active(struct mali_session_data *session)
{
	struct mali_group *group;
	struct mali_group *temp;
	mali_bool ret;

	mali_executor_lock();

	/*
	 * This function is a bit complicated because
	 * mali_group_zap_session() can fail. This only happens because the
	 * group is in an unhandled page fault status.
	 * We need to make sure this page fault is handled before we return,
	 * so that we know every single outstanding MMU transactions have
	 * completed. This will allow caller to safely remove physical pages
	 * when we have returned.
	 */

	MALI_DEBUG_ASSERT(NULL != gp_group);
	ret = mali_group_zap_session(gp_group, session);
	if (MALI_FALSE == ret) {
		struct mali_gp_job *gp_job = NULL;

		/* Zap failed: force-complete the running GP job as failed */
		mali_executor_complete_group(gp_group, MALI_FALSE, &gp_job, NULL);

		MALI_DEBUG_ASSERT_POINTER(gp_job);

		/* GP job completed, make sure it is freed */
		mali_scheduler_complete_gp_job(gp_job, MALI_FALSE,
					       MALI_TRUE, MALI_TRUE);
	}

	if (mali_executor_has_virtual_group()) {
		ret = mali_group_zap_session(virtual_group, session);
		if (MALI_FALSE == ret) {
			struct mali_pp_job *pp_job = NULL;

			mali_executor_complete_group(virtual_group, MALI_FALSE, NULL, &pp_job);

			if (NULL != pp_job) {
				/* PP job completed, make sure it is freed */
				mali_scheduler_complete_pp_job(pp_job, 0,
							       MALI_FALSE, MALI_TRUE);
			}
		}
	}

	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working,
				    struct mali_group, executor_list) {
		ret = mali_group_zap_session(group, session);
		if (MALI_FALSE == ret) {
			/*
			 * NOTE(review): the zap is retried once here before the
			 * group is force-completed — presumably a deliberate
			 * second attempt after the first failure, but confirm
			 * against the upstream ARM driver that this duplicated
			 * call is intentional and not a merge artifact.
			 */
			ret = mali_group_zap_session(group, session);
			if (MALI_FALSE == ret) {
				struct mali_pp_job *pp_job = NULL;

				mali_executor_complete_group(group, MALI_FALSE, NULL, &pp_job);

				if (NULL != pp_job) {
					/* PP job completed, free it */
					mali_scheduler_complete_pp_job(pp_job,
								       0, MALI_FALSE,
								       MALI_TRUE);
				}
			}
		}
	}

	mali_executor_unlock();
}
496 
/*
 * Trigger scheduling if the scheduler mask indicates there is work.
 * With deferred_schedule set, the work is handed to the high priority
 * work queue (needed from atomic context); otherwise scheduling runs
 * directly in the calling thread under the executor lock.
 */
void mali_executor_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule)
{
	if (MALI_SCHEDULER_MASK_EMPTY == mask) {
		/* Nothing to schedule */
		return;
	}

	if (MALI_TRUE == deferred_schedule) {
		/* Defer to the high priority work queue */
		_mali_osk_wq_schedule_work_high_pri(executor_wq_high_pri);
	} else {
		/* Schedule from this thread */
		mali_executor_lock();
		mali_executor_schedule();
		mali_executor_unlock();
	}
}
510 
/*
 * Handle a GP interrupt (or GP job timeout) for the given group.
 *
 * Called from both the upper (IRQ) half and the bottom half worker;
 * error results seen in the upper half are deferred to the bottom half.
 * Partial completions (VS or PLBU done while the other is still
 * running) and PLBU out-of-memory are handled without completing the
 * job.
 *
 * Returns _MALI_OSK_ERR_OK if the interrupt was consumed here, or
 * _MALI_OSK_ERR_FAULT if there was nothing for this handler to do
 * (group not working / no interrupt pending).
 */
_mali_osk_errcode_t mali_executor_interrupt_gp(struct mali_group *group,
		mali_bool in_upper_half)
{
	enum mali_interrupt_result int_result;
	mali_bool time_out = MALI_FALSE;

	MALI_DEBUG_PRINT(4, ("Executor: GP interrupt from %s in %s half\n",
			     mali_group_core_description(group),
			     in_upper_half ? "upper" : "bottom"));

	mali_executor_lock();
	if (!mali_group_is_working(group)) {
		/* Not working, so nothing to do */
		mali_executor_unlock();
		return _MALI_OSK_ERR_FAULT;
	}

	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
	MALI_DEBUG_ASSERT(mali_group_is_working(group));

	if (mali_group_has_timed_out(group)) {
		/* A timed-out job is treated as an error completion */
		int_result = MALI_INTERRUPT_RESULT_ERROR;
		time_out = MALI_TRUE;
		MALI_PRINT(("Executor GP: Job %d Timeout on %s\n",
			    mali_gp_job_get_id(group->gp_running_job),
			    mali_group_core_description(group)));
	} else {
		int_result = mali_group_get_interrupt_result_gp(group);
		if (MALI_INTERRUPT_RESULT_NONE == int_result) {
			mali_executor_unlock();
			return _MALI_OSK_ERR_FAULT;
		}
	}

#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
	if (MALI_INTERRUPT_RESULT_NONE == int_result) {
		/* No interrupts signalled, so nothing to do */
		mali_executor_unlock();
		return _MALI_OSK_ERR_FAULT;
	}
#else
	MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_NONE != int_result);
#endif

	mali_group_mask_all_interrupts_gp(group);

	if (MALI_INTERRUPT_RESULT_SUCCESS_VS == int_result) {
		if (mali_group_gp_is_active(group)) {
			/* Only VS completed so far, while PLBU is still active */

			/* Enable all but the current interrupt */
			mali_group_enable_interrupts_gp(group, int_result);

			mali_executor_unlock();
			return _MALI_OSK_ERR_OK;
		}
	} else if (MALI_INTERRUPT_RESULT_SUCCESS_PLBU == int_result) {
		if (mali_group_gp_is_active(group)) {
			/* Only PLBU completed so far, while VS is still active */

			/* Enable all but the current interrupt */
			mali_group_enable_interrupts_gp(group, int_result);

			mali_executor_unlock();
			return _MALI_OSK_ERR_OK;
		}
	} else if (MALI_INTERRUPT_RESULT_OOM == int_result) {
		struct mali_gp_job *job = mali_group_get_running_gp_job(group);

		/* PLBU out of mem */
		MALI_DEBUG_PRINT(3, ("Executor: PLBU needs more heap memory\n"));

#if defined(CONFIG_MALI400_PROFILING)
		/* Give group a chance to generate a SUSPEND event */
		mali_group_oom(group);
#endif

		/*
		 * no need to hold interrupt raised while
		 * waiting for more memory.
		 */
		mali_executor_send_gp_oom_to_user(job);

		mali_executor_unlock();

		return _MALI_OSK_ERR_OK;
	}

	/*Add for voltage scan function*/
	if (MALI_INTERRUPT_RESULT_ERROR == int_result)
		mali_group_error++;

	/* We should now have a real interrupt to handle */

	MALI_DEBUG_PRINT(4, ("Executor: Group %s completed with %s\n",
			     mali_group_core_description(group),
			     (MALI_INTERRUPT_RESULT_ERROR == int_result) ?
			     "ERROR" : "success"));

	if (in_upper_half && MALI_INTERRUPT_RESULT_ERROR == int_result) {
		/* Don't bother to do processing of errors in upper half */
		mali_executor_unlock();

		/* Timeouts are already driven from the timeout path, so
		 * only real HW errors are deferred to the bottom half. */
		if (MALI_FALSE == time_out) {
			mali_group_schedule_bottom_half_gp(group);
		}
	} else {
		struct mali_gp_job *job;
		mali_bool success;

		/*
		if (MALI_TRUE == time_out) {
			mali_group_dump_status(group);
		}
		*/

		success = (int_result != MALI_INTERRUPT_RESULT_ERROR) ?
			  MALI_TRUE : MALI_FALSE;

		/* Detach the job from the group and schedule new work */
		mali_executor_complete_group(group, success, &job, NULL);

		mali_executor_unlock();

		/* GP jobs always fully complete */
		MALI_DEBUG_ASSERT(NULL != job);

		/* This will notify user space and close the job object */
		mali_scheduler_complete_gp_job(job, success,
					       MALI_TRUE, MALI_TRUE);
	}

	return _MALI_OSK_ERR_OK;
}
644 
/*
 * Handle a PP interrupt (or PP job timeout) for the given group.
 *
 * Called from both the upper (IRQ) half and the bottom half worker;
 * error results seen in the upper half are deferred to the bottom half.
 * Child groups inside a virtual group never handle their own PP
 * interrupts — the virtual group does.
 *
 * Returns _MALI_OSK_ERR_OK if the interrupt was consumed here, or
 * _MALI_OSK_ERR_FAULT if there was nothing for this handler to do.
 */
_mali_osk_errcode_t mali_executor_interrupt_pp(struct mali_group *group,
		mali_bool in_upper_half)
{
	enum mali_interrupt_result int_result;
	mali_bool time_out = MALI_FALSE;

	MALI_DEBUG_PRINT(4, ("Executor: PP interrupt from %s in %s half\n",
			     mali_group_core_description(group),
			     in_upper_half ? "upper" : "bottom"));

	mali_executor_lock();

	if (!mali_group_is_working(group)) {
		/* Not working, so nothing to do */
		mali_executor_unlock();
		return _MALI_OSK_ERR_FAULT;
	}

	if (in_upper_half) {
		if (mali_group_is_in_virtual(group)) {
			/* Child groups should never handle PP interrupts */
			MALI_DEBUG_ASSERT(!mali_group_has_timed_out(group));
			mali_executor_unlock();
			return _MALI_OSK_ERR_FAULT;
		}
	}
	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
	MALI_DEBUG_ASSERT(mali_group_is_working(group));
	MALI_DEBUG_ASSERT(!mali_group_is_in_virtual(group));

	if (mali_group_has_timed_out(group)) {
		/* A timed-out job is treated as an error completion */
		int_result = MALI_INTERRUPT_RESULT_ERROR;
		time_out = MALI_TRUE;
		MALI_PRINT(("Executor PP: Job %d Timeout on %s\n",
			    mali_pp_job_get_id(group->pp_running_job),
			    mali_group_core_description(group)));
	} else {
		int_result = mali_group_get_interrupt_result_pp(group);
		if (MALI_INTERRUPT_RESULT_NONE == int_result) {
			mali_executor_unlock();
			return _MALI_OSK_ERR_FAULT;
		}
	}

#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
	if (MALI_INTERRUPT_RESULT_NONE == int_result) {
		/* No interrupts signalled, so nothing to do */
		mali_executor_unlock();
		return _MALI_OSK_ERR_FAULT;
	} else if (MALI_INTERRUPT_RESULT_SUCCESS == int_result) {
		if (mali_group_is_virtual(group) && mali_group_pp_is_active(group)) {
			/* Some child groups are still working, so nothing to do right now */
			mali_executor_unlock();
			return _MALI_OSK_ERR_FAULT;
		}
	}
#else
	MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_NONE != int_result);
#endif

	/*Add voltage scan function*/

	if (MALI_INTERRUPT_RESULT_ERROR == int_result)
		mali_group_error++;

	/* We should now have a real interrupt to handle */

	MALI_DEBUG_PRINT(4, ("Executor: Group %s completed with %s\n",
			     mali_group_core_description(group),
			     (MALI_INTERRUPT_RESULT_ERROR == int_result) ?
			     "ERROR" : "success"));

	if (in_upper_half && MALI_INTERRUPT_RESULT_ERROR == int_result) {
		/* Don't bother to do processing of errors in upper half */
		mali_group_mask_all_interrupts_pp(group);
		mali_executor_unlock();

		/* Timeouts are already driven from the timeout path, so
		 * only real HW errors are deferred to the bottom half. */
		if (MALI_FALSE == time_out) {
			mali_group_schedule_bottom_half_pp(group);
		}
	} else {
		struct mali_pp_job *job = NULL;
		mali_bool success;

		if (MALI_TRUE == time_out) {
			mali_group_dump_status(group);
		}

		success = (int_result == MALI_INTERRUPT_RESULT_SUCCESS) ?
			  MALI_TRUE : MALI_FALSE;

		/* Detach the job from the group and schedule new work */
		mali_executor_complete_group(group, success, NULL, &job);

		mali_executor_unlock();

		/* job is non-NULL only when the whole PP job finished
		 * (a physical group may complete just one sub-job) */
		if (NULL != job) {
			/* Notify user space and close the job object */
			mali_scheduler_complete_pp_job(job,
						       num_physical_pp_cores_total,
						       MALI_TRUE, MALI_TRUE);
		}
	}

	return _MALI_OSK_ERR_OK;
}
750 
mali_executor_interrupt_mmu(struct mali_group * group,mali_bool in_upper_half)751 _mali_osk_errcode_t mali_executor_interrupt_mmu(struct mali_group *group,
752 		mali_bool in_upper_half)
753 {
754 	enum mali_interrupt_result int_result;
755 
756 	MALI_DEBUG_PRINT(4, ("Executor: MMU interrupt from %s in %s half\n",
757 			     mali_group_core_description(group),
758 			     in_upper_half ? "upper" : "bottom"));
759 
760 	mali_executor_lock();
761 	if (!mali_group_is_working(group)) {
762 		/* Not working, so nothing to do */
763 		mali_executor_unlock();
764 		return _MALI_OSK_ERR_FAULT;
765 	}
766 
767 	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
768 	MALI_DEBUG_ASSERT(mali_group_is_working(group));
769 
770 	int_result = mali_group_get_interrupt_result_mmu(group);
771 	if (MALI_INTERRUPT_RESULT_NONE == int_result) {
772 		mali_executor_unlock();
773 		return _MALI_OSK_ERR_FAULT;
774 	}
775 
776 #if defined(CONFIG_MALI_SHARED_INTERRUPTS)
777 	if (MALI_INTERRUPT_RESULT_NONE == int_result) {
778 		/* No interrupts signalled, so nothing to do */
779 		mali_executor_unlock();
780 		return _MALI_OSK_ERR_FAULT;
781 	}
782 #else
783 	MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_ERROR == int_result);
784 #endif
785 
786 	/* We should now have a real interrupt to handle */
787 
788 	if (in_upper_half) {
789 		/* Don't bother to do processing of errors in upper half */
790 
791 		struct mali_group *parent = group->parent_group;
792 
793 		mali_mmu_mask_all_interrupts(group->mmu);
794 
795 		mali_executor_unlock();
796 
797 		if (NULL == parent) {
798 			mali_group_schedule_bottom_half_mmu(group);
799 		} else {
800 			mali_group_schedule_bottom_half_mmu(parent);
801 		}
802 
803 	} else {
804 		struct mali_gp_job *gp_job = NULL;
805 		struct mali_pp_job *pp_job = NULL;
806 
807 #ifdef DEBUG
808 
809 		u32 fault_address = mali_mmu_get_page_fault_addr(group->mmu);
810 		u32 status = mali_mmu_get_status(group->mmu);
811 		MALI_DEBUG_PRINT(2, ("Executor: Mali page fault detected at 0x%x from bus id %d of type %s on %s\n",
812 				     (void *)(uintptr_t)fault_address,
813 				     (status >> 6) & 0x1F,
814 				     (status & 32) ? "write" : "read",
815 				     group->mmu->hw_core.description));
816 		MALI_DEBUG_PRINT(3, ("Executor: MMU rawstat = 0x%08X, MMU status = 0x%08X\n",
817 				     mali_mmu_get_rawstat(group->mmu), status));
818 		mali_mmu_pagedir_diag(mali_session_get_page_directory(group->session), fault_address);
819 #endif
820 
821 		mali_executor_complete_group(group, MALI_FALSE, &gp_job, &pp_job);
822 
823 		mali_executor_unlock();
824 
825 		if (NULL != gp_job) {
826 			MALI_DEBUG_ASSERT(NULL == pp_job);
827 
828 			/* Notify user space and close the job object */
829 			mali_scheduler_complete_gp_job(gp_job, MALI_FALSE,
830 						       MALI_TRUE, MALI_TRUE);
831 		} else if (NULL != pp_job) {
832 			MALI_DEBUG_ASSERT(NULL == gp_job);
833 
834 			/* Notify user space and close the job object */
835 			mali_scheduler_complete_pp_job(pp_job,
836 						       num_physical_pp_cores_total,
837 						       MALI_TRUE, MALI_TRUE);
838 		}
839 	}
840 
841 	return _MALI_OSK_ERR_OK;
842 }
843 
/*
 * Called by PM after the listed groups have been powered on.
 * Finishes any pending activation: moves newly powered groups from the
 * inactive to the idle state (lists for physical PP, state variables
 * for GP and the virtual group) and triggers a schedule so queued jobs
 * can start on the fresh cores.
 */
void mali_executor_group_power_up(struct mali_group *groups[], u32 num_groups)
{
	u32 i;
	mali_bool child_groups_activated = MALI_FALSE;
	mali_bool do_schedule = MALI_FALSE;
#if defined(DEBUG)
	u32 num_activated = 0;
#endif

	MALI_DEBUG_ASSERT_POINTER(groups);
	MALI_DEBUG_ASSERT(0 < num_groups);

	mali_executor_lock();

	MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups\n", num_groups));

	for (i = 0; i < num_groups; i++) {
		MALI_DEBUG_PRINT(3, ("Executor: powering up group %s\n",
				     mali_group_core_description(groups[i])));

		mali_group_power_up(groups[i]);

		/* Only groups with a pending activation that are tracked as
		 * inactive need the state transition below */
		if ((MALI_GROUP_STATE_ACTIVATION_PENDING != mali_group_get_state(groups[i]) ||
		     (MALI_TRUE != mali_executor_group_is_in_state(groups[i], EXEC_STATE_INACTIVE)))) {
			/* nothing more to do for this group */
			continue;
		}

		MALI_DEBUG_PRINT(3, ("Executor: activating group %s\n",
				     mali_group_core_description(groups[i])));

#if defined(DEBUG)
		num_activated++;
#endif

		if (mali_group_is_in_virtual(groups[i])) {
			/*
			 * At least one child group of virtual group is powered on.
			 */
			child_groups_activated = MALI_TRUE;
		} else if (MALI_FALSE == mali_group_is_virtual(groups[i])) {
			/* Set gp and pp not in virtual to active. */
			mali_group_set_active(groups[i]);
		}

		/* Move group from inactive to idle list */
		if (groups[i] == gp_group) {
			MALI_DEBUG_ASSERT(EXEC_STATE_INACTIVE ==
					  gp_group_state);
			gp_group_state = EXEC_STATE_IDLE;
		} else if (MALI_FALSE == mali_group_is_in_virtual(groups[i])
			   && MALI_FALSE == mali_group_is_virtual(groups[i])) {
			MALI_DEBUG_ASSERT(MALI_TRUE == mali_executor_group_is_in_state(groups[i],
					  EXEC_STATE_INACTIVE));

			mali_executor_change_state_pp_physical(groups[i],
							       &group_list_inactive,
							       &group_list_inactive_count,
							       &group_list_idle,
							       &group_list_idle_count);
		}

		do_schedule = MALI_TRUE;
	}

	if (mali_executor_has_virtual_group() &&
	    MALI_TRUE == child_groups_activated &&
	    MALI_GROUP_STATE_ACTIVATION_PENDING ==
	    mali_group_get_state(virtual_group)) {
		/*
		 * Try to activate virtual group while it may not be successful every time,
		 * because there is one situation that not all of child groups are powered on
		 * in one time and virtual group is in activation pending state.
		 */
		if (mali_group_set_active(virtual_group)) {
			/* Move group from inactive to idle */
			MALI_DEBUG_ASSERT(EXEC_STATE_INACTIVE ==
					  virtual_group_state);
			virtual_group_state = EXEC_STATE_IDLE;

			MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u  physical activated, 1 virtual activated.\n", num_groups, num_activated));
		} else {
			MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u physical activated\n", num_groups, num_activated));
		}
	} else {
		MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u physical activated\n", num_groups, num_activated));
	}

	if (MALI_TRUE == do_schedule) {
		/* Trigger a schedule */
		mali_executor_schedule();
	}

	mali_executor_unlock();
}
939 
/*
 * Power down @num_groups groups handed over by the PM logic.
 *
 * The executor lock is held for the whole operation. Every group must
 * already be in a state where it cannot be executing work (see the
 * assert below for the allowed states).
 */
void mali_executor_group_power_down(struct mali_group *groups[],
				    u32 num_groups)
{
	u32 idx;

	MALI_DEBUG_ASSERT_POINTER(groups);
	MALI_DEBUG_ASSERT(0 < num_groups);

	mali_executor_lock();

	MALI_DEBUG_PRINT(3, ("Executor: powering down %u groups\n", num_groups));

	for (idx = 0; idx < num_groups; idx++) {
		struct mali_group *grp = groups[idx];

		/* A group must be either disabled or inactive here. The
		 * virtual group may additionally be EMPTY: during
		 * pm_runtime_suspend the mali_pm_state_lock is released
		 * before the executor lock is taken, so a newly queued
		 * physical job can pull all children out of the virtual
		 * group in between; only the now-empty virtual group is
		 * powered down, and the extracted physical groups are
		 * powered up again in the pm_runtime_resume callback.
		 */
		MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(grp,
				  EXEC_STATE_DISABLED) ||
				  mali_executor_group_is_in_state(grp,
						  EXEC_STATE_INACTIVE) ||
				  mali_executor_group_is_in_state(grp,
						  EXEC_STATE_EMPTY));

		MALI_DEBUG_PRINT(3, ("Executor: powering down group %s\n",
				     mali_group_core_description(grp)));

		mali_group_power_down(grp);
	}

	MALI_DEBUG_PRINT(3, ("Executor: powering down %u groups completed\n", num_groups));

	mali_executor_unlock();
}
978 
/*
 * Abort all jobs (GP and PP) belonging to the given session.
 *
 * Groups currently working on a job for the session have that job
 * completed as failed and freed; all non-working groups have their
 * cached session reference cleared so no stale session state remains.
 * The caller must have flagged the session as aborting beforehand.
 */
void mali_executor_abort_session(struct mali_session_data *session)
{
	struct mali_group *group;
	struct mali_group *tmp_group;

	MALI_DEBUG_ASSERT_POINTER(session);
	MALI_DEBUG_ASSERT(session->is_aborting);

	MALI_DEBUG_PRINT(3,
			 ("Executor: Aborting all jobs from session 0x%08X.\n",
			  session));

	mali_executor_lock();

	/* Abort the GP job if the GP group is bound to this session */
	if (mali_group_get_session(gp_group) == session) {
		if (EXEC_STATE_WORKING == gp_group_state) {
			struct mali_gp_job *gp_job = NULL;

			mali_executor_complete_group(gp_group, MALI_FALSE, &gp_job, NULL);

			/* A working GP group must have a running job */
			MALI_DEBUG_ASSERT_POINTER(gp_job);

			/* GP job completed, make sure it is freed */
			mali_scheduler_complete_gp_job(gp_job, MALI_FALSE,
						       MALI_FALSE, MALI_TRUE);
		} else {
			/* Same session, but not working, so just clear it */
			mali_group_clear_session(gp_group);
		}
	}

	/* Abort a running virtual PP job for this session, if any */
	if (mali_executor_has_virtual_group()) {
		if (EXEC_STATE_WORKING == virtual_group_state
		    && mali_group_get_session(virtual_group) == session) {
			struct mali_pp_job *pp_job = NULL;

			mali_executor_complete_group(virtual_group, MALI_FALSE, NULL, &pp_job);

			if (NULL != pp_job) {
				/* PP job completed, make sure it is freed */
				mali_scheduler_complete_pp_job(pp_job, 0,
							       MALI_FALSE, MALI_TRUE);
			}
		}
	}

	/* Abort physical PP jobs for this session on all working groups */
	_MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_working,
				    struct mali_group, executor_list) {
		if (mali_group_get_session(group) == session) {
			struct mali_pp_job *pp_job = NULL;

			mali_executor_complete_group(group, MALI_FALSE, NULL, &pp_job);

			if (NULL != pp_job) {
				/* PP job completed, make sure it is freed */
				mali_scheduler_complete_pp_job(pp_job, 0,
							       MALI_FALSE, MALI_TRUE);
			}
		}
	}

	/* Non-working groups hold no job; just drop their session cache */
	_MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_idle, struct mali_group, executor_list) {
		mali_group_clear_session(group);
	}

	_MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_inactive, struct mali_group, executor_list) {
		mali_group_clear_session(group);
	}

	_MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_disabled, struct mali_group, executor_list) {
		mali_group_clear_session(group);
	}

	mali_executor_unlock();
}
1054 
1055 
/* Re-enable automatic core scaling (it is enabled by default). */
void mali_executor_core_scaling_enable(void)
{
	/* PS: Core scaling is by default enabled */
	core_scaling_enabled = MALI_TRUE;
}
1061 
/* Disable automatic core scaling. */
void mali_executor_core_scaling_disable(void)
{
	core_scaling_enabled = MALI_FALSE;
}
1066 
/* Query whether automatic core scaling is currently enabled. */
mali_bool mali_executor_core_scaling_is_enabled(void)
{
	return core_scaling_enabled;
}
1071 
mali_executor_group_enable(struct mali_group * group)1072 void mali_executor_group_enable(struct mali_group *group)
1073 {
1074 	MALI_DEBUG_ASSERT_POINTER(group);
1075 
1076 	mali_executor_lock();
1077 
1078 	if ((NULL != mali_group_get_gp_core(group) || NULL != mali_group_get_pp_core(group))
1079 	    && (mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))) {
1080 		mali_executor_group_enable_internal(group);
1081 	}
1082 
1083 	mali_executor_schedule();
1084 	mali_executor_unlock();
1085 
1086 	_mali_osk_wq_schedule_work(executor_wq_notify_core_change);
1087 }
1088 
1089 /*
1090  * If a physical group is inactive or idle, we should disable it immediately,
1091  * if group is in virtual, and virtual group is idle, disable given physical group in it.
1092  */
mali_executor_group_disable(struct mali_group * group)1093 void mali_executor_group_disable(struct mali_group *group)
1094 {
1095 	MALI_DEBUG_ASSERT_POINTER(group);
1096 
1097 	mali_executor_lock();
1098 
1099 	if ((NULL != mali_group_get_gp_core(group) || NULL != mali_group_get_pp_core(group))
1100 	    && (!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))) {
1101 		mali_executor_group_disable_internal(group);
1102 	}
1103 
1104 	mali_executor_schedule();
1105 	mali_executor_unlock();
1106 
1107 	_mali_osk_wq_schedule_work(executor_wq_notify_core_change);
1108 }
1109 
mali_executor_group_is_disabled(struct mali_group * group)1110 mali_bool mali_executor_group_is_disabled(struct mali_group *group)
1111 {
1112 	/* NB: This function is not optimized for time critical usage */
1113 
1114 	mali_bool ret;
1115 
1116 	MALI_DEBUG_ASSERT_POINTER(group);
1117 
1118 	mali_executor_lock();
1119 	ret = mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED);
1120 	mali_executor_unlock();
1121 
1122 	return ret;
1123 }
1124 
/*
 * Request that @target_core_nr physical PP cores be enabled.
 *
 * Returns 0 on success (including when the request is a no-op),
 * -EPERM when core scaling is disabled and @override is not set,
 * and -EINVAL when the requested count is zero or above the total.
 */
int mali_executor_set_perf_level(unsigned int target_core_nr, mali_bool override)
{
	/* Already at the requested core count: nothing to do */
	if (target_core_nr == num_physical_pp_cores_enabled) {
		return 0;
	}

	/* Honour the request only when scaling is allowed or forced */
	if (MALI_FALSE == core_scaling_enabled && MALI_FALSE == override) {
		return -EPERM;
	}

	if (target_core_nr > num_physical_pp_cores_total) {
		return -EINVAL;
	}

	if (0 == target_core_nr) {
		return -EINVAL;
	}

	mali_executor_core_scale(target_core_nr);

	/* Tell user space that the core configuration changed */
	_mali_osk_wq_schedule_work(executor_wq_notify_core_change);

	return 0;
}
1138 
1139 #if MALI_STATE_TRACKING
/*
 * Write a human readable dump of the executor state into @buf:
 * the GP group state, every physical PP group list (working, idle,
 * inactive, disabled) and, if present, the virtual group state.
 *
 * @param buf  Destination buffer.
 * @param size Size of @buf in bytes.
 * @return Number of characters written.
 *
 * NOTE(review): n is int while size is u32, so "size - n" mixes
 * signed/unsigned arithmetic; assumes the buffer is large enough
 * that n never exceeds size — confirm with callers.
 */
u32 mali_executor_dump_state(char *buf, u32 size)
{
	int n = 0;
	struct mali_group *group;
	struct mali_group *temp;

	mali_executor_lock();

	/* GP group state */
	switch (gp_group_state) {
	case EXEC_STATE_INACTIVE:
		n += _mali_osk_snprintf(buf + n, size - n,
					"GP group is in state INACTIVE\n");
		break;
	case EXEC_STATE_IDLE:
		n += _mali_osk_snprintf(buf + n, size - n,
					"GP group is in state IDLE\n");
		break;
	case EXEC_STATE_WORKING:
		n += _mali_osk_snprintf(buf + n, size - n,
					"GP group is in state WORKING\n");
		break;
	default:
		n += _mali_osk_snprintf(buf + n, size - n,
					"GP group is in unknown/illegal state %u\n",
					gp_group_state);
		break;
	}

	n += mali_group_dump_state(gp_group, buf + n, size - n);

	/* Physical PP groups, per executor list */
	n += _mali_osk_snprintf(buf + n, size - n,
				"Physical PP groups in WORKING state (count = %u):\n",
				group_list_working_count);

	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, executor_list) {
		n += mali_group_dump_state(group, buf + n, size - n);
	}

	n += _mali_osk_snprintf(buf + n, size - n,
				"Physical PP groups in IDLE state (count = %u):\n",
				group_list_idle_count);

	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) {
		n += mali_group_dump_state(group, buf + n, size - n);
	}

	n += _mali_osk_snprintf(buf + n, size - n,
				"Physical PP groups in INACTIVE state (count = %u):\n",
				group_list_inactive_count);

	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) {
		n += mali_group_dump_state(group, buf + n, size - n);
	}

	n += _mali_osk_snprintf(buf + n, size - n,
				"Physical PP groups in DISABLED state (count = %u):\n",
				group_list_disabled_count);

	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) {
		n += mali_group_dump_state(group, buf + n, size - n);
	}

	/* Virtual group state (Mali-450/470 only) */
	if (mali_executor_has_virtual_group()) {
		switch (virtual_group_state) {
		case EXEC_STATE_EMPTY:
			n += _mali_osk_snprintf(buf + n, size - n,
						"Virtual PP group is in state EMPTY\n");
			break;
		case EXEC_STATE_INACTIVE:
			n += _mali_osk_snprintf(buf + n, size - n,
						"Virtual PP group is in state INACTIVE\n");
			break;
		case EXEC_STATE_IDLE:
			n += _mali_osk_snprintf(buf + n, size - n,
						"Virtual PP group is in state IDLE\n");
			break;
		case EXEC_STATE_WORKING:
			n += _mali_osk_snprintf(buf + n, size - n,
						"Virtual PP group is in state WORKING\n");
			break;
		default:
			n += _mali_osk_snprintf(buf + n, size - n,
						"Virtual PP group is in unknown/illegal state %u\n",
						virtual_group_state);
			break;
		}

		n += mali_group_dump_state(virtual_group, buf + n, size - n);
	}

	mali_executor_unlock();

	n += _mali_osk_snprintf(buf + n, size - n, "\n");

	return n;
}
1236 #endif
1237 
_mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s * args)1238 _mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args)
1239 {
1240 	MALI_DEBUG_ASSERT_POINTER(args);
1241 	MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
1242 	args->number_of_total_cores = num_physical_pp_cores_total;
1243 	args->number_of_enabled_cores = num_physical_pp_cores_enabled;
1244 	return _MALI_OSK_ERR_OK;
1245 }
1246 
_mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s * args)1247 _mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args)
1248 {
1249 	MALI_DEBUG_ASSERT_POINTER(args);
1250 	MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
1251 	args->version = pp_version;
1252 	return _MALI_OSK_ERR_OK;
1253 }
1254 
_mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s * args)1255 _mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args)
1256 {
1257 	MALI_DEBUG_ASSERT_POINTER(args);
1258 	MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
1259 	args->number_of_cores = 1;
1260 	return _MALI_OSK_ERR_OK;
1261 }
1262 
_mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s * args)1263 _mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args)
1264 {
1265 	MALI_DEBUG_ASSERT_POINTER(args);
1266 	MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
1267 	args->version = gp_version;
1268 	return _MALI_OSK_ERR_OK;
1269 }
1270 
_mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s * args)1271 _mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args)
1272 {
1273 	struct mali_session_data *session;
1274 	struct mali_gp_job *job;
1275 
1276 	MALI_DEBUG_ASSERT_POINTER(args);
1277 	MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
1278 
1279 	session = (struct mali_session_data *)(uintptr_t)args->ctx;
1280 
1281 	if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code) {
1282 		_mali_osk_notification_t *new_notification = NULL;
1283 
1284 		new_notification = _mali_osk_notification_create(
1285 					   _MALI_NOTIFICATION_GP_STALLED,
1286 					   sizeof(_mali_uk_gp_job_suspended_s));
1287 
1288 		if (NULL != new_notification) {
1289 			MALI_DEBUG_PRINT(3, ("Executor: Resuming job %u with new heap; 0x%08X - 0x%08X\n",
1290 					     args->cookie, args->arguments[0], args->arguments[1]));
1291 
1292 			mali_executor_lock();
1293 
1294 			/* Resume the job in question if it is still running */
1295 			job = mali_group_get_running_gp_job(gp_group);
1296 			if (NULL != job &&
1297 			    args->cookie == mali_gp_job_get_id(job) &&
1298 			    session == mali_gp_job_get_session(job)) {
1299 				/*
1300 				 * Correct job is running, resume with new heap
1301 				 */
1302 
1303 				mali_gp_job_set_oom_notification(job,
1304 								 new_notification);
1305 
1306 				/* This will also re-enable interrupts */
1307 				mali_group_resume_gp_with_new_heap(gp_group,
1308 								   args->cookie,
1309 								   args->arguments[0],
1310 								   args->arguments[1]);
1311 
1312 				mali_executor_unlock();
1313 				return _MALI_OSK_ERR_OK;
1314 			} else {
1315 				MALI_DEBUG_PRINT(2, ("Executor: Unable to resume  gp job becasue gp time out or any other unexpected reason!\n"));
1316 
1317 				_mali_osk_notification_delete(new_notification);
1318 
1319 				mali_executor_unlock();
1320 				return _MALI_OSK_ERR_FAULT;
1321 			}
1322 		} else {
1323 			MALI_PRINT_ERROR(("Executor: Failed to allocate notification object. Will abort GP job.\n"));
1324 		}
1325 	} else {
1326 		MALI_DEBUG_PRINT(2, ("Executor: Aborting job %u, no new heap provided\n", args->cookie));
1327 	}
1328 
1329 	mali_executor_lock();
1330 
1331 	/* Abort the job in question if it is still running */
1332 	job = mali_group_get_running_gp_job(gp_group);
1333 	if (NULL != job &&
1334 	    args->cookie == mali_gp_job_get_id(job) &&
1335 	    session == mali_gp_job_get_session(job)) {
1336 		/* Correct job is still running */
1337 		struct mali_gp_job *job_done = NULL;
1338 
1339 		mali_executor_complete_group(gp_group, MALI_FALSE, &job_done, NULL);
1340 
1341 		/* The same job should have completed */
1342 		MALI_DEBUG_ASSERT(job_done == job);
1343 
1344 		/* GP job completed, make sure it is freed */
1345 		mali_scheduler_complete_gp_job(job_done, MALI_FALSE,
1346 					       MALI_TRUE, MALI_TRUE);
1347 	}
1348 
1349 	mali_executor_unlock();
1350 	return _MALI_OSK_ERR_FAULT;
1351 }
1352 
1353 
1354 /*
1355  * ---------- Implementation of static functions ----------
1356  */
1357 
/*
 * Take the executor IRQ-safe spinlock. It guards the executor state:
 * the group lists, their counters, and the GP/virtual group states.
 */
void mali_executor_lock(void)
{
	_mali_osk_spinlock_irq_lock(mali_executor_lock_obj);
	MALI_DEBUG_PRINT(5, ("Executor: lock taken\n"));
}
1363 
/* Release the executor spinlock taken by mali_executor_lock(). */
void mali_executor_unlock(void)
{
	MALI_DEBUG_PRINT(5, ("Executor: Releasing lock\n"));
	_mali_osk_spinlock_irq_unlock(mali_executor_lock_obj);
}
1369 
mali_executor_is_suspended(void * data)1370 static mali_bool mali_executor_is_suspended(void *data)
1371 {
1372 	mali_bool ret;
1373 
1374 	/* This callback does not use the data pointer. */
1375 	MALI_IGNORE(data);
1376 
1377 	mali_executor_lock();
1378 
1379 	ret = pause_count > 0 && !mali_executor_is_working();
1380 
1381 	mali_executor_unlock();
1382 
1383 	return ret;
1384 }
1385 
mali_executor_is_working(void)1386 static mali_bool mali_executor_is_working(void)
1387 {
1388 	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
1389 
1390 	return (0 != group_list_working_count ||
1391 		EXEC_STATE_WORKING == gp_group_state ||
1392 		EXEC_STATE_WORKING == virtual_group_state);
1393 }
1394 
mali_executor_disable_empty_virtual(void)1395 static void mali_executor_disable_empty_virtual(void)
1396 {
1397 	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
1398 	MALI_DEBUG_ASSERT(virtual_group_state != EXEC_STATE_EMPTY);
1399 	MALI_DEBUG_ASSERT(virtual_group_state != EXEC_STATE_WORKING);
1400 
1401 	if (mali_group_is_empty(virtual_group)) {
1402 		virtual_group_state = EXEC_STATE_EMPTY;
1403 	}
1404 }
1405 
/*
 * Return a previously stolen physical group to the virtual group.
 *
 * Called after the physical group has completed its job (it is still
 * MALI_GROUP_STATE_ACTIVE). The group's power state is synchronized
 * with the current state of the virtual group before the rejoin, and
 * the virtual group's executor state leaves EMPTY accordingly.
 *
 * @return MALI_TRUE when the caller must trigger a PM update (a group
 *         deactivation was started here).
 */
static mali_bool mali_executor_physical_rejoin_virtual(struct mali_group *group)
{
	mali_bool trigger_pm_update = MALI_FALSE;

	MALI_DEBUG_ASSERT_POINTER(group);
	/* Only rejoining after job has completed (still active) */
	MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE ==
			  mali_group_get_state(group));
	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
	MALI_DEBUG_ASSERT(MALI_TRUE == mali_executor_has_virtual_group());
	MALI_DEBUG_ASSERT(MALI_FALSE == mali_group_is_virtual(group));

	/* Make sure group and virtual group have same status */

	if (MALI_GROUP_STATE_INACTIVE == mali_group_get_state(virtual_group)) {
		/* Virtual group is powered down: deactivate this group too */
		if (mali_group_deactivate(group)) {
			trigger_pm_update = MALI_TRUE;
		}

		if (virtual_group_state == EXEC_STATE_EMPTY) {
			virtual_group_state = EXEC_STATE_INACTIVE;
		}
	} else if (MALI_GROUP_STATE_ACTIVATION_PENDING ==
		   mali_group_get_state(virtual_group)) {
		/*
		 * Activation is pending for virtual group, leave
		 * this child group as active.
		 */
		if (virtual_group_state == EXEC_STATE_EMPTY) {
			virtual_group_state = EXEC_STATE_INACTIVE;
		}
	} else {
		MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE ==
				  mali_group_get_state(virtual_group));

		if (virtual_group_state == EXEC_STATE_EMPTY) {
			virtual_group_state = EXEC_STATE_IDLE;
		}
	}

	/* Remove group from idle list */
	MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(group,
			  EXEC_STATE_IDLE));
	_mali_osk_list_delinit(&group->executor_list);
	group_list_idle_count--;

	/*
	 * And finally rejoin the virtual group
	 * group will start working on same job as virtual_group,
	 * if virtual_group is working on a job
	 */
	mali_group_add_group(virtual_group, group);

	return trigger_pm_update;
}
1461 
mali_executor_has_virtual_group(void)1462 static mali_bool mali_executor_has_virtual_group(void)
1463 {
1464 #if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
1465 	return (NULL != virtual_group) ? MALI_TRUE : MALI_FALSE;
1466 #else
1467 	return MALI_FALSE;
1468 #endif /* (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) */
1469 }
1470 
mali_executor_virtual_group_is_usable(void)1471 static mali_bool mali_executor_virtual_group_is_usable(void)
1472 {
1473 #if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
1474 	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
1475 	return ((EXEC_STATE_INACTIVE == virtual_group_state ||
1476 		 EXEC_STATE_IDLE == virtual_group_state) && (virtual_group->state != MALI_GROUP_STATE_ACTIVATION_PENDING)) ?
1477 	       MALI_TRUE : MALI_FALSE;
1478 #else
1479 	return MALI_FALSE;
1480 #endif /* (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) */
1481 }
1482 
mali_executor_tackle_gp_bound(void)1483 static mali_bool mali_executor_tackle_gp_bound(void)
1484 {
1485 	struct mali_pp_job *job;
1486 
1487 	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
1488 
1489 	job = mali_scheduler_job_pp_physical_peek();
1490 
1491 	if (NULL != job && MALI_TRUE == mali_is_mali400()) {
1492 		if (0 < group_list_working_count &&
1493 		    mali_pp_job_is_large_and_unstarted(job)) {
1494 			return MALI_TRUE;
1495 		}
1496 	}
1497 
1498 	return MALI_FALSE;
1499 }
1500 
/*
 * Decide whether scheduling must stop early due to GPU secure
 * (protected) mode constraints.
 *
 * @param gpu_secure_mode_is_needed Out: set to MALI_TRUE when the next
 *        PP job to start is a protected job (and scheduling may
 *        proceed), otherwise MALI_FALSE.
 * @return MALI_TRUE when scheduling must not proceed right now, i.e.
 *         the next protected job cannot start while non-protected work
 *         is running, or non-protected work cannot start while a
 *         protected job is running.
 *
 * Scheduler lock must be held.
 */
static mali_bool mali_executor_schedule_is_early_out(mali_bool *gpu_secure_mode_is_needed)
{
	struct mali_pp_job *next_pp_job_to_start = NULL;
	struct mali_group *group;
	struct mali_group *tmp_group;
	struct mali_pp_job *physical_pp_job_working = NULL;
	struct mali_pp_job *virtual_pp_job_working = NULL;
	mali_bool gpu_working_in_protected_mode = MALI_FALSE;
	mali_bool gpu_working_in_non_protected_mode = MALI_FALSE;

	MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);

	*gpu_secure_mode_is_needed = MALI_FALSE;

	/* Check if the gpu secure mode is supported, exit if not.*/
	if (MALI_FALSE == _mali_osk_gpu_secure_mode_is_supported()) {
		return MALI_FALSE;
	}

	/* Check if need to set gpu secure mode for the next pp job,
	 * get the next pp job that will be scheduled  if exist.
	 */
	next_pp_job_to_start = mali_scheduler_job_pp_next();

	/* Check current pp physical/virtual running job is protected job or not if exist.*/
	/* Any single working group is representative: all concurrently
	 * running jobs share the same protection mode. */
	_MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_working,
				    struct mali_group, executor_list) {
		physical_pp_job_working = group->pp_running_job;
		break;
	}

	if (EXEC_STATE_WORKING == virtual_group_state) {
		virtual_pp_job_working = virtual_group->pp_running_job;
	}

	if (NULL != physical_pp_job_working) {
		if (MALI_TRUE == mali_pp_job_is_protected_job(physical_pp_job_working)) {
			gpu_working_in_protected_mode = MALI_TRUE;
		} else {
			gpu_working_in_non_protected_mode = MALI_TRUE;
		}
	} else if (NULL != virtual_pp_job_working) {
		if (MALI_TRUE == mali_pp_job_is_protected_job(virtual_pp_job_working)) {
			gpu_working_in_protected_mode = MALI_TRUE;
		} else {
			gpu_working_in_non_protected_mode = MALI_TRUE;
		}
	} else if (EXEC_STATE_WORKING == gp_group_state) {
		/* GP jobs always run outside protected mode */
		gpu_working_in_non_protected_mode = MALI_TRUE;
	}

	/* If the next pp job is the protected pp job.*/
	if ((NULL != next_pp_job_to_start) && MALI_TRUE == mali_pp_job_is_protected_job(next_pp_job_to_start)) {
		/* if gp is working or any non-protected pp job is working now, unable to schedule protected pp job. */
		if (MALI_TRUE == gpu_working_in_non_protected_mode)
			return MALI_TRUE;

		*gpu_secure_mode_is_needed = MALI_TRUE;
		return MALI_FALSE;

	}

	if (MALI_TRUE == gpu_working_in_protected_mode) {
		/* Unable to schedule non-protected pp job/gp job if exist protected pp running jobs*/
		return MALI_TRUE;
	}

	return MALI_FALSE;
}
1570 /*
1571  * This is where jobs are actually started.
1572  */
mali_executor_schedule(void)1573 static void mali_executor_schedule(void)
1574 {
1575 	u32 i;
1576 	u32 num_physical_needed = 0;
1577 	u32 num_physical_to_process = 0;
1578 	mali_bool trigger_pm_update = MALI_FALSE;
1579 	mali_bool deactivate_idle_group = MALI_TRUE;
1580 	mali_bool gpu_secure_mode_is_needed = MALI_FALSE;
1581 	mali_bool is_gpu_secure_mode = MALI_FALSE;
1582 	/* Physical groups + jobs to start in this function */
1583 	struct mali_group *groups_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
1584 	struct mali_pp_job *jobs_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
1585 	u32 sub_jobs_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
1586 	int num_jobs_to_start = 0;
1587 
1588 	/* Virtual job to start in this function */
1589 	struct mali_pp_job *virtual_job_to_start = NULL;
1590 
1591 	/* GP job to start in this function */
1592 	struct mali_gp_job *gp_job_to_start = NULL;
1593 
1594 	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
1595 
1596 	if (pause_count > 0) {
1597 		/* Execution is suspended, don't schedule any jobs. */
1598 		return;
1599 	}
1600 
1601 	/* Lock needed in order to safely handle the job queues */
1602 	mali_scheduler_lock();
1603 
1604 	/* 1. Check the schedule if need to early out. */
1605 	if (MALI_TRUE == mali_executor_schedule_is_early_out(&gpu_secure_mode_is_needed)) {
1606 		mali_scheduler_unlock();
1607 		return;
1608 	}
1609 
1610 	/* 2. Activate gp firstly if have gp job queued. */
1611 	if ((EXEC_STATE_INACTIVE == gp_group_state)
1612 	    && (0 < mali_scheduler_job_gp_count())
1613 	    && (gpu_secure_mode_is_needed == MALI_FALSE)) {
1614 
1615 		enum mali_group_state state =
1616 			mali_group_activate(gp_group);
1617 		if (MALI_GROUP_STATE_ACTIVE == state) {
1618 			/* Set GP group state to idle */
1619 			gp_group_state = EXEC_STATE_IDLE;
1620 		} else {
1621 			trigger_pm_update = MALI_TRUE;
1622 		}
1623 	}
1624 
1625 	/* 3. Prepare as many physical groups as needed/possible */
1626 
1627 	num_physical_needed = mali_scheduler_job_physical_head_count(gpu_secure_mode_is_needed);
1628 
1629 	/* On mali-450 platform, we don't need to enter in this block frequently. */
1630 	if (0 < num_physical_needed) {
1631 
1632 		if (num_physical_needed <= group_list_idle_count) {
1633 			/* We have enough groups on idle list already */
1634 			num_physical_to_process = num_physical_needed;
1635 			num_physical_needed = 0;
1636 		} else {
1637 			/* We need to get a hold of some more groups */
1638 			num_physical_to_process = group_list_idle_count;
1639 			num_physical_needed -= group_list_idle_count;
1640 		}
1641 
1642 		if (0 < num_physical_needed) {
1643 
1644 			/* 3.1. Activate groups which are inactive */
1645 
1646 			struct mali_group *group;
1647 			struct mali_group *temp;
1648 
1649 			_MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive,
1650 						    struct mali_group, executor_list) {
1651 				enum mali_group_state state =
1652 					mali_group_activate(group);
1653 				if (MALI_GROUP_STATE_ACTIVE == state) {
1654 					/* Move from inactive to idle */
1655 					mali_executor_change_state_pp_physical(group,
1656 									       &group_list_inactive,
1657 									       &group_list_inactive_count,
1658 									       &group_list_idle,
1659 									       &group_list_idle_count);
1660 					num_physical_to_process++;
1661 				} else {
1662 					trigger_pm_update = MALI_TRUE;
1663 				}
1664 
1665 				num_physical_needed--;
1666 				if (0 == num_physical_needed) {
1667 					/* We have activated all the groups we need */
1668 					break;
1669 				}
1670 			}
1671 		}
1672 
1673 		if (mali_executor_virtual_group_is_usable()) {
1674 
1675 			/*
1676 			 * 3.2. And finally, steal and activate groups
1677 			 * from virtual group if we need even more
1678 			 */
1679 			while (0 < num_physical_needed) {
1680 				struct mali_group *group;
1681 
1682 				group = mali_group_acquire_group(virtual_group);
1683 				if (NULL != group) {
1684 					enum mali_group_state state;
1685 
1686 					mali_executor_disable_empty_virtual();
1687 
1688 					state = mali_group_activate(group);
1689 					if (MALI_GROUP_STATE_ACTIVE == state) {
1690 						/* Group is ready, add to idle list */
1691 						_mali_osk_list_add(
1692 							&group->executor_list,
1693 							&group_list_idle);
1694 						group_list_idle_count++;
1695 						num_physical_to_process++;
1696 					} else {
1697 						/*
1698 						 * Group is not ready yet,
1699 						 * add to inactive list
1700 						 */
1701 						_mali_osk_list_add(
1702 							&group->executor_list,
1703 							&group_list_inactive);
1704 						group_list_inactive_count++;
1705 
1706 						trigger_pm_update = MALI_TRUE;
1707 					}
1708 					num_physical_needed--;
1709 				} else {
1710 					/*
1711 					 * We could not get enough groups
1712 					 * from the virtual group.
1713 					 */
1714 					break;
1715 				}
1716 			}
1717 		}
1718 
1719 		/* 3.3. Assign physical jobs to groups */
1720 
1721 		if (0 < num_physical_to_process) {
1722 			struct mali_group *group;
1723 			struct mali_group *temp;
1724 
1725 			_MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle,
1726 						    struct mali_group, executor_list) {
1727 				struct mali_pp_job *job = NULL;
1728 				u32 sub_job = MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
1729 
1730 				MALI_DEBUG_ASSERT(num_jobs_to_start <
1731 						  MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS);
1732 
1733 				MALI_DEBUG_ASSERT(0 <
1734 						  mali_scheduler_job_physical_head_count(gpu_secure_mode_is_needed));
1735 
1736 				/* If the next pp job is non-protected, check if gp bound now. */
1737 				if ((MALI_FALSE == gpu_secure_mode_is_needed)
1738 				    && (mali_executor_hint_is_enabled(MALI_EXECUTOR_HINT_GP_BOUND))
1739 				    && (MALI_TRUE == mali_executor_tackle_gp_bound())) {
1740 					/*
1741 					* We're gp bound,
1742 					* don't start this right now.
1743 					*/
1744 					deactivate_idle_group = MALI_FALSE;
1745 					num_physical_to_process = 0;
1746 					break;
1747 				}
1748 
1749 				job = mali_scheduler_job_pp_physical_get(
1750 					      &sub_job);
1751 
1752 				if (MALI_FALSE == gpu_secure_mode_is_needed) {
1753 					MALI_DEBUG_ASSERT(MALI_FALSE == mali_pp_job_is_protected_job(job));
1754 				} else {
1755 					MALI_DEBUG_ASSERT(MALI_TRUE == mali_pp_job_is_protected_job(job));
1756 				}
1757 
1758 				MALI_DEBUG_ASSERT_POINTER(job);
1759 				MALI_DEBUG_ASSERT(sub_job <= MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS);
1760 
1761 				/* Put job + group on list of jobs to start later on */
1762 
1763 				groups_to_start[num_jobs_to_start] = group;
1764 				jobs_to_start[num_jobs_to_start] = job;
1765 				sub_jobs_to_start[num_jobs_to_start] = sub_job;
1766 				num_jobs_to_start++;
1767 
1768 				/* Move group from idle to working */
1769 				mali_executor_change_state_pp_physical(group,
1770 								       &group_list_idle,
1771 								       &group_list_idle_count,
1772 								       &group_list_working,
1773 								       &group_list_working_count);
1774 
1775 				num_physical_to_process--;
1776 				if (0 == num_physical_to_process) {
1777 					/* Got all we needed */
1778 					break;
1779 				}
1780 			}
1781 		}
1782 	}
1783 
1784 	/* 4. Deactivate idle pp group , must put deactive here before active vitual group
1785 	 *    for cover case first only has physical job in normal queue but group inactive,
1786 	 *    so delay the job start go to active group, when group activated,
1787 	 *    call scheduler again, but now if we get high queue virtual job,
1788 	 *    we will do nothing in schedule cause executor schedule stop
1789 	 */
1790 
1791 	if (MALI_TRUE == mali_executor_deactivate_list_idle(deactivate_idle_group
1792 			&& (!mali_timeline_has_physical_pp_job()))) {
1793 		trigger_pm_update = MALI_TRUE;
1794 	}
1795 
1796 	/* 5. Activate virtual group, if needed */
1797 	if (EXEC_STATE_INACTIVE == virtual_group_state &&
1798 	    MALI_TRUE ==  mali_scheduler_job_next_is_virtual()) {
1799 		struct mali_pp_job *virtual_job = mali_scheduler_job_pp_virtual_peek();
1800 		if ((MALI_FALSE == gpu_secure_mode_is_needed && MALI_FALSE == mali_pp_job_is_protected_job(virtual_job))
1801 		    || (MALI_TRUE == gpu_secure_mode_is_needed && MALI_TRUE == mali_pp_job_is_protected_job(virtual_job))) {
1802 			enum mali_group_state state =
1803 				mali_group_activate(virtual_group);
1804 			if (MALI_GROUP_STATE_ACTIVE == state) {
1805 				/* Set virtual group state to idle */
1806 				virtual_group_state = EXEC_STATE_IDLE;
1807 			} else {
1808 				trigger_pm_update = MALI_TRUE;
1809 			}
1810 		}
1811 	}
1812 
1813 	/* 6. To power up group asap,  trigger pm update only when no need to swith the gpu mode. */
1814 
1815 	is_gpu_secure_mode = _mali_osk_gpu_secure_mode_is_enabled();
1816 
1817 	if ((MALI_FALSE == gpu_secure_mode_is_needed && MALI_FALSE == is_gpu_secure_mode)
1818 	    || (MALI_TRUE == gpu_secure_mode_is_needed && MALI_TRUE == is_gpu_secure_mode)) {
1819 		if (MALI_TRUE == trigger_pm_update) {
1820 			trigger_pm_update = MALI_FALSE;
1821 			mali_pm_update_async();
1822 		}
1823 	}
1824 
1825 	/* 7. Assign jobs to idle virtual group (or deactivate if no job) */
1826 
1827 	if (EXEC_STATE_IDLE == virtual_group_state) {
1828 		if (MALI_TRUE == mali_scheduler_job_next_is_virtual()) {
1829 			struct mali_pp_job *virtual_job = mali_scheduler_job_pp_virtual_peek();
1830 			if ((MALI_FALSE == gpu_secure_mode_is_needed && MALI_FALSE == mali_pp_job_is_protected_job(virtual_job))
1831 			    || (MALI_TRUE == gpu_secure_mode_is_needed && MALI_TRUE == mali_pp_job_is_protected_job(virtual_job))) {
1832 				virtual_job_to_start =
1833 					mali_scheduler_job_pp_virtual_get();
1834 				virtual_group_state = EXEC_STATE_WORKING;
1835 			}
1836 		} else if (!mali_timeline_has_virtual_pp_job()) {
1837 			virtual_group_state = EXEC_STATE_INACTIVE;
1838 
1839 			if (mali_group_deactivate(virtual_group)) {
1840 				trigger_pm_update = MALI_TRUE;
1841 			}
1842 		}
1843 	}
1844 
1845 	/* 8. Assign job to idle GP group (or deactivate if no job) */
1846 
1847 	if (EXEC_STATE_IDLE == gp_group_state && MALI_FALSE == gpu_secure_mode_is_needed) {
1848 		if (0 < mali_scheduler_job_gp_count()) {
1849 			gp_job_to_start = mali_scheduler_job_gp_get();
1850 			gp_group_state = EXEC_STATE_WORKING;
1851 		} else if (!mali_timeline_has_gp_job()) {
1852 			gp_group_state = EXEC_STATE_INACTIVE;
1853 			if (mali_group_deactivate(gp_group)) {
1854 				trigger_pm_update = MALI_TRUE;
1855 			}
1856 		}
1857 	}
1858 
1859 	/* 9. We no longer need the schedule/queue lock */
1860 
1861 	mali_scheduler_unlock();
1862 
1863 	/* 10. start jobs */
1864 	if (NULL != virtual_job_to_start) {
1865 		MALI_DEBUG_ASSERT(!mali_group_pp_is_active(virtual_group));
1866 		mali_group_start_pp_job(virtual_group,
1867 					virtual_job_to_start, 0, is_gpu_secure_mode);
1868 	}
1869 
1870 	for (i = 0; i < num_jobs_to_start; i++) {
1871 		MALI_DEBUG_ASSERT(!mali_group_pp_is_active(
1872 					  groups_to_start[i]));
1873 		mali_group_start_pp_job(groups_to_start[i],
1874 					jobs_to_start[i],
1875 					sub_jobs_to_start[i], is_gpu_secure_mode);
1876 	}
1877 
1878 	MALI_DEBUG_ASSERT_POINTER(gp_group);
1879 
1880 	if (NULL != gp_job_to_start) {
1881 		MALI_DEBUG_ASSERT(!mali_group_gp_is_active(gp_group));
1882 		mali_group_start_gp_job(gp_group, gp_job_to_start, is_gpu_secure_mode);
1883 	}
1884 
1885 	/* 11. Trigger any pending PM updates */
1886 	if (MALI_TRUE == trigger_pm_update) {
1887 		mali_pm_update_async();
1888 	}
1889 }
1890 
1891 /* Handler for deferred schedule requests */
static void mali_executor_wq_schedule(void *arg)
{
	/*
	 * Work-queue entry point for schedule requests that were deferred
	 * (e.g. from atomic context). Takes the executor lock, runs one
	 * scheduling pass, and releases the lock.
	 */
	MALI_IGNORE(arg); /* work-queue argument is unused */
	mali_executor_lock();
	mali_executor_schedule();
	mali_executor_unlock();
}
1899 
static void mali_executor_send_gp_oom_to_user(struct mali_gp_job *job)
{
	/*
	 * Tell the owning user-space session that a GP job has been suspended
	 * due to out-of-memory, using the notification object retrieved from
	 * the job itself. User space is expected to respond (with more heap or
	 * an abort), and the response is matched against the cookie saved here.
	 */
	_mali_uk_gp_job_suspended_s *jobres;
	_mali_osk_notification_t *notification;

	notification = mali_gp_job_get_oom_notification(job);

	/*
	 * Remember the id we send to user space, so we have something to
	 * verify when we get a response
	 */
	gp_returned_cookie = mali_gp_job_get_id(job);

	/* Fill in the result buffer user space will read from the notification */
	jobres = (_mali_uk_gp_job_suspended_s *)notification->result_buffer;
	jobres->user_job_ptr = mali_gp_job_get_user_id(job);
	jobres->cookie = gp_returned_cookie;

	mali_session_send_notification(mali_gp_job_get_session(job),
				       notification);
}
static struct mali_gp_job *mali_executor_complete_gp(struct mali_group *group,
		mali_bool success)
{
	/*
	 * Finish the GP job running on @group (successfully or not): collect
	 * HW status, mark the GP group idle, release the job's timeline
	 * tracker and signal any PP job waiting on this GP job.
	 *
	 * Returns the completed job; the caller is responsible for its
	 * further handling. Executor lock must be held.
	 */
	struct mali_gp_job *job;

	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();

	/* Extracts the needed HW status from core and reset */
	job = mali_group_complete_gp(group, success);

	MALI_DEBUG_ASSERT_POINTER(job);

	/* Core is now ready to go into idle list */
	gp_group_state = EXEC_STATE_IDLE;

	/* This will potentially queue more GP and PP jobs */
	mali_timeline_tracker_release(&job->tracker);

	/* Signal PP job */
	mali_gp_job_signal_pp_tracker(job, success);

	return job;
}
1943 
static struct mali_pp_job *mali_executor_complete_pp(struct mali_group *group,
		mali_bool success)
{
	/*
	 * Finish the PP (sub-)job running on @group: collect HW status, move
	 * the group back to an idle state, mark the sub-job completed and, if
	 * that was the last outstanding sub-job, release the job's timeline
	 * tracker.
	 *
	 * Returns the (possibly still incomplete) job. Executor lock must be
	 * held.
	 */
	struct mali_pp_job *job;
	u32 sub_job;
	mali_bool job_is_done;

	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();

	/* Extracts the needed HW status from core and reset */
	job = mali_group_complete_pp(group, success, &sub_job);

	MALI_DEBUG_ASSERT_POINTER(job);

	/* Core is now ready to go into idle list */
	if (mali_group_is_virtual(group)) {
		virtual_group_state = EXEC_STATE_IDLE;
	} else {
		/* Move from working to idle state */
		mali_executor_change_state_pp_physical(group,
						       &group_list_working,
						       &group_list_working_count,
						       &group_list_idle,
						       &group_list_idle_count);
	}

	/* It is the executor module which owns the jobs themselves by now */
	mali_pp_job_mark_sub_job_completed(job, success);
	job_is_done = mali_pp_job_is_complete(job);

	if (job_is_done) {
		/* This will potentially queue more GP and PP jobs */
		mali_timeline_tracker_release(&job->tracker);
	}

	return job;
}
1981 
static void mali_executor_complete_group(struct mali_group *group,
		mali_bool success,
		struct mali_gp_job **gp_job_done,
		struct mali_pp_job **pp_job_done)
{
	/*
	 * Complete whatever job @group was running (GP or PP), then either
	 * wake up anyone waiting for the executor to pause, handle a pending
	 * core-scaling disable request, or run another scheduling pass.
	 *
	 * On return, exactly one of *gp_job_done / *pp_job_done is set when a
	 * whole job finished; a PP job that still has sub-jobs outstanding is
	 * reported through neither pointer.
	 */
	struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
	struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
	struct mali_gp_job *gp_job = NULL;
	struct mali_pp_job *pp_job = NULL;
	/* Defaults to TRUE so the GP path falls through to the gp_job branch below */
	mali_bool pp_job_is_done = MALI_TRUE;

	if (NULL != gp_core) {
		gp_job = mali_executor_complete_gp(group, success);
	} else {
		MALI_DEBUG_ASSERT_POINTER(pp_core);
		MALI_IGNORE(pp_core);
		pp_job = mali_executor_complete_pp(group, success);

		pp_job_is_done = mali_pp_job_is_complete(pp_job);
	}

	if (pause_count > 0) {
		/* Execution has been suspended */

		if (!mali_executor_is_working()) {
			/* Last job completed, wake up sleepers */
			_mali_osk_wait_queue_wake_up(
				executor_working_wait_queue);
		}
	} else if (MALI_TRUE == mali_group_disable_requested(group)) {
		/* Core scaling wanted this group disabled once idle; do it now */
		mali_executor_core_scale_in_group_complete(group);

		mali_executor_schedule();
	} else {
		/* try to schedule new jobs */
		mali_executor_schedule();
	}

	if (NULL != gp_job) {
		MALI_DEBUG_ASSERT_POINTER(gp_job_done);
		*gp_job_done = gp_job;
	} else if (pp_job_is_done) {
		MALI_DEBUG_ASSERT_POINTER(pp_job);
		MALI_DEBUG_ASSERT_POINTER(pp_job_done);
		*pp_job_done = pp_job;
	}
}
2029 
static void mali_executor_change_state_pp_physical(struct mali_group *group,
		_mali_osk_list_t *old_list,
		u32 *old_count,
		_mali_osk_list_t *new_list,
		u32 *new_count)
{
	/*
	 * Move a physical PP group from one state list to another and keep
	 * both element counters in sync. The actual work is the three lines
	 * at the bottom; everything inside #if defined(DEBUG) only verifies
	 * that the group really is on @old_list and that both counters match
	 * the true list lengths, printing diagnostics before asserting.
	 */
	/*
	 * It's a bit more complicated to change the state for the physical PP
	 * groups since their state is determined by the list they are on.
	 */
#if defined(DEBUG)
	mali_bool found = MALI_FALSE;
	struct mali_group *group_iter;
	struct mali_group *temp;
	u32 old_counted = 0;
	u32 new_counted = 0;

	MALI_DEBUG_ASSERT_POINTER(group);
	MALI_DEBUG_ASSERT_POINTER(old_list);
	MALI_DEBUG_ASSERT_POINTER(old_count);
	MALI_DEBUG_ASSERT_POINTER(new_list);
	MALI_DEBUG_ASSERT_POINTER(new_count);

	/*
	 * Verify that group is present on old list,
	 * and that the count is correct
	 */

	_MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, old_list,
				    struct mali_group, executor_list) {
		old_counted++;
		if (group == group_iter) {
			found = MALI_TRUE;
		}
	}

	_MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, new_list,
				    struct mali_group, executor_list) {
		new_counted++;
	}

	/* Not found on the expected list: print where caller thought it was
	 * and which state it is actually in, to aid debugging the assert below. */
	if (MALI_FALSE == found) {
		if (old_list == &group_list_idle) {
			MALI_DEBUG_PRINT(1, (" old Group list is idle,"));
		} else if (old_list == &group_list_inactive) {
			MALI_DEBUG_PRINT(1, (" old Group list is inactive,"));
		} else if (old_list == &group_list_working) {
			MALI_DEBUG_PRINT(1, (" old Group list is working,"));
		} else if (old_list == &group_list_disabled) {
			MALI_DEBUG_PRINT(1, (" old Group list is disable,"));
		}

		if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_WORKING)) {
			MALI_DEBUG_PRINT(1, (" group in working \n"));
		} else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_INACTIVE)) {
			MALI_DEBUG_PRINT(1, (" group in inactive \n"));
		} else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_IDLE)) {
			MALI_DEBUG_PRINT(1, (" group in idle \n"));
		} else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED)) {
			MALI_DEBUG_PRINT(1, (" but group in disabled \n"));
		}
	}

	MALI_DEBUG_ASSERT(MALI_TRUE == found);
	MALI_DEBUG_ASSERT(0 < (*old_count));
	MALI_DEBUG_ASSERT((*old_count) == old_counted);
	MALI_DEBUG_ASSERT((*new_count) == new_counted);
#endif

	_mali_osk_list_move(&group->executor_list, new_list);
	(*old_count)--;
	(*new_count)++;
}
2103 
static void mali_executor_set_state_pp_physical(struct mali_group *group,
		_mali_osk_list_t *new_list,
		u32 *new_count)
{
	/*
	 * Put a physical PP group onto a state list it was not previously
	 * tracked on, and account for it in that list's element counter.
	 */
	*new_count += 1;
	_mali_osk_list_add(&group->executor_list, new_list);
}
2111 
mali_executor_group_is_in_state(struct mali_group * group,enum mali_executor_state_t state)2112 static mali_bool mali_executor_group_is_in_state(struct mali_group *group,
2113 		enum mali_executor_state_t state)
2114 {
2115 	MALI_DEBUG_ASSERT_POINTER(group);
2116 	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
2117 
2118 	if (gp_group == group) {
2119 		if (gp_group_state == state) {
2120 			return MALI_TRUE;
2121 		}
2122 	} else if (virtual_group == group || mali_group_is_in_virtual(group)) {
2123 		if (virtual_group_state == state) {
2124 			return MALI_TRUE;
2125 		}
2126 	} else {
2127 		/* Physical PP group */
2128 		struct mali_group *group_iter;
2129 		struct mali_group *temp;
2130 		_mali_osk_list_t *list;
2131 
2132 		if (EXEC_STATE_DISABLED == state) {
2133 			list = &group_list_disabled;
2134 		} else if (EXEC_STATE_INACTIVE == state) {
2135 			list = &group_list_inactive;
2136 		} else if (EXEC_STATE_IDLE == state) {
2137 			list = &group_list_idle;
2138 		} else {
2139 			MALI_DEBUG_ASSERT(EXEC_STATE_WORKING == state);
2140 			list = &group_list_working;
2141 		}
2142 
2143 		_MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, list,
2144 					    struct mali_group, executor_list) {
2145 			if (group_iter == group) {
2146 				return MALI_TRUE;
2147 			}
2148 		}
2149 	}
2150 
2151 	/* group not in correct state */
2152 	return MALI_FALSE;
2153 }
2154 
static void mali_executor_group_enable_internal(struct mali_group *group)
{
	/*
	 * Re-enable a group that core scaling previously disabled: first move
	 * it to the inactive state, then try to activate it. If activation
	 * completes immediately the group goes straight to idle (and a
	 * physical group may rejoin the virtual group); otherwise a PM update
	 * is triggered and activation completes asynchronously.
	 * Executor lock must be held.
	 */
	MALI_DEBUG_ASSERT(group);
	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
	MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED));

	/* Put into inactive state (== "lowest" enabled state) */
	if (group == gp_group) {
		MALI_DEBUG_ASSERT(EXEC_STATE_DISABLED == gp_group_state);
		gp_group_state = EXEC_STATE_INACTIVE;
	} else {
		mali_executor_change_state_pp_physical(group,
						       &group_list_disabled,
						       &group_list_disabled_count,
						       &group_list_inactive,
						       &group_list_inactive_count);

		++num_physical_pp_cores_enabled;
		MALI_DEBUG_PRINT(4, ("Enabling group id %d \n", group->pp_core->core_id));
	}

	if (MALI_GROUP_STATE_ACTIVE == mali_group_activate(group)) {
		/* Activation completed synchronously; power must already be on */
		MALI_DEBUG_ASSERT(MALI_TRUE == mali_group_power_is_on(group));

		/* Move from inactive to idle */
		if (group == gp_group) {
			gp_group_state = EXEC_STATE_IDLE;
		} else {
			mali_executor_change_state_pp_physical(group,
							       &group_list_inactive,
							       &group_list_inactive_count,
							       &group_list_idle,
							       &group_list_idle_count);

			if (mali_executor_has_virtual_group()) {
				if (mali_executor_physical_rejoin_virtual(group)) {
					mali_pm_update_async();
				}
			}
		}
	} else {
		/* Activation is pending; PM update will drive it to completion */
		mali_pm_update_async();
	}
}
2199 
static void mali_executor_group_disable_internal(struct mali_group *group)
{
	/*
	 * Disable a group for core scaling. A group that is currently working
	 * is only flagged; the actual disable happens when its job completes
	 * (see mali_executor_core_scale_in_group_complete). Otherwise the
	 * group is moved to the disabled state immediately and deactivated if
	 * needed. Executor lock must be held.
	 */
	mali_bool working;

	MALI_DEBUG_ASSERT_POINTER(group);
	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
	MALI_DEBUG_ASSERT(!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED));

	working = mali_executor_group_is_in_state(group, EXEC_STATE_WORKING);
	if (MALI_TRUE == working) {
		/** Group to be disabled once it completes current work,
		 * when virtual group completes, also check child groups for this flag */
		mali_group_set_disable_request(group, MALI_TRUE);
		return;
	}

	/* Put into disabled state */
	if (group == gp_group) {
		/* GP group */
		MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != gp_group_state);
		gp_group_state = EXEC_STATE_DISABLED;
	} else {
		if (mali_group_is_in_virtual(group)) {
			/* A child group of virtual group. move the specific group from virtual group */
			MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != virtual_group_state);

			mali_executor_set_state_pp_physical(group,
							    &group_list_disabled,
							    &group_list_disabled_count);

			mali_group_remove_group(virtual_group, group);
			mali_executor_disable_empty_virtual();
		} else {
			mali_executor_change_group_status_disabled(group);
		}

		--num_physical_pp_cores_enabled;
		MALI_DEBUG_PRINT(4, ("Disabling group id %d \n", group->pp_core->core_id));
	}

	/* Deactivate unless already inactive; a state change needs a PM update */
	if (MALI_GROUP_STATE_INACTIVE != group->state) {
		if (MALI_TRUE == mali_group_deactivate(group)) {
			mali_pm_update_async();
		}
	}
}
2246 
static void mali_executor_notify_core_change(u32 num_cores)
{
	/*
	 * Tell every user-space session that the number of enabled PP cores
	 * changed. Only applies to Mali-300/400; Mali-450/470 are excluded.
	 *
	 * Notification objects cannot be allocated while holding the session
	 * lock, so allocation happens first and the whole procedure retries
	 * if new sessions appeared in between (num_sessions_alloc too small).
	 */
	mali_bool done = MALI_FALSE;

	if (mali_is_mali450() || mali_is_mali470()) {
		return;
	}

	/*
	 * This function gets a bit complicated because we can't hold the session lock while
	 * allocating notification objects.
	 */
	while (!done) {
		u32 i;
		u32 num_sessions_alloc;
		u32 num_sessions_with_lock;
		u32 used_notification_objects = 0;
		_mali_osk_notification_t **notobjs;

		/* Pre allocate the number of notifications objects we need right now (might change after lock has been taken) */
		num_sessions_alloc = mali_session_get_count();
		if (0 == num_sessions_alloc) {
			/* No sessions to report to */
			return;
		}

		notobjs = (_mali_osk_notification_t **)_mali_osk_malloc(sizeof(_mali_osk_notification_t *) * num_sessions_alloc);
		if (NULL == notobjs) {
			MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure)\n"));
			/* there is probably no point in trying again, system must be really low on memory and probably unusable now anyway */
			return;
		}

		/* A NULL slot means that session simply gets no notification */
		for (i = 0; i < num_sessions_alloc; i++) {
			notobjs[i] = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_NUM_CORE_CHANGE, sizeof(_mali_uk_pp_num_cores_changed_s));
			if (NULL != notobjs[i]) {
				_mali_uk_pp_num_cores_changed_s *data = notobjs[i]->result_buffer;
				data->number_of_enabled_cores = num_cores;
			} else {
				MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure %u)\n", i));
			}
		}

		mali_session_lock();

		/* number of sessions will not change while we hold the lock */
		num_sessions_with_lock = mali_session_get_count();

		if (num_sessions_alloc >= num_sessions_with_lock) {
			/* We have allocated enough notification objects for all the sessions atm */
			struct mali_session_data *session, *tmp;
			MALI_SESSION_FOREACH(session, tmp, link) {
				MALI_DEBUG_ASSERT(used_notification_objects < num_sessions_alloc);
				if (NULL != notobjs[used_notification_objects]) {
					mali_session_send_notification(session, notobjs[used_notification_objects]);
					notobjs[used_notification_objects] = NULL; /* Don't track this notification object any more */
				}
				used_notification_objects++;
			}
			done = MALI_TRUE;
		}

		mali_session_unlock();

		/* Delete any remaining/unused notification objects */
		for (; used_notification_objects < num_sessions_alloc; used_notification_objects++) {
			if (NULL != notobjs[used_notification_objects]) {
				_mali_osk_notification_delete(notobjs[used_notification_objects]);
			}
		}

		_mali_osk_free(notobjs);
	}
}
2321 
mali_executor_core_scaling_is_done(void * data)2322 static mali_bool mali_executor_core_scaling_is_done(void *data)
2323 {
2324 	u32 i;
2325 	u32 num_groups;
2326 	mali_bool ret = MALI_TRUE;
2327 
2328 	MALI_IGNORE(data);
2329 
2330 	mali_executor_lock();
2331 
2332 	num_groups = mali_group_get_glob_num_groups();
2333 
2334 	for (i = 0; i < num_groups; i++) {
2335 		struct mali_group *group = mali_group_get_glob_group(i);
2336 
2337 		if (NULL != group) {
2338 			if (MALI_TRUE == group->disable_requested && NULL != mali_group_get_pp_core(group)) {
2339 				ret = MALI_FALSE;
2340 				break;
2341 			}
2342 		}
2343 	}
2344 	mali_executor_unlock();
2345 
2346 	return ret;
2347 }
2348 
static void mali_executor_wq_notify_core_change(void *arg)
{
	/*
	 * Work-queue entry point: block until any in-flight core scaling has
	 * finished, then notify all sessions of the new enabled-core count.
	 * Not applicable to Mali-450/470.
	 */
	MALI_IGNORE(arg);

	if (mali_is_mali450() || mali_is_mali470()) {
		return;
	}

	/* Sleep until no group still has a pending disable request */
	_mali_osk_wait_queue_wait_event(executor_notify_core_change_wait_queue,
					mali_executor_core_scaling_is_done, NULL);

	mali_executor_notify_core_change(num_physical_pp_cores_enabled);
}
2362 
/**
 * Clear all disable requests left over from the _last_ core scaling operation.
 */
mali_executor_core_scaling_reset(void)2366 static void mali_executor_core_scaling_reset(void)
2367 {
2368 	u32 i;
2369 	u32 num_groups;
2370 
2371 	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
2372 
2373 	num_groups = mali_group_get_glob_num_groups();
2374 
2375 	for (i = 0; i < num_groups; i++) {
2376 		struct mali_group *group = mali_group_get_glob_group(i);
2377 
2378 		if (NULL != group) {
2379 			group->disable_requested = MALI_FALSE;
2380 		}
2381 	}
2382 
2383 	for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
2384 		core_scaling_delay_up_mask[i] = 0;
2385 	}
2386 }
2387 
static void mali_executor_core_scale(unsigned int target_core_nr)
{
	/*
	 * Scale the number of enabled physical PP cores to @target_core_nr.
	 *
	 * The per-domain delta between the current and the target best-power
	 * cost masks is computed; negative entries mean cores in that domain
	 * must be disabled, positive entries mean cores must be enabled.
	 * Cores that cannot be disabled yet (still working) leave part of the
	 * enable quota pending in core_scaling_delay_up_mask, to be consumed
	 * by mali_executor_core_scale_in_group_complete().
	 */
	int current_core_scaling_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
	int target_core_scaling_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
	int i;

	MALI_DEBUG_ASSERT(0 < target_core_nr);
	MALI_DEBUG_ASSERT(num_physical_pp_cores_total >= target_core_nr);

	mali_executor_lock();

	if (target_core_nr < num_physical_pp_cores_enabled) {
		MALI_DEBUG_PRINT(2, ("Requesting %d cores: disabling %d cores\n", target_core_nr, num_physical_pp_cores_enabled - target_core_nr));
	} else {
		MALI_DEBUG_PRINT(2, ("Requesting %d cores: enabling %d cores\n", target_core_nr, target_core_nr - num_physical_pp_cores_enabled));
	}

	/* When a new core scaling request is comming,  we should remove the un-doing
	 * part of the last core scaling request.  It's safe because we have only one
	 * lock(executor lock) protection. */
	mali_executor_core_scaling_reset();

	/* Per-domain core distribution for the current and the requested count */
	mali_pm_get_best_power_cost_mask(num_physical_pp_cores_enabled, current_core_scaling_mask);
	mali_pm_get_best_power_cost_mask(target_core_nr, target_core_scaling_mask);

	/* Turn the target mask into a signed per-domain delta */
	for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
		target_core_scaling_mask[i] = target_core_scaling_mask[i] - current_core_scaling_mask[i];
		MALI_DEBUG_PRINT(5, ("target_core_scaling_mask[%d] = %d\n", i, target_core_scaling_mask[i]));
	}

	/* Pass 1: disable cores in domains with a negative delta */
	for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
		if (0 > target_core_scaling_mask[i]) {
			struct mali_pm_domain *domain;

			domain = mali_pm_domain_get_from_index(i);

			/* Domain is valid and has pp cores */
			if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) {
				struct mali_group *group;
				struct mali_group *temp;

				_MALI_OSK_LIST_FOREACHENTRY(group, temp, &domain->group_list, struct mali_group, pm_domain_list) {
					if (NULL != mali_group_get_pp_core(group) && (!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))
					    && (!mali_group_is_virtual(group))) {
						mali_executor_group_disable_internal(group);
						target_core_scaling_mask[i]++;
						if ((0 == target_core_scaling_mask[i])) {
							break;
						}

					}
				}
			}
		}
	}

	/* Pass 2: enable cores in domains with a positive delta */
	for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
		/**
		 * Target_core_scaling_mask[i] is bigger than 0,
		 * means we need to enable some pp cores in
		 * this domain whose domain index is i.
		 */
		if (0 < target_core_scaling_mask[i]) {
			struct mali_pm_domain *domain;

			if (num_physical_pp_cores_enabled >= target_core_nr) {
				break;
			}

			domain = mali_pm_domain_get_from_index(i);

			/* Domain is valid and has pp cores */
			if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) {
				struct mali_group *group;
				struct mali_group *temp;

				_MALI_OSK_LIST_FOREACHENTRY(group, temp, &domain->group_list, struct mali_group, pm_domain_list) {
					if (NULL != mali_group_get_pp_core(group) && mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED)
					    && (!mali_group_is_virtual(group))) {
						mali_executor_group_enable_internal(group);
						target_core_scaling_mask[i]--;

						if ((0 == target_core_scaling_mask[i]) || num_physical_pp_cores_enabled == target_core_nr) {
							break;
						}
					}
				}
			}
		}
	}

	/**
	 * Here, we may still have some pp cores not been enabled because of some
	 * pp cores need to be disabled are still in working state.
	 */
	for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
		if (0 < target_core_scaling_mask[i]) {
			core_scaling_delay_up_mask[i] = target_core_scaling_mask[i];
		}
	}

	mali_executor_schedule();
	mali_executor_unlock();
}
2492 
static void mali_executor_core_scale_in_group_complete(struct mali_group *group)
{
	/*
	 * Called when @group (flagged for disable during core scaling)
	 * finishes its job: actually disable it (and, for a virtual group,
	 * every flagged child), then enable an equal number of cores from
	 * the delayed-enable mask recorded by mali_executor_core_scale().
	 * Finally wakes up waiters on the core-change wait queue.
	 * Executor lock must be held.
	 */
	int num_pp_cores_disabled = 0;
	int num_pp_cores_to_enable = 0;
	int i;

	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
	MALI_DEBUG_ASSERT(MALI_TRUE == mali_group_disable_requested(group));

	/* Disable child group of virtual group */
	if (mali_group_is_virtual(group)) {
		struct mali_group *child;
		struct mali_group *temp;

		_MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
			if (MALI_TRUE == mali_group_disable_requested(child)) {
				mali_group_set_disable_request(child, MALI_FALSE);
				mali_executor_group_disable_internal(child);
				num_pp_cores_disabled++;
			}
		}
		mali_group_set_disable_request(group, MALI_FALSE);
	} else {
		mali_executor_group_disable_internal(group);
		mali_group_set_disable_request(group, MALI_FALSE);
		if (NULL != mali_group_get_pp_core(group)) {
			num_pp_cores_disabled++;
		}
	}

	/* Each disabled core frees up budget for one delayed enable */
	num_pp_cores_to_enable = num_pp_cores_disabled;

	for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
		if (0 < core_scaling_delay_up_mask[i]) {
			struct mali_pm_domain *domain;

			if (0 == num_pp_cores_to_enable) {
				break;
			}

			domain = mali_pm_domain_get_from_index(i);

			/* Domain is valid and has pp cores */
			if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) {
				struct mali_group *disabled_group;
				struct mali_group *temp;

				_MALI_OSK_LIST_FOREACHENTRY(disabled_group, temp, &domain->group_list, struct mali_group, pm_domain_list) {
					if (NULL != mali_group_get_pp_core(disabled_group) && mali_executor_group_is_in_state(disabled_group, EXEC_STATE_DISABLED)) {
						mali_executor_group_enable_internal(disabled_group);
						core_scaling_delay_up_mask[i]--;
						num_pp_cores_to_enable--;

						if ((0 == core_scaling_delay_up_mask[i]) || 0 == num_pp_cores_to_enable) {
							break;
						}
					}
				}
			}
		}
	}

	_mali_osk_wait_queue_wake_up(executor_notify_core_change_wait_queue);
}
2557 
mali_executor_change_group_status_disabled(struct mali_group * group)2558 static void mali_executor_change_group_status_disabled(struct mali_group *group)
2559 {
2560 	/* Physical PP group */
2561 	mali_bool idle;
2562 
2563 	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
2564 
2565 	idle = mali_executor_group_is_in_state(group, EXEC_STATE_IDLE);
2566 	if (MALI_TRUE == idle) {
2567 		mali_executor_change_state_pp_physical(group,
2568 						       &group_list_idle,
2569 						       &group_list_idle_count,
2570 						       &group_list_disabled,
2571 						       &group_list_disabled_count);
2572 	} else {
2573 		mali_executor_change_state_pp_physical(group,
2574 						       &group_list_inactive,
2575 						       &group_list_inactive_count,
2576 						       &group_list_disabled,
2577 						       &group_list_disabled_count);
2578 	}
2579 }
2580 
static mali_bool mali_executor_deactivate_list_idle(mali_bool deactivate_idle_group)
{
	/*
	 * Handle idle physical PP groups: on Mali-450 (virtual group present)
	 * idle physical groups rejoin the virtual group; on Mali-300/400 they
	 * are deactivated and moved to the inactive list, but only when
	 * @deactivate_idle_group is MALI_TRUE.
	 *
	 * Returns MALI_TRUE if the caller should trigger a PM update.
	 */
	mali_bool trigger_pm_update = MALI_FALSE;

	if (group_list_idle_count > 0) {
		if (mali_executor_has_virtual_group()) {

			/* Rejoin virtual group on Mali-450 */

			struct mali_group *group;
			struct mali_group *temp;

			/* Safe iteration: rejoin removes entries from the idle list */
			_MALI_OSK_LIST_FOREACHENTRY(group, temp,
						    &group_list_idle,
						    struct mali_group, executor_list) {
				if (mali_executor_physical_rejoin_virtual(
					    group)) {
					trigger_pm_update = MALI_TRUE;
				}
			}
		} else if (deactivate_idle_group) {
			struct mali_group *group;
			struct mali_group *temp;

			/* Deactivate group on Mali-300/400 */

			_MALI_OSK_LIST_FOREACHENTRY(group, temp,
						    &group_list_idle,
						    struct mali_group, executor_list) {
				if (mali_group_deactivate(group)) {
					trigger_pm_update = MALI_TRUE;
				}

				/* Move from idle to inactive */
				mali_executor_change_state_pp_physical(group,
								       &group_list_idle,
								       &group_list_idle_count,
								       &group_list_inactive,
								       &group_list_inactive_count);
			}
		}
	}

	return trigger_pm_update;
}
2626 
void mali_executor_running_status_print(void)
{
	/*
	 * Debug dump of the executor's complete running state: the GP group,
	 * each physical PP state list (working/inactive/idle/disabled) and,
	 * when present, the virtual group with all its children, their power
	 * domains and L2 caches. Purely diagnostic; no state is modified.
	 */
	struct mali_group *group = NULL;
	struct mali_group *temp = NULL;

	MALI_PRINT(("GP running job: %p\n", gp_group->gp_running_job));
	if ((gp_group->gp_core) && (gp_group->is_working)) {
		mali_group_dump_status(gp_group);
	}
	MALI_PRINT(("Physical PP groups in WORKING state (count = %u):\n", group_list_working_count));
	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, executor_list) {
		MALI_PRINT(("PP running job: %p, subjob %d \n", group->pp_running_job, group->pp_running_sub_job));
		mali_group_dump_status(group);
	}
	MALI_PRINT(("Physical PP groups in INACTIVE state (count = %u):\n", group_list_inactive_count));
	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) {
		MALI_PRINT(("\tPP status %d, SW power: %s\n", group->state, group->power_is_on ? "On" : "Off"));
		MALI_PRINT(("\tPP #%d: %s\n", group->pp_core->core_id, group->pp_core->hw_core.description));
	}
	MALI_PRINT(("Physical PP groups in IDLE state (count = %u):\n", group_list_idle_count));
	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) {
		MALI_PRINT(("\tPP status %d, SW power: %s\n", group->state, group->power_is_on ? "On" : "Off"));
		MALI_PRINT(("\tPP #%d: %s\n", group->pp_core->core_id, group->pp_core->hw_core.description));
	}
	MALI_PRINT(("Physical PP groups in DISABLED state (count = %u):\n", group_list_disabled_count));
	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) {
		MALI_PRINT(("\tPP status %d, SW power: %s\n", group->state, group->power_is_on ? "On" : "Off"));
		MALI_PRINT(("\tPP #%d: %s\n", group->pp_core->core_id, group->pp_core->hw_core.description));
	}

	if (mali_executor_has_virtual_group()) {
		MALI_PRINT(("Virtual group running job: %p\n", virtual_group->pp_running_job));
		MALI_PRINT(("Virtual group status: %d\n", virtual_group_state));
		MALI_PRINT(("Virtual group->status: %d\n", virtual_group->state));
		MALI_PRINT(("\tSW power: %s\n", virtual_group->power_is_on ? "On" : "Off"));
		/* Per-child dump: job, state, power, PM domain and L2 cache details */
		_MALI_OSK_LIST_FOREACHENTRY(group, temp, &virtual_group->group_list,
					    struct mali_group, group_list) {
			int i = 0;
			MALI_PRINT(("\tchild group(%s) running job: %p\n", group->pp_core->hw_core.description, group->pp_running_job));
			MALI_PRINT(("\tchild group(%s)->status: %d\n", group->pp_core->hw_core.description, group->state));
			MALI_PRINT(("\tchild group(%s) SW power: %s\n", group->pp_core->hw_core.description, group->power_is_on ? "On" : "Off"));
			if (group->pm_domain) {
				MALI_PRINT(("\tPower domain: id %u\n", mali_pm_domain_get_id(group->pm_domain)));
				MALI_PRINT(("\tMask:0x%04x \n", mali_pm_domain_get_mask(group->pm_domain)));
				MALI_PRINT(("\tUse-count:%u \n", mali_pm_domain_get_use_count(group->pm_domain)));
				MALI_PRINT(("\tCurrent power status:%s \n", (mali_pm_domain_get_mask(group->pm_domain)& mali_pm_get_current_mask()) ? "On" : "Off"));
				MALI_PRINT(("\tWanted  power status:%s \n", (mali_pm_domain_get_mask(group->pm_domain)& mali_pm_get_wanted_mask()) ? "On" : "Off"));
			}

			for (i = 0; i < 2; i++) {
				if (NULL != group->l2_cache_core[i]) {
					struct mali_pm_domain *domain;
					domain = mali_l2_cache_get_pm_domain(group->l2_cache_core[i]);
					MALI_PRINT(("\t L2(index %d) group SW power: %s\n", i, group->l2_cache_core[i]->power_is_on ? "On" : "Off"));
					if (domain) {
						MALI_PRINT(("\tL2 Power domain: id %u\n", mali_pm_domain_get_id(domain)));
						MALI_PRINT(("\tL2 Mask:0x%04x \n", mali_pm_domain_get_mask(domain)));
						MALI_PRINT(("\tL2 Use-count:%u \n", mali_pm_domain_get_use_count(domain)));
						MALI_PRINT(("\tL2 Current power status:%s \n", (mali_pm_domain_get_mask(domain) & mali_pm_get_current_mask()) ? "On" : "Off"));
						MALI_PRINT(("\tL2 Wanted  power status:%s \n", (mali_pm_domain_get_mask(domain) & mali_pm_get_wanted_mask()) ? "On" : "Off"));
					}
				}
			}
		}
		if (EXEC_STATE_WORKING == virtual_group_state) {
			mali_group_dump_status(virtual_group);
		}
	}
}
2696 
void mali_executor_status_dump(void)
{
	/*
	 * Dump the scheduler's GP/PP job queue status for debugging.
	 *
	 * Lock ordering: the executor lock is taken before the scheduler
	 * lock, and both are released in the reverse order.  This ordering
	 * must not be changed without auditing all other call sites that
	 * take both locks, or a deadlock can be introduced.
	 */
	mali_executor_lock();
	mali_scheduler_lock();

	/* print schedule queue status */
	mali_scheduler_gp_pp_job_queue_print();

	mali_scheduler_unlock();
	mali_executor_unlock();
}
2708