1 /*
2 * Copyright (C) 2012-2017 ARM Limited. All rights reserved.
3 *
4 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
5 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
6 *
7 * A copy of the licence is included with the program, and can also be obtained from Free Software
8 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
9 */
10
11 #include "mali_scheduler.h"
12 #include "mali_kernel_common.h"
13 #include "mali_osk.h"
14 #include "mali_osk_profiling.h"
15 #include "mali_kernel_utilization.h"
16 #include "mali_timeline.h"
17 #include "mali_gp_job.h"
18 #include "mali_pp_job.h"
19 #include "mali_executor.h"
20 #include "mali_group.h"
21 #include <linux/wait.h>
22 #include <linux/sched.h>
23 #include "mali_pm_metrics.h"
24
25 #if defined(CONFIG_DMA_SHARED_BUFFER)
26 #include "mali_memory_dma_buf.h"
27 #if defined(CONFIG_MALI_DMA_BUF_FENCE)
28 #include "mali_dma_fence.h"
29 #include <linux/dma-buf.h>
30 #endif
31 #endif
32
33 #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
34 #include <linux/sched.h>
35 #include <trace/events/gpu.h>
36 #endif
37 /*
38 * ---------- static defines/constants ----------
39 */
40
/*
 * If dma_buf with map-on-demand is used, we defer PP job queuing to a
 * work queue, since the dma_buf mapping might sleep and jobs can be
 * queued from atomic context.
 */
45 #if defined(CONFIG_DMA_SHARED_BUFFER)
46 #if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
47 #define MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE 1
48 #endif
49 #endif
50
51
52 /*
53 * ---------- global variables (exported due to inline functions) ----------
54 */
55
56 /* Lock protecting this module */
57 _mali_osk_spinlock_irq_t *mali_scheduler_lock_obj = NULL;
58
59 /* Queue of jobs to be executed on the GP group */
60 struct mali_scheduler_job_queue job_queue_gp;
61
/* Queue of jobs to be executed on the PP groups */
63 struct mali_scheduler_job_queue job_queue_pp;
64
65 _mali_osk_atomic_t mali_job_id_autonumber;
66 _mali_osk_atomic_t mali_job_cache_order_autonumber;
67 /*
68 * ---------- static variables ----------
69 */
70
71 _mali_osk_wq_work_t *scheduler_wq_pp_job_delete = NULL;
72 _mali_osk_spinlock_irq_t *scheduler_pp_job_delete_lock = NULL;
73 static _MALI_OSK_LIST_HEAD_STATIC_INIT(scheduler_pp_job_deletion_queue);
74
75 #if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
76 static _mali_osk_wq_work_t *scheduler_wq_pp_job_queue = NULL;
77 static _mali_osk_spinlock_irq_t *scheduler_pp_job_queue_lock = NULL;
78 static _MALI_OSK_LIST_HEAD_STATIC_INIT(scheduler_pp_job_queue_list);
79 #endif
80
81 /*
82 * ---------- Forward declaration of static functions ----------
83 */
84
85 static mali_timeline_point mali_scheduler_submit_gp_job(
86 struct mali_session_data *session, struct mali_gp_job *job);
87 static _mali_osk_errcode_t mali_scheduler_submit_pp_job(
88 struct mali_session_data *session, struct mali_pp_job *job, mali_timeline_point *point);
89
90 static mali_bool mali_scheduler_queue_gp_job(struct mali_gp_job *job);
91 static mali_bool mali_scheduler_queue_pp_job(struct mali_pp_job *job);
92
93 static void mali_scheduler_return_gp_job_to_user(struct mali_gp_job *job,
94 mali_bool success);
95
96 static void mali_scheduler_deferred_pp_job_delete(struct mali_pp_job *job);
97 void mali_scheduler_do_pp_job_delete(void *arg);
98
99 #if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
100 static void mali_scheduler_deferred_pp_job_queue(struct mali_pp_job *job);
101 static void mali_scheduler_do_pp_job_queue(void *arg);
102 #endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
103
104 /*
105 * ---------- Actual implementation ----------
106 */
107
_mali_osk_errcode_t mali_scheduler_initialize(void)
109 {
110 _mali_osk_atomic_init(&mali_job_id_autonumber, 0);
111 _mali_osk_atomic_init(&mali_job_cache_order_autonumber, 0);
112
113 _MALI_OSK_INIT_LIST_HEAD(&job_queue_gp.normal_pri);
114 _MALI_OSK_INIT_LIST_HEAD(&job_queue_gp.high_pri);
115 job_queue_gp.depth = 0;
116 job_queue_gp.big_job_num = 0;
117
118 _MALI_OSK_INIT_LIST_HEAD(&job_queue_pp.normal_pri);
119 _MALI_OSK_INIT_LIST_HEAD(&job_queue_pp.high_pri);
120 job_queue_pp.depth = 0;
121 job_queue_pp.big_job_num = 0;
122
123 mali_scheduler_lock_obj = _mali_osk_spinlock_irq_init(
124 _MALI_OSK_LOCKFLAG_ORDERED,
125 _MALI_OSK_LOCK_ORDER_SCHEDULER);
	if (NULL == mali_scheduler_lock_obj) {
		mali_scheduler_terminate();
		return _MALI_OSK_ERR_FAULT;
	}
129
130 scheduler_wq_pp_job_delete = _mali_osk_wq_create_work(
131 mali_scheduler_do_pp_job_delete, NULL);
132 if (NULL == scheduler_wq_pp_job_delete) {
133 mali_scheduler_terminate();
134 return _MALI_OSK_ERR_FAULT;
135 }
136
137 scheduler_pp_job_delete_lock = _mali_osk_spinlock_irq_init(
138 _MALI_OSK_LOCKFLAG_ORDERED,
139 _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
140 if (NULL == scheduler_pp_job_delete_lock) {
141 mali_scheduler_terminate();
142 return _MALI_OSK_ERR_FAULT;
143 }
144
145 #if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
146 scheduler_wq_pp_job_queue = _mali_osk_wq_create_work(
147 mali_scheduler_do_pp_job_queue, NULL);
148 if (NULL == scheduler_wq_pp_job_queue) {
149 mali_scheduler_terminate();
150 return _MALI_OSK_ERR_FAULT;
151 }
152
153 scheduler_pp_job_queue_lock = _mali_osk_spinlock_irq_init(
154 _MALI_OSK_LOCKFLAG_ORDERED,
155 _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
156 if (NULL == scheduler_pp_job_queue_lock) {
157 mali_scheduler_terminate();
158 return _MALI_OSK_ERR_FAULT;
159 }
160 #endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
161
162 return _MALI_OSK_ERR_OK;
163 }
164
void mali_scheduler_terminate(void)
166 {
167 #if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
168 if (NULL != scheduler_pp_job_queue_lock) {
169 _mali_osk_spinlock_irq_term(scheduler_pp_job_queue_lock);
170 scheduler_pp_job_queue_lock = NULL;
171 }
172
173 if (NULL != scheduler_wq_pp_job_queue) {
174 _mali_osk_wq_delete_work(scheduler_wq_pp_job_queue);
175 scheduler_wq_pp_job_queue = NULL;
176 }
177 #endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
178
179 if (NULL != scheduler_pp_job_delete_lock) {
180 _mali_osk_spinlock_irq_term(scheduler_pp_job_delete_lock);
181 scheduler_pp_job_delete_lock = NULL;
182 }
183
184 if (NULL != scheduler_wq_pp_job_delete) {
185 _mali_osk_wq_delete_work(scheduler_wq_pp_job_delete);
186 scheduler_wq_pp_job_delete = NULL;
187 }
188
189 if (NULL != mali_scheduler_lock_obj) {
190 _mali_osk_spinlock_irq_term(mali_scheduler_lock_obj);
191 mali_scheduler_lock_obj = NULL;
192 }
193
194 _mali_osk_atomic_term(&mali_job_cache_order_autonumber);
195 _mali_osk_atomic_term(&mali_job_id_autonumber);
196 }
197
u32 mali_scheduler_job_physical_head_count(mali_bool gpu_mode_is_secure)
199 {
	/*
	 * Count how many physical sub jobs are present from the head of the
	 * queue until the first virtual job is encountered.
	 * Return early once we have reached the maximum number of physical
	 * PP cores (8).
	 */
205 u32 count = 0;
206 struct mali_pp_job *job;
207 struct mali_pp_job *temp;
208
209 /* Check for partially started normal pri jobs */
210 if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
211 MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
212
213 job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next,
214 struct mali_pp_job, list);
215
216 MALI_DEBUG_ASSERT_POINTER(job);
217
218 if (MALI_TRUE == mali_pp_job_has_started_sub_jobs(job)) {
			/*
			 * Remember: virtual jobs can't be queued and started
			 * at the same time, so this must be a physical job.
			 */
223 if ((MALI_FALSE == gpu_mode_is_secure && MALI_FALSE == mali_pp_job_is_protected_job(job))
224 || (MALI_TRUE == gpu_mode_is_secure && MALI_TRUE == mali_pp_job_is_protected_job(job))) {
225
226 count += mali_pp_job_unstarted_sub_job_count(job);
227 if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) {
228 return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
229 }
230 }
231 }
232 }
233
234 _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.high_pri,
235 struct mali_pp_job, list) {
236 if ((MALI_FALSE == mali_pp_job_is_virtual(job))
237 && ((MALI_FALSE == gpu_mode_is_secure && MALI_FALSE == mali_pp_job_is_protected_job(job))
238 || (MALI_TRUE == gpu_mode_is_secure && MALI_TRUE == mali_pp_job_is_protected_job(job)))) {
239
240 count += mali_pp_job_unstarted_sub_job_count(job);
241 if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) {
242 return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
243 }
244 } else {
245 /* Came across a virtual job, so stop counting */
246 return count;
247 }
248 }
249
250 _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.normal_pri,
251 struct mali_pp_job, list) {
252 if ((MALI_FALSE == mali_pp_job_is_virtual(job))
253 && (MALI_FALSE == mali_pp_job_has_started_sub_jobs(job))
254 && ((MALI_FALSE == gpu_mode_is_secure && MALI_FALSE == mali_pp_job_is_protected_job(job))
255 || (MALI_TRUE == gpu_mode_is_secure && MALI_TRUE == mali_pp_job_is_protected_job(job)))) {
256
257 count += mali_pp_job_unstarted_sub_job_count(job);
258 if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) {
259 return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
260 }
261 } else {
262 /* Came across a virtual job, so stop counting */
263 return count;
264 }
265 }
266 return count;
267 }
268
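
/*
 * Return, without dequeuing, the PP job that would be scheduled next:
 * a partially started job at the head of the normal priority queue is
 * preferred, then the head of the high priority queue, then the head of
 * the normal priority queue. Returns NULL if both queues are empty.
 */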
struct mali_pp_job *mali_scheduler_job_pp_next(void)
270 {
271 struct mali_pp_job *job;
272 struct mali_pp_job *temp;
273
274 MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
275
276 /* Check for partially started normal pri jobs */
277 if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
278 MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
279
280 job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next,
281 struct mali_pp_job, list);
282
283 MALI_DEBUG_ASSERT_POINTER(job);
284
285 if (MALI_TRUE == mali_pp_job_has_started_sub_jobs(job)) {
286 return job;
287 }
288 }
289
290 _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.high_pri,
291 struct mali_pp_job, list) {
292 return job;
293 }
294
295 _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.normal_pri,
296 struct mali_pp_job, list) {
297 return job;
298 }
299
300 return NULL;
301 }
302
mali_bool mali_scheduler_job_next_is_virtual(void)
304 {
305 struct mali_pp_job *job;
306
307 job = mali_scheduler_job_pp_virtual_peek();
308 if (NULL != job) {
309 MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job));
310
311 return MALI_TRUE;
312 }
313
314 return MALI_FALSE;
315 }
316
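
/*
 * Dequeue and return the next GP job; the high priority queue is served
 * before the normal priority queue. If this drops the number of pending
 * big jobs below MALI_MAX_PENDING_BIG_JOB, any process waiting on the
 * session wait queue is woken up.
 */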
struct mali_gp_job *mali_scheduler_job_gp_get(void)
318 {
319 _mali_osk_list_t *queue;
320 struct mali_gp_job *job = NULL;
321
322 MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
323 MALI_DEBUG_ASSERT(0 < job_queue_gp.depth);
324 MALI_DEBUG_ASSERT(job_queue_gp.big_job_num <= job_queue_gp.depth);
325
326 if (!_mali_osk_list_empty(&job_queue_gp.high_pri)) {
327 queue = &job_queue_gp.high_pri;
328 } else {
329 queue = &job_queue_gp.normal_pri;
330 MALI_DEBUG_ASSERT(!_mali_osk_list_empty(queue));
331 }
332
333 job = _MALI_OSK_LIST_ENTRY(queue->next, struct mali_gp_job, list);
334
335 MALI_DEBUG_ASSERT_POINTER(job);
336
337 mali_gp_job_list_remove(job);
338 job_queue_gp.depth--;
	if (job->big_job) {
		job_queue_gp.big_job_num--;
		if (job_queue_gp.big_job_num < MALI_MAX_PENDING_BIG_JOB) {
			/* Wake up processes waiting for a big job slot */
			wait_queue_head_t *wq = mali_session_get_wait_queue();
			wake_up(wq);
		}
	}
347 return job;
348 }
349
struct mali_pp_job *mali_scheduler_job_pp_physical_peek(void)
351 {
352 struct mali_pp_job *job = NULL;
353 struct mali_pp_job *tmp_job = NULL;
354
355 MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
356
357 /*
358 * For PP jobs we favour partially started jobs in normal
359 * priority queue over unstarted jobs in high priority queue
360 */
361
362 if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
363 MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
364
365 tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next,
366 struct mali_pp_job, list);
367 MALI_DEBUG_ASSERT(NULL != tmp_job);
368
369 if (MALI_FALSE == mali_pp_job_is_virtual(tmp_job)) {
370 job = tmp_job;
371 }
372 }
373
374 if (NULL == job ||
375 MALI_FALSE == mali_pp_job_has_started_sub_jobs(job)) {
376 /*
377 * There isn't a partially started job in normal queue, so
378 * look in high priority queue.
379 */
380 if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) {
381 MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
382
383 tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.high_pri.next,
384 struct mali_pp_job, list);
385 MALI_DEBUG_ASSERT(NULL != tmp_job);
386
387 if (MALI_FALSE == mali_pp_job_is_virtual(tmp_job)) {
388 job = tmp_job;
389 }
390 }
391 }
392
393 return job;
394 }
395
struct mali_pp_job *mali_scheduler_job_pp_virtual_peek(void)
397 {
398 struct mali_pp_job *job = NULL;
399 struct mali_pp_job *tmp_job = NULL;
400
401 MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
402
403 if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) {
404 MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
405
406 tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.high_pri.next,
407 struct mali_pp_job, list);
408
409 if (MALI_TRUE == mali_pp_job_is_virtual(tmp_job)) {
410 job = tmp_job;
411 }
412 }
413
414 if (NULL == job) {
415 if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
416 MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
417
418 tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next,
419 struct mali_pp_job, list);
420
421 if (MALI_TRUE == mali_pp_job_is_virtual(tmp_job)) {
422 job = tmp_job;
423 }
424 }
425 }
426
427 return job;
428 }
429
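
/*
 * Dequeue the next physical PP sub job: mark it as started, remove the job
 * from its queue once its last sub job has been handed out, and remove it
 * from the frame builder lookup list since its writeback can no longer be
 * discarded. The sub job index is returned through *sub_job.
 */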
struct mali_pp_job *mali_scheduler_job_pp_physical_get(u32 *sub_job)
431 {
432 struct mali_pp_job *job = mali_scheduler_job_pp_physical_peek();
433
434 MALI_DEBUG_ASSERT(MALI_FALSE == mali_pp_job_is_virtual(job));
435
436 if (NULL != job) {
437 *sub_job = mali_pp_job_get_first_unstarted_sub_job(job);
438
439 mali_pp_job_mark_sub_job_started(job, *sub_job);
440 if (MALI_FALSE == mali_pp_job_has_unstarted_sub_jobs(job)) {
441 /* Remove from queue when last sub job has been retrieved */
442 mali_pp_job_list_remove(job);
443 }
444
445 job_queue_pp.depth--;
446
		/*
		 * Job is about to start, so it is no longer
		 * possible to discard WB
		 */
451 mali_pp_job_fb_lookup_remove(job);
452 }
453
454 return job;
455 }
456
struct mali_pp_job *mali_scheduler_job_pp_virtual_get(void)
458 {
459 struct mali_pp_job *job = mali_scheduler_job_pp_virtual_peek();
460
461 MALI_DEBUG_ASSERT(MALI_TRUE == mali_pp_job_is_virtual(job));
462
463 if (NULL != job) {
464 MALI_DEBUG_ASSERT(0 ==
465 mali_pp_job_get_first_unstarted_sub_job(job));
466 MALI_DEBUG_ASSERT(1 ==
467 mali_pp_job_get_sub_job_count(job));
468
469 mali_pp_job_mark_sub_job_started(job, 0);
470
471 mali_pp_job_list_remove(job);
472
473 job_queue_pp.depth--;
474
		/*
		 * Job is about to start, so it is no longer
		 * possible to discard WB
		 */
479 mali_pp_job_fb_lookup_remove(job);
480 }
481
482 return job;
483 }
484
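
/*
 * Called when the Timeline system activates a GP job (its dependencies
 * have been met). The job is queued for execution; if queuing fails (e.g.
 * the session is aborting), the job is completed immediately with an error.
 */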
mali_scheduler_mask mali_scheduler_activate_gp_job(struct mali_gp_job *job)
486 {
487 MALI_DEBUG_ASSERT_POINTER(job);
488
489 MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Timeline activation for job %u (0x%08X).\n",
490 mali_gp_job_get_id(job), job));
491
492 mali_scheduler_lock();
493
494 if (!mali_scheduler_queue_gp_job(job)) {
495 /* Failed to enqueue job, release job (with error) */
496
497 mali_scheduler_unlock();
498
499 mali_timeline_tracker_release(mali_gp_job_get_tracker(job));
500 mali_gp_job_signal_pp_tracker(job, MALI_FALSE);
501
502 /* This will notify user space and close the job object */
503 mali_scheduler_complete_gp_job(job, MALI_FALSE,
504 MALI_TRUE, MALI_FALSE);
505
506 return MALI_SCHEDULER_MASK_EMPTY;
507 }
508
509 mali_scheduler_unlock();
510
511 return MALI_SCHEDULER_MASK_GP;
512 }
513
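
/*
 * Called when the Timeline system activates a PP job. Jobs activated with
 * an error are failed immediately. When deferred queuing is enabled, jobs
 * that still need dma_buf mapping are handed to a work queue, since the
 * mapping may sleep and activation can happen in atomic context.
 */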
mali_scheduler_mask mali_scheduler_activate_pp_job(struct mali_pp_job *job)
515 {
516 MALI_DEBUG_ASSERT_POINTER(job);
517
518 MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Timeline activation for job %u (0x%08X).\n",
519 mali_pp_job_get_id(job), job));
520
521 if (MALI_TRUE == mali_timeline_tracker_activation_error(
522 mali_pp_job_get_tracker(job))) {
523 MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Job %u (0x%08X) activated with error, aborting.\n",
524 mali_pp_job_get_id(job), job));
525
526 mali_scheduler_lock();
527 mali_pp_job_fb_lookup_remove(job);
528 mali_pp_job_mark_unstarted_failed(job);
529 mali_scheduler_unlock();
530
531 mali_timeline_tracker_release(mali_pp_job_get_tracker(job));
532
533 /* This will notify user space and close the job object */
534 mali_scheduler_complete_pp_job(job, 0, MALI_TRUE, MALI_FALSE);
535
536 return MALI_SCHEDULER_MASK_EMPTY;
537 }
538
539 #if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
540 if (mali_pp_job_needs_dma_buf_mapping(job)) {
541 mali_scheduler_deferred_pp_job_queue(job);
542 return MALI_SCHEDULER_MASK_EMPTY;
543 }
544 #endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
545
546 mali_scheduler_lock();
547
548 if (!mali_scheduler_queue_pp_job(job)) {
549 /* Failed to enqueue job, release job (with error) */
550 mali_pp_job_fb_lookup_remove(job);
551 mali_pp_job_mark_unstarted_failed(job);
552 mali_scheduler_unlock();
553
554 mali_timeline_tracker_release(mali_pp_job_get_tracker(job));
555
556 /* This will notify user space and close the job object */
557 mali_scheduler_complete_pp_job(job, 0, MALI_TRUE, MALI_FALSE);
558
559 return MALI_SCHEDULER_MASK_EMPTY;
560 }
561
562 mali_scheduler_unlock();
563 return MALI_SCHEDULER_MASK_PP;
564 }
565
void mali_scheduler_complete_gp_job(struct mali_gp_job *job,
567 mali_bool success,
568 mali_bool user_notification,
569 mali_bool dequeued)
570 {
571 if (user_notification) {
572 mali_scheduler_return_gp_job_to_user(job, success);
573 }
574
575 if (dequeued) {
576 _mali_osk_pm_dev_ref_put();
577
578 if (mali_utilization_enabled()) {
579 mali_utilization_gp_end();
580 }
581 mali_pm_record_gpu_idle(MALI_TRUE);
582 }
583
584 mali_gp_job_delete(job);
585 }
586
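
/*
 * Complete a PP job: signal its rendered dma fence (if any), update
 * utilization/PM accounting when the job is dequeued, record whether user
 * space should be notified, and hand the job to the deferred deletion
 * work queue.
 */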
void mali_scheduler_complete_pp_job(struct mali_pp_job *job,
588 u32 num_cores_in_virtual,
589 mali_bool user_notification,
590 mali_bool dequeued)
591 {
592 job->user_notification = user_notification;
593 job->num_pp_cores_in_virtual = num_cores_in_virtual;
594
595 #if defined(CONFIG_MALI_DMA_BUF_FENCE)
596 if (NULL != job->rendered_dma_fence)
597 mali_dma_fence_signal_and_put(&job->rendered_dma_fence);
598 #endif
599
600 if (dequeued) {
601 #if defined(CONFIG_MALI_DVFS)
602 if (mali_pp_job_is_window_surface(job)) {
603 struct mali_session_data *session;
604 session = mali_pp_job_get_session(job);
605 mali_session_inc_num_window_jobs(session);
606 }
607 #endif
608 _mali_osk_pm_dev_ref_put();
609
610 if (mali_utilization_enabled()) {
611 mali_utilization_pp_end();
612 }
613 mali_pm_record_gpu_idle(MALI_FALSE);
614 }
615
	/* With the ZRAM feature enabled, all PP jobs are forced to use deferred deletion. */
617 mali_scheduler_deferred_pp_job_delete(job);
618 }
619
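
/*
 * Remove all queued, not yet started, GP and PP jobs belonging to the
 * aborting session from both priority queues, then release their timeline
 * trackers and complete them with an error (without user notification).
 */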
void mali_scheduler_abort_session(struct mali_session_data *session)
621 {
622 struct mali_gp_job *gp_job;
623 struct mali_gp_job *gp_tmp;
624 struct mali_pp_job *pp_job;
625 struct mali_pp_job *pp_tmp;
626 _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs_gp);
627 _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs_pp);
628
629 MALI_DEBUG_ASSERT_POINTER(session);
630 MALI_DEBUG_ASSERT(session->is_aborting);
631
632 MALI_DEBUG_PRINT(3, ("Mali scheduler: Aborting all queued jobs from session 0x%08X.\n",
633 session));
634
635 mali_scheduler_lock();
636
637 /* Remove from GP normal priority queue */
638 _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &job_queue_gp.normal_pri,
639 struct mali_gp_job, list) {
640 if (mali_gp_job_get_session(gp_job) == session) {
641 mali_gp_job_list_move(gp_job, &removed_jobs_gp);
642 job_queue_gp.depth--;
643 job_queue_gp.big_job_num -= gp_job->big_job ? 1 : 0;
644 }
645 }
646
647 /* Remove from GP high priority queue */
648 _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &job_queue_gp.high_pri,
649 struct mali_gp_job, list) {
650 if (mali_gp_job_get_session(gp_job) == session) {
651 mali_gp_job_list_move(gp_job, &removed_jobs_gp);
652 job_queue_gp.depth--;
653 job_queue_gp.big_job_num -= gp_job->big_job ? 1 : 0;
654 }
655 }
656
657 /* Remove from PP normal priority queue */
658 _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp,
659 &job_queue_pp.normal_pri,
660 struct mali_pp_job, list) {
661 if (mali_pp_job_get_session(pp_job) == session) {
662 mali_pp_job_fb_lookup_remove(pp_job);
663
664 job_queue_pp.depth -=
665 mali_pp_job_unstarted_sub_job_count(
666 pp_job);
667 mali_pp_job_mark_unstarted_failed(pp_job);
668
669 if (MALI_FALSE == mali_pp_job_has_unstarted_sub_jobs(pp_job)) {
670 if (mali_pp_job_is_complete(pp_job)) {
671 mali_pp_job_list_move(pp_job,
672 &removed_jobs_pp);
673 } else {
674 mali_pp_job_list_remove(pp_job);
675 }
676 }
677 }
678 }
679
680 /* Remove from PP high priority queue */
681 _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp,
682 &job_queue_pp.high_pri,
683 struct mali_pp_job, list) {
684 if (mali_pp_job_get_session(pp_job) == session) {
685 mali_pp_job_fb_lookup_remove(pp_job);
686
687 job_queue_pp.depth -=
688 mali_pp_job_unstarted_sub_job_count(
689 pp_job);
690 mali_pp_job_mark_unstarted_failed(pp_job);
691
692 if (MALI_FALSE == mali_pp_job_has_unstarted_sub_jobs(pp_job)) {
693 if (mali_pp_job_is_complete(pp_job)) {
694 mali_pp_job_list_move(pp_job,
695 &removed_jobs_pp);
696 } else {
697 mali_pp_job_list_remove(pp_job);
698 }
699 }
700 }
701 }
702
703 /*
704 * Release scheduler lock so we can release trackers
705 * (which will potentially queue new jobs)
706 */
707 mali_scheduler_unlock();
708
709 /* Release and complete all (non-running) found GP jobs */
710 _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &removed_jobs_gp,
711 struct mali_gp_job, list) {
712 mali_timeline_tracker_release(mali_gp_job_get_tracker(gp_job));
713 mali_gp_job_signal_pp_tracker(gp_job, MALI_FALSE);
714 _mali_osk_list_delinit(&gp_job->list);
715 mali_scheduler_complete_gp_job(gp_job,
716 MALI_FALSE, MALI_FALSE, MALI_TRUE);
717 }
718
719 /* Release and complete non-running PP jobs */
720 _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp, &removed_jobs_pp,
721 struct mali_pp_job, list) {
722 mali_timeline_tracker_release(mali_pp_job_get_tracker(pp_job));
723 _mali_osk_list_delinit(&pp_job->list);
724 mali_scheduler_complete_pp_job(pp_job, 0,
725 MALI_FALSE, MALI_TRUE);
726 }
727 }
728
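
/*
 * User-kernel entry point: create a GP job from the user space arguments,
 * submit it to the Timeline system and write the resulting timeline point
 * back to user space.
 */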
_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx,
730 _mali_uk_gp_start_job_s *uargs)
731 {
732 struct mali_session_data *session;
733 struct mali_gp_job *job;
734 mali_timeline_point point;
735 u32 __user *point_ptr = NULL;
736
737 MALI_DEBUG_ASSERT_POINTER(uargs);
738 MALI_DEBUG_ASSERT_POINTER(ctx);
739
740 session = (struct mali_session_data *)(uintptr_t)ctx;
741
742 job = mali_gp_job_create(session, uargs, mali_scheduler_get_new_id(),
743 NULL);
744 if (NULL == job) {
745 MALI_PRINT_ERROR(("Failed to create GP job.\n"));
746 return _MALI_OSK_ERR_NOMEM;
747 }
748
749 point_ptr = (u32 __user *)(uintptr_t)mali_gp_job_get_timeline_point_ptr(job);
750
751 point = mali_scheduler_submit_gp_job(session, job);
752
753 if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
754 /*
755 * Let user space know that something failed
756 * after the job was started.
757 */
758 return _MALI_OSK_ERR_ITEM_NOT_FOUND;
759 }
760
761 return _MALI_OSK_ERR_OK;
762 }
763
_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx,
765 _mali_uk_pp_start_job_s *uargs)
766 {
767 _mali_osk_errcode_t ret;
768 struct mali_session_data *session;
769 struct mali_pp_job *job;
770 mali_timeline_point point;
771 u32 __user *point_ptr = NULL;
772
773 MALI_DEBUG_ASSERT_POINTER(uargs);
774 MALI_DEBUG_ASSERT_POINTER(ctx);
775
776 session = (struct mali_session_data *)(uintptr_t)ctx;
777
778 job = mali_pp_job_create(session, uargs, mali_scheduler_get_new_id());
779 if (NULL == job) {
780 MALI_PRINT_ERROR(("Failed to create PP job.\n"));
781 return _MALI_OSK_ERR_NOMEM;
782 }
783
784 point_ptr = (u32 __user *)(uintptr_t)mali_pp_job_get_timeline_point_ptr(job);
785
786 /* Submit PP job. */
787 ret = mali_scheduler_submit_pp_job(session, job, &point);
788 job = NULL;
789
790 if (_MALI_OSK_ERR_OK == ret) {
791 if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
792 /*
793 * Let user space know that something failed
794 * after the jobs were started.
795 */
796 return _MALI_OSK_ERR_ITEM_NOT_FOUND;
797 }
798 }
799
800 return ret;
801 }
802
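
/*
 * User-kernel entry point that submits a GP job and a PP job together.
 * The GP job is created with the PP job's tracker, so the PP job is not
 * activated until the GP job signals that tracker.
 */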
_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx,
804 _mali_uk_pp_and_gp_start_job_s *uargs)
805 {
806 _mali_osk_errcode_t ret;
807 struct mali_session_data *session;
808 _mali_uk_pp_and_gp_start_job_s kargs;
809 struct mali_pp_job *pp_job;
810 struct mali_gp_job *gp_job;
811 u32 __user *point_ptr = NULL;
812 mali_timeline_point point;
813 _mali_uk_pp_start_job_s __user *pp_args;
814 _mali_uk_gp_start_job_s __user *gp_args;
815
816 MALI_DEBUG_ASSERT_POINTER(ctx);
817 MALI_DEBUG_ASSERT_POINTER(uargs);
818
819 session = (struct mali_session_data *) ctx;
820
821 if (0 != _mali_osk_copy_from_user(&kargs, uargs,
822 sizeof(_mali_uk_pp_and_gp_start_job_s))) {
823 return _MALI_OSK_ERR_NOMEM;
824 }
825
826 pp_args = (_mali_uk_pp_start_job_s __user *)(uintptr_t)kargs.pp_args;
827 gp_args = (_mali_uk_gp_start_job_s __user *)(uintptr_t)kargs.gp_args;
828
829 pp_job = mali_pp_job_create(session, pp_args,
830 mali_scheduler_get_new_id());
831 if (NULL == pp_job) {
832 MALI_PRINT_ERROR(("Failed to create PP job.\n"));
833 return _MALI_OSK_ERR_NOMEM;
834 }
835
836 gp_job = mali_gp_job_create(session, gp_args,
837 mali_scheduler_get_new_id(),
838 mali_pp_job_get_tracker(pp_job));
839 if (NULL == gp_job) {
840 MALI_PRINT_ERROR(("Failed to create GP job.\n"));
841 mali_pp_job_delete(pp_job);
842 return _MALI_OSK_ERR_NOMEM;
843 }
844
845 point_ptr = (u32 __user *)(uintptr_t)mali_pp_job_get_timeline_point_ptr(pp_job);
846
847 /* Submit GP job. */
848 mali_scheduler_submit_gp_job(session, gp_job);
849 gp_job = NULL;
850
851 /* Submit PP job. */
852 ret = mali_scheduler_submit_pp_job(session, pp_job, &point);
853 pp_job = NULL;
854
855 if (_MALI_OSK_ERR_OK == ret) {
856 if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
857 /*
858 * Let user space know that something failed
859 * after the jobs were started.
860 */
861 return _MALI_OSK_ERR_ITEM_NOT_FOUND;
862 }
863 }
864
865 return ret;
866 }
867
void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args)
869 {
870 struct mali_session_data *session;
871 struct mali_pp_job *job;
872 struct mali_pp_job *tmp;
873 u32 fb_lookup_id;
874
875 MALI_DEBUG_ASSERT_POINTER(args);
876 MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
877
878 session = (struct mali_session_data *)(uintptr_t)args->ctx;
879
880 fb_lookup_id = args->fb_id & MALI_PP_JOB_FB_LOOKUP_LIST_MASK;
881
882 mali_scheduler_lock();
883
	/* Iterate over all jobs for the given frame builder id. */
885 _MALI_OSK_LIST_FOREACHENTRY(job, tmp,
886 &session->pp_job_fb_lookup_list[fb_lookup_id],
887 struct mali_pp_job, session_fb_lookup_list) {
888 MALI_DEBUG_CODE(u32 disable_mask = 0);
889
890 if (mali_pp_job_get_frame_builder_id(job) !=
891 (u32) args->fb_id) {
892 MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Disable WB mismatching FB.\n"));
893 continue;
894 }
895
896 MALI_DEBUG_CODE(disable_mask |= 0xD << (4 * 3));
897
898 if (mali_pp_job_get_wb0_source_addr(job) == args->wb0_memory) {
899 MALI_DEBUG_CODE(disable_mask |= 0x1 << (4 * 1));
900 mali_pp_job_disable_wb0(job);
901 }
902
903 if (mali_pp_job_get_wb1_source_addr(job) == args->wb1_memory) {
904 MALI_DEBUG_CODE(disable_mask |= 0x2 << (4 * 2));
905 mali_pp_job_disable_wb1(job);
906 }
907
908 if (mali_pp_job_get_wb2_source_addr(job) == args->wb2_memory) {
909 MALI_DEBUG_CODE(disable_mask |= 0x3 << (4 * 3));
910 mali_pp_job_disable_wb2(job);
911 }
912 MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Disable WB: 0x%X.\n",
913 disable_mask));
914 }
915
916 mali_scheduler_unlock();
917 }
918
919 #if MALI_STATE_TRACKING
u32 mali_scheduler_dump_state(char *buf, u32 size)
921 {
922 int n = 0;
923
924 n += _mali_osk_snprintf(buf + n, size - n, "GP queues\n");
925 n += _mali_osk_snprintf(buf + n, size - n,
926 "\tQueue depth: %u\n", job_queue_gp.depth);
927 n += _mali_osk_snprintf(buf + n, size - n,
928 "\tNormal priority queue is %s\n",
929 _mali_osk_list_empty(&job_queue_gp.normal_pri) ?
930 "empty" : "not empty");
931 n += _mali_osk_snprintf(buf + n, size - n,
932 "\tHigh priority queue is %s\n",
933 _mali_osk_list_empty(&job_queue_gp.high_pri) ?
934 "empty" : "not empty");
935
936 n += _mali_osk_snprintf(buf + n, size - n,
937 "PP queues\n");
938 n += _mali_osk_snprintf(buf + n, size - n,
939 "\tQueue depth: %u\n", job_queue_pp.depth);
940 n += _mali_osk_snprintf(buf + n, size - n,
941 "\tNormal priority queue is %s\n",
942 _mali_osk_list_empty(&job_queue_pp.normal_pri)
943 ? "empty" : "not empty");
944 n += _mali_osk_snprintf(buf + n, size - n,
945 "\tHigh priority queue is %s\n",
946 _mali_osk_list_empty(&job_queue_pp.high_pri)
947 ? "empty" : "not empty");
948
949 n += _mali_osk_snprintf(buf + n, size - n, "\n");
950
951 return n;
952 }
953 #endif
954
955 /*
956 * ---------- Implementation of static functions ----------
957 */
958
static mali_timeline_point mali_scheduler_submit_gp_job(
960 struct mali_session_data *session, struct mali_gp_job *job)
961 {
962 mali_timeline_point point;
963
964 MALI_DEBUG_ASSERT_POINTER(session);
965 MALI_DEBUG_ASSERT_POINTER(job);
966
967 /* Add job to Timeline system. */
968 point = mali_timeline_system_add_tracker(session->timeline_system,
969 mali_gp_job_get_tracker(job), MALI_TIMELINE_GP);
970
971 return point;
972 }
973
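
/*
 * Submit a PP job: add it to the frame builder lookup list and then to the
 * Timeline system. With CONFIG_MALI_DMA_BUF_FENCE, the reservation objects
 * of all dma_buf memory cookies are collected and locked (ww mutex), dma
 * fence waiters are added for existing fences, and the job's own exclusive
 * fence is attached before the tracker is added to the Timeline.
 */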
static _mali_osk_errcode_t mali_scheduler_submit_pp_job(
975 struct mali_session_data *session, struct mali_pp_job *job, mali_timeline_point *point)
977 {
978 _mali_osk_errcode_t ret = _MALI_OSK_ERR_OK;
979
980 #if defined(CONFIG_MALI_DMA_BUF_FENCE)
981 struct ww_acquire_ctx ww_actx;
982 u32 i;
983 u32 num_memory_cookies = 0;
984 struct reservation_object **reservation_object_list = NULL;
985 unsigned int num_reservation_object = 0;
986 #endif
987
988 MALI_DEBUG_ASSERT_POINTER(session);
989 MALI_DEBUG_ASSERT_POINTER(job);
990
991 mali_scheduler_lock();
	/*
	 * Add the job to the lookup list used to quickly discard
	 * writeback units of queued jobs.
	 */
996 mali_pp_job_fb_lookup_add(job);
997 mali_scheduler_unlock();
998
999 #if defined(CONFIG_MALI_DMA_BUF_FENCE)
1000
	/* Allocate reservation_object_list to hold the dma reservation objects of the dependent dma buffers */
1002 num_memory_cookies = mali_pp_job_num_memory_cookies(job);
1003 if (0 < num_memory_cookies) {
1004 reservation_object_list = kzalloc(sizeof(struct reservation_object *) * num_memory_cookies, GFP_KERNEL);
1005 if (NULL == reservation_object_list) {
1006 MALI_PRINT_ERROR(("Failed to alloc the reservation object list.\n"));
1007 ret = _MALI_OSK_ERR_NOMEM;
1008 goto failed_to_alloc_reservation_object_list;
1009 }
1010 }
1011
	/* Add each dma reservation object to reservation_object_list */
1013 for (i = 0; i < num_memory_cookies; i++) {
1014 mali_mem_backend *mem_backend = NULL;
1015 struct reservation_object *tmp_reservation_object = NULL;
1016 u32 mali_addr = mali_pp_job_get_memory_cookie(job, i);
1017
1018 mem_backend = mali_mem_backend_struct_search(session, mali_addr);
1019
1020 MALI_DEBUG_ASSERT_POINTER(mem_backend);
1021
1022 if (NULL == mem_backend) {
1023 MALI_PRINT_ERROR(("Failed to find the memory backend for memory cookie[%d].\n", i));
1024 goto failed_to_find_mem_backend;
1025 }
1026
1027 if (MALI_MEM_DMA_BUF != mem_backend->type)
1028 continue;
1029
1030 tmp_reservation_object = mem_backend->dma_buf.attachment->buf->resv;
1031
1032 if (NULL != tmp_reservation_object) {
1033 mali_dma_fence_add_reservation_object_list(tmp_reservation_object,
1034 reservation_object_list, &num_reservation_object);
1035 }
1036 }
1037
	/*
	 * Add mali dma fence callbacks to wait for all dependent dma bufs,
	 * extend the timeline system to support dma fences, and create a new
	 * internal dma fence to replace the previous exclusive fence of each
	 * dependent dma buf.
	 */
1043 if (0 < num_reservation_object) {
1044 int error;
1045 int num_dma_fence_waiter = 0;
		/* Create a new dma fence. */
1047 job->rendered_dma_fence = mali_dma_fence_new(job->session->fence_context,
1048 _mali_osk_atomic_inc_return(&job->session->fence_seqno));
1049
1050 if (NULL == job->rendered_dma_fence) {
			MALI_PRINT_ERROR(("Failed to create a new dma fence.\n"));
1052 ret = _MALI_OSK_ERR_FAULT;
1053 goto failed_to_create_dma_fence;
1054 }
1055
		/* In order to avoid deadlock, use the wait/wound mutex to lock all dma buffers */
1057
1058 error = mali_dma_fence_lock_reservation_object_list(reservation_object_list,
1059 num_reservation_object, &ww_actx);
1060
1061 if (0 != error) {
1062 MALI_PRINT_ERROR(("Failed to lock all reservation objects.\n"));
1063 ret = _MALI_OSK_ERR_FAULT;
1064 goto failed_to_lock_reservation_object_list;
1065 }
1066
1067 mali_dma_fence_context_init(&job->dma_fence_context,
1068 mali_timeline_dma_fence_callback, (void *)job);
1069
1070 /* Add dma fence waiters and dma fence callback. */
1071 for (i = 0; i < num_reservation_object; i++) {
1072 ret = mali_dma_fence_context_add_waiters(&job->dma_fence_context, reservation_object_list[i]);
1073 if (_MALI_OSK_ERR_OK != ret) {
1074 MALI_PRINT_ERROR(("Failed to add waiter into mali dma fence context.\n"));
1075 goto failed_to_add_dma_fence_waiter;
1076 }
1077 }
1078
1079 for (i = 0; i < num_reservation_object; i++) {
1080 reservation_object_add_excl_fence(reservation_object_list[i], job->rendered_dma_fence);
1081 }
1082
1083 num_dma_fence_waiter = job->dma_fence_context.num_dma_fence_waiter;
1084
1085 /* Add job to Timeline system. */
1086 (*point) = mali_timeline_system_add_tracker(session->timeline_system,
1087 mali_pp_job_get_tracker(job), MALI_TIMELINE_PP);
1088
1089 if (0 != num_dma_fence_waiter) {
1090 mali_dma_fence_context_dec_count(&job->dma_fence_context);
1091 }
1092
1093 /* Unlock all wait/wound mutex lock. */
1094 mali_dma_fence_unlock_reservation_object_list(reservation_object_list,
1095 num_reservation_object, &ww_actx);
1096 } else {
1097 /* Add job to Timeline system. */
1098 (*point) = mali_timeline_system_add_tracker(session->timeline_system,
1099 mali_pp_job_get_tracker(job), MALI_TIMELINE_PP);
1100 }
1101
1102 kfree(reservation_object_list);
1103 return ret;
1104 #else
1105 /* Add job to Timeline system. */
1106 (*point) = mali_timeline_system_add_tracker(session->timeline_system,
1107 mali_pp_job_get_tracker(job), MALI_TIMELINE_PP);
1108 #endif
1109
1110 #if defined(CONFIG_MALI_DMA_BUF_FENCE)
1111 failed_to_add_dma_fence_waiter:
1112 mali_dma_fence_context_term(&job->dma_fence_context);
1113 mali_dma_fence_unlock_reservation_object_list(reservation_object_list,
1114 num_reservation_object, &ww_actx);
1115 failed_to_lock_reservation_object_list:
1116 mali_dma_fence_signal_and_put(&job->rendered_dma_fence);
1117 failed_to_create_dma_fence:
1118 failed_to_find_mem_backend:
1119 if (NULL != reservation_object_list)
1120 kfree(reservation_object_list);
1121 failed_to_alloc_reservation_object_list:
1122 mali_pp_job_fb_lookup_remove(job);
1123 #endif
1124 return ret;
1125 }
1126
static mali_bool mali_scheduler_queue_gp_job(struct mali_gp_job *job)
1128 {
1129 struct mali_session_data *session;
1130 _mali_osk_list_t *queue;
1131
1132 MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
1133 MALI_DEBUG_ASSERT_POINTER(job);
1134
1135 session = mali_gp_job_get_session(job);
1136 MALI_DEBUG_ASSERT_POINTER(session);
1137
1138 if (unlikely(session->is_aborting)) {
1139 MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Job %u (0x%08X) queued while session is aborting.\n",
1140 mali_gp_job_get_id(job), job));
1141 return MALI_FALSE; /* job not queued */
1142 }
1143
1144 mali_gp_job_set_cache_order(job, mali_scheduler_get_new_cache_order());
1145
1146 /* Determine which queue the job should be added to. */
1147 if (session->use_high_priority_job_queue) {
1148 queue = &job_queue_gp.high_pri;
1149 } else {
1150 queue = &job_queue_gp.normal_pri;
1151 }
1152
1153 job_queue_gp.depth += 1;
1154 job_queue_gp.big_job_num += (job->big_job) ? 1 : 0;
1155
	/* Add job to queue (mali_gp_job_list_add finds the correct place). */
1157 mali_gp_job_list_add(job, queue);
1158
	/*
	 * We hold a PM reference for every job we hold queued (and running).
	 * It is important that we take this reference after the job has been
	 * added to the queue, so that any runtime resume could schedule this
	 * job right there and then.
	 */
1165 _mali_osk_pm_dev_ref_get_async();
1166
1167 if (mali_utilization_enabled()) {
		/*
		 * We cheat a little bit by counting the GP as busy from the
		 * time a GP job is queued. This is fine because we only
		 * lose the tiny idle gap between jobs, and in return we
		 * get less utilization work to do (fewer locks taken).
		 */
1174 mali_utilization_gp_start();
1175 }
1176
1177 mali_pm_record_gpu_active(MALI_TRUE);
1178
1179 /* Add profiling events for job enqueued */
1180 _mali_osk_profiling_add_event(
1181 MALI_PROFILING_EVENT_TYPE_SINGLE |
1182 MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
1183 MALI_PROFILING_EVENT_REASON_SINGLE_SW_GP_ENQUEUE,
1184 mali_gp_job_get_pid(job),
1185 mali_gp_job_get_tid(job),
1186 mali_gp_job_get_frame_builder_id(job),
1187 mali_gp_job_get_flush_id(job),
1188 0);
1189
1190 #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
1191 trace_gpu_job_enqueue(mali_gp_job_get_tid(job),
1192 mali_gp_job_get_id(job), "GP");
1193 #endif
1194
1195 MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) queued\n",
1196 mali_gp_job_get_id(job), job));
1197
1198 return MALI_TRUE; /* job queued */
1199 }
1200
static mali_bool mali_scheduler_queue_pp_job(struct mali_pp_job *job)
1202 {
1203 struct mali_session_data *session;
1204 _mali_osk_list_t *queue = NULL;
1205
1206 MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
1207 MALI_DEBUG_ASSERT_POINTER(job);
1208
1209 session = mali_pp_job_get_session(job);
1210 MALI_DEBUG_ASSERT_POINTER(session);
1211
1212 if (unlikely(session->is_aborting)) {
1213 MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) queued while session is aborting.\n",
1214 mali_pp_job_get_id(job), job));
1215 return MALI_FALSE; /* job not queued */
1216 } else if (unlikely(MALI_SWAP_IN_FAIL == job->swap_status)) {
1217 MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) queued while swap in failed.\n",
1218 mali_pp_job_get_id(job), job));
1219 return MALI_FALSE;
1220 }
1221
1222 mali_pp_job_set_cache_order(job, mali_scheduler_get_new_cache_order());
1223
1224 if (session->use_high_priority_job_queue) {
1225 queue = &job_queue_pp.high_pri;
1226 } else {
1227 queue = &job_queue_pp.normal_pri;
1228 }
1229
1230 job_queue_pp.depth +=
1231 mali_pp_job_get_sub_job_count(job);
1232
	/* Add job to queue (mali_pp_job_list_add finds the correct place). */
1234 mali_pp_job_list_add(job, queue);
1235
	/*
	 * We hold a PM reference for every job we hold queued (and running).
	 * It is important that we take this reference after the job has been
	 * added to the queue, so that any runtime resume could schedule this
	 * job right there and then.
	 */
1242 _mali_osk_pm_dev_ref_get_async();
1243
1244 if (mali_utilization_enabled()) {
		/*
		 * We cheat a little bit by counting the PP as busy from the
		 * time a PP job is queued. This is fine because we only
		 * lose the tiny idle gap between jobs, and in return we
		 * get less utilization work to do (fewer locks taken).
		 */
1251 mali_utilization_pp_start();
1252 }
1253
1254 mali_pm_record_gpu_active(MALI_FALSE);
1255
1256 /* Add profiling events for job enqueued */
1257 _mali_osk_profiling_add_event(
1258 MALI_PROFILING_EVENT_TYPE_SINGLE |
1259 MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
1260 MALI_PROFILING_EVENT_REASON_SINGLE_SW_PP_ENQUEUE,
1261 mali_pp_job_get_pid(job),
1262 mali_pp_job_get_tid(job),
1263 mali_pp_job_get_frame_builder_id(job),
1264 mali_pp_job_get_flush_id(job),
1265 0);
1266
1267 #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
1268 trace_gpu_job_enqueue(mali_pp_job_get_tid(job),
1269 mali_pp_job_get_id(job), "PP");
1270 #endif
1271
1272 MALI_DEBUG_PRINT(3, ("Mali PP scheduler: %s job %u (0x%08X) with %u parts queued.\n",
1273 mali_pp_job_is_virtual(job)
1274 ? "Virtual" : "Physical",
1275 mali_pp_job_get_id(job), job,
1276 mali_pp_job_get_sub_job_count(job)));
1277
1278 return MALI_TRUE; /* job queued */
1279 }
1280
static void mali_scheduler_return_gp_job_to_user(struct mali_gp_job *job,
1282 mali_bool success)
1283 {
1284 _mali_uk_gp_job_finished_s *jobres;
1285 struct mali_session_data *session;
1286 _mali_osk_notification_t *notification;
1287
1288 MALI_DEBUG_ASSERT_POINTER(job);
1289
1290 session = mali_gp_job_get_session(job);
1291 MALI_DEBUG_ASSERT_POINTER(session);
1292
1293 notification = mali_gp_job_get_finished_notification(job);
1294 MALI_DEBUG_ASSERT_POINTER(notification);
1295
1296 jobres = notification->result_buffer;
1297 MALI_DEBUG_ASSERT_POINTER(jobres);
1298
1299 jobres->pending_big_job_num = mali_scheduler_job_gp_big_job_count();
1300
1301 jobres->user_job_ptr = mali_gp_job_get_user_id(job);
1302 if (MALI_TRUE == success) {
1303 jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
1304 } else {
1305 jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
1306 }
1307 jobres->heap_current_addr = mali_gp_job_get_current_heap_addr(job);
1308 jobres->perf_counter0 = mali_gp_job_get_perf_counter_value0(job);
1309 jobres->perf_counter1 = mali_gp_job_get_perf_counter_value1(job);
1310
1311 mali_session_send_notification(session, notification);
1312 }
1313
void mali_scheduler_return_pp_job_to_user(struct mali_pp_job *job,
1315 u32 num_cores_in_virtual)
1316 {
1317 u32 i;
1318 u32 num_counters_to_copy;
1319 _mali_uk_pp_job_finished_s *jobres;
1320 struct mali_session_data *session;
1321 _mali_osk_notification_t *notification;
1322
1323 if (MALI_TRUE == mali_pp_job_use_no_notification(job)) {
1324 return;
1325 }
1326
1327 MALI_DEBUG_ASSERT_POINTER(job);
1328
1329 session = mali_pp_job_get_session(job);
1330 MALI_DEBUG_ASSERT_POINTER(session);
1331
1332 notification = mali_pp_job_get_finished_notification(job);
1333 MALI_DEBUG_ASSERT_POINTER(notification);
1334
1335 jobres = notification->result_buffer;
1336 MALI_DEBUG_ASSERT_POINTER(jobres);
1337
1338 jobres->user_job_ptr = mali_pp_job_get_user_id(job);
1339 if (MALI_TRUE == mali_pp_job_was_success(job)) {
1340 jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
1341 } else {
1342 jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
1343 }
1344
1345 if (mali_pp_job_is_virtual(job)) {
1346 num_counters_to_copy = num_cores_in_virtual;
1347 } else {
1348 num_counters_to_copy = mali_pp_job_get_sub_job_count(job);
1349 }
1350
1351 for (i = 0; i < num_counters_to_copy; i++) {
1352 jobres->perf_counter0[i] =
1353 mali_pp_job_get_perf_counter_value0(job, i);
1354 jobres->perf_counter1[i] =
1355 mali_pp_job_get_perf_counter_value1(job, i);
1356 jobres->perf_counter_src0 =
1357 mali_pp_job_get_pp_counter_global_src0();
1358 jobres->perf_counter_src1 =
1359 mali_pp_job_get_pp_counter_global_src1();
1360 }
1361
1362 mali_session_send_notification(session, notification);
1363 }
1364
static void mali_scheduler_deferred_pp_job_delete(struct mali_pp_job *job)
1366 {
1367 MALI_DEBUG_ASSERT_POINTER(job);
1368
1369 _mali_osk_spinlock_irq_lock(scheduler_pp_job_delete_lock);
1370 mali_pp_job_list_addtail(job, &scheduler_pp_job_deletion_queue);
1371 _mali_osk_spinlock_irq_unlock(scheduler_pp_job_delete_lock);
1372
1373 _mali_osk_wq_schedule_work(scheduler_wq_pp_job_delete);
1374 }
1375
void mali_scheduler_do_pp_job_delete(void *arg)
1377 {
1378 _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
1379 struct mali_pp_job *job;
1380 struct mali_pp_job *tmp;
1381
1382 MALI_IGNORE(arg);
1383
1384 /*
1385 * Quickly "unhook" the jobs pending to be deleted, so we can release
1386 * the lock before we start deleting the job objects
1387 * (without any locks held)
1388 */
1389 _mali_osk_spinlock_irq_lock(scheduler_pp_job_delete_lock);
1390 _mali_osk_list_move_list(&scheduler_pp_job_deletion_queue, &list);
1391 _mali_osk_spinlock_irq_unlock(scheduler_pp_job_delete_lock);
1392
1393 _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list,
1394 struct mali_pp_job, list) {
1395 _mali_osk_list_delinit(&job->list);
1396
1397 #if defined(CONFIG_MALI_DMA_BUF_FENCE)
1398 mali_dma_fence_context_term(&job->dma_fence_context);
1399 #endif
1400
1401 mali_pp_job_delete(job); /* delete the job object itself */
1402 }
1403 }
1404
1405 #if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
1406
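/*
 * Deferred queuing: jobs that need dma_buf map-on-demand are placed on a
 * list and queued from a work queue, because mapping the buffers may sleep
 * while job activation can occur in atomic context.
 */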
static void mali_scheduler_deferred_pp_job_queue(struct mali_pp_job *job)
1408 {
1409 MALI_DEBUG_ASSERT_POINTER(job);
1410
1411 _mali_osk_spinlock_irq_lock(scheduler_pp_job_queue_lock);
1412 mali_pp_job_list_addtail(job, &scheduler_pp_job_queue_list);
1413 _mali_osk_spinlock_irq_unlock(scheduler_pp_job_queue_lock);
1414
1415 _mali_osk_wq_schedule_work(scheduler_wq_pp_job_queue);
1416 }
1417
static void mali_scheduler_do_pp_job_queue(void *arg)
1419 {
1420 _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
1421 struct mali_pp_job *job;
1422 struct mali_pp_job *tmp;
1423 mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
1424
1425 MALI_IGNORE(arg);
1426
1427 /*
1428 * Quickly "unhook" the jobs pending to be queued, so we can release
1429 * the lock before we start queueing the job objects
1430 * (without any locks held)
1431 */
1432 _mali_osk_spinlock_irq_lock(scheduler_pp_job_queue_lock);
1433 _mali_osk_list_move_list(&scheduler_pp_job_queue_list, &list);
1434 _mali_osk_spinlock_irq_unlock(scheduler_pp_job_queue_lock);
1435
1436 /* First loop through all jobs and do the pre-work (no locks needed) */
1437 _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list,
1438 struct mali_pp_job, list) {
1439 if (mali_pp_job_needs_dma_buf_mapping(job)) {
1440 /*
1441 * This operation could fail, but we continue anyway,
1442 * because the worst that could happen is that this
1443 * job will fail due to a Mali page fault.
1444 */
1445 mali_dma_buf_map_job(job);
1446 }
1447 }
1448
1449 mali_scheduler_lock();
1450
1451 /* Then loop through all jobs again to queue them (lock needed) */
1452 _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list,
1453 struct mali_pp_job, list) {
1454
1455 /* Remove from scheduler_pp_job_queue_list before queueing */
1456 mali_pp_job_list_remove(job);
1457
1458 if (mali_scheduler_queue_pp_job(job)) {
1459 /* Job queued successfully */
1460 schedule_mask |= MALI_SCHEDULER_MASK_PP;
1461 } else {
1462 /* Failed to enqueue job, release job (with error) */
1463 mali_pp_job_fb_lookup_remove(job);
1464 mali_pp_job_mark_unstarted_failed(job);
1465
1466 /* unlock scheduler in this uncommon case */
1467 mali_scheduler_unlock();
1468
1469 schedule_mask |= mali_timeline_tracker_release(
1470 mali_pp_job_get_tracker(job));
1471
1472 /* Notify user space and close the job object */
1473 mali_scheduler_complete_pp_job(job, 0, MALI_TRUE,
1474 MALI_FALSE);
1475
1476 mali_scheduler_lock();
1477 }
1478 }
1479
1480 mali_scheduler_unlock();
1481
1482 /* Trigger scheduling of jobs */
1483 mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
1484 }
1485
1486 #endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
1487
void mali_scheduler_gp_pp_job_queue_print(void)
1489 {
1490 struct mali_gp_job *gp_job = NULL;
1491 struct mali_gp_job *tmp_gp_job = NULL;
1492 struct mali_pp_job *pp_job = NULL;
1493 struct mali_pp_job *tmp_pp_job = NULL;
1494
1495 MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
1496 MALI_DEBUG_ASSERT_LOCK_HELD(mali_executor_lock_obj);
1497
	/* Dump job queue status */
1499 if ((0 == job_queue_gp.depth) && (0 == job_queue_pp.depth)) {
		MALI_PRINT(("No GP or PP jobs in the job queue.\n"));
1501 return;
1502 }
1503
	MALI_PRINT(("Total (%d) GP jobs in the job queue.\n", job_queue_gp.depth));
1505 if (job_queue_gp.depth > 0) {
1506 if (!_mali_osk_list_empty(&job_queue_gp.high_pri)) {
1507 _MALI_OSK_LIST_FOREACHENTRY(gp_job, tmp_gp_job, &job_queue_gp.high_pri,
1508 struct mali_gp_job, list) {
1509 MALI_PRINT(("GP job(%p) id = %d tid = %d pid = %d in the gp job high_pri queue\n", gp_job, gp_job->id, gp_job->tid, gp_job->pid));
1510 }
1511 }
1512
1513 if (!_mali_osk_list_empty(&job_queue_gp.normal_pri)) {
1514 _MALI_OSK_LIST_FOREACHENTRY(gp_job, tmp_gp_job, &job_queue_gp.normal_pri,
1515 struct mali_gp_job, list) {
1516 MALI_PRINT(("GP job(%p) id = %d tid = %d pid = %d in the gp job normal_pri queue\n", gp_job, gp_job->id, gp_job->tid, gp_job->pid));
1517 }
1518 }
1519 }
1520
	MALI_PRINT(("Total (%d) PP jobs in the job queue.\n", job_queue_pp.depth));
1522 if (job_queue_pp.depth > 0) {
1523 if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) {
1524 _MALI_OSK_LIST_FOREACHENTRY(pp_job, tmp_pp_job, &job_queue_pp.high_pri,
1525 struct mali_pp_job, list) {
1526 if (mali_pp_job_is_virtual(pp_job)) {
1527 MALI_PRINT(("PP Virtual job(%p) id = %d tid = %d pid = %d in the pp job high_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid));
1528 } else {
1529 MALI_PRINT(("PP Physical job(%p) id = %d tid = %d pid = %d in the pp job high_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid));
1530 }
1531 }
1532 }
1533
1534 if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
1535 _MALI_OSK_LIST_FOREACHENTRY(pp_job, tmp_pp_job, &job_queue_pp.normal_pri,
1536 struct mali_pp_job, list) {
1537 if (mali_pp_job_is_virtual(pp_job)) {
1538 MALI_PRINT(("PP Virtual job(%p) id = %d tid = %d pid = %d in the pp job normal_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid));
1539 } else {
1540 MALI_PRINT(("PP Physical job(%p) id = %d tid = %d pid = %d in the pp job normal_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid));
1541 }
1542 }
1543 }
1544 }
1545
1546 /* dump group running job status */
1547 mali_executor_running_status_print();
1548 }
1549