xref: /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/csf/mali_kbase_csf_scheduler.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2019-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#ifndef _KBASE_CSF_SCHEDULER_H_
#define _KBASE_CSF_SCHEDULER_H_

#include "mali_kbase_csf.h"
#include "mali_kbase_csf_event.h"

/**
 * kbase_csf_scheduler_queue_start() - Enable the running of GPU command queue
 *                                     on firmware.
 *
 * @queue: Pointer to the GPU command queue to be started.
 *
 * This function enables the start of a CSI, within the CSG, to which @queue
 * is bound. If the CSG is already scheduled and resident, the CSI will be
 * started right away, otherwise it is started once the group is made
 * resident.
 *
 * Return: 0 on success, or negative on failure. -EBUSY is returned to
 * indicate to the caller that the queue could not be enabled due to the
 * Scheduler state and the caller can retry enabling the queue after some
 * time.
 */
int kbase_csf_scheduler_queue_start(struct kbase_queue *queue);
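
/*
 * Illustrative sketch only (not part of this interface): a caller may treat
 * -EBUSY from kbase_csf_scheduler_queue_start() as a transient condition and
 * retry. The retry budget and sleep interval below are hypothetical.
 *
 *	int retries = 5;
 *	int ret;
 *
 *	do {
 *		ret = kbase_csf_scheduler_queue_start(queue);
 *		if (ret != -EBUSY)
 *			break;
 *		msleep(1);
 *	} while (--retries);
 */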

/**
 * kbase_csf_scheduler_queue_stop() - Disable the running of GPU command queue
 *                                    on firmware.
 *
 * @queue: Pointer to the GPU command queue to be stopped.
 *
 * This function stops the CSI, within the CSG, to which @queue is bound.
 *
 * Return: 0 on success, or negative on failure.
 */
int kbase_csf_scheduler_queue_stop(struct kbase_queue *queue);

/**
 * kbase_csf_scheduler_group_protm_enter - Handle the protm enter event for the
 *                                         GPU command queue group.
 *
 * @group: The command queue group.
 *
 * This function may request the firmware to enter protected mode and allow
 * the execution of protected region instructions for all the bound queues of
 * the group that have the protm pending bit set in their respective CS_ACK
 * register.
 */
void kbase_csf_scheduler_group_protm_enter(struct kbase_queue_group *group);

/**
 * kbase_csf_scheduler_group_get_slot() - Checks if a queue group is
 *                           programmed on a firmware CSG slot
 *                           and returns the slot number.
 *
 * @group: The command queue group.
 *
 * Return: The slot number, if the group is programmed on a slot.
 *         Otherwise returns a negative number.
 *
 * Note: This function should not be used if the interrupt_lock is held. Use
 * kbase_csf_scheduler_group_get_slot_locked() instead.
 */
int kbase_csf_scheduler_group_get_slot(struct kbase_queue_group *group);

/**
 * kbase_csf_scheduler_group_get_slot_locked() - Checks if a queue group is
 *                           programmed on a firmware CSG slot
 *                           and returns the slot number.
 *
 * @group: The command queue group.
 *
 * Return: The slot number, if the group is programmed on a slot.
 *         Otherwise returns a negative number.
 *
 * Note: Caller must hold the interrupt_lock.
 */
int kbase_csf_scheduler_group_get_slot_locked(struct kbase_queue_group *group);

/**
 * kbase_csf_scheduler_group_events_enabled() - Checks if interrupt events
 *                                     should be handled for a queue group.
 *
 * @kbdev: The device of the group.
 * @group: The queue group.
 *
 * Return: true if interrupt events should be handled.
 *
 * Note: Caller must hold the interrupt_lock.
 */
bool kbase_csf_scheduler_group_events_enabled(struct kbase_device *kbdev,
		struct kbase_queue_group *group);

/**
 * kbase_csf_scheduler_get_group_on_slot() - Gets the queue group that has been
 *                          programmed to a firmware CSG slot.
 *
 * @kbdev: The GPU device.
 * @slot:  The slot for which to get the queue group.
 *
 * Return: Pointer to the programmed queue group.
 *
 * Note: Caller must hold the interrupt_lock.
 */
struct kbase_queue_group *kbase_csf_scheduler_get_group_on_slot(
		struct kbase_device *kbdev, int slot);

/**
 * kbase_csf_scheduler_group_deschedule() - Deschedule a GPU command queue
 *                                          group from the firmware.
 *
 * @group: Pointer to the queue group to be descheduled.
 *
 * This function disables the scheduling of the GPU command queue group on
 * firmware.
 */
void kbase_csf_scheduler_group_deschedule(struct kbase_queue_group *group);

/**
 * kbase_csf_scheduler_evict_ctx_slots() - Evict all GPU command queue groups
 *                                         of a given context that are actively
 *                                         running on the firmware.
 *
 * @kbdev:          The GPU device.
 * @kctx:           Kbase context for the evict operation.
 * @evicted_groups: List_head for returning evicted active queue groups.
 *
 * This function disables the scheduling of GPU command queue groups active
 * on firmware slots from the given Kbase context. The affected groups are
 * added to the supplied list_head argument.
 */
void kbase_csf_scheduler_evict_ctx_slots(struct kbase_device *kbdev,
		struct kbase_context *kctx, struct list_head *evicted_groups);

/**
 * kbase_csf_scheduler_context_init() - Initialize the context-specific part
 *                                      for CSF scheduler.
 *
 * @kctx: Pointer to kbase context that is being created.
 *
 * This function must be called during Kbase context creation.
 *
 * Return: 0 on success, or negative on failure.
 */
int kbase_csf_scheduler_context_init(struct kbase_context *kctx);

/**
 * kbase_csf_scheduler_init - Initialize the CSF scheduler
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 *
 * The scheduler arbitrates the CSG slots provided by the firmware among the
 * GPU command queue groups created by the Clients.
 * This function must be called after loading firmware and parsing its
 * capabilities.
 *
 * Return: 0 on success, or negative on failure.
 */
int kbase_csf_scheduler_init(struct kbase_device *kbdev);

/**
 * kbase_csf_scheduler_early_init - Early initialization for the CSF scheduler
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 *
 * Initialize necessary resources such as locks and workqueues for the CSF
 * scheduler. This must be called at kbase probe.
 *
 * Return: 0 on success, or negative on failure.
 */
int kbase_csf_scheduler_early_init(struct kbase_device *kbdev);

/**
 * kbase_csf_scheduler_context_term() - Terminate the context-specific part
 *                                      for CSF scheduler.
 *
 * @kctx: Pointer to kbase context that is being terminated.
 *
 * This function must be called during Kbase context termination.
 */
void kbase_csf_scheduler_context_term(struct kbase_context *kctx);

/**
 * kbase_csf_scheduler_term - Terminate the CSF scheduler.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 *
 * This should be called when unload of firmware is done on device
 * termination.
 */
void kbase_csf_scheduler_term(struct kbase_device *kbdev);

/**
 * kbase_csf_scheduler_early_term - Early termination of the CSF scheduler.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 *
 * This should be called only when kbase probe fails or the module is
 * unloaded (rmmod).
 */
void kbase_csf_scheduler_early_term(struct kbase_device *kbdev);

/**
 * kbase_csf_scheduler_reset - Reset the state of all active GPU command
 *                             queue groups.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 *
 * This function will first iterate through all the active/scheduled GPU
 * command queue groups and suspend them (to avoid losing work for groups
 * that are not stuck). The groups that could not get suspended would be
 * descheduled and marked as terminated (which will then lead to unbinding
 * of all the queues bound to them) and also no more work would be allowed
 * to execute for them.
 *
 * This is similar to the action taken in response to an unexpected OoM event.
 * No explicit re-initialization is done for CSG & CS interface I/O pages;
 * instead, that happens implicitly on firmware reload.
 *
 * Should be called only after initiating the GPU reset.
 */
void kbase_csf_scheduler_reset(struct kbase_device *kbdev);

/**
 * kbase_csf_scheduler_enable_tick_timer - Enable the scheduler tick timer.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 *
 * This function will restart the scheduler tick so that regular scheduling can
 * be resumed without any explicit trigger (like kicking of GPU queues).
 */
void kbase_csf_scheduler_enable_tick_timer(struct kbase_device *kbdev);

/**
 * kbase_csf_scheduler_group_copy_suspend_buf - Suspend a queue
 *		group and copy suspend buffer.
 *
 * @group:	Pointer to the queue group to be suspended.
 * @sus_buf:	Pointer to the structure which contains details of the
 *		user buffer and its kernel pinned pages to which we need to copy
 *		the group suspend buffer.
 *
 * This function is called to suspend a queue group and copy the suspend_buffer
 * contents to the input buffer provided.
 *
 * Return:	0 on success, or negative on failure.
 */
int kbase_csf_scheduler_group_copy_suspend_buf(struct kbase_queue_group *group,
		struct kbase_suspend_copy_buffer *sus_buf);

/**
 * kbase_csf_scheduler_lock - Acquire the global Scheduler lock.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 *
 * This function will take the global scheduler lock, in order to serialize
 * against the Scheduler actions, for access to CS IO pages.
 */
static inline void kbase_csf_scheduler_lock(struct kbase_device *kbdev)
{
	mutex_lock(&kbdev->csf.scheduler.lock);
}

/**
 * kbase_csf_scheduler_unlock - Release the global Scheduler lock.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 */
static inline void kbase_csf_scheduler_unlock(struct kbase_device *kbdev)
{
	mutex_unlock(&kbdev->csf.scheduler.lock);
}
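
/*
 * Illustrative sketch only: an operation serialized against the Scheduler by
 * bracketing it with the global lock. do_something_scheduled() is a
 * hypothetical helper, named here purely for the example.
 *
 *	kbase_csf_scheduler_lock(kbdev);
 *	do_something_scheduled(kbdev);
 *	kbase_csf_scheduler_unlock(kbdev);
 */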

/**
 * kbase_csf_scheduler_spin_lock - Acquire Scheduler interrupt spinlock.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 * @flags: Pointer to the memory location that would store the previous
 *         interrupt state.
 *
 * This function will take the Scheduler interrupt spinlock, in order to
 * serialize against the Scheduler actions, for access to CS IO pages.
 */
static inline void kbase_csf_scheduler_spin_lock(struct kbase_device *kbdev,
						 unsigned long *flags)
{
	spin_lock_irqsave(&kbdev->csf.scheduler.interrupt_lock, *flags);
}

/**
 * kbase_csf_scheduler_spin_unlock - Release Scheduler interrupt spinlock.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 * @flags: Previously stored interrupt state when Scheduler interrupt
 *         spinlock was acquired.
 */
static inline void kbase_csf_scheduler_spin_unlock(struct kbase_device *kbdev,
						   unsigned long flags)
{
	spin_unlock_irqrestore(&kbdev->csf.scheduler.interrupt_lock, flags);
}
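
/*
 * Illustrative sketch only: the spinlock variant saves the previous interrupt
 * state into a caller-provided variable, so the same 'flags' value must be
 * passed back on unlock. inspect_csg_slots() is a hypothetical helper.
 *
 *	unsigned long flags;
 *
 *	kbase_csf_scheduler_spin_lock(kbdev, &flags);
 *	inspect_csg_slots(kbdev);
 *	kbase_csf_scheduler_spin_unlock(kbdev, flags);
 */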

/**
 * kbase_csf_scheduler_spin_lock_assert_held - Assert that the Scheduler
 *                                          interrupt spinlock is held.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 */
static inline void
kbase_csf_scheduler_spin_lock_assert_held(struct kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->csf.scheduler.interrupt_lock);
}

/**
 * kbase_csf_scheduler_timer_is_enabled() - Check if the scheduler wakes up
 * automatically for periodic tasks.
 *
 * @kbdev: Pointer to the device
 *
 * Return: true if the scheduler is configured to wake up periodically
 */
bool kbase_csf_scheduler_timer_is_enabled(struct kbase_device *kbdev);

/**
 * kbase_csf_scheduler_timer_set_enabled() - Enable/disable periodic
 * scheduler tasks.
 *
 * @kbdev:  Pointer to the device
 * @enable: Whether to enable periodic scheduler tasks
 */
void kbase_csf_scheduler_timer_set_enabled(struct kbase_device *kbdev,
		bool enable);

/**
 * kbase_csf_scheduler_kick - Perform pending scheduling tasks once.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 *
 * Note: This function is only effective if the scheduling timer is disabled.
 */
void kbase_csf_scheduler_kick(struct kbase_device *kbdev);
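
/*
 * Illustrative sketch only: with the scheduling timer disabled, pending
 * scheduling tasks run only when explicitly kicked.
 *
 *	kbase_csf_scheduler_timer_set_enabled(kbdev, false);
 *
 *	if (!kbase_csf_scheduler_timer_is_enabled(kbdev))
 *		kbase_csf_scheduler_kick(kbdev);
 */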

/**
 * kbase_csf_scheduler_protected_mode_in_use() - Check if the scheduler is
 * running with protected mode tasks.
 *
 * @kbdev: Pointer to the device
 *
 * Return: true if the scheduler is running with protected mode tasks
 */
static inline bool kbase_csf_scheduler_protected_mode_in_use(
					struct kbase_device *kbdev)
{
	return (kbdev->csf.scheduler.active_protm_grp != NULL);
}

/**
 * kbase_csf_scheduler_pm_active - Perform scheduler power active operation
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 *
 * Note: This function will increase the scheduler's internal pm_active_count
 * value, ensuring that both GPU and MCU are powered for access. The MCU may
 * not have actually become active when this function returns, so the caller
 * needs to call kbase_csf_scheduler_wait_mcu_active() for that.
 *
 * This function should not be called with the global scheduler lock held.
 */
void kbase_csf_scheduler_pm_active(struct kbase_device *kbdev);

/**
 * kbase_csf_scheduler_pm_idle - Perform the scheduler power idle operation
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 *
 * Note: This function will decrease the scheduler's internal pm_active_count
 * value. On reaching 0, the MCU and GPU could be powered off. This function
 * should not be called with the global scheduler lock held.
 */
void kbase_csf_scheduler_pm_idle(struct kbase_device *kbdev);

/**
 * kbase_csf_scheduler_wait_mcu_active - Wait for the MCU to actually become active
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 *
 * This function will wait for the MCU to actually become active. It should
 * be called after calling kbase_csf_scheduler_pm_active(). It is needed as
 * kbase_csf_scheduler_pm_active() may not make the MCU active right away.
 *
 * Return: 0 if the MCU was successfully activated, otherwise an error code.
 */
int kbase_csf_scheduler_wait_mcu_active(struct kbase_device *kbdev);
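
/*
 * Illustrative sketch only: the expected pairing of the PM operations around
 * work that needs an active MCU. Error handling is reduced to a bare check.
 *
 *	kbase_csf_scheduler_pm_active(kbdev);
 *	if (!kbase_csf_scheduler_wait_mcu_active(kbdev)) {
 *		... access that requires the MCU to be active ...
 *	}
 *	kbase_csf_scheduler_pm_idle(kbdev);
 */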

/**
 * kbase_csf_scheduler_pm_resume_no_lock - Reactivate the scheduler on system resume
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 *
 * This function will make the scheduler resume the scheduling of queue groups
 * and take the power management reference, if there are any runnable groups.
 * The caller must have acquired the global Scheduler lock.
 */
void kbase_csf_scheduler_pm_resume_no_lock(struct kbase_device *kbdev);

/**
 * kbase_csf_scheduler_pm_resume - Reactivate the scheduler on system resume
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 *
 * This function will make the scheduler resume the scheduling of queue groups
 * and take the power management reference, if there are any runnable groups.
 */
void kbase_csf_scheduler_pm_resume(struct kbase_device *kbdev);

/**
 * kbase_csf_scheduler_pm_suspend_no_lock - Idle the scheduler on system suspend
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 *
 * This function will make the scheduler suspend all the running queue groups
 * and drop its power management reference.
 * The caller must have acquired the global Scheduler lock.
 *
 * Return: 0 on success.
 */
int kbase_csf_scheduler_pm_suspend_no_lock(struct kbase_device *kbdev);

/**
 * kbase_csf_scheduler_pm_suspend - Idle the scheduler on system suspend
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 *
 * This function will make the scheduler suspend all the running queue groups
 * and drop its power management reference.
 *
 * Return: 0 on success.
 */
int kbase_csf_scheduler_pm_suspend(struct kbase_device *kbdev);

/**
 * kbase_csf_scheduler_all_csgs_idle() - Check if all the in-use CSG slots
 * are tagged as idle command queue groups.
 *
 * @kbdev: Pointer to the device
 *
 * Return: true if all the used slots are tagged as idle CSGs.
 */
static inline bool kbase_csf_scheduler_all_csgs_idle(struct kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->csf.scheduler.interrupt_lock);
	return bitmap_equal(kbdev->csf.scheduler.csg_slots_idle_mask,
			    kbdev->csf.scheduler.csg_inuse_bitmap,
			    kbdev->csf.global_iface.group_num);
}
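
/*
 * Illustrative sketch only: the idleness check must be made with the
 * interrupt spinlock held, for example when deciding whether the GPU can be
 * treated as idle.
 *
 *	unsigned long flags;
 *	bool all_idle;
 *
 *	kbase_csf_scheduler_spin_lock(kbdev, &flags);
 *	all_idle = kbase_csf_scheduler_all_csgs_idle(kbdev);
 *	kbase_csf_scheduler_spin_unlock(kbdev, flags);
 */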

/**
 * kbase_csf_scheduler_tick_advance_nolock() - Advance the scheduling tick
 *
 * @kbdev: Pointer to the device
 *
 * This function advances the scheduling tick by enqueuing the tick work item
 * for immediate execution, but only if the tick hrtimer is active. If the
 * timer is inactive then the tick work item is already in flight.
 * The caller must hold the interrupt lock.
 */
static inline void
kbase_csf_scheduler_tick_advance_nolock(struct kbase_device *kbdev)
{
	struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;

	lockdep_assert_held(&scheduler->interrupt_lock);

	if (scheduler->tick_timer_active) {
		KBASE_KTRACE_ADD(kbdev, SCHEDULER_TICK_ADVANCE, NULL, 0u);
		scheduler->tick_timer_active = false;
		queue_work(scheduler->wq, &scheduler->tick_work);
	} else {
		KBASE_KTRACE_ADD(kbdev, SCHEDULER_TICK_NOADVANCE, NULL, 0u);
	}
}

/**
 * kbase_csf_scheduler_tick_advance() - Advance the scheduling tick
 *
 * @kbdev: Pointer to the device
 *
 * This function advances the scheduling tick by enqueuing the tick work item
 * for immediate execution, but only if the tick hrtimer is active. If the
 * timer is inactive then the tick work item is already in flight.
 */
static inline void kbase_csf_scheduler_tick_advance(struct kbase_device *kbdev)
{
	struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
	unsigned long flags;

	spin_lock_irqsave(&scheduler->interrupt_lock, flags);
	kbase_csf_scheduler_tick_advance_nolock(kbdev);
	spin_unlock_irqrestore(&scheduler->interrupt_lock, flags);
}

/**
 * kbase_csf_scheduler_invoke_tick() - Invoke the scheduling tick
 *
 * @kbdev: Pointer to the device
 *
 * This function will queue the scheduling tick work item for immediate
 * execution if the tick timer is not active. This can be called from interrupt
 * context to resume the scheduling after the GPU was put to sleep.
 */
static inline void kbase_csf_scheduler_invoke_tick(struct kbase_device *kbdev)
{
	struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
	unsigned long flags;

	KBASE_KTRACE_ADD(kbdev, SCHEDULER_TICK_INVOKE, NULL, 0u);
	spin_lock_irqsave(&scheduler->interrupt_lock, flags);
	if (!scheduler->tick_timer_active)
		queue_work(scheduler->wq, &scheduler->tick_work);
	spin_unlock_irqrestore(&scheduler->interrupt_lock, flags);
}

/**
 * kbase_csf_scheduler_invoke_tock() - Invoke the scheduling tock
 *
 * @kbdev: Pointer to the device
 *
 * This function will queue the scheduling tock work item for immediate
 * execution.
 */
static inline void kbase_csf_scheduler_invoke_tock(struct kbase_device *kbdev)
{
	struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;

	KBASE_KTRACE_ADD(kbdev, SCHEDULER_TOCK_INVOKE, NULL, 0u);
	if (atomic_cmpxchg(&scheduler->pending_tock_work, false, true) == false)
		mod_delayed_work(scheduler->wq, &scheduler->tock_work, 0);
}

/**
 * kbase_csf_scheduler_queue_has_trace() - Report whether the queue has been
 *                                         configured to operate with the
 *                                         cs_trace feature.
 *
 * @queue: Pointer to the queue.
 *
 * Return: True if the GPU queue is configured to operate with the cs_trace
 *         feature, otherwise false.
 */
static inline bool kbase_csf_scheduler_queue_has_trace(struct kbase_queue *queue)
{
	lockdep_assert_held(&queue->kctx->kbdev->csf.scheduler.lock);
	/* In the current arrangement, it is possible for the context to enable
	 * the cs_trace after some queues have been registered with cs_trace in
	 * disabled state. So each queue has its own enabled/disabled condition.
	 */
	return (queue->trace_buffer_size && queue->trace_buffer_base);
}
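
/*
 * Illustrative sketch only: the cs_trace check is made per queue and requires
 * the global Scheduler lock.
 *
 *	kbase_csf_scheduler_lock(kbdev);
 *	if (kbase_csf_scheduler_queue_has_trace(queue))
 *		... read or configure the trace buffer of this queue ...
 *	kbase_csf_scheduler_unlock(kbdev);
 */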

#ifdef KBASE_PM_RUNTIME
/**
 * kbase_csf_scheduler_reval_idleness_post_sleep() - Check GPU's idleness after
 *                                                   putting MCU to sleep state
 *
 * @kbdev: Pointer to the device
 *
 * This function re-evaluates the idleness of on-slot queue groups after the
 * MCU was put to the sleep state and invokes the scheduling tick if any of
 * the on-slot queue groups became non-idle.
 * The CSG_OUTPUT_BLOCK.CSG_STATUS_STATE.IDLE bit, which is updated by the MCU
 * firmware on handling of the sleep request, is checked to determine the
 * idleness.
 *
 * This function is needed to detect if more work was flushed in the window
 * between the GPU idle notification and the enabling of the Doorbell mirror
 * interrupt (from the MCU state machine). Once the Doorbell mirror interrupt
 * is enabled, the Host can receive the notification on User doorbell rings.
 */
void kbase_csf_scheduler_reval_idleness_post_sleep(struct kbase_device *kbdev);

/**
 * kbase_csf_scheduler_handle_runtime_suspend() - Handle runtime suspend by
 *                                                suspending CSGs.
 *
 * @kbdev: Pointer to the device
 *
 * This function is called from the runtime suspend callback function for
 * suspending all the on-slot queue groups. If any of the groups is found to
 * be non-idle after the completion of the CSG suspend operation, or the CSG
 * suspend operation times out, then the scheduling tick is invoked and an
 * error is returned so that the GPU power down can be aborted.
 *
 * Return: 0 if all the CSGs were suspended, otherwise an error code.
 */
int kbase_csf_scheduler_handle_runtime_suspend(struct kbase_device *kbdev);
#endif

/**
 * kbase_csf_scheduler_process_gpu_idle_event() - Process GPU idle IRQ
 *
 * @kbdev: Pointer to the device
 *
 * This function is called when a GPU idle IRQ has been raised.
 */
void kbase_csf_scheduler_process_gpu_idle_event(struct kbase_device *kbdev);

/**
 * kbase_csf_scheduler_get_nr_active_csgs() - Get the number of active CSGs
 *
 * @kbdev: Pointer to the device
 *
 * This function calculates the number of CSG slots that have a queue group
 * resident on them.
 *
 * Note: This function should not be used if the interrupt_lock is held. Use
 * kbase_csf_scheduler_get_nr_active_csgs_locked() instead.
 *
 * Return: number of active CSGs.
 */
u32 kbase_csf_scheduler_get_nr_active_csgs(struct kbase_device *kbdev);

/**
 * kbase_csf_scheduler_get_nr_active_csgs_locked() - Get the number of active
 *                                                   CSGs
 *
 * @kbdev: Pointer to the device
 *
 * This function calculates the number of CSG slots that have a queue group
 * resident on them.
 *
 * Note: This function should be called with interrupt_lock held.
 *
 * Return: number of active CSGs.
 */
u32 kbase_csf_scheduler_get_nr_active_csgs_locked(struct kbase_device *kbdev);

/**
 * kbase_csf_scheduler_force_wakeup() - Forcefully resume the scheduling of CSGs
 *
 * @kbdev: Pointer to the device
 *
 * This function is called to forcefully resume the scheduling of CSGs, even
 * when there wasn't any work submitted for them.
 * This function is only used for testing purposes.
 */
void kbase_csf_scheduler_force_wakeup(struct kbase_device *kbdev);

#ifdef KBASE_PM_RUNTIME
/**
 * kbase_csf_scheduler_force_sleep() - Forcefully put the Scheduler to sleeping
 *                                     state.
 *
 * @kbdev: Pointer to the device
 *
 * This function is called to forcefully put the Scheduler to the sleeping
 * state and trigger the sleep of the MCU. If the CSGs are not idle, then the
 * Scheduler would get reactivated again immediately.
 * This function is only used for testing purposes.
 */
void kbase_csf_scheduler_force_sleep(struct kbase_device *kbdev);
#endif

#endif /* _KBASE_CSF_SCHEDULER_H_ */