/*
 *
 * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */


/**
 * @file mali_kbase_js.h
 * Job Scheduler APIs.
 */

#ifndef _KBASE_JS_H_
#define _KBASE_JS_H_

#include "mali_kbase_js_defs.h"
#include "mali_kbase_context.h"
#include "mali_kbase_defs.h"
#include "mali_kbase_debug.h"

#include "mali_kbase_js_ctx_attr.h"

/**
 * @addtogroup base_api
 * @{
 */

/**
 * @addtogroup base_kbase_api
 * @{
 */

/**
 * @addtogroup kbase_js Job Scheduler Internal APIs
 * @{
 *
 * These APIs are Internal to KBase.
 */

/**
 * @brief Initialize the Job Scheduler
 *
 * The struct kbasep_js_device_data sub-structure of \a kbdev must be zero
 * initialized before passing to the kbasep_js_devdata_init() function. This is
 * to give efficient error path code.
 */
int kbasep_js_devdata_init(struct kbase_device * const kbdev);

/**
 * @brief Halt the Job Scheduler.
 *
 * It is safe to call this on \a kbdev even if the kbasep_js_device_data
 * sub-structure was never initialized, or failed initialization, to give
 * efficient error-path code.
 *
 * For this to work, the struct kbasep_js_device_data sub-structure of \a kbdev must
 * be zero initialized before passing to the kbasep_js_devdata_init()
 * function. This is to give efficient error path code.
 *
 * It is a Programming Error to call this whilst there are still kbase_context
 * structures registered with this scheduler.
 */
void kbasep_js_devdata_halt(struct kbase_device *kbdev);

/**
 * @brief Terminate the Job Scheduler
 *
 * It is safe to call this on \a kbdev even if the kbasep_js_device_data
 * sub-structure was never initialized, or failed initialization, to give
 * efficient error-path code.
 *
 * For this to work, the struct kbasep_js_device_data sub-structure of \a kbdev must
 * be zero initialized before passing to the kbasep_js_devdata_init()
 * function. This is to give efficient error path code.
 *
 * It is a Programming Error to call this whilst there are still kbase_context
 * structures registered with this scheduler.
 */
void kbasep_js_devdata_term(struct kbase_device *kbdev);
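
/*
 * Illustrative lifecycle sketch (not part of the original header). It assumes
 * kbdev->js_data is the zero-initialized kbasep_js_device_data sub-structure
 * and that the surrounding probe/remove helpers are hypothetical:
 *
 *	int my_probe(struct kbase_device *kbdev)
 *	{
 *		int err;
 *
 *		// kbdev->js_data must already be zeroed (e.g. kzalloc of kbdev)
 *		err = kbasep_js_devdata_init(kbdev);
 *		if (err)
 *			return err;
 *		...
 *		return 0;
 *	}
 *
 *	void my_remove(struct kbase_device *kbdev)
 *	{
 *		// Safe even if kbasep_js_devdata_init() failed or never ran,
 *		// provided the sub-structure was zero initialized.
 *		kbasep_js_devdata_halt(kbdev);
 *		kbasep_js_devdata_term(kbdev);
 *	}
 */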

/**
 * @brief Initialize the Scheduling Component of a struct kbase_context on the Job Scheduler.
 *
 * This effectively registers a struct kbase_context with a Job Scheduler.
 *
 * It does not register any jobs owned by the struct kbase_context with the scheduler.
 * Those must be separately registered by kbasep_js_add_job().
 *
 * The struct kbase_context must be zero initialized before passing to the
 * kbase_js_init() function. This is to give efficient error path code.
 */
int kbasep_js_kctx_init(struct kbase_context * const kctx);

/**
 * @brief Terminate the Scheduling Component of a struct kbase_context on the Job Scheduler
 *
 * This effectively de-registers a struct kbase_context from its Job Scheduler.
 *
 * It is safe to call this on a struct kbase_context that has never had, or failed,
 * initialization of its jctx.sched_info member, to give efficient error-path
 * code.
 *
 * For this to work, the struct kbase_context must be zero initialized before passing
 * to the kbase_js_init() function.
 *
 * It is a Programming Error to call this whilst there are still jobs
 * registered with this context.
 */
void kbasep_js_kctx_term(struct kbase_context *kctx);
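
/*
 * Illustrative sketch (not part of the original header): context setup and
 * teardown around the scheduling component. The surrounding helper names are
 * hypothetical; the struct kbase_context is assumed to be zero initialized.
 *
 *	int my_ctx_create(struct kbase_context *kctx)
 *	{
 *		int err = kbasep_js_kctx_init(kctx);
 *
 *		if (err)
 *			return err;
 *		// jobs are registered later, via kbasep_js_add_job()
 *		return 0;
 *	}
 *
 *	void my_ctx_destroy(struct kbase_context *kctx)
 *	{
 *		// Safe even if kbasep_js_kctx_init() failed or never ran.
 *		// All jobs must already have been removed from the context.
 *		kbasep_js_kctx_term(kctx);
 *	}
 */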

/**
 * @brief Add a job chain to the Job Scheduler, and take necessary actions to
 * schedule the context/run the job.
 *
 * This atomically does the following:
 * - Update the numbers of jobs information
 * - Add the job to the run pool if necessary (part of init_job)
 *
 * Once this is done, then an appropriate action is taken:
 * - If the ctx is scheduled, it attempts to start the next job (which might be
 * this added job)
 * - Otherwise, and if this is the first job on the context, it enqueues it on
 * the Policy Queue
 *
 * The Policy's Queue can be updated by this in the following ways:
 * - In the above case that this is the first job on the context
 * - If the context is high priority and the context is not scheduled, then it
 * could cause the Policy to schedule out a low-priority context, allowing
 * this context to be scheduled in.
 *
 * If the context is already scheduled on the RunPool, then adding a job to it
 * is guaranteed not to update the Policy Queue. And so, the caller is
 * guaranteed to not need to try scheduling a context from the Run Pool - it
 * can safely assert that the result is false.
 *
 * It is a programming error to have more than U32_MAX jobs in flight at a time.
 *
 * The following locking conditions are made on the caller:
 * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
 * - it must \em not hold hwaccess_lock (as this will be obtained internally)
 * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
 * obtained internally)
 * - it must \em not hold kbasep_jd_device_data::queue_mutex (again, it's used internally).
 *
 * @return true indicates that the Policy Queue was updated, and so the
 * caller will need to try scheduling a context onto the Run Pool.
 * @return false indicates that no updates were made to the Policy Queue,
 * so no further action is required from the caller. This is \b always returned
 * when the context is currently scheduled.
 */
bool kbasep_js_add_job(struct kbase_context *kctx, struct kbase_jd_atom *atom);
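
/*
 * Illustrative sketch (not part of the original header): reacting to the
 * return value of kbasep_js_add_job(). Kicking the scheduler with
 * kbase_js_sched_all() is shown as one plausible way of "trying to schedule
 * a context onto the Run Pool"; it is an assumption, not the definitive
 * caller behaviour.
 *
 *	if (kbasep_js_add_job(kctx, katom))
 *		kbase_js_sched_all(kbdev);	// Policy Queue changed: try scheduling
 */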

/**
 * @brief Remove a job chain from the Job Scheduler, except for its 'retained state'.
 *
 * Completely removing a job requires several calls:
 * - kbasep_js_atom_retained_state_copy(), to capture the 'retained state' of
 * the atom
 * - kbasep_js_remove_job(), to partially remove the atom from the Job Scheduler
 * - kbasep_js_runpool_release_ctx_and_katom_retained_state(), to release the
 * remaining state held as part of the job having been run.
 *
 * In the common case of atoms completing normally, this set of actions is
 * more optimal for spinlock purposes than having kbasep_js_remove_job() handle
 * all of the actions.
 *
 * In the case of cancelling atoms, it is easier to call
 * kbasep_js_remove_cancelled_job(), which handles all the necessary actions.
 *
 * It is a programming error to call this when:
 * - \a atom is not a job belonging to kctx.
 * - \a atom has already been removed from the Job Scheduler.
 * - \a atom is still in the runpool
 *
 * Do not use this for removing jobs being killed by kbase_jd_cancel() - use
 * kbasep_js_remove_cancelled_job() instead.
 *
 * The following locking conditions are made on the caller:
 * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
 *
 */
void kbasep_js_remove_job(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *atom);
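
/*
 * Illustrative sketch (not part of the original header) of the three-call
 * removal sequence described above, for an atom that completed normally. The
 * locking around each call follows the requirements documented on the
 * individual functions and is only summarized in the comments.
 *
 *	struct kbasep_js_atom_retained_state retained;
 *
 *	kbasep_js_atom_retained_state_copy(&retained, katom);
 *
 *	// caller holds kbasep_js_kctx_info::ctx::jsctx_mutex here
 *	kbasep_js_remove_job(kbdev, kctx, katom);
 *
 *	// releases the remaining state held because the job has run
 *	kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx,
 *			&retained);
 */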

/**
 * @brief Completely remove a job chain from the Job Scheduler, in the case
 * where the job chain was cancelled.
 *
 * This is a variant of kbasep_js_remove_job() that takes care of removing all
 * of the retained state too. This is generally useful for cancelled atoms,
 * which need not be handled in an optimal way.
 *
 * It is a programming error to call this when:
 * - \a atom is not a job belonging to kctx.
 * - \a atom has already been removed from the Job Scheduler.
 * - \a atom is still in the runpool and is not being killed with
 * kbasep_jd_cancel()
 *
 * The following locking conditions are made on the caller:
 * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
 * - it must \em not hold the hwaccess_lock, (as this will be obtained
 * internally)
 * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this could be
 * obtained internally)
 *
 * @return true indicates that ctx attributes have changed and the caller
 * should call kbase_js_sched_all() to try to run more jobs
 * @return false otherwise
 */
bool kbasep_js_remove_cancelled_job(struct kbase_device *kbdev,
		struct kbase_context *kctx,
		struct kbase_jd_atom *katom);
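
/*
 * Illustrative sketch (not part of the original header): after removing a
 * cancelled atom, kick the scheduler if the context attributes changed, as
 * the @return documentation above requests.
 *
 *	// caller holds jsctx_mutex; hwaccess_lock and runpool_mutex not held
 *	if (kbasep_js_remove_cancelled_job(kbdev, kctx, katom))
 *		kbase_js_sched_all(kbdev);
 */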

/**
 * @brief Refcount a context as being busy, preventing it from being scheduled
 * out.
 *
 * @note This function can safely be called from IRQ context.
 *
 * The following locking conditions are made on the caller:
 * - it must \em not hold mmu_hw_mutex and hwaccess_lock, because they will be
 * used internally.
 *
 * @return value != false if the retain succeeded, and the context will not be scheduled out.
 * @return false if the retain failed (because the context is being/has been scheduled out).
 */
bool kbasep_js_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);

/**
 * @brief Refcount a context as being busy, preventing it from being scheduled
 * out.
 *
 * @note This function can safely be called from IRQ context.
 *
 * The following locks must be held by the caller:
 * - mmu_hw_mutex, hwaccess_lock
 *
 * @return value != false if the retain succeeded, and the context will not be scheduled out.
 * @return false if the retain failed (because the context is being/has been scheduled out).
 */
bool kbasep_js_runpool_retain_ctx_nolock(struct kbase_device *kbdev, struct kbase_context *kctx);

/**
 * @brief Lookup a context in the Run Pool based upon its current address space
 * and ensure that it stays scheduled in.
 *
 * The context is refcounted as being busy to prevent it from scheduling
 * out. It must be released with kbasep_js_runpool_release_ctx() when it is no
 * longer required to stay scheduled in.
 *
 * @note This function can safely be called from IRQ context.
 *
 * The following locking conditions are made on the caller:
 * - it must \em not hold the hwaccess_lock, because it will be used internally.
 * If the hwaccess_lock is already held, then the caller should use
 * kbasep_js_runpool_lookup_ctx_nolock() instead.
 *
 * @return a valid struct kbase_context on success, which has been refcounted as being busy.
 * @return NULL on failure, indicating that no context was found in \a as_nr
 */
struct kbase_context *kbasep_js_runpool_lookup_ctx(struct kbase_device *kbdev, int as_nr);
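
/*
 * Illustrative sketch (not part of the original header): looking up the
 * context currently assigned to an address space and keeping it scheduled in
 * while it is inspected. The body of the if-block is a placeholder.
 *
 *	struct kbase_context *kctx;
 *
 *	kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_nr);
 *	if (kctx) {
 *		// kctx is refcounted as busy and cannot be scheduled out here
 *		...
 *		kbasep_js_runpool_release_ctx(kbdev, kctx);
 *	}
 */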

/**
 * @brief Handle the requeuing/killing of a context that was evicted from the
 * policy queue or runpool.
 *
 * This should be used whenever handing off a context that has been evicted
 * from the policy queue or the runpool:
 * - If the context is not dying and has jobs, it gets re-added to the policy
 * queue
 * - Otherwise, it is not added
 *
 * In addition, if the context is dying the jobs are killed asynchronously.
 *
 * In all cases, the Power Manager active reference is released
 * (kbase_pm_context_idle()) whenever the has_pm_ref parameter is true. \a
 * has_pm_ref must be set to false whenever the context was not previously in
 * the runpool and does not hold a Power Manager active refcount. Note that
 * contexts in a rollback of kbasep_js_try_schedule_head_ctx() might have an
 * active refcount even though they weren't in the runpool.
 *
 * The following locking conditions are made on the caller:
 * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
 * - it must \em not hold kbasep_jd_device_data::queue_mutex (as this will be
 * obtained internally)
 */
void kbasep_js_runpool_requeue_or_kill_ctx(struct kbase_device *kbdev, struct kbase_context *kctx, bool has_pm_ref);

/**
 * @brief Release a refcount of a context being busy, allowing it to be
 * scheduled out.
 *
 * When the refcount reaches zero, the context \em might be scheduled out
 * (depending on whether the Scheduling Policy has deemed it so, or if it has run
 * out of jobs).
 *
 * If the context does get scheduled out, then the following actions will be
 * taken as part of descheduling a context:
 * - For the context being descheduled:
 *  - If the context is in the process of dying (all the jobs are being
 * removed from it), then descheduling also kills off any jobs remaining in the
 * context.
 *  - If the context is not dying, and any jobs remain after descheduling the
 * context then it is re-enqueued to the Policy's Queue.
 *  - Otherwise, the context is still known to the scheduler, but remains absent
 * from the Policy Queue until a job is next added to it.
 *  - In all descheduling cases, the Power Manager active reference (obtained
 * during kbasep_js_try_schedule_head_ctx()) is released (kbase_pm_context_idle()).
 *
 * Whilst the context is being descheduled, this also handles actions that
 * cause more atoms to be run:
 * - Attempt submitting atoms when the Context Attributes on the Runpool have
 * changed. This is because the context being scheduled out could mean that
 * there are more opportunities to run atoms.
 * - Attempt submitting to a slot that was previously blocked due to affinity
 * restrictions. This is usually only necessary when releasing a context
 * happens as part of completing a previous job, but is harmless nonetheless.
 * - Attempt scheduling in a new context (if one is available), and if necessary,
 * running a job from that new context.
 *
 * Unlike retaining a context in the runpool, this function \b cannot be called
 * from IRQ context.
 *
 * It is a programming error to call this on a \a kctx that is not currently
 * scheduled, or that already has a zero refcount.
 *
 * The following locking conditions are made on the caller:
 * - it must \em not hold the hwaccess_lock, because it will be used internally.
 * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
 * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
 * obtained internally)
 * - it must \em not hold the kbase_device::mmu_hw_mutex (as this will be
 * obtained internally)
 * - it must \em not hold kbasep_jd_device_data::queue_mutex (as this will be
 * obtained internally)
 *
 */
void kbasep_js_runpool_release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);

/**
 * @brief Variant of kbasep_js_runpool_release_ctx() that handles additional
 * actions from completing an atom.
 *
 * This is usually called as part of completing an atom and releasing the
 * refcount on the context held by the atom.
 *
 * Therefore, the extra actions carried out are part of handling actions queued
 * on a completed atom, namely:
 * - Releasing the atom's context attributes
 * - Retrying the submission on a particular slot, because we couldn't submit
 * on that slot from an IRQ handler.
 *
 * The locking conditions of this function are the same as those for
 * kbasep_js_runpool_release_ctx()
 */
void kbasep_js_runpool_release_ctx_and_katom_retained_state(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state);

/**
 * @brief Variant of kbasep_js_runpool_release_ctx() that assumes that
 * kbasep_js_device_data::runpool_mutex and
 * kbasep_js_kctx_info::ctx::jsctx_mutex are held by the caller, and does not
 * attempt to schedule new contexts.
 */
void kbasep_js_runpool_release_ctx_nolock(struct kbase_device *kbdev,
		struct kbase_context *kctx);

/**
 * @brief Schedule in a privileged context
 *
 * This schedules a context in regardless of the context priority.
 * If the runpool is full, a context will be forced out of the runpool and the function will wait
 * for the new context to be scheduled in.
 * The context will be kept scheduled in (and the corresponding address space reserved) until
 * kbasep_js_release_privileged_ctx() is called.
 *
 * The following locking conditions are made on the caller:
 * - it must \em not hold the hwaccess_lock, because it will be used internally.
 * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
 * obtained internally)
 * - it must \em not hold the kbase_device::mmu_hw_mutex (as this will be
 * obtained internally)
 * - it must \em not hold kbasep_jd_device_data::queue_mutex (again, it's used internally).
 * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex, because it will
 * be used internally.
 *
 */
void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);

/**
 * @brief Release a privileged context, allowing it to be scheduled out.
 *
 * See kbasep_js_runpool_release_ctx() for potential side effects.
 *
 * The following locking conditions are made on the caller:
 * - it must \em not hold the hwaccess_lock, because it will be used internally.
 * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
 * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
 * obtained internally)
 * - it must \em not hold the kbase_device::mmu_hw_mutex (as this will be
 * obtained internally)
 *
 */
void kbasep_js_release_privileged_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
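
/*
 * Illustrative sketch (not part of the original header): keeping a context
 * scheduled in, with its address space reserved, for the duration of some
 * privileged work. The "privileged work" placeholder is hypothetical.
 *
 *	// may sleep: waits for the context to be scheduled in if needed
 *	kbasep_js_schedule_privileged_ctx(kbdev, kctx);
 *
 *	// ... privileged work that needs the context resident ...
 *
 *	kbasep_js_release_privileged_ctx(kbdev, kctx);
 */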

/**
 * @brief Try to submit the next job on each slot
 *
 * The following locks may be used:
 * - kbasep_js_device_data::runpool_mutex
 * - hwaccess_lock
 */
void kbase_js_try_run_jobs(struct kbase_device *kbdev);

/**
 * @brief Suspend the job scheduler during a Power Management Suspend event.
 *
 * Causes all contexts to be removed from the runpool, and prevents any
 * contexts from (re)entering the runpool.
 *
 * This does not handle suspending the one privileged context: the caller must
 * instead do this by suspending the GPU HW Counter Instrumentation.
 *
 * This will eventually cause all Power Management active references held by
 * contexts on the runpool to be released, without running any more atoms.
 *
 * The caller must then wait for all Power Management active refcounts to become
 * zero before completing the suspend.
 *
 * The emptying mechanism may take some time to complete, since it can wait for
 * jobs to complete naturally instead of forcing them to end quickly. However,
 * this is bounded by the Job Scheduler's Job Timeouts. Hence, this
 * function is guaranteed to complete in a finite time.
 */
void kbasep_js_suspend(struct kbase_device *kbdev);

/**
 * @brief Resume the Job Scheduler after a Power Management Resume event.
 *
 * This restores the actions from kbasep_js_suspend():
 * - Schedules contexts back into the runpool
 * - Resumes running atoms on the GPU
 */
void kbasep_js_resume(struct kbase_device *kbdev);

/**
 * @brief Submit an atom to the job scheduler.
 *
 * The atom is enqueued on the context's ringbuffer. The caller must have
 * ensured that all dependencies can be represented in the ringbuffer.
 *
 * Caller must hold jctx->lock
 *
 * @param[in] kctx  Context pointer
 * @param[in] katom Pointer to the atom to submit
 *
 * @return Whether the context needs to be enqueued.
 */
bool kbase_js_dep_resolved_submit(struct kbase_context *kctx,
		struct kbase_jd_atom *katom);

/**
 * jsctx_ll_flush_to_rb() - Pushes atoms from the linked list to ringbuffer.
 * @kctx: Context Pointer
 * @prio: Priority (specifies the queue together with js).
 * @js:   Job slot (specifies the queue together with prio).
 *
 * Pushes all possible atoms from the linked list to the ringbuffer.
 * The number of atoms is limited to the free space in the ringbuffer and
 * the number of available atoms in the linked list.
 *
 */
void jsctx_ll_flush_to_rb(struct kbase_context *kctx, int prio, int js);

/**
 * @brief Pull an atom from a context in the job scheduler for execution.
 *
 * The atom will not be removed from the ringbuffer at this stage.
 *
 * The HW access lock must be held when calling this function.
 *
 * @param[in] kctx Context to pull from
 * @param[in] js   Job slot to pull from
 * @return Pointer to an atom, or NULL if there are no atoms for this
 * slot that can be currently run.
 */
struct kbase_jd_atom *kbase_js_pull(struct kbase_context *kctx, int js);

/**
 * @brief Return an atom to the job scheduler ringbuffer.
 *
 * An atom is 'unpulled' if execution is stopped but intended to be returned to
 * later. The most common reason for this is that the atom has been
 * soft-stopped.
 *
 * Note that if multiple atoms are to be 'unpulled', they must be returned in
 * the reverse order to which they were originally pulled. It is a programming
 * error to return atoms in any other order.
 *
 * The HW access lock must be held when calling this function.
 *
 * @param[in] kctx  Context pointer
 * @param[in] katom Pointer to the atom to unpull
 */
void kbase_js_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom);
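
/*
 * Illustrative sketch (not part of the original header): pulling two atoms
 * and unpulling them in reverse order, as the documentation above requires.
 * The HW access lock is assumed to be held for the whole sequence.
 *
 *	struct kbase_jd_atom *first = kbase_js_pull(kctx, js);
 *	struct kbase_jd_atom *second = kbase_js_pull(kctx, js);
 *
 *	// ... decide not to run them after all (e.g. soft-stop) ...
 *
 *	if (second)
 *		kbase_js_unpull(kctx, second);	// last pulled, first unpulled
 *	if (first)
 *		kbase_js_unpull(kctx, first);
 */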

/**
 * @brief Complete an atom from jd_done_worker(), removing it from the job
 * scheduler ringbuffer.
 *
 * If the atom failed then all dependee atoms marked for failure propagation
 * will also fail.
 *
 * @param[in] kctx  Context pointer
 * @param[in] katom Pointer to the atom to complete
 * @return true if the context is now idle (no jobs pulled)
 *         false otherwise
 */
bool kbase_js_complete_atom_wq(struct kbase_context *kctx,
		struct kbase_jd_atom *katom);

/**
 * @brief Complete an atom.
 *
 * Most of the work required to complete an atom will be performed by
 * jd_done_worker().
 *
 * The HW access lock must be held when calling this function.
 *
 * @param[in] katom         Pointer to the atom to complete
 * @param[in] end_timestamp The time that the atom completed (may be NULL)
 *
 * Return: Atom that has now been unblocked and can now be run, or NULL if none
 */
struct kbase_jd_atom *kbase_js_complete_atom(struct kbase_jd_atom *katom,
		ktime_t *end_timestamp);

/**
 * @brief Submit atoms from all available contexts.
 *
 * This will attempt to submit as many jobs as possible to the provided job
 * slots. It will exit when either all job slots are full, or all contexts have
 * been used.
 *
 * @param[in] kbdev   Device pointer
 * @param[in] js_mask Mask of job slots to submit to
 */
void kbase_js_sched(struct kbase_device *kbdev, int js_mask);

/**
 * kbase_js_zap_context - Attempt to deschedule a context that is being
 *                        destroyed
 * @kctx: Context pointer
 *
 * This will attempt to remove a context from any internal job scheduler queues
 * and perform any other actions needed to ensure that no further jobs will be
 * submitted from the context.
 *
 * If the context is currently scheduled, then the caller must wait for all
 * pending jobs to complete before taking any further action.
 */
void kbase_js_zap_context(struct kbase_context *kctx);

/**
 * @brief Validate an atom
 *
 * This will determine whether the atom can be scheduled onto the GPU. Atoms
 * with invalid combinations of core requirements will be rejected.
 *
 * @param[in] kbdev Device pointer
 * @param[in] katom Atom to validate
 * @return true if atom is valid
 *         false otherwise
 */
bool kbase_js_is_atom_valid(struct kbase_device *kbdev,
		struct kbase_jd_atom *katom);

/**
 * kbase_js_set_timeouts - update all JS timeouts with user specified data
 * @kbdev: Device pointer
 *
 * Timeouts are specified through the 'js_timeouts' sysfs file. If a timeout is
 * set to a positive number then that becomes the new value used; if a timeout
 * is negative then the default is set.
 */
void kbase_js_set_timeouts(struct kbase_device *kbdev);

/*
 * Helpers follow
 */

/**
 * @brief Check that a context is allowed to submit jobs on this policy
 *
 * The purpose of this abstraction is to hide the underlying data size, and wrap up
 * the long repeated line of code.
 *
 * As with any bool, never test the return value with true.
 *
 * The caller must hold hwaccess_lock.
 */
static inline bool kbasep_js_is_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx)
{
	u16 test_bit;

	/* Ensure context really is scheduled in */
	KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
	KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));

	test_bit = (u16) (1u << kctx->as_nr);

	return (bool) (js_devdata->runpool_irq.submit_allowed & test_bit);
}
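
/*
 * Illustrative sketch (not part of the original header): checking submit
 * permission with hwaccess_lock held before submitting. The spinlock member
 * name kbdev->hwaccess_lock and the kbdev->js_data member are assumptions
 * based on how they are referenced elsewhere in this file.
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 *	if (kbasep_js_is_submit_allowed(&kbdev->js_data, kctx)) {
 *		// ... submit more jobs from kctx ...
 *	}
 *	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 */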

/**
 * @brief Allow a context to submit jobs on this policy
 *
 * The purpose of this abstraction is to hide the underlying data size, and wrap up
 * the long repeated line of code.
 *
 * The caller must hold hwaccess_lock.
 */
static inline void kbasep_js_set_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx)
{
	u16 set_bit;

	/* Ensure context really is scheduled in */
	KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
	KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));

	set_bit = (u16) (1u << kctx->as_nr);

	dev_dbg(kctx->kbdev->dev, "JS: Setting Submit Allowed on %p (as=%d)", kctx, kctx->as_nr);

	js_devdata->runpool_irq.submit_allowed |= set_bit;
}

/**
 * @brief Prevent a context from submitting more jobs on this policy
 *
 * The purpose of this abstraction is to hide the underlying data size, and wrap up
 * the long repeated line of code.
 *
 * The caller must hold hwaccess_lock.
 */
static inline void kbasep_js_clear_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx)
{
	u16 clear_bit;
	u16 clear_mask;

	/* Ensure context really is scheduled in */
	KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
	KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));

	clear_bit = (u16) (1u << kctx->as_nr);
	clear_mask = ~clear_bit;

	dev_dbg(kctx->kbdev->dev, "JS: Clearing Submit Allowed on %p (as=%d)", kctx, kctx->as_nr);

	js_devdata->runpool_irq.submit_allowed &= clear_mask;
}

/**
 * @brief Manage the 'retry_submit_on_slot' part of a kbase_jd_atom
 */
static inline void kbasep_js_clear_job_retry_submit(struct kbase_jd_atom *atom)
{
	atom->retry_submit_on_slot = KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID;
}

/**
 * Mark a slot as requiring resubmission by carrying that information on a
 * completing atom.
 *
 * @note This can ASSERT in debug builds if the submit slot has been set to
 * something other than the current value for @a js. This is because you might
 * be unintentionally stopping more jobs being submitted on the old submit
 * slot, and that might cause a scheduling-hang.
 *
 * @note If you can guarantee that the atoms for the original slot will be
 * submitted on some other slot, then call kbasep_js_clear_job_retry_submit()
 * first to silence the ASSERT.
 */
static inline void kbasep_js_set_job_retry_submit_slot(struct kbase_jd_atom *atom, int js)
{
	KBASE_DEBUG_ASSERT(0 <= js && js <= BASE_JM_MAX_NR_SLOTS);
	KBASE_DEBUG_ASSERT((atom->retry_submit_on_slot ==
					KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID)
				|| (atom->retry_submit_on_slot == js));

	atom->retry_submit_on_slot = js;
}

/**
 * Create an initial 'invalid' atom retained state, that requires no
 * atom-related work to be done on releasing with
 * kbasep_js_runpool_release_ctx_and_katom_retained_state()
 */
static inline void kbasep_js_atom_retained_state_init_invalid(struct kbasep_js_atom_retained_state *retained_state)
{
	retained_state->event_code = BASE_JD_EVENT_NOT_STARTED;
	retained_state->core_req = KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID;
	retained_state->retry_submit_on_slot = KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID;
}
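
/*
 * Illustrative sketch (not part of the original header): releasing a context
 * when there is no atom state to account for. The invalid retained state
 * makes the release behave as if no atom-related work is needed.
 *
 *	struct kbasep_js_atom_retained_state retained;
 *
 *	kbasep_js_atom_retained_state_init_invalid(&retained);
 *	kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx,
 *			&retained);
 */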

/**
 * Copy atom state that can be made available after jd_done_nolock() is called
 * on that atom.
 */
static inline void kbasep_js_atom_retained_state_copy(struct kbasep_js_atom_retained_state *retained_state, const struct kbase_jd_atom *katom)
{
	retained_state->event_code = katom->event_code;
	retained_state->core_req = katom->core_req;
	retained_state->retry_submit_on_slot = katom->retry_submit_on_slot;
	retained_state->sched_priority = katom->sched_priority;
	retained_state->device_nr = katom->device_nr;
}

/**
 * @brief Determine whether an atom has finished (given its retained state),
 * and so should be given back to userspace/removed from the system.
 *
 * Reasons for an atom not finishing include:
 * - Being soft-stopped (and so, the atom should be resubmitted sometime later)
 *
 * @param[in] katom_retained_state the retained state of the atom to check
 * @return false if the atom has not finished
 * @return !=false if the atom has finished
 */
static inline bool kbasep_js_has_atom_finished(const struct kbasep_js_atom_retained_state *katom_retained_state)
{
	return (bool) (katom_retained_state->event_code != BASE_JD_EVENT_STOPPED &&
			katom_retained_state->event_code != BASE_JD_EVENT_REMOVED_FROM_NEXT);
}

/**
 * @brief Determine whether a struct kbasep_js_atom_retained_state is valid
 *
 * An invalid struct kbasep_js_atom_retained_state is allowed, and indicates that the
 * code should just ignore it.
 *
 * @param[in] katom_retained_state the atom's retained state to check
 * @return false if the retained state is invalid, and can be ignored
 * @return !=false if the retained state is valid
 */
static inline bool kbasep_js_atom_retained_state_is_valid(const struct kbasep_js_atom_retained_state *katom_retained_state)
{
	return (bool) (katom_retained_state->core_req !=
			KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID);
}

static inline bool kbasep_js_get_atom_retry_submit_slot(const struct kbasep_js_atom_retained_state *katom_retained_state, int *res)
{
	int js = katom_retained_state->retry_submit_on_slot;

	*res = js;
	return (bool) (js >= 0);
}

/**
 * @brief Variant of kbasep_js_runpool_lookup_ctx() that can be used when the
 * context is guaranteed to be already previously retained.
 *
 * It is a programming error to supply the \a as_nr of a context that has not
 * been previously retained/has a busy refcount of zero. The only exception is
 * when there is no ctx in \a as_nr (NULL returned).
 *
 * The following locking conditions are made on the caller:
 * - it must \em not hold the hwaccess_lock, because it will be used internally.
 *
 * @return a valid struct kbase_context on success, with a refcount that is guaranteed
 * to be non-zero and unmodified by this function.
 * @return NULL on failure, indicating that no context was found in \a as_nr
 */
static inline struct kbase_context *kbasep_js_runpool_lookup_ctx_noretain(struct kbase_device *kbdev, int as_nr)
{
	struct kbase_context *found_kctx;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < BASE_MAX_NR_AS);

	found_kctx = kbdev->as_to_kctx[as_nr];
	KBASE_DEBUG_ASSERT(found_kctx == NULL ||
			atomic_read(&found_kctx->refcount) > 0);

	return found_kctx;
}

/*
 * The following locking conditions are made on the caller:
 * - The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
 * - The caller must hold the kbasep_js_device_data::runpool_mutex
 */
static inline void kbase_js_runpool_inc_context_count(
		struct kbase_device *kbdev,
		struct kbase_context *kctx)
{
	struct kbasep_js_device_data *js_devdata;
	struct kbasep_js_kctx_info *js_kctx_info;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);

	js_devdata = &kbdev->js_data;
	js_kctx_info = &kctx->jctx.sched_info;

	lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
	lockdep_assert_held(&js_devdata->runpool_mutex);

	/* Track total contexts */
	KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running < S8_MAX);
	++(js_devdata->nr_all_contexts_running);

	if (!kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
		/* Track contexts that can submit jobs */
		KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running <
				S8_MAX);
		++(js_devdata->nr_user_contexts_running);
	}
}

/*
 * The following locking conditions are made on the caller:
 * - The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
 * - The caller must hold the kbasep_js_device_data::runpool_mutex
 */
static inline void kbase_js_runpool_dec_context_count(
		struct kbase_device *kbdev,
		struct kbase_context *kctx)
{
	struct kbasep_js_device_data *js_devdata;
	struct kbasep_js_kctx_info *js_kctx_info;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);

	js_devdata = &kbdev->js_data;
	js_kctx_info = &kctx->jctx.sched_info;

	lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
	lockdep_assert_held(&js_devdata->runpool_mutex);

	/* Track total contexts */
	--(js_devdata->nr_all_contexts_running);
	KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running >= 0);

	if (!kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
		/* Track contexts that can submit jobs */
		--(js_devdata->nr_user_contexts_running);
		KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running >= 0);
	}
}

/**
 * @brief Submit atoms from all available contexts to all job slots.
 *
 * This will attempt to submit as many jobs as possible. It will exit when
 * either all job slots are full, or all contexts have been used.
 *
 * @param[in] kbdev Device pointer
 */
static inline void kbase_js_sched_all(struct kbase_device *kbdev)
{
	kbase_js_sched(kbdev, (1 << kbdev->gpu_props.num_job_slots) - 1);
}

extern const int
kbasep_js_atom_priority_to_relative[BASE_JD_NR_PRIO_LEVELS];

extern const base_jd_prio
kbasep_js_relative_priority_to_atom[KBASE_JS_ATOM_SCHED_PRIO_COUNT];

/**
 * kbasep_js_atom_prio_to_sched_prio(): - Convert atom priority (base_jd_prio)
 *                                        to relative ordering
 * @atom_prio: Priority ID to translate.
 *
 * Atom priority values for @ref base_jd_prio cannot be compared directly to
 * find out which are higher or lower.
 *
 * This function will convert base_jd_prio values for successively lower
 * priorities into a monotonically increasing sequence. That is, the lower the
 * base_jd_prio priority, the higher the value produced by this function. This
 * is in accordance with how the rest of the kernel treats priority.
 *
 * The mapping is 1:1 and the size of the valid input range is the same as the
 * size of the valid output range, i.e.
 * KBASE_JS_ATOM_SCHED_PRIO_COUNT == BASE_JD_NR_PRIO_LEVELS
 *
 * Note: This must be kept in sync with BASE_JD_PRIO_<...> definitions
 *
 * Return: On success: a value in the inclusive range
 *         0..KBASE_JS_ATOM_SCHED_PRIO_COUNT-1. On failure:
 *         KBASE_JS_ATOM_SCHED_PRIO_INVALID
 */
static inline int kbasep_js_atom_prio_to_sched_prio(base_jd_prio atom_prio)
{
	if (atom_prio >= BASE_JD_NR_PRIO_LEVELS)
		return KBASE_JS_ATOM_SCHED_PRIO_INVALID;

	return kbasep_js_atom_priority_to_relative[atom_prio];
}

static inline base_jd_prio kbasep_js_sched_prio_to_atom_prio(int sched_prio)
{
	unsigned int prio_idx;

	KBASE_DEBUG_ASSERT(0 <= sched_prio
			&& sched_prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT);

	prio_idx = (unsigned int)sched_prio;

	return kbasep_js_relative_priority_to_atom[prio_idx];
}
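
/*
 * Illustrative sketch (not part of the original header): round-tripping an
 * atom priority through the scheduler's relative ordering. It assumes
 * atom_prio is a base_jd_prio value supplied by the caller.
 *
 *	int sched_prio = kbasep_js_atom_prio_to_sched_prio(atom_prio);
 *
 *	if (sched_prio == KBASE_JS_ATOM_SCHED_PRIO_INVALID)
 *		return -EINVAL;	// atom_prio was out of range
 *
 *	// lower base_jd_prio priority => numerically higher sched_prio
 *	base_jd_prio back = kbasep_js_sched_prio_to_atom_prio(sched_prio);
 */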

/** @} *//* end group kbase_js */
/** @} *//* end group base_kbase_api */
/** @} *//* end group base_api */

#endif /* _KBASE_JS_H_ */