/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2014-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/*
 * HW access job manager common APIs
 */

#ifndef _KBASE_HWACCESS_JM_H_
#define _KBASE_HWACCESS_JM_H_

/**
 * kbase_backend_run_atom() - Run an atom on the GPU
 * @kbdev:	Device pointer
 * @katom:	Atom to run
 *
 * Caller must hold the HW access lock
 */
void kbase_backend_run_atom(struct kbase_device *kbdev,
				struct kbase_jd_atom *katom);
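
/*
 * Illustrative sketch only (not part of this interface): one way a caller
 * could satisfy the locking rule above, assuming "the HW access lock" is the
 * hwaccess_lock spinlock in struct kbase_device. The surrounding caller code
 * is hypothetical.
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 *	kbase_backend_run_atom(kbdev, katom);
 *	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 */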

/**
 * kbase_backend_slot_update - Update state based on slot ringbuffers
 *
 * @kbdev:  Device pointer
 *
 * Inspect the jobs in the slot ringbuffers and update state.
 *
 * This will cause jobs to be submitted to hardware if they are unblocked
 */
void kbase_backend_slot_update(struct kbase_device *kbdev);

/**
 * kbase_backend_find_and_release_free_address_space() - Release a free AS
 * @kbdev:	Device pointer
 * @kctx:	Context pointer
 *
 * This function can evict an idle context from the runpool, freeing up the
 * address space it was using.
 *
 * The address space is marked as in use. The caller must either assign a
 * context using kbase_gpu_use_ctx(), or release it using
 * kbase_ctx_sched_release().
 *
 * Return: Number of the free address space, or KBASEP_AS_NR_INVALID if none
 *	   available
 */
int kbase_backend_find_and_release_free_address_space(
		struct kbase_device *kbdev, struct kbase_context *kctx);

/**
 * kbase_backend_use_ctx() - Activate a currently unscheduled context, using the
 *			     provided address space.
 * @kbdev:	Device pointer
 * @kctx:	Context pointer. May be NULL
 * @as_nr:	Free address space to use
 *
 * kbase_gpu_next_job() will pull atoms from the active context.
 *
 * Return: true if successful, false if ASID not assigned.
 */
bool kbase_backend_use_ctx(struct kbase_device *kbdev,
				struct kbase_context *kctx,
				int as_nr);
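
/*
 * Illustrative sketch only (hypothetical caller, not part of this header):
 * pairing kbase_backend_find_and_release_free_address_space() with
 * kbase_backend_use_ctx() to bring an unscheduled context onto the GPU.
 *
 *	int as_nr;
 *	bool activated;
 *
 *	as_nr = kbase_backend_find_and_release_free_address_space(kbdev, kctx);
 *	if (as_nr == KBASEP_AS_NR_INVALID)
 *		return false;
 *	activated = kbase_backend_use_ctx(kbdev, kctx, as_nr);
 *
 * If activated is false, the address space obtained above still has to be
 * released, e.g. via kbase_ctx_sched_release() as noted in the kernel-doc
 * further up.
 */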

/**
 * kbase_backend_use_ctx_sched() - Activate a context.
 * @kbdev:	Device pointer
 * @kctx:	Context pointer
 * @js:         Job slot to activate context on
 *
 * kbase_gpu_next_job() will pull atoms from the active context.
 *
 * The context must already be scheduled and assigned to an address space. If
 * the context is not scheduled, then kbase_gpu_use_ctx() should be used
 * instead.
 *
 * Caller must hold hwaccess_lock
 *
 * Return: true if the context is now active, false otherwise (i.e. if the
 *	   context does not have an address space assigned)
 */
bool kbase_backend_use_ctx_sched(struct kbase_device *kbdev, struct kbase_context *kctx,
				 unsigned int js);
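
/*
 * Illustrative sketch only (hypothetical caller): activating an already
 * scheduled context on slot @js with the required lock held.
 * handle_unassigned_ctx() is a placeholder for whatever the caller does when
 * the context has no address space; it is not part of this header.
 *
 *	lockdep_assert_held(&kbdev->hwaccess_lock);
 *
 *	if (!kbase_backend_use_ctx_sched(kbdev, kctx, js))
 *		handle_unassigned_ctx(kbdev, kctx);
 */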

/**
 * kbase_backend_release_ctx_irq - Release a context from the GPU. This will
 *                                 de-assign the assigned address space.
 * @kbdev: Device pointer
 * @kctx:  Context pointer
 *
 * Caller must hold kbase_device->mmu_hw_mutex and hwaccess_lock
 */
void kbase_backend_release_ctx_irq(struct kbase_device *kbdev,
				struct kbase_context *kctx);

/**
 * kbase_backend_release_ctx_noirq - Release a context from the GPU. This will
 *                                   de-assign the assigned address space.
 * @kbdev: Device pointer
 * @kctx:  Context pointer
 *
 * Caller must hold kbase_device->mmu_hw_mutex
 *
 * This function must perform any operations that could not be performed in IRQ
 * context by kbase_backend_release_ctx_irq().
 */
void kbase_backend_release_ctx_noirq(struct kbase_device *kbdev,
						struct kbase_context *kctx);
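
/*
 * Illustrative sketch only (hypothetical caller): the two-phase release
 * described above, taking the locks named in the kernel-doc. The exact call
 * site and ordering used by the scheduler are assumptions here.
 *
 *	unsigned long flags;
 *
 *	mutex_lock(&kbdev->mmu_hw_mutex);
 *	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 *	kbase_backend_release_ctx_irq(kbdev, kctx);
 *	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 *	kbase_backend_release_ctx_noirq(kbdev, kctx);
 *	mutex_unlock(&kbdev->mmu_hw_mutex);
 */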

/**
 * kbase_backend_cache_clean - Perform a cache clean if the given atom requires
 *                             one
 * @kbdev:	Device pointer
 * @katom:	Pointer to the failed atom
 *
 * On some GPUs, the GPU cache must be cleaned following a failed atom. This
 * function performs a clean if it is required by @katom.
 */
void kbase_backend_cache_clean(struct kbase_device *kbdev,
		struct kbase_jd_atom *katom);

/**
 * kbase_backend_complete_wq() - Perform backend-specific actions required on
 *				 completing an atom.
 * @kbdev:	Device pointer
 * @katom:	Pointer to the atom to complete
 *
 * This function should only be called from kbase_jd_done_worker() or
 * js_return_worker().
 */
void kbase_backend_complete_wq(struct kbase_device *kbdev,
				struct kbase_jd_atom *katom);

#if !MALI_USE_CSF
/**
 * kbase_backend_complete_wq_post_sched - Perform backend-specific actions
 *                                        required on completing an atom, after
 *                                        any scheduling has taken place.
 * @kbdev:         Device pointer
 * @core_req:      Core requirements of atom
 *
 * This function should only be called from kbase_jd_done_worker() or
 * js_return_worker().
 */
void kbase_backend_complete_wq_post_sched(struct kbase_device *kbdev,
		base_jd_core_req core_req);
#endif /* !MALI_USE_CSF */

/**
 * kbase_backend_reset() - The GPU is being reset. Cancel all jobs on the GPU
 *			   and remove any others from the ringbuffers.
 * @kbdev:		Device pointer
 * @end_timestamp:	Timestamp of reset
 */
void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp);

/**
 * kbase_backend_inspect_tail - Return the atom currently at the tail of slot
 *                              @js
 * @kbdev: Device pointer
 * @js:    Job slot to inspect
 *
 * Return: Atom currently at the tail of slot @js, or NULL
 */
struct kbase_jd_atom *kbase_backend_inspect_tail(struct kbase_device *kbdev, unsigned int js);

/**
 * kbase_backend_nr_atoms_on_slot() - Return the number of atoms currently on a
 *				      slot.
 * @kbdev:	Device pointer
 * @js:		Job slot to inspect
 *
 * Return: Number of atoms currently on slot
 */
int kbase_backend_nr_atoms_on_slot(struct kbase_device *kbdev, unsigned int js);

/**
 * kbase_backend_nr_atoms_submitted() - Return the number of atoms on a slot
 *					that are currently on the GPU.
 * @kbdev:	Device pointer
 * @js:		Job slot to inspect
 *
 * Return: Number of atoms on slot @js that are currently on the GPU.
 */
int kbase_backend_nr_atoms_submitted(struct kbase_device *kbdev, unsigned int js);

/**
 * kbase_backend_ctx_count_changed() - Number of contexts ready to submit jobs
 *				       has changed.
 * @kbdev:	Device pointer
 *
 * Perform any required backend-specific actions (e.g. starting/stopping
 * scheduling timers).
 */
void kbase_backend_ctx_count_changed(struct kbase_device *kbdev);

/**
 * kbase_backend_timeouts_changed() - Job Scheduler timeouts have changed.
 * @kbdev:	Device pointer
 *
 * Perform any required backend-specific actions (e.g. updating timeouts of
 * currently running atoms).
 */
void kbase_backend_timeouts_changed(struct kbase_device *kbdev);

/**
 * kbase_backend_slot_free() - Return the number of jobs that can be currently
 *			       submitted to slot @js.
 * @kbdev:	Device pointer
 * @js:		Job slot to inspect
 *
 * Return: Number of jobs that can be submitted.
 */
int kbase_backend_slot_free(struct kbase_device *kbdev, unsigned int js);
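
/*
 * Illustrative sketch only (hypothetical submission loop): using the slot
 * query above to decide whether another atom can be sent to slot @js, under
 * the HW access lock as required by kbase_backend_run_atom().
 * pull_next_ready_atom() is a placeholder for however the scheduler picks the
 * next ready atom; it is not part of this header.
 *
 *	while (kbase_backend_slot_free(kbdev, js) > 0) {
 *		struct kbase_jd_atom *katom = pull_next_ready_atom(kbdev, js);
 *
 *		if (!katom)
 *			break;
 *		kbase_backend_run_atom(kbdev, katom);
 *	}
 */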

/**
 * kbase_job_check_leave_disjoint - potentially leave disjoint state
 * @kbdev: kbase device
 * @target_katom: atom which is finishing
 *
 * Work out whether to leave disjoint state when finishing an atom for which
 * disjoint state was entered by kbase_job_check_enter_disjoint().
 */
void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
		struct kbase_jd_atom *target_katom);

/**
 * kbase_backend_jm_kill_running_jobs_from_kctx - Kill all jobs that are
 *                               currently running on GPU from a context
 * @kctx: Context pointer
 *
 * This is used in response to a page fault to remove all of the faulting
 * context's jobs from the hardware.
 *
 * Caller must hold hwaccess_lock.
 */
void kbase_backend_jm_kill_running_jobs_from_kctx(struct kbase_context *kctx);

/**
 * kbase_jm_wait_for_zero_jobs - Wait for context to have zero jobs running, and
 *                               to be descheduled.
 * @kctx: Context pointer
 *
 * This should be called following kbase_js_zap_context(), to ensure the context
 * can be safely destroyed.
 */
void kbase_jm_wait_for_zero_jobs(struct kbase_context *kctx);
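
/*
 * Illustrative sketch only: the teardown ordering implied by the kernel-doc
 * above, assuming kbase_js_zap_context() takes the context being destroyed.
 *
 *	kbase_js_zap_context(kctx);
 *	kbase_jm_wait_for_zero_jobs(kctx);
 *
 * After both calls return it is safe to tear down @kctx.
 */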

/**
 * kbase_backend_get_current_flush_id - Return the current flush ID
 *
 * @kbdev: Device pointer
 *
 * Return: the current flush ID to be recorded for each job chain
 */
u32 kbase_backend_get_current_flush_id(struct kbase_device *kbdev);

/**
 * kbase_job_slot_hardstop - Hard-stop the specified job slot
 * @kctx:         The kbase context that contains the job(s) that should
 *                be hard-stopped
 * @js:           The job slot to hard-stop
 * @target_katom: The job that should be hard-stopped (or NULL for all
 *                jobs from the context)
 * Context:
 *   The job slot lock must be held when calling this function.
 */
void kbase_job_slot_hardstop(struct kbase_context *kctx, unsigned int js,
			     struct kbase_jd_atom *target_katom);
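
/*
 * Illustrative sketch only (hypothetical caller): hard-stopping every job the
 * context has on slot @js, with the job slot lock named above already held.
 * Passing a specific atom instead of NULL limits the hard-stop to that atom,
 * as described in the kernel-doc.
 *
 *	kbase_job_slot_hardstop(kctx, js, NULL);
 */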

/**
 * kbase_gpu_atoms_submitted_any() - Inspect whether there are any atoms
 * currently on the GPU
 * @kbdev:  Device pointer
 *
 * Return: true if there are any atoms on the GPU, false otherwise
 */
bool kbase_gpu_atoms_submitted_any(struct kbase_device *kbdev);

/**
 * kbase_backend_slot_kctx_purge_locked - Perform a purge on the slot_rb tracked
 *                                        kctx
 *
 * @kbdev:	Device pointer
 * @kctx:	The kbase context that needs to be purged from slot_rb[]
 *
 * For JM GPUs, the L1 read only caches may need a start_flush invalidation,
 * potentially on all slots (even if the kctx was only using a single slot),
 * following a context termination or address-space ID recycle. This function
 * performs a clean-up purge on the given kctx if it has been tracked by the
 * slot_rb[] objects.
 *
 * Caller must hold kbase_device->hwaccess_lock.
 */
void kbase_backend_slot_kctx_purge_locked(struct kbase_device *kbdev, struct kbase_context *kctx);

#endif /* _KBASE_HWACCESS_JM_H_ */