/*
 *
 * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */



/*
 * HW access job manager common APIs
 */

#ifndef _KBASE_HWACCESS_JM_H_
#define _KBASE_HWACCESS_JM_H_

/**
 * kbase_backend_run_atom() - Run an atom on the GPU
 * @kbdev:	Device pointer
 * @katom:	Atom to run
 *
 * Caller must hold the HW access lock
 */
void kbase_backend_run_atom(struct kbase_device *kbdev,
				struct kbase_jd_atom *katom);
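
/*
 * Illustrative usage sketch, not part of the API: kbase_backend_run_atom()
 * requires the HW access lock. Assuming that lock is kbdev->hwaccess_lock
 * (a spinlock, as used elsewhere in the driver), a caller would typically
 * wrap the call as follows:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 *	kbase_backend_run_atom(kbdev, katom);
 *	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 */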

/**
 * kbase_backend_slot_update - Update state based on slot ringbuffers
 *
 * @kbdev:  Device pointer
 *
 * Inspect the jobs in the slot ringbuffers and update state.
 *
 * This will cause jobs to be submitted to hardware if they are unblocked.
 */
void kbase_backend_slot_update(struct kbase_device *kbdev);

/**
 * kbase_backend_find_and_release_free_address_space() - Release a free AS
 * @kbdev:	Device pointer
 * @kctx:	Context pointer
 *
 * This function can evict an idle context from the runpool, freeing up the
 * address space it was using.
 *
 * The address space is marked as in use. The caller must either assign a
 * context using kbase_backend_use_ctx(), or release it using
 * kbase_ctx_sched_release().
 *
 * Return: Number of the freed address space, or KBASEP_AS_NR_INVALID if none
 *	   is available
 */
int kbase_backend_find_and_release_free_address_space(
		struct kbase_device *kbdev, struct kbase_context *kctx);

/**
 * kbase_backend_use_ctx() - Activate a currently unscheduled context, using the
 *			     provided address space.
 * @kbdev:	Device pointer
 * @kctx:	Context pointer. May be NULL
 * @as_nr:	Free address space to use
 *
 * kbase_gpu_next_job() will pull atoms from the active context.
 *
 * Return: true if successful, false if ASID not assigned.
 */
bool kbase_backend_use_ctx(struct kbase_device *kbdev,
				struct kbase_context *kctx,
				int as_nr);
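
/*
 * Illustrative usage sketch, not part of the API: a caller that needs an
 * address space for an unscheduled context might pair
 * kbase_backend_find_and_release_free_address_space() with
 * kbase_backend_use_ctx() roughly as below. Locking and error handling are
 * omitted; KBASEP_AS_NR_INVALID is the sentinel documented above.
 *
 *	int as_nr;
 *
 *	as_nr = kbase_backend_find_and_release_free_address_space(kbdev, kctx);
 *	if (as_nr == KBASEP_AS_NR_INVALID)
 *		return false;
 *	if (!kbase_backend_use_ctx(kbdev, kctx, as_nr))
 *		return false;
 */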

/**
 * kbase_backend_use_ctx_sched() - Activate a context.
 * @kbdev:	Device pointer
 * @kctx:	Context pointer
 *
 * kbase_gpu_next_job() will pull atoms from the active context.
 *
 * The context must already be scheduled and assigned to an address space. If
 * the context is not scheduled, then kbase_backend_use_ctx() should be used
 * instead.
 *
 * Caller must hold hwaccess_lock
 *
 * Return: true if context is now active, false otherwise (i.e. if the context
 *	   does not have an address space assigned)
 */
bool kbase_backend_use_ctx_sched(struct kbase_device *kbdev,
					struct kbase_context *kctx);

/**
 * kbase_backend_release_ctx_irq - Release a context from the GPU. This will
 *                                 de-assign the assigned address space.
 * @kbdev: Device pointer
 * @kctx:  Context pointer
 *
 * Caller must hold kbase_device->mmu_hw_mutex and hwaccess_lock
 */
void kbase_backend_release_ctx_irq(struct kbase_device *kbdev,
				struct kbase_context *kctx);

/**
 * kbase_backend_release_ctx_noirq - Release a context from the GPU. This will
 *                                   de-assign the assigned address space.
 * @kbdev: Device pointer
 * @kctx:  Context pointer
 *
 * Caller must hold kbase_device->mmu_hw_mutex
 *
 * This function must perform any operations that could not be performed in IRQ
 * context by kbase_backend_release_ctx_irq().
 */
void kbase_backend_release_ctx_noirq(struct kbase_device *kbdev,
						struct kbase_context *kctx);

/**
 * kbase_backend_cacheclean - Perform a cache clean if the given atom requires
 *                            one
 * @kbdev:	Device pointer
 * @katom:	Pointer to the failed atom
 *
 * On some GPUs, the GPU cache must be cleaned following a failed atom. This
 * function performs a clean if it is required by @katom.
 */
void kbase_backend_cacheclean(struct kbase_device *kbdev,
		struct kbase_jd_atom *katom);


/**
 * kbase_backend_complete_wq() - Perform backend-specific actions required on
 *				 completing an atom.
 * @kbdev:	Device pointer
 * @katom:	Pointer to the atom to complete
 *
 * This function should only be called from kbase_jd_done_worker() or
 * js_return_worker().
 */
void kbase_backend_complete_wq(struct kbase_device *kbdev,
				struct kbase_jd_atom *katom);

/**
 * kbase_backend_complete_wq_post_sched - Perform backend-specific actions
 *                                        required on completing an atom, after
 *                                        any scheduling has taken place.
 * @kbdev:         Device pointer
 * @core_req:      Core requirements of atom
 * @affinity:      Affinity of atom
 * @coreref_state: Coreref state of atom
 *
 * This function should only be called from kbase_jd_done_worker() or
 * js_return_worker().
 */
void kbase_backend_complete_wq_post_sched(struct kbase_device *kbdev,
		base_jd_core_req core_req, u64 affinity,
		enum kbase_atom_coreref_state coreref_state);

/**
 * kbase_backend_reset() - The GPU is being reset. Cancel all jobs on the GPU
 *			   and remove any others from the ringbuffers.
 * @kbdev:		Device pointer
 * @end_timestamp:	Timestamp of reset
 */
void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp);

/**
 * kbase_backend_inspect_head() - Return the atom currently at the head of slot
 *				  @js
 * @kbdev:	Device pointer
 * @js:		Job slot to inspect
 *
 * Return: Atom currently at the head of slot @js, or NULL
 */
struct kbase_jd_atom *kbase_backend_inspect_head(struct kbase_device *kbdev,
					int js);

/**
 * kbase_backend_inspect_tail - Return the atom currently at the tail of slot
 *                              @js
 * @kbdev: Device pointer
 * @js:    Job slot to inspect
 *
 * Return: Atom currently at the tail of slot @js, or NULL
 */
struct kbase_jd_atom *kbase_backend_inspect_tail(struct kbase_device *kbdev,
					int js);

/**
 * kbase_backend_nr_atoms_on_slot() - Return the number of atoms currently on a
 *				      slot.
 * @kbdev:	Device pointer
 * @js:		Job slot to inspect
 *
 * Return: Number of atoms currently on slot @js
 */
int kbase_backend_nr_atoms_on_slot(struct kbase_device *kbdev, int js);

/**
 * kbase_backend_nr_atoms_submitted() - Return the number of atoms on a slot
 *					that are currently on the GPU.
 * @kbdev:	Device pointer
 * @js:		Job slot to inspect
 *
 * Return: Number of atoms on slot @js that are currently on the GPU.
 */
int kbase_backend_nr_atoms_submitted(struct kbase_device *kbdev, int js);

/**
 * kbase_backend_ctx_count_changed() - Number of contexts ready to submit jobs
 *				       has changed.
 * @kbdev:	Device pointer
 *
 * Perform any required backend-specific actions (e.g. starting/stopping
 * scheduling timers).
 */
void kbase_backend_ctx_count_changed(struct kbase_device *kbdev);

/**
 * kbase_backend_timeouts_changed() - Job Scheduler timeouts have changed.
 * @kbdev:	Device pointer
 *
 * Perform any required backend-specific actions (e.g. updating timeouts of
 * currently running atoms).
 */
void kbase_backend_timeouts_changed(struct kbase_device *kbdev);

/**
 * kbase_backend_slot_free() - Return the number of jobs that can currently be
 *			       submitted to slot @js.
 * @kbdev:	Device pointer
 * @js:		Job slot to inspect
 *
 * Return: Number of jobs that can be submitted.
 */
int kbase_backend_slot_free(struct kbase_device *kbdev, int js);
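
/*
 * Illustrative usage sketch, not part of the API: a submission loop might
 * consult kbase_backend_slot_free() before pulling work for slot @js. The
 * helper pull_next_atom() is hypothetical and stands in for whatever
 * produces the next atom; the HW access lock is assumed to be held, as
 * required by kbase_backend_run_atom().
 *
 *	while (kbase_backend_slot_free(kbdev, js) > 0) {
 *		struct kbase_jd_atom *katom = pull_next_atom(kctx, js);
 *
 *		if (!katom)
 *			break;
 *		kbase_backend_run_atom(kbdev, katom);
 *	}
 */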

/**
 * kbase_job_check_leave_disjoint - potentially leave disjoint state
 * @kbdev: kbase device
 * @target_katom: atom which is finishing
 *
 * Work out whether to leave disjoint state when finishing an atom that was
 * originated by kbase_job_check_enter_disjoint().
 */
void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
		struct kbase_jd_atom *target_katom);

/**
 * kbase_backend_jm_kill_jobs_from_kctx - Kill all jobs that are currently
 *                                        running from a context
 * @kctx: Context pointer
 *
 * This is used in response to a page fault to remove all jobs from the faulting
 * context from the hardware.
 */
void kbase_backend_jm_kill_jobs_from_kctx(struct kbase_context *kctx);

/**
 * kbase_jm_wait_for_zero_jobs - Wait for context to have zero jobs running, and
 *                               to be descheduled.
 * @kctx: Context pointer
 *
 * This should be called following kbase_js_zap_context(), to ensure the context
 * can be safely destroyed.
 */
void kbase_jm_wait_for_zero_jobs(struct kbase_context *kctx);
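
/*
 * Illustrative usage sketch, not part of the API: the documented teardown
 * order is to zap the context and then wait for its jobs to drain before it
 * is destroyed. The final destroy call is named only to illustrate that
 * last step.
 *
 *	kbase_js_zap_context(kctx);
 *	kbase_jm_wait_for_zero_jobs(kctx);
 *	kbase_destroy_context(kctx);
 */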

/**
 * kbase_backend_get_current_flush_id - Return the current flush ID
 *
 * @kbdev: Device pointer
 *
 * Return: the current flush ID to be recorded for each job chain
 */
u32 kbase_backend_get_current_flush_id(struct kbase_device *kbdev);

#if KBASE_GPU_RESET_EN
/**
 * kbase_prepare_to_reset_gpu - Prepare for resetting the GPU.
 * @kbdev: Device pointer
 *
 * This function just soft-stops all the slots to ensure that as many jobs as
 * possible are saved.
 *
 * Return: a boolean which should be interpreted as follows:
 * - true  - Prepared for reset, kbase_reset_gpu should be called.
 * - false - Another thread is performing a reset, kbase_reset_gpu should
 *           not be called.
 */
bool kbase_prepare_to_reset_gpu(struct kbase_device *kbdev);

/**
 * kbase_reset_gpu - Reset the GPU
 * @kbdev: Device pointer
 *
 * This function should be called after kbase_prepare_to_reset_gpu if it returns
 * true. It should never be called without a corresponding call to
 * kbase_prepare_to_reset_gpu.
 *
 * After this function is called (or not called if kbase_prepare_to_reset_gpu
 * returned false), the caller should wait for kbdev->reset_waitq to be
 * signalled to know when the reset has completed.
 */
void kbase_reset_gpu(struct kbase_device *kbdev);
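
/*
 * Illustrative usage sketch, not part of the API: the documented reset
 * sequence is prepare, reset, then wait on kbdev->reset_waitq. The wait
 * condition shown here is an assumption, using kbase_reset_gpu_active() to
 * detect completion.
 *
 *	if (kbase_prepare_to_reset_gpu(kbdev))
 *		kbase_reset_gpu(kbdev);
 *	wait_event(kbdev->reset_waitq, !kbase_reset_gpu_active(kbdev));
 */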

/**
 * kbase_prepare_to_reset_gpu_locked - Prepare for resetting the GPU.
 * @kbdev: Device pointer
 *
 * This function just soft-stops all the slots to ensure that as many jobs as
 * possible are saved.
 *
 * Return: a boolean which should be interpreted as follows:
 * - true  - Prepared for reset, kbase_reset_gpu should be called.
 * - false - Another thread is performing a reset, kbase_reset_gpu should
 *           not be called.
 */
bool kbase_prepare_to_reset_gpu_locked(struct kbase_device *kbdev);

/**
 * kbase_reset_gpu_locked - Reset the GPU
 * @kbdev: Device pointer
 *
 * This function should be called after kbase_prepare_to_reset_gpu if it
 * returns true. It should never be called without a corresponding call to
 * kbase_prepare_to_reset_gpu.
 *
 * After this function is called (or not called if kbase_prepare_to_reset_gpu
 * returned false), the caller should wait for kbdev->reset_waitq to be
 * signalled to know when the reset has completed.
 */
void kbase_reset_gpu_locked(struct kbase_device *kbdev);

/**
 * kbase_reset_gpu_silent - Reset the GPU silently
 * @kbdev: Device pointer
 *
 * Reset the GPU without trying to cancel jobs and don't emit messages into
 * the kernel log while doing the reset.
 *
 * This function should be used in cases where we are doing a controlled reset
 * of the GPU as part of normal processing (e.g. exiting protected mode) where
 * the driver will have ensured the scheduler has been idled and all other
 * users of the GPU (e.g. instrumentation) have been suspended.
 */
void kbase_reset_gpu_silent(struct kbase_device *kbdev);

/**
 * kbase_reset_gpu_active - Reports if the GPU is being reset
 * @kbdev: Device pointer
 *
 * Return: True if the GPU is in the process of being reset.
 */
bool kbase_reset_gpu_active(struct kbase_device *kbdev);
#endif /* KBASE_GPU_RESET_EN */

/**
 * kbase_job_slot_hardstop - Hard-stop the specified job slot
 * @kctx:         The kbase context that contains the job(s) that should
 *                be hard-stopped
 * @js:           The job slot to hard-stop
 * @target_katom: The job that should be hard-stopped (or NULL for all
 *                jobs from the context)
 * Context:
 *   The job slot lock must be held when calling this function.
 */
void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
				struct kbase_jd_atom *target_katom);
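
/*
 * Illustrative usage sketch, not part of the API: hard-stopping every job
 * from a context on slot @js, assuming the "job slot lock" referred to above
 * is kbdev->hwaccess_lock (as used by the other APIs in this header).
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 *	kbase_job_slot_hardstop(kctx, js, NULL);
 *	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 */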

extern struct protected_mode_ops kbase_native_protected_ops;

#endif /* _KBASE_HWACCESS_JM_H_ */