xref: /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/mali_kbase_ctx_sched.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2 /*
3  *
4  * (C) COPYRIGHT 2017-2018, 2020-2022 ARM Limited. All rights reserved.
5  *
6  * This program is free software and is provided to you under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation, and any use by you of this program is subject to the terms
9  * of such GNU license.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, you can access it online at
18  * http://www.gnu.org/licenses/gpl-2.0.html.
19  *
20  */
21 
22 #ifndef _KBASE_CTX_SCHED_H_
23 #define _KBASE_CTX_SCHED_H_
24 
25 #include <mali_kbase.h>
26 
27 /**
28  * DOC: The Context Scheduler manages address space assignment and reference
29  * counting to kbase_context. The interface has been designed to minimise
30  * interactions between the Job Scheduler and Power Management/MMU to support
31  * the existing Job Scheduler interface.
32  *
33  * The initial implementation of the Context Scheduler does not schedule
34  * contexts. Instead it relies on the Job Scheduler to make decisions of
35  * when to schedule/evict contexts if address spaces are starved. In the
36  * future, once an interface between the CS and JS has been devised to
37  * provide enough information about how each context is consuming GPU resources,
38  * those decisions can be made in the CS itself, thereby reducing duplicated
39  * code.
40  */
41 
42 /**
43  * kbase_ctx_sched_init() - Initialise the context scheduler
44  * @kbdev: The device for which the context scheduler needs to be initialised
45  *
46  * This must be called during device initialisation. The number of hardware
47  * address spaces must already be established before calling this function.
48  *
49  * Return: 0 on success, otherwise an error code indicating failure
50  */
51 int kbase_ctx_sched_init(struct kbase_device *kbdev);
52 
53 /**
54  * kbase_ctx_sched_term() - Terminate the context scheduler
55  * @kbdev: The device for which the context scheduler needs to be terminated
56  *
57  * This must be called during device termination after all contexts have been
58  * destroyed.
59  */
60 void kbase_ctx_sched_term(struct kbase_device *kbdev);
61 
62 /**
63  * kbase_ctx_sched_init_ctx() - Initialize per-context data fields for scheduling
64  * @kctx: The context to initialize
65  *
66  * This must be called during context initialization before any other context
67  * scheduling functions are called on @kctx.
68  */
69 void kbase_ctx_sched_init_ctx(struct kbase_context *kctx);
70 
71 /**
72  * kbase_ctx_sched_retain_ctx() - Retain a reference to the &struct kbase_context
73  * @kctx: The context to which to retain a reference
74  *
75  * This function should be called whenever an address space should be assigned
76  * to a context and programmed onto the MMU. It should typically be called
77  * when jobs are ready to be submitted to the GPU.
78  *
79  * It can be called as many times as necessary. The address space will be
80  * assigned to the context for as long as there is a reference to said context.
81  *
82  * The kbase_device::mmu_hw_mutex and kbase_device::hwaccess_lock locks must be
83  * held whilst calling this function.
84  *
85  * Return: The address space that the context has been assigned to or
86  *         KBASEP_AS_NR_INVALID if no address space was available.
87  */
88 int kbase_ctx_sched_retain_ctx(struct kbase_context *kctx);
89 
90 /**
91  * kbase_ctx_sched_retain_ctx_refcount() - Retain a reference to the &struct kbase_context
92  * @kctx: The context to which to retain a reference
93  *
94  * This function only retains a reference to the context. It must be called
95  * only when the context already has a reference.
96  *
97  * This is typically called inside an atomic session where we know the context
98  * is already scheduled in but want to take an extra reference to ensure that
99  * it doesn't get descheduled.
100  *
101  * The kbase_device::hwaccess_lock must be held whilst calling this function.
102  */
103 void kbase_ctx_sched_retain_ctx_refcount(struct kbase_context *kctx);
104 
105 /**
106  * kbase_ctx_sched_release_ctx() - Release a reference to the &struct kbase_context
107  * @kctx: The context from which to release a reference
108  *
109  * This function should be called whenever an address space could be unassigned
110  * from a context. When there are no more references to said context, the
111  * address space previously assigned to this context shall be reassigned to
112  * other contexts as needed.
113  *
114  * The kbase_device::hwaccess_lock must be held whilst calling this function.
115  */
116 void kbase_ctx_sched_release_ctx(struct kbase_context *kctx);
117 
118 /**
119  * kbase_ctx_sched_remove_ctx() - Unassign previously assigned address space
120  * @kctx: The context to be removed
121  *
122  * This function should be called when a context is being destroyed. The
123  * context must no longer have any reference. If it has been assigned an
124  * address space before then the AS will be unprogrammed.
125  */
126 void kbase_ctx_sched_remove_ctx(struct kbase_context *kctx);
127 
128 /**
129  * kbase_ctx_sched_restore_all_as() - Reprogram all address spaces
130  * @kbdev: The device for which address spaces are to be reprogrammed
131  *
132  * This function shall reprogram all address spaces previously assigned to
133  * contexts. It can be used after the GPU is reset.
134  *
135  * The kbase_device::mmu_hw_mutex and kbase_device::hwaccess_lock locks must be
136  * held whilst calling this function.
137  */
138 void kbase_ctx_sched_restore_all_as(struct kbase_device *kbdev);
139 
140 /**
141  * kbase_ctx_sched_as_to_ctx_refcount() - Lookup a context based on its current
142  * address space and ensure that it stays scheduled in
143  * @kbdev: The device for which the returned context must belong
144  * @as_nr: address space assigned to the context of interest
145  *
146  * The context is refcounted as being busy to prevent it from scheduling
147  * out. It must be released with kbase_ctx_sched_release_ctx() when it is no
148  * longer required to stay scheduled in.
149  *
150  * This function can safely be called from IRQ context.
151  *
152  * The following locking conditions are made on the caller:
153  * * it must not hold the kbase_device::hwaccess_lock, because it will be used
154  *   internally.
155  *
156  * Return: a valid struct kbase_context on success, which has been refcounted
157  * as being busy or return NULL on failure, indicating that no context was found
158  * for @as_nr.
159  */
160 struct kbase_context *kbase_ctx_sched_as_to_ctx_refcount(
161 		struct kbase_device *kbdev, size_t as_nr);
162 
163 /**
164  * kbase_ctx_sched_as_to_ctx() - Lookup a context based on its current address
165  * space
166  * @kbdev: The device for which the returned context must belong
167  * @as_nr: address space assigned to the context of interest
168  *
169  * Return: a valid struct kbase_context on success or NULL on failure,
170  * indicating that no context was found for @as_nr.
171  */
172 struct kbase_context *kbase_ctx_sched_as_to_ctx(struct kbase_device *kbdev,
173 		size_t as_nr);
174 
175 /**
176  * kbase_ctx_sched_as_to_ctx_nolock() - Lookup a context based on its current
177  * address space.
178  * @kbdev: The device for which the returned context must belong
179  * @as_nr: address space assigned to the context of interest
180  *
181  * The following lock must be held by the caller:
182  * * kbase_device::hwaccess_lock
183  *
184  * Return: a valid struct kbase_context on success or NULL on failure,
185  * indicating that no context was found for @as_nr.
186  */
187 struct kbase_context *kbase_ctx_sched_as_to_ctx_nolock(
188 		struct kbase_device *kbdev, size_t as_nr);
189 
190 /**
191  * kbase_ctx_sched_inc_refcount_nolock() - Refcount a context as being busy,
192  * preventing it from being scheduled out.
193  * @kctx: Context to be refcounted
194  *
195  * The following locks must be held by the caller:
196  * * &kbase_device.mmu_hw_mutex
197  * * &kbase_device.hwaccess_lock
198  *
199  * Return: true if refcount succeeded, and the context will not be scheduled
200  * out, false if the refcount failed (because the context is being/has been
201  * scheduled out).
202  */
203 bool kbase_ctx_sched_inc_refcount_nolock(struct kbase_context *kctx);
204 
205 /**
206  * kbase_ctx_sched_inc_refcount() - Refcount a context as being busy, preventing
207  * it from being scheduled out.
208  * @kctx: Context to be refcounted
209  *
210  * The following locking conditions are made on the caller:
211  * * it must not hold kbase_device::mmu_hw_mutex and
212  *   kbase_device::hwaccess_lock, because they will be used internally.
213  *
214  * Return: true if refcount succeeded, and the context will not be scheduled
215  * out, false if the refcount failed (because the context is being/has been
216  * scheduled out).
217  */
218 bool kbase_ctx_sched_inc_refcount(struct kbase_context *kctx);
219 
220 /**
221  * kbase_ctx_sched_release_ctx_lock() - Release a reference count of a context
222  * @kctx: Context for which refcount should be decreased
223  *
224  * Effectively, this is a wrapper for kbase_ctx_sched_release_ctx(), but
225  * kbase_device::hwaccess_lock is required NOT to be locked.
226  */
227 void kbase_ctx_sched_release_ctx_lock(struct kbase_context *kctx);
228 
229 #if MALI_USE_CSF
230 /**
231  * kbase_ctx_sched_inc_refcount_if_as_valid() - Refcount the context if it has GPU
232  *                                              address space slot assigned to it.
233  *
234  * @kctx: Context to be refcounted
235  *
236  * This function takes a reference on the context if it has a GPU address space
237  * slot assigned to it. The address space slot will not be available for
238  * re-assignment until the reference is released.
239  *
240  * Return: true if refcount succeeded and the address space slot will not be
241  * reassigned, false if the refcount failed (because the address space slot
242  * was not assigned).
243  */
244 bool kbase_ctx_sched_inc_refcount_if_as_valid(struct kbase_context *kctx);
245 #endif
246 
247 #endif /* _KBASE_CTX_SCHED_H_ */
248