// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2012-2016, 2018, 2020-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include <mali_kbase.h>
#include <mali_kbase_config.h>

/*
 * Private functions follow
 */
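
/*
 * Note on the refcounting scheme (a summary of the code below):
 *
 * Each context attribute is reference counted at two levels:
 * - per context, in js_kctx_info->ctx.ctx_attr_ref_count[], counting how many
 *   retentions of that attribute the context currently holds;
 * - per device, in js_devdata->runpool_irq.ctx_attr_ref_count[], counting how
 *   many *scheduled* contexts currently hold that attribute.
 *
 * The runpool count is only adjusted on the 0 <-> 1 transitions of the
 * per-context count, and the runpool-level helpers report a state change only
 * when the runpool count itself makes a 0 <-> 1 transition, i.e. when the
 * attribute first appears on, or finally disappears from, the set of
 * scheduled contexts.
 */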

/**
 * kbasep_js_ctx_attr_runpool_retain_attr - Check whether a ctx has a certain attribute
 * and if so, retain that attribute on the runpool.
 *
 * @kbdev: Device pointer
 * @kctx: KBase context
 * @attribute: Attribute to check/retain
 *
 * Requires:
 * - jsctx mutex
 * - runpool_irq spinlock
 * - ctx is scheduled on the runpool
 *
 * Return: true indicates a change in ctx attributes state of the runpool.
 * In this state, the scheduler might be able to submit more jobs than
 * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock()
 * or similar is called sometime later.
 * false indicates no change in ctx attributes state of the runpool.
 */
static bool kbasep_js_ctx_attr_runpool_retain_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
{
	struct kbasep_js_device_data *js_devdata;
	struct kbasep_js_kctx_info *js_kctx_info;
	bool runpool_state_changed = false;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);
	KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
	js_devdata = &kbdev->js_data;
	js_kctx_info = &kctx->jctx.sched_info;

	lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
	lockdep_assert_held(&kbdev->hwaccess_lock);

	KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));

	if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, attribute) != false) {
		KBASE_DEBUG_ASSERT(js_devdata->runpool_irq.ctx_attr_ref_count[attribute] < S8_MAX);
		++(js_devdata->runpool_irq.ctx_attr_ref_count[attribute]);

		if (js_devdata->runpool_irq.ctx_attr_ref_count[attribute] == 1) {
			/* First refcount indicates a state change */
			runpool_state_changed = true;
			KBASE_KTRACE_ADD_JM(kbdev, JS_CTX_ATTR_NOW_ON_RUNPOOL, kctx, NULL, 0u, attribute);
		}
	}

	return runpool_state_changed;
}

/**
 * kbasep_js_ctx_attr_runpool_release_attr - Check whether a ctx has a certain attribute,
 * and if so, release that attribute on the runpool.
 *
 * @kbdev: Device pointer
 * @kctx: KBase context
 * @attribute: Attribute to release
 *
 * Requires:
 * - jsctx mutex
 * - runpool_irq spinlock
 * - ctx is scheduled on the runpool
 *
 * Return: true indicates a change in ctx attributes state of the runpool.
 * In this state, the scheduler might be able to submit more jobs than
 * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock()
 * or similar is called sometime later.
 * false indicates no change in ctx attributes state of the runpool.
 */
static bool kbasep_js_ctx_attr_runpool_release_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
{
	struct kbasep_js_device_data *js_devdata;
	struct kbasep_js_kctx_info *js_kctx_info;
	bool runpool_state_changed = false;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);
	KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
	js_devdata = &kbdev->js_data;
	js_kctx_info = &kctx->jctx.sched_info;

	lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
	lockdep_assert_held(&kbdev->hwaccess_lock);
	KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));

	if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, attribute) != false) {
		KBASE_DEBUG_ASSERT(js_devdata->runpool_irq.ctx_attr_ref_count[attribute] > 0);
		--(js_devdata->runpool_irq.ctx_attr_ref_count[attribute]);

		if (js_devdata->runpool_irq.ctx_attr_ref_count[attribute] == 0) {
			/* Last de-refcount indicates a state change */
			runpool_state_changed = true;
			KBASE_KTRACE_ADD_JM(kbdev, JS_CTX_ATTR_NOW_OFF_RUNPOOL, kctx, NULL, 0u, attribute);
		}
	}

	return runpool_state_changed;
}

/**
 * kbasep_js_ctx_attr_ctx_retain_attr - Retain a certain attribute on a ctx,
 * also retaining it on the runpool if the context is scheduled.
 *
 * @kbdev: Device pointer
 * @kctx: KBase context
 * @attribute: Attribute to retain
 *
 * Requires:
 * - jsctx mutex
 * - If the context is scheduled, then runpool_irq spinlock must also be held
 *
 * Return: true indicates a change in ctx attributes state of the runpool.
 * This may allow the scheduler to submit more jobs than previously.
 * false indicates no change in ctx attributes state of the runpool.
 */
static bool kbasep_js_ctx_attr_ctx_retain_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
{
	struct kbasep_js_kctx_info *js_kctx_info;
	bool runpool_state_changed = false;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);
	KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
	js_kctx_info = &kctx->jctx.sched_info;

	lockdep_assert_held(&kbdev->hwaccess_lock);
	lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
	KBASE_DEBUG_ASSERT(js_kctx_info->ctx.ctx_attr_ref_count[attribute] < U32_MAX);

	++(js_kctx_info->ctx.ctx_attr_ref_count[attribute]);

	if (kbase_ctx_flag(kctx, KCTX_SCHEDULED) && js_kctx_info->ctx.ctx_attr_ref_count[attribute] == 1) {
		/* Only ref-count the attribute on the runpool the first time this context sees this attribute */
		KBASE_KTRACE_ADD_JM(kbdev, JS_CTX_ATTR_NOW_ON_CTX, kctx, NULL, 0u, attribute);
		runpool_state_changed = kbasep_js_ctx_attr_runpool_retain_attr(kbdev, kctx, attribute);
	}

	return runpool_state_changed;
}

/**
 * kbasep_js_ctx_attr_ctx_release_attr - Release a certain attribute on a ctx,
 * also releasing it from the runpool if the context is scheduled.
 *
 * @kbdev: Device pointer
 * @kctx: KBase context
 * @attribute: Attribute to release
 *
 * Requires:
 * - jsctx mutex
 * - If the context is scheduled, then runpool_irq spinlock must also be held
 *
 * Return: true indicates a change in ctx attributes state of the runpool.
 * This may allow the scheduler to submit more jobs than previously.
 * false indicates no change in ctx attributes state of the runpool.
 */
static bool kbasep_js_ctx_attr_ctx_release_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
{
	struct kbasep_js_kctx_info *js_kctx_info;
	bool runpool_state_changed = false;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);
	KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
	js_kctx_info = &kctx->jctx.sched_info;

	lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
	KBASE_DEBUG_ASSERT(js_kctx_info->ctx.ctx_attr_ref_count[attribute] > 0);

	if (kbase_ctx_flag(kctx, KCTX_SCHEDULED) && js_kctx_info->ctx.ctx_attr_ref_count[attribute] == 1) {
		lockdep_assert_held(&kbdev->hwaccess_lock);
		/* Only de-ref-count the attribute on the runpool when this is the last ctx-reference to it */
		runpool_state_changed = kbasep_js_ctx_attr_runpool_release_attr(kbdev, kctx, attribute);
		KBASE_KTRACE_ADD_JM(kbdev, JS_CTX_ATTR_NOW_OFF_CTX, kctx, NULL, 0u, attribute);
	}

	/* The de-ref must happen afterwards, because kbasep_js_ctx_attr_runpool_release_attr() needs to check it too */
	--(js_kctx_info->ctx.ctx_attr_ref_count[attribute]);

	return runpool_state_changed;
}

/*
 * More commonly used public functions
 */
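
/*
 * Summary of the public entry points below:
 * - kbasep_js_ctx_attr_runpool_retain_ctx()/..._release_ctx(): called when a
 *   context is scheduled onto / out of the runpool, to mirror every attribute
 *   the context already holds onto / off the device-wide counts.
 * - kbasep_js_ctx_attr_ctx_retain_atom()/..._release_atom(): translate an
 *   atom's core requirements into context attributes when the atom is added
 *   and when its retained state is later released.
 */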

void kbasep_js_ctx_attr_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx)
{
	bool runpool_state_changed;
	int i;

	/* Retain any existing attributes */
	for (i = 0; i < KBASEP_JS_CTX_ATTR_COUNT; ++i) {
		if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, (enum kbasep_js_ctx_attr) i) != false) {
			/* The context is being scheduled in, so update the runpool with the new attributes */
			runpool_state_changed = kbasep_js_ctx_attr_runpool_retain_attr(kbdev, kctx, (enum kbasep_js_ctx_attr) i);

			/* We don't need to know whether the state changed, because
			 * retaining a context occurs when scheduling it in, and that
			 * itself will also try to run new atoms
			 */
			CSTD_UNUSED(runpool_state_changed);
		}
	}
}

bool kbasep_js_ctx_attr_runpool_release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx)
{
	bool runpool_state_changed = false;
	int i;

	/* Release any existing attributes */
	for (i = 0; i < KBASEP_JS_CTX_ATTR_COUNT; ++i) {
		if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, (enum kbasep_js_ctx_attr) i) != false) {
			/* The context is being scheduled out, so update the runpool on the removed attributes */
			runpool_state_changed |= kbasep_js_ctx_attr_runpool_release_attr(kbdev, kctx, (enum kbasep_js_ctx_attr) i);
		}
	}

	return runpool_state_changed;
}
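
/*
 * The set of attributes derived from core_req below must match the set
 * derived in kbasep_js_ctx_attr_ctx_release_atom(), otherwise the
 * per-attribute refcounts would become unbalanced. Note that the release
 * path works from a struct kbasep_js_atom_retained_state copy of core_req
 * rather than from the atom itself.
 */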
void kbasep_js_ctx_attr_ctx_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom)
{
	bool runpool_state_changed = false;
	base_jd_core_req core_req;

	KBASE_DEBUG_ASSERT(katom);
	core_req = katom->core_req;

	if (core_req & BASE_JD_REQ_ONLY_COMPUTE)
		runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE);
	else
		runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_NON_COMPUTE);

	if ((core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T)) != 0 && (core_req & (BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)) == 0) {
		/* Atom that can run on slot1 or slot2, and can use all cores */
		runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES);
	}

	/* We don't need to know whether the state changed, because retaining an
	 * atom occurs when adding it, and that itself will also try to run new
	 * atoms
	 */
	CSTD_UNUSED(runpool_state_changed);
}

bool kbasep_js_ctx_attr_ctx_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state)
{
	bool runpool_state_changed = false;
	base_jd_core_req core_req;

	KBASE_DEBUG_ASSERT(katom_retained_state);
	core_req = katom_retained_state->core_req;

	/* No-op for invalid atoms */
	if (kbasep_js_atom_retained_state_is_valid(katom_retained_state) == false)
		return false;

	if (core_req & BASE_JD_REQ_ONLY_COMPUTE)
		runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE);
	else
		runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_NON_COMPUTE);

	if ((core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T)) != 0 && (core_req & (BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)) == 0) {
		/* Atom that can run on slot1 or slot2, and can use all cores */
		runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES);
	}

	return runpool_state_changed;
}