xref: /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/mali_kbase_js.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
2 /*
3  *
4  * (C) COPYRIGHT 2011-2023 ARM Limited. All rights reserved.
5  *
6  * This program is free software and is provided to you under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation, and any use by you of this program is subject to the terms
9  * of such GNU license.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, you can access it online at
18  * http://www.gnu.org/licenses/gpl-2.0.html.
19  *
20  */
21 
22 /*
23  * Job Scheduler Implementation
24  */
25 #include <mali_kbase.h>
26 #include <mali_kbase_js.h>
27 #include <tl/mali_kbase_tracepoints.h>
28 #include <mali_linux_trace.h>
29 #include <mali_kbase_hw.h>
30 #include <mali_kbase_ctx_sched.h>
31 
32 #include <mali_kbase_defs.h>
33 #include <mali_kbase_config_defaults.h>
34 
35 #include "mali_kbase_jm.h"
36 #include "mali_kbase_hwaccess_jm.h"
37 #include <mali_kbase_hwaccess_time.h>
38 #include <linux/priority_control_manager.h>
39 
40 /*
41  * Private types
42  */
43 
44 /* Bitpattern indicating the result of releasing a context */
45 enum {
46 	/* The context was descheduled - caller should try scheduling in a new
47 	 * one to keep the runpool full
48 	 */
49 	KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED = (1u << 0),
50 	/* Ctx attributes were changed - caller should try scheduling all
51 	 * contexts
52 	 */
53 	KBASEP_JS_RELEASE_RESULT_SCHED_ALL = (1u << 1)
54 };
55 
56 typedef u32 kbasep_js_release_result;
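/*
 * Illustrative sketch (editor's example, not part of the original file):
 * how a caller would typically act on the release-result bitpattern above.
 * The function name and its 'release_result' parameter are hypothetical.
 */
static inline void example_handle_release_result(kbasep_js_release_result release_result)
{
	if (release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) {
		/* A context left the runpool: try to schedule another one
		 * to keep the runpool full.
		 */
	}
	if (release_result & KBASEP_JS_RELEASE_RESULT_SCHED_ALL) {
		/* Ctx attributes changed: re-evaluate all contexts. */
	}
}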
57 
58 const int kbasep_js_atom_priority_to_relative[BASE_JD_NR_PRIO_LEVELS] = {
59 	KBASE_JS_ATOM_SCHED_PRIO_MED,      /* BASE_JD_PRIO_MEDIUM */
60 	KBASE_JS_ATOM_SCHED_PRIO_HIGH,     /* BASE_JD_PRIO_HIGH */
61 	KBASE_JS_ATOM_SCHED_PRIO_LOW,      /* BASE_JD_PRIO_LOW */
62 	KBASE_JS_ATOM_SCHED_PRIO_REALTIME  /* BASE_JD_PRIO_REALTIME */
63 };
64 
65 const base_jd_prio
66 kbasep_js_relative_priority_to_atom[KBASE_JS_ATOM_SCHED_PRIO_COUNT] = {
67 	BASE_JD_PRIO_REALTIME,   /* KBASE_JS_ATOM_SCHED_PRIO_REALTIME */
68 	BASE_JD_PRIO_HIGH,       /* KBASE_JS_ATOM_SCHED_PRIO_HIGH */
69 	BASE_JD_PRIO_MEDIUM,     /* KBASE_JS_ATOM_SCHED_PRIO_MED */
70 	BASE_JD_PRIO_LOW         /* KBASE_JS_ATOM_SCHED_PRIO_LOW */
71 };
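/*
 * Illustrative sketch (editor's example): the two tables above are inverse
 * mappings, so converting a userspace priority to the scheduler's relative
 * level and back yields the original value, e.g.
 * BASE_JD_PRIO_HIGH -> KBASE_JS_ATOM_SCHED_PRIO_HIGH -> BASE_JD_PRIO_HIGH.
 * The function name is hypothetical; @prio is assumed to be a valid value
 * below BASE_JD_NR_PRIO_LEVELS.
 */
static inline base_jd_prio example_priority_round_trip(base_jd_prio prio)
{
	int relative = kbasep_js_atom_priority_to_relative[prio];

	return kbasep_js_relative_priority_to_atom[relative];
}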
72 
73 
74 /*
75  * Private function prototypes
76  */
77 static kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(
78 		struct kbase_device *kbdev, struct kbase_context *kctx,
79 		struct kbasep_js_atom_retained_state *katom_retained_state);
80 
81 static unsigned int kbase_js_get_slot(struct kbase_device *kbdev, struct kbase_jd_atom *katom);
82 
83 static void kbase_js_foreach_ctx_job(struct kbase_context *kctx,
84 				     kbasep_js_ctx_job_cb *callback);
85 
86 /* Helper for ktrace */
87 #if KBASE_KTRACE_ENABLE
88 static int kbase_ktrace_get_ctx_refcnt(struct kbase_context *kctx)
89 {
90 	return atomic_read(&kctx->refcount);
91 }
92 #else /* KBASE_KTRACE_ENABLE  */
93 static int kbase_ktrace_get_ctx_refcnt(struct kbase_context *kctx)
94 {
95 	CSTD_UNUSED(kctx);
96 	return 0;
97 }
98 #endif /* KBASE_KTRACE_ENABLE  */
99 
100 /*
101  * Private functions
102  */
103 
104 /**
105  * core_reqs_from_jsn_features - Convert JSn_FEATURES to core requirements
106  * @features: JSn_FEATURE register value
107  *
108  * Given a JSn_FEATURE register value returns the core requirements that match
109  *
110  * Return: Core requirement bit mask
111  */
112 static base_jd_core_req core_reqs_from_jsn_features(u16 features)
113 {
114 	base_jd_core_req core_req = 0u;
115 
116 	if ((features & JS_FEATURE_SET_VALUE_JOB) != 0)
117 		core_req |= BASE_JD_REQ_V;
118 
119 	if ((features & JS_FEATURE_CACHE_FLUSH_JOB) != 0)
120 		core_req |= BASE_JD_REQ_CF;
121 
122 	if ((features & JS_FEATURE_COMPUTE_JOB) != 0)
123 		core_req |= BASE_JD_REQ_CS;
124 
125 	if ((features & JS_FEATURE_TILER_JOB) != 0)
126 		core_req |= BASE_JD_REQ_T;
127 
128 	if ((features & JS_FEATURE_FRAGMENT_JOB) != 0)
129 		core_req |= BASE_JD_REQ_FS;
130 
131 	return core_req;
132 }
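/*
 * Worked example (editor's note): a hypothetical JSn_FEATURES value with
 * the compute and tiler bits set decodes as
 *
 *	u16 features = JS_FEATURE_COMPUTE_JOB | JS_FEATURE_TILER_JOB;
 *	base_jd_core_req reqs = core_reqs_from_jsn_features(features);
 *	-> reqs == (BASE_JD_REQ_CS | BASE_JD_REQ_T)
 *
 * kbasep_js_devdata_init() below applies this conversion to every job slot
 * reported by the GPU.
 */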
133 
134 static void kbase_js_sync_timers(struct kbase_device *kbdev)
135 {
136 	mutex_lock(&kbdev->js_data.runpool_mutex);
137 	kbase_backend_ctx_count_changed(kbdev);
138 	mutex_unlock(&kbdev->js_data.runpool_mutex);
139 }
140 
141 /**
142  * jsctx_rb_none_to_pull_prio(): - Check if there are no pullable atoms
143  * @kctx: Pointer to kbase context with ring buffer.
144  * @js:   Job slot id to check.
145  * @prio: Priority to check.
146  *
147  * Return true if there are no atoms to pull. There may be running atoms in the
148  * ring buffer even if there are no atoms to pull. It is also possible for the
149  * ring buffer to be full (with running atoms) when this function returns
150  * true.
151  *
152  * Return: true if there are no atoms to pull, false otherwise.
153  */
154 static inline bool jsctx_rb_none_to_pull_prio(struct kbase_context *kctx, unsigned int js, int prio)
155 {
156 	bool none_to_pull;
157 	struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js];
158 
159 	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
160 
161 	none_to_pull = RB_EMPTY_ROOT(&rb->runnable_tree);
162 
163 	dev_dbg(kctx->kbdev->dev, "Slot %u (prio %d) is %spullable in kctx %pK\n", js, prio,
164 		none_to_pull ? "not " : "", kctx);
165 
166 	return none_to_pull;
167 }
168 
169 /**
170  * jsctx_rb_none_to_pull(): - Check if all priority ring buffers have no
171  * pullable atoms
172  * @kctx: Pointer to kbase context with ring buffer.
173  * @js:   Job slot id to check.
174  *
175  * Caller must hold hwaccess_lock
176  *
177  * Return: true if the ring buffers for all priorities have no pullable atoms,
178  *	   false otherwise.
179  */
180 static inline bool jsctx_rb_none_to_pull(struct kbase_context *kctx, unsigned int js)
181 {
182 	int prio;
183 
184 	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
185 
186 	for (prio = KBASE_JS_ATOM_SCHED_PRIO_FIRST;
187 		prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
188 		if (!jsctx_rb_none_to_pull_prio(kctx, js, prio))
189 			return false;
190 	}
191 
192 	return true;
193 }
194 
195 /**
196  * jsctx_queue_foreach_prio(): - Execute callback for each entry in the queue.
197  * @kctx:     Pointer to kbase context with the queue.
198  * @js:       Job slot id to iterate.
199  * @prio:     Priority id to iterate.
200  * @callback: Function pointer to callback.
201  *
202  * Iterate over a queue and invoke @callback for each entry in the queue, and
203  * remove the entry from the queue.
204  *
205  * If entries are added to the queue while this is running, those entries
206  * may or may not be covered. To ensure that all entries in the buffer have
207  * been enumerated when this function returns, jsctx->lock must be held when
208  * calling this function.
209  *
210  * The HW access lock must always be held when calling this function.
211  */
212 static void jsctx_queue_foreach_prio(struct kbase_context *kctx, unsigned int js, int prio,
213 				     kbasep_js_ctx_job_cb *callback)
214 {
215 	struct jsctx_queue *queue = &kctx->jsctx_queue[prio][js];
216 
217 	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
218 
219 	while (!RB_EMPTY_ROOT(&queue->runnable_tree)) {
220 		struct rb_node *node = rb_first(&queue->runnable_tree);
221 		struct kbase_jd_atom *entry = rb_entry(node,
222 				struct kbase_jd_atom, runnable_tree_node);
223 
224 		rb_erase(node, &queue->runnable_tree);
225 		callback(kctx->kbdev, entry);
226 
227 		/* Runnable end-of-renderpass atoms can also be in the linked
228 		 * list of atoms blocked on cross-slot dependencies. Remove them
229 		 * to avoid calling the callback twice.
230 		 */
231 		if (entry->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST) {
232 			WARN_ON(!(entry->core_req &
233 				BASE_JD_REQ_END_RENDERPASS));
234 			dev_dbg(kctx->kbdev->dev,
235 				"Del runnable atom %pK from X_DEP list\n",
236 				(void *)entry);
237 
238 			list_del(&entry->queue);
239 			entry->atom_flags &=
240 					~KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST;
241 		}
242 	}
243 
244 	while (!list_empty(&queue->x_dep_head)) {
245 		struct kbase_jd_atom *entry = list_entry(queue->x_dep_head.next,
246 				struct kbase_jd_atom, queue);
247 
248 		WARN_ON(!(entry->atom_flags &
249 			KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST));
250 		dev_dbg(kctx->kbdev->dev,
251 			"Del blocked atom %pK from X_DEP list\n",
252 			(void *)entry);
253 
254 		list_del(queue->x_dep_head.next);
255 		entry->atom_flags &=
256 				~KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST;
257 
258 		callback(kctx->kbdev, entry);
259 	}
260 }
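/*
 * Illustrative sketch (editor's example): a minimal kbasep_js_ctx_job_cb
 * callback of the shape consumed by jsctx_queue_foreach_prio(). Real
 * callbacks are supplied via kbase_js_foreach_ctx_job(); this hypothetical
 * one only logs each atom as it is removed from the queue.
 */
static inline void example_log_atom_cb(struct kbase_device *kbdev,
				       struct kbase_jd_atom *katom)
{
	dev_dbg(kbdev->dev, "Dequeued atom %pK\n", (void *)katom);
}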
261 
262 /**
263  * jsctx_queue_foreach(): - Execute callback for each entry in every queue
264  * @kctx:     Pointer to kbase context with queue.
265  * @js:       Job slot id to iterate.
266  * @callback: Function pointer to callback.
267  *
268  * Iterate over all the different priorities, and for each call
269  * jsctx_queue_foreach_prio() to iterate over the queue and invoke @callback
270  * for each entry, and remove the entry from the queue.
271  */
272 static inline void jsctx_queue_foreach(struct kbase_context *kctx, unsigned int js,
273 				       kbasep_js_ctx_job_cb *callback)
274 {
275 	int prio;
276 
277 	for (prio = KBASE_JS_ATOM_SCHED_PRIO_FIRST;
278 		prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++)
279 		jsctx_queue_foreach_prio(kctx, js, prio, callback);
280 }
281 
282 /**
283  * jsctx_rb_peek_prio(): - Check buffer and get next atom
284  * @kctx: Pointer to kbase context with ring buffer.
285  * @js:   Job slot id to check.
286  * @prio: Priority id to check.
287  *
288  * Check the ring buffer for the specified @js and @prio and return a pointer to
289  * the next atom, unless the ring buffer is empty.
290  *
291  * Return: Pointer to next atom in buffer, or NULL if there is no atom.
292  */
293 static inline struct kbase_jd_atom *jsctx_rb_peek_prio(struct kbase_context *kctx, unsigned int js,
294 						       int prio)
295 {
296 	struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js];
297 	struct rb_node *node;
298 
299 	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
300 	dev_dbg(kctx->kbdev->dev, "Peeking runnable tree of kctx %pK for prio %d (s:%u)\n",
301 		(void *)kctx, prio, js);
302 
303 	node = rb_first(&rb->runnable_tree);
304 	if (!node) {
305 		dev_dbg(kctx->kbdev->dev, "Tree is empty\n");
306 		return NULL;
307 	}
308 
309 	return rb_entry(node, struct kbase_jd_atom, runnable_tree_node);
310 }
311 
312 /**
313  * jsctx_rb_peek(): - Check all priority buffers and get next atom
314  * @kctx: Pointer to kbase context with ring buffer.
315  * @js:   Job slot id to check.
316  *
317  * Check the ring buffers for all priorities, starting from
318  * KBASE_JS_ATOM_SCHED_PRIO_REALTIME, for the specified @js and return a
319  * pointer to the next atom, unless all the priorities' ring buffers are empty.
320  *
321  * Caller must hold the hwaccess_lock.
322  *
323  * Return: Pointer to next atom in buffer, or NULL if there is no atom.
324  */
325 static inline struct kbase_jd_atom *jsctx_rb_peek(struct kbase_context *kctx, unsigned int js)
326 {
327 	int prio;
328 
329 	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
330 
331 	for (prio = KBASE_JS_ATOM_SCHED_PRIO_FIRST;
332 		prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
333 		struct kbase_jd_atom *katom;
334 
335 		katom = jsctx_rb_peek_prio(kctx, js, prio);
336 		if (katom)
337 			return katom;
338 	}
339 
340 	return NULL;
341 }
342 
343 /**
344  * jsctx_rb_pull(): - Mark atom in list as running
345  * @kctx:  Pointer to kbase context with ring buffer.
346  * @katom: Pointer to katom to pull.
347  *
348  * Mark an atom previously obtained from jsctx_rb_peek() as running.
349  *
350  * @katom must currently be at the head of the ring buffer.
351  */
352 static inline void
353 jsctx_rb_pull(struct kbase_context *kctx, struct kbase_jd_atom *katom)
354 {
355 	int prio = katom->sched_priority;
356 	unsigned int js = katom->slot_nr;
357 	struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js];
358 
359 	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
360 
361 	dev_dbg(kctx->kbdev->dev, "Erasing atom %pK from runnable tree of kctx %pK\n",
362 		(void *)katom, (void *)kctx);
363 
364 	/* Atoms must be pulled in the correct order. */
365 	WARN_ON(katom != jsctx_rb_peek_prio(kctx, js, prio));
366 
367 	rb_erase(&katom->runnable_tree_node, &rb->runnable_tree);
368 }
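/*
 * Illustrative sketch (editor's example, hypothetical function name): the
 * peek-then-pull idiom used by the scheduler. An atom must be observed at
 * the head of the queue with jsctx_rb_peek() before being marked running
 * with jsctx_rb_pull(), all while holding hwaccess_lock.
 */
static inline struct kbase_jd_atom *example_peek_then_pull(struct kbase_context *kctx,
							   unsigned int js)
{
	struct kbase_jd_atom *katom;

	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);

	katom = jsctx_rb_peek(kctx, js);
	if (katom)
		jsctx_rb_pull(kctx, katom);

	return katom;
}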
369 
370 static void
371 jsctx_tree_add(struct kbase_context *kctx, struct kbase_jd_atom *katom)
372 {
373 	struct kbase_device *kbdev = kctx->kbdev;
374 	int prio = katom->sched_priority;
375 	unsigned int js = katom->slot_nr;
376 	struct jsctx_queue *queue = &kctx->jsctx_queue[prio][js];
377 	struct rb_node **new = &(queue->runnable_tree.rb_node), *parent = NULL;
378 
379 	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
380 
381 	dev_dbg(kbdev->dev, "Adding atom %pK to runnable tree of kctx %pK (s:%u)\n", (void *)katom,
382 		(void *)kctx, js);
383 
384 	while (*new) {
385 		struct kbase_jd_atom *entry = container_of(*new,
386 				struct kbase_jd_atom, runnable_tree_node);
387 
388 		parent = *new;
389 		if (kbase_jd_atom_is_younger(katom, entry))
390 			new = &((*new)->rb_left);
391 		else
392 			new = &((*new)->rb_right);
393 	}
394 
395 	/* Add new node and rebalance tree. */
396 	rb_link_node(&katom->runnable_tree_node, parent, new);
397 	rb_insert_color(&katom->runnable_tree_node, &queue->runnable_tree);
398 
399 	KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(kbdev, katom, TL_ATOM_STATE_READY);
400 }
401 
402 /**
403  * jsctx_rb_unpull(): - Undo marking of atom in list as running
404  * @kctx:  Pointer to kbase context with ring buffer.
405  * @katom: Pointer to katom to unpull.
406  *
407  * Undo jsctx_rb_pull() and put @katom back in the queue.
408  *
409  * jsctx_rb_unpull() must be called on atoms in the same order the atoms were
410  * pulled.
411  */
412 static inline void
413 jsctx_rb_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom)
414 {
415 	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
416 
417 	KBASE_KTRACE_ADD_JM(kctx->kbdev, JS_UNPULL_JOB, kctx, katom, katom->jc,
418 			    0u);
419 
420 	jsctx_tree_add(kctx, katom);
421 }
422 
423 static bool kbase_js_ctx_pullable(struct kbase_context *kctx, unsigned int js, bool is_scheduled);
424 static bool kbase_js_ctx_list_add_pullable_nolock(struct kbase_device *kbdev,
425 						  struct kbase_context *kctx, unsigned int js);
426 static bool kbase_js_ctx_list_add_unpullable_nolock(struct kbase_device *kbdev,
427 						    struct kbase_context *kctx, unsigned int js);
428 
429 typedef bool(katom_ordering_func)(const struct kbase_jd_atom *,
430 				  const struct kbase_jd_atom *);
431 
432 bool kbase_js_atom_runs_before(struct kbase_device *kbdev,
433 			       const struct kbase_jd_atom *katom_a,
434 			       const struct kbase_jd_atom *katom_b,
435 			       const kbase_atom_ordering_flag_t order_flags)
436 {
437 	struct kbase_context *kctx_a = katom_a->kctx;
438 	struct kbase_context *kctx_b = katom_b->kctx;
439 	katom_ordering_func *samectxatomprio_ordering_func =
440 		kbase_jd_atom_is_younger;
441 
442 	lockdep_assert_held(&kbdev->hwaccess_lock);
443 
444 	if (order_flags & KBASE_ATOM_ORDERING_FLAG_SEQNR)
445 		samectxatomprio_ordering_func = kbase_jd_atom_is_earlier;
446 
447 	/* It only makes sense to make this test for atoms on the same slot */
448 	WARN_ON(katom_a->slot_nr != katom_b->slot_nr);
449 
450 	if (kbdev->js_ctx_scheduling_mode ==
451 	    KBASE_JS_PROCESS_LOCAL_PRIORITY_MODE) {
452 		/* In local priority mode, querying either way around for "a
453 		 * should run before b" and "b should run before a" should
454 		 * always be false when they're from different contexts
455 		 */
456 		if (kctx_a != kctx_b)
457 			return false;
458 	} else {
459 		/* In system priority mode, ordering is done first strictly by
460 		 * context priority, even when katom_b might be lower priority
461 		 * than katom_a. This is due to scheduling of contexts in order
462 		 * of highest priority first, regardless of whether the atoms
463 		 * for a particular slot from such contexts have the highest
464 		 * priority or not.
465 		 */
466 		if (kctx_a != kctx_b) {
467 			if (kctx_a->priority < kctx_b->priority)
468 				return true;
469 			if (kctx_a->priority > kctx_b->priority)
470 				return false;
471 		}
472 	}
473 
474 	/* For same contexts/contexts with the same context priority (in system
475 	 * priority mode), ordering is next done by atom priority
476 	 */
477 	if (katom_a->sched_priority < katom_b->sched_priority)
478 		return true;
479 	if (katom_a->sched_priority > katom_b->sched_priority)
480 		return false;
481 	/* For atoms of same priority on the same kctx, they are
482 	 * ordered by seq_nr/age (dependent on caller)
483 	 */
484 	if (kctx_a == kctx_b && samectxatomprio_ordering_func(katom_a, katom_b))
485 		return true;
486 
487 	return false;
488 }
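/*
 * Worked example (editor's note) for kbase_js_atom_runs_before() in system
 * priority mode: for atoms A and B on the same slot but from different
 * contexts, if A's context priority is numerically lower (i.e. higher
 * priority) than B's, then A runs before B regardless of the atoms' own
 * priorities. Only when the context priorities tie, or the atoms share a
 * context, do atom priority and then seq_nr/age break the tie.
 */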
489 
490 /*
491  * Functions private to KBase ('Protected' functions)
492  */
493 int kbasep_js_devdata_init(struct kbase_device * const kbdev)
494 {
495 	struct kbasep_js_device_data *jsdd;
496 	int i, j;
497 
498 	KBASE_DEBUG_ASSERT(kbdev != NULL);
499 
500 	jsdd = &kbdev->js_data;
501 
502 #ifdef CONFIG_MALI_BIFROST_DEBUG
503 	/* Soft-stop will be disabled on a single context by default unless
504 	 * softstop_always is set
505 	 */
506 	jsdd->softstop_always = false;
507 #endif				/* CONFIG_MALI_BIFROST_DEBUG */
508 	jsdd->nr_all_contexts_running = 0;
509 	jsdd->nr_user_contexts_running = 0;
510 	jsdd->nr_contexts_pullable = 0;
511 	atomic_set(&jsdd->nr_contexts_runnable, 0);
512 	/* No ctx allowed to submit */
513 	jsdd->runpool_irq.submit_allowed = 0u;
514 	memset(jsdd->runpool_irq.ctx_attr_ref_count, 0,
515 			sizeof(jsdd->runpool_irq.ctx_attr_ref_count));
516 	memset(jsdd->runpool_irq.slot_affinities, 0,
517 			sizeof(jsdd->runpool_irq.slot_affinities));
518 	memset(jsdd->runpool_irq.slot_affinity_refcount, 0,
519 			sizeof(jsdd->runpool_irq.slot_affinity_refcount));
520 	INIT_LIST_HEAD(&jsdd->suspended_soft_jobs_list);
521 
522 	/* Config attributes */
523 	jsdd->scheduling_period_ns = DEFAULT_JS_SCHEDULING_PERIOD_NS;
524 	jsdd->soft_stop_ticks = DEFAULT_JS_SOFT_STOP_TICKS;
525 	jsdd->soft_stop_ticks_cl = DEFAULT_JS_SOFT_STOP_TICKS_CL;
526 	jsdd->hard_stop_ticks_ss = DEFAULT_JS_HARD_STOP_TICKS_SS;
527 	jsdd->hard_stop_ticks_cl = DEFAULT_JS_HARD_STOP_TICKS_CL;
528 	jsdd->hard_stop_ticks_dumping = DEFAULT_JS_HARD_STOP_TICKS_DUMPING;
529 	jsdd->gpu_reset_ticks_ss = DEFAULT_JS_RESET_TICKS_SS;
530 	jsdd->gpu_reset_ticks_cl = DEFAULT_JS_RESET_TICKS_CL;
531 
532 	jsdd->gpu_reset_ticks_dumping = DEFAULT_JS_RESET_TICKS_DUMPING;
533 	jsdd->ctx_timeslice_ns = DEFAULT_JS_CTX_TIMESLICE_NS;
534 	atomic_set(&jsdd->soft_job_timeout_ms, DEFAULT_JS_SOFT_JOB_TIMEOUT);
535 	jsdd->js_free_wait_time_ms = kbase_get_timeout_ms(kbdev, JM_DEFAULT_JS_FREE_TIMEOUT);
536 
537 	dev_dbg(kbdev->dev, "JS Config Attribs: ");
538 	dev_dbg(kbdev->dev, "\tscheduling_period_ns:%u",
539 			jsdd->scheduling_period_ns);
540 	dev_dbg(kbdev->dev, "\tsoft_stop_ticks:%u",
541 			jsdd->soft_stop_ticks);
542 	dev_dbg(kbdev->dev, "\tsoft_stop_ticks_cl:%u",
543 			jsdd->soft_stop_ticks_cl);
544 	dev_dbg(kbdev->dev, "\thard_stop_ticks_ss:%u",
545 			jsdd->hard_stop_ticks_ss);
546 	dev_dbg(kbdev->dev, "\thard_stop_ticks_cl:%u",
547 			jsdd->hard_stop_ticks_cl);
548 	dev_dbg(kbdev->dev, "\thard_stop_ticks_dumping:%u",
549 			jsdd->hard_stop_ticks_dumping);
550 	dev_dbg(kbdev->dev, "\tgpu_reset_ticks_ss:%u",
551 			jsdd->gpu_reset_ticks_ss);
552 	dev_dbg(kbdev->dev, "\tgpu_reset_ticks_cl:%u",
553 			jsdd->gpu_reset_ticks_cl);
554 	dev_dbg(kbdev->dev, "\tgpu_reset_ticks_dumping:%u",
555 			jsdd->gpu_reset_ticks_dumping);
556 	dev_dbg(kbdev->dev, "\tctx_timeslice_ns:%u",
557 			jsdd->ctx_timeslice_ns);
558 	dev_dbg(kbdev->dev, "\tsoft_job_timeout:%i",
559 		atomic_read(&jsdd->soft_job_timeout_ms));
560 	dev_dbg(kbdev->dev, "\tjs_free_wait_time_ms:%u", jsdd->js_free_wait_time_ms);
561 
562 	if (!(jsdd->soft_stop_ticks < jsdd->hard_stop_ticks_ss &&
563 			jsdd->hard_stop_ticks_ss < jsdd->gpu_reset_ticks_ss &&
564 			jsdd->soft_stop_ticks < jsdd->hard_stop_ticks_dumping &&
565 			jsdd->hard_stop_ticks_dumping <
566 			jsdd->gpu_reset_ticks_dumping)) {
567 		dev_err(kbdev->dev, "Job scheduler timeouts invalid; soft/hard/reset tick counts should be in increasing order\n");
568 		return -EINVAL;
569 	}
570 
571 #if KBASE_DISABLE_SCHEDULING_SOFT_STOPS
572 	dev_dbg(kbdev->dev, "Job Scheduling Soft-stops disabled, ignoring value for soft_stop_ticks==%u at %uns per tick. Other soft-stops may still occur.",
573 			jsdd->soft_stop_ticks,
574 			jsdd->scheduling_period_ns);
575 #endif
576 #if KBASE_DISABLE_SCHEDULING_HARD_STOPS
577 	dev_dbg(kbdev->dev, "Job Scheduling Hard-stops disabled, ignoring values for hard_stop_ticks_ss==%d and hard_stop_ticks_dumping==%u at %uns per tick. Other hard-stops may still occur.",
578 			jsdd->hard_stop_ticks_ss,
579 			jsdd->hard_stop_ticks_dumping,
580 			jsdd->scheduling_period_ns);
581 #endif
582 #if KBASE_DISABLE_SCHEDULING_SOFT_STOPS && KBASE_DISABLE_SCHEDULING_HARD_STOPS
583 	dev_dbg(kbdev->dev, "Note: The JS tick timer (if coded) will still be run, but do nothing.");
584 #endif
585 
586 	for (i = 0; i < kbdev->gpu_props.num_job_slots; ++i)
587 		jsdd->js_reqs[i] = core_reqs_from_jsn_features(
588 			kbdev->gpu_props.props.raw_props.js_features[i]);
589 
590 	/* On error, we could continue, provided none of the below resources
591 	 * rely on the ones above
592 	 */
593 
594 	mutex_init(&jsdd->runpool_mutex);
595 	mutex_init(&jsdd->queue_mutex);
596 	sema_init(&jsdd->schedule_sem, 1);
597 
598 	for (i = 0; i < kbdev->gpu_props.num_job_slots; ++i) {
599 		for (j = KBASE_JS_ATOM_SCHED_PRIO_FIRST; j < KBASE_JS_ATOM_SCHED_PRIO_COUNT; ++j) {
600 			INIT_LIST_HEAD(&jsdd->ctx_list_pullable[i][j]);
601 			INIT_LIST_HEAD(&jsdd->ctx_list_unpullable[i][j]);
602 		}
603 	}
604 
605 	return 0;
606 }
607 
608 void kbasep_js_devdata_halt(struct kbase_device *kbdev)
609 {
610 	CSTD_UNUSED(kbdev);
611 }
612 
613 void kbasep_js_devdata_term(struct kbase_device *kbdev)
614 {
615 	struct kbasep_js_device_data *js_devdata;
616 	s8 zero_ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT] = { 0, };
617 	CSTD_UNUSED(js_devdata);
618 
619 	KBASE_DEBUG_ASSERT(kbdev != NULL);
620 
621 	js_devdata = &kbdev->js_data;
622 
623 	/* The caller must de-register all contexts before calling this
624 	 */
625 	KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running == 0);
626 	KBASE_DEBUG_ASSERT(memcmp(
627 				  js_devdata->runpool_irq.ctx_attr_ref_count,
628 				  zero_ctx_attr_ref_count,
629 				  sizeof(zero_ctx_attr_ref_count)) == 0);
630 	CSTD_UNUSED(zero_ctx_attr_ref_count);
631 }
632 
633 int kbasep_js_kctx_init(struct kbase_context *const kctx)
634 {
635 	struct kbasep_js_kctx_info *js_kctx_info;
636 	int i, j;
637 	CSTD_UNUSED(js_kctx_info);
638 
639 	KBASE_DEBUG_ASSERT(kctx != NULL);
640 
641 	kbase_ctx_sched_init_ctx(kctx);
642 
643 	for (i = 0; i < BASE_JM_MAX_NR_SLOTS; ++i)
644 		INIT_LIST_HEAD(&kctx->jctx.sched_info.ctx.ctx_list_entry[i]);
645 
646 	js_kctx_info = &kctx->jctx.sched_info;
647 
648 	kctx->slots_pullable = 0;
649 	js_kctx_info->ctx.nr_jobs = 0;
650 	kbase_ctx_flag_clear(kctx, KCTX_SCHEDULED);
651 	kbase_ctx_flag_clear(kctx, KCTX_DYING);
652 	memset(js_kctx_info->ctx.ctx_attr_ref_count, 0,
653 			sizeof(js_kctx_info->ctx.ctx_attr_ref_count));
654 
655 	/* Initially, the context is disabled from submission until the create
656 	 * flags are set
657 	 */
658 	kbase_ctx_flag_set(kctx, KCTX_SUBMIT_DISABLED);
659 
660 	/* On error, we could continue, provided none of the below resources
661 	 * rely on the ones above
662 	 */
663 	mutex_init(&js_kctx_info->ctx.jsctx_mutex);
664 
665 	init_waitqueue_head(&js_kctx_info->ctx.is_scheduled_wait);
666 
667 	for (i = KBASE_JS_ATOM_SCHED_PRIO_FIRST; i < KBASE_JS_ATOM_SCHED_PRIO_COUNT; i++) {
668 		for (j = 0; j < BASE_JM_MAX_NR_SLOTS; j++) {
669 			INIT_LIST_HEAD(&kctx->jsctx_queue[i][j].x_dep_head);
670 			kctx->jsctx_queue[i][j].runnable_tree = RB_ROOT;
671 		}
672 	}
673 
674 	return 0;
675 }
676 
677 void kbasep_js_kctx_term(struct kbase_context *kctx)
678 {
679 	struct kbase_device *kbdev;
680 	struct kbasep_js_kctx_info *js_kctx_info;
681 	unsigned int js;
682 	bool update_ctx_count = false;
683 	unsigned long flags;
684 	CSTD_UNUSED(js_kctx_info);
685 
686 	KBASE_DEBUG_ASSERT(kctx != NULL);
687 
688 	kbdev = kctx->kbdev;
689 	KBASE_DEBUG_ASSERT(kbdev != NULL);
690 
691 	js_kctx_info = &kctx->jctx.sched_info;
692 
693 	/* The caller must de-register all jobs before calling this */
694 	KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
695 	KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs == 0);
696 
697 	mutex_lock(&kbdev->js_data.queue_mutex);
698 	mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
699 
700 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
701 	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++)
702 		list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
703 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
704 
705 	if (kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF)) {
706 		WARN_ON(atomic_read(&kbdev->js_data.nr_contexts_runnable) <= 0);
707 		atomic_dec(&kbdev->js_data.nr_contexts_runnable);
708 		update_ctx_count = true;
709 		kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
710 	}
711 
712 	mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
713 	mutex_unlock(&kbdev->js_data.queue_mutex);
714 
715 	if (update_ctx_count) {
716 		mutex_lock(&kbdev->js_data.runpool_mutex);
717 		kbase_backend_ctx_count_changed(kbdev);
718 		mutex_unlock(&kbdev->js_data.runpool_mutex);
719 	}
720 
721 	kbase_ctx_sched_remove_ctx(kctx);
722 }
723 
724 /*
725  * Priority blocking management functions
726  */
727 
728 /* Should not normally use directly - use kbase_jsctx_slot_atom_pulled_dec() instead */
729 static void kbase_jsctx_slot_prio_blocked_clear(struct kbase_context *kctx, unsigned int js,
730 						int sched_prio)
731 {
732 	struct kbase_jsctx_slot_tracking *slot_tracking =
733 		&kctx->slot_tracking[js];
734 
735 	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
736 
737 	slot_tracking->blocked &= ~(((kbase_js_prio_bitmap_t)1) << sched_prio);
738 	KBASE_KTRACE_ADD_JM_SLOT_INFO(kctx->kbdev, JS_SLOT_PRIO_UNBLOCKED, kctx,
739 				      NULL, 0, js, (unsigned int)sched_prio);
740 }
741 
742 static int kbase_jsctx_slot_atoms_pulled(struct kbase_context *kctx, unsigned int js)
743 {
744 	return atomic_read(&kctx->slot_tracking[js].atoms_pulled);
745 }
746 
747 /*
748  * A priority level on a slot is blocked when:
749  * - that priority level is blocked
750  * - or, any higher priority level is blocked
751  */
752 static bool kbase_jsctx_slot_prio_is_blocked(struct kbase_context *kctx, unsigned int js,
753 					     int sched_prio)
754 {
755 	struct kbase_jsctx_slot_tracking *slot_tracking =
756 		&kctx->slot_tracking[js];
757 	kbase_js_prio_bitmap_t prio_bit, higher_prios_mask;
758 
759 	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
760 
761 	/* done in two separate shifts to prevent future undefined behavior
762 	 * should the number of priority levels == (bit width of the type)
763 	 */
764 	prio_bit = (((kbase_js_prio_bitmap_t)1) << sched_prio);
765 	/* all bits of sched_prio or higher, with sched_prio = 0 being the
766 	 * highest priority
767 	 */
768 	higher_prios_mask = (prio_bit << 1) - 1u;
769 	return (slot_tracking->blocked & higher_prios_mask) != 0u;
770 }
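/*
 * Worked example (editor's note): with sched_prio == 2, prio_bit == 0b100
 * and higher_prios_mask == (0b100 << 1) - 1 == 0b111, i.e. bits 0..2.
 * Since numerically lower levels are higher priority, level 2 is reported
 * blocked if any of levels 0, 1 or 2 is blocked.
 */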
771 
772 /**
773  * kbase_jsctx_slot_atom_pulled_inc - Increase counts of atoms that have been
774  *                                    pulled for a slot from a ctx, based on
775  *                                    this atom
776  * @kctx: kbase context
777  * @katom: atom pulled
778  *
779  * Manages counts of atoms pulled (including per-priority-level counts), for
780  * later determining when a ctx can become unblocked on a slot.
781  *
782  * Once a slot has been blocked at @katom's priority level, it should not be
783  * pulled from, hence this function should not be called in that case.
784  *
785  * The return value is to aid tracking of when @kctx becomes runnable.
786  *
787  * Return: new total count of atoms pulled from all slots on @kctx
788  */
789 static int kbase_jsctx_slot_atom_pulled_inc(struct kbase_context *kctx,
790 					    const struct kbase_jd_atom *katom)
791 {
792 	unsigned int js = katom->slot_nr;
793 	int sched_prio = katom->sched_priority;
794 	struct kbase_jsctx_slot_tracking *slot_tracking =
795 		&kctx->slot_tracking[js];
796 	int nr_atoms_pulled;
797 
798 	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
799 
800 	WARN(kbase_jsctx_slot_prio_is_blocked(kctx, js, sched_prio),
801 	     "Should not have pulled atoms for slot %u from a context that is blocked at priority %d or higher",
802 	     js, sched_prio);
803 
804 	nr_atoms_pulled = atomic_inc_return(&kctx->atoms_pulled_all_slots);
805 	atomic_inc(&slot_tracking->atoms_pulled);
806 	slot_tracking->atoms_pulled_pri[sched_prio]++;
807 
808 	return nr_atoms_pulled;
809 }
810 
811 /**
812  * kbase_jsctx_slot_atom_pulled_dec - Decrease counts of atoms that have been
813  *                                   pulled for a slot from a ctx, and
814  *                                   re-evaluate whether a context is blocked
815  *                                   on this slot
816  * @kctx: kbase context
817  * @katom: atom that has just been removed from a job slot
818  *
819  * @kctx can become unblocked on a slot for a priority level when it no longer
820  * has any pulled atoms at that priority level on that slot, and all higher
821  * (numerically lower) priority levels are also unblocked for @kctx on that
822  * slot. The latter condition is to retain priority ordering within @kctx.
823  *
824  * Return: true if the slot was previously blocked but has now become unblocked
825  * at @katom's priority level, false otherwise.
826  */
827 static bool kbase_jsctx_slot_atom_pulled_dec(struct kbase_context *kctx,
828 					     const struct kbase_jd_atom *katom)
829 {
830 	unsigned int js = katom->slot_nr;
831 	int sched_prio = katom->sched_priority;
832 	int atoms_pulled_pri;
833 	struct kbase_jsctx_slot_tracking *slot_tracking =
834 		&kctx->slot_tracking[js];
835 	bool slot_prio_became_unblocked = false;
836 
837 	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
838 
839 	atomic_dec(&kctx->atoms_pulled_all_slots);
840 	atomic_dec(&slot_tracking->atoms_pulled);
841 
842 	atoms_pulled_pri = --(slot_tracking->atoms_pulled_pri[sched_prio]);
843 
844 	/* We can safely clear this priority level's blocked status even if
845 	 * higher priority levels are still blocked: a subsequent query to
846 	 * kbase_jsctx_slot_prio_is_blocked() will still return true
847 	 */
848 	if (!atoms_pulled_pri &&
849 	    kbase_jsctx_slot_prio_is_blocked(kctx, js, sched_prio)) {
850 		kbase_jsctx_slot_prio_blocked_clear(kctx, js, sched_prio);
851 
852 		if (!kbase_jsctx_slot_prio_is_blocked(kctx, js, sched_prio))
853 			slot_prio_became_unblocked = true;
854 	}
855 
856 	if (slot_prio_became_unblocked)
857 		KBASE_KTRACE_ADD_JM_SLOT_INFO(kctx->kbdev,
858 					      JS_SLOT_PRIO_AND_HIGHER_UNBLOCKED,
859 					      kctx, katom, katom->jc, js,
860 					      (unsigned int)sched_prio);
861 
862 	return slot_prio_became_unblocked;
863 }
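/*
 * Worked example (editor's note): suppose a context is blocked on a slot
 * at both the HIGH and MED levels. When its last pulled MED atom is
 * removed, the MED bit is cleared, but kbase_jsctx_slot_prio_is_blocked()
 * still reports MED as blocked through the HIGH bit, so this returns
 * false. Only after HIGH has also been cleared can a decrement at that
 * level return true.
 */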
864 
865 /**
866  * kbase_js_ctx_list_add_pullable_nolock - Variant of
867  *                                         kbase_js_ctx_list_add_pullable()
868  *                                         where the caller must hold
869  *                                         hwaccess_lock
870  * @kbdev:  Device pointer
871  * @kctx:   Context to add to queue
872  * @js:     Job slot to use
873  *
874  * Caller must hold hwaccess_lock
875  *
876  * Return: true if caller should call kbase_backend_ctx_count_changed()
877  */
878 static bool kbase_js_ctx_list_add_pullable_nolock(struct kbase_device *kbdev,
879 						  struct kbase_context *kctx, unsigned int js)
880 {
881 	bool ret = false;
882 
883 	lockdep_assert_held(&kbdev->hwaccess_lock);
884 	dev_dbg(kbdev->dev, "Add pullable tail kctx %pK (s:%u)\n", (void *)kctx, js);
885 
886 	if (!list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]))
887 		list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
888 
889 	list_add_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
890 			&kbdev->js_data.ctx_list_pullable[js][kctx->priority]);
891 
892 	if (!kctx->slots_pullable) {
893 		kbdev->js_data.nr_contexts_pullable++;
894 		ret = true;
895 		if (!kbase_jsctx_atoms_pulled(kctx)) {
896 			WARN_ON(kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
897 			kbase_ctx_flag_set(kctx, KCTX_RUNNABLE_REF);
898 			atomic_inc(&kbdev->js_data.nr_contexts_runnable);
899 		}
900 	}
901 	kctx->slots_pullable |= (1 << js);
902 
903 	return ret;
904 }
905 
906 /**
907  * kbase_js_ctx_list_add_pullable_head_nolock - Variant of
908  *                                              kbase_js_ctx_list_add_pullable_head()
909  *                                              where the caller must hold
910  *                                              hwaccess_lock
911  * @kbdev:  Device pointer
912  * @kctx:   Context to add to queue
913  * @js:     Job slot to use
914  *
915  * Caller must hold hwaccess_lock
916  *
917  * Return:  true if caller should call kbase_backend_ctx_count_changed()
918  */
919 static bool kbase_js_ctx_list_add_pullable_head_nolock(struct kbase_device *kbdev,
920 						       struct kbase_context *kctx, unsigned int js)
921 {
922 	bool ret = false;
923 
924 	lockdep_assert_held(&kbdev->hwaccess_lock);
925 	dev_dbg(kbdev->dev, "Add pullable head kctx %pK (s:%u)\n", (void *)kctx, js);
926 
927 	if (!list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]))
928 		list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
929 
930 	list_add(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
931 			&kbdev->js_data.ctx_list_pullable[js][kctx->priority]);
932 
933 	if (!kctx->slots_pullable) {
934 		kbdev->js_data.nr_contexts_pullable++;
935 		ret = true;
936 		if (!kbase_jsctx_atoms_pulled(kctx)) {
937 			WARN_ON(kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
938 			kbase_ctx_flag_set(kctx, KCTX_RUNNABLE_REF);
939 			atomic_inc(&kbdev->js_data.nr_contexts_runnable);
940 		}
941 	}
942 	kctx->slots_pullable |= (1 << js);
943 
944 	return ret;
945 }
946 
947 /**
948  * kbase_js_ctx_list_add_pullable_head - Add context to the head of the
949  *                                       per-slot pullable context queue
950  * @kbdev:  Device pointer
951  * @kctx:   Context to add to queue
952  * @js:     Job slot to use
953  *
954  * If the context is on either the pullable or unpullable queues, then it is
955  * removed before being added to the head.
956  *
957  * This function should be used when a context has been scheduled, but no jobs
958  * can currently be pulled from it.
959  *
960  * Return:  true if caller should call kbase_backend_ctx_count_changed()
961  */
962 static bool kbase_js_ctx_list_add_pullable_head(struct kbase_device *kbdev,
963 						struct kbase_context *kctx, unsigned int js)
964 {
965 	bool ret;
966 	unsigned long flags;
967 
968 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
969 	ret = kbase_js_ctx_list_add_pullable_head_nolock(kbdev, kctx, js);
970 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
971 
972 	return ret;
973 }
974 
975 /**
976  * kbase_js_ctx_list_add_unpullable_nolock - Add context to the tail of the
977  *                                           per-slot unpullable context queue
978  * @kbdev:  Device pointer
979  * @kctx:   Context to add to queue
980  * @js:     Job slot to use
981  *
982  * The context must already be on the per-slot pullable queue. It will be
983  * removed from the pullable queue before being added to the unpullable queue.
984  *
985  * This function should be used when a context has been pulled from, and there
986  * are no jobs remaining on the specified slot.
987  *
988  * Caller must hold hwaccess_lock
989  *
990  * Return:  true if caller should call kbase_backend_ctx_count_changed()
991  */
992 static bool kbase_js_ctx_list_add_unpullable_nolock(struct kbase_device *kbdev,
993 						    struct kbase_context *kctx, unsigned int js)
994 {
995 	bool ret = false;
996 
997 	lockdep_assert_held(&kbdev->hwaccess_lock);
998 	dev_dbg(kbdev->dev, "Add unpullable tail kctx %pK (s:%u)\n", (void *)kctx, js);
999 
1000 	list_move_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
1001 		&kbdev->js_data.ctx_list_unpullable[js][kctx->priority]);
1002 
1003 	if (kctx->slots_pullable == (1 << js)) {
1004 		kbdev->js_data.nr_contexts_pullable--;
1005 		ret = true;
1006 		if (!kbase_jsctx_atoms_pulled(kctx)) {
1007 			WARN_ON(!kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
1008 			kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
1009 			atomic_dec(&kbdev->js_data.nr_contexts_runnable);
1010 		}
1011 	}
1012 	kctx->slots_pullable &= ~(1 << js);
1013 
1014 	return ret;
1015 }
1016 
1017 /**
1018  * kbase_js_ctx_list_remove_nolock - Remove context from the per-slot pullable
1019  *                                   or unpullable context queues
1020  * @kbdev:  Device pointer
1021  * @kctx:   Context to remove from queue
1022  * @js:     Job slot to use
1023  *
1024  * The context must already be on one of the queues.
1025  *
1026  * This function should be used when a context has no jobs on the GPU, and no
1027  * jobs remaining for the specified slot.
1028  *
1029  * Caller must hold hwaccess_lock
1030  *
1031  * Return:  true if caller should call kbase_backend_ctx_count_changed()
1032  */
1033 static bool kbase_js_ctx_list_remove_nolock(struct kbase_device *kbdev, struct kbase_context *kctx,
1034 					    unsigned int js)
1035 {
1036 	bool ret = false;
1037 
1038 	lockdep_assert_held(&kbdev->hwaccess_lock);
1039 
1040 	WARN_ON(list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]));
1041 
1042 	list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
1043 
1044 	if (kctx->slots_pullable == (1 << js)) {
1045 		kbdev->js_data.nr_contexts_pullable--;
1046 		ret = true;
1047 		if (!kbase_jsctx_atoms_pulled(kctx)) {
1048 			WARN_ON(!kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
1049 			kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
1050 			atomic_dec(&kbdev->js_data.nr_contexts_runnable);
1051 		}
1052 	}
1053 	kctx->slots_pullable &= ~(1 << js);
1054 
1055 	return ret;
1056 }
1057 
1058 /**
1059  * kbase_js_ctx_list_pop_head_nolock - Variant of kbase_js_ctx_list_pop_head()
1060  *                                     where the caller must hold
1061  *                                     hwaccess_lock
1062  * @kbdev:  Device pointer
1063  * @js:     Job slot to use
1064  *
1065  * Caller must hold hwaccess_lock
1066  *
1067  * Return:  Context to use for specified slot.
1068  *          NULL if no contexts present for specified slot
1069  */
1070 static struct kbase_context *kbase_js_ctx_list_pop_head_nolock(struct kbase_device *kbdev,
1071 							       unsigned int js)
1072 {
1073 	struct kbase_context *kctx;
1074 	int i;
1075 
1076 	lockdep_assert_held(&kbdev->hwaccess_lock);
1077 
1078 	for (i = KBASE_JS_ATOM_SCHED_PRIO_FIRST; i < KBASE_JS_ATOM_SCHED_PRIO_COUNT; i++) {
1079 		if (list_empty(&kbdev->js_data.ctx_list_pullable[js][i]))
1080 			continue;
1081 
1082 		kctx = list_entry(kbdev->js_data.ctx_list_pullable[js][i].next,
1083 				struct kbase_context,
1084 				jctx.sched_info.ctx.ctx_list_entry[js]);
1085 
1086 		list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
1087 		dev_dbg(kbdev->dev, "Popped %pK from the pullable queue (s:%u)\n", (void *)kctx,
1088 			js);
1089 		return kctx;
1090 	}
1091 	return NULL;
1092 }
1093 
1094 /**
1095  * kbase_js_ctx_list_pop_head - Pop the head context off the per-slot pullable
1096  *                              queue.
1097  * @kbdev:  Device pointer
1098  * @js:     Job slot to use
1099  *
1100  * Return:  Context to use for specified slot.
1101  *          NULL if no contexts present for specified slot
1102  */
1103 static struct kbase_context *kbase_js_ctx_list_pop_head(struct kbase_device *kbdev, unsigned int js)
1104 {
1105 	struct kbase_context *kctx;
1106 	unsigned long flags;
1107 
1108 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
1109 	kctx = kbase_js_ctx_list_pop_head_nolock(kbdev, js);
1110 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
1111 
1112 	return kctx;
1113 }
1114 
1115 /**
1116  * kbase_js_ctx_pullable - Return if a context can be pulled from on the
1117  *                         specified slot
1118  * @kctx:          Context pointer
1119  * @js:            Job slot to use
1120  * @is_scheduled:  true if the context is currently scheduled
1121  *
1122  * Caller must hold hwaccess_lock
1123  *
1124  * Return:         true if context can be pulled from on specified slot
1125  *                 false otherwise
1126  */
1127 static bool kbase_js_ctx_pullable(struct kbase_context *kctx, unsigned int js, bool is_scheduled)
1128 {
1129 	struct kbasep_js_device_data *js_devdata;
1130 	struct kbase_jd_atom *katom;
1131 	struct kbase_device *kbdev = kctx->kbdev;
1132 
1133 	lockdep_assert_held(&kbdev->hwaccess_lock);
1134 
1135 	js_devdata = &kbdev->js_data;
1136 
1137 	if (is_scheduled) {
1138 		if (!kbasep_js_is_submit_allowed(js_devdata, kctx)) {
1139 			dev_dbg(kbdev->dev, "JS: No submit allowed for kctx %pK\n",
1140 				(void *)kctx);
1141 			return false;
1142 		}
1143 	}
1144 	katom = jsctx_rb_peek(kctx, js);
1145 	if (!katom) {
1146 		dev_dbg(kbdev->dev, "JS: No pullable atom in kctx %pK (s:%u)\n", (void *)kctx, js);
1147 		return false; /* No pullable atoms */
1148 	}
1149 	if (kbase_jsctx_slot_prio_is_blocked(kctx, js, katom->sched_priority)) {
1150 		KBASE_KTRACE_ADD_JM_SLOT_INFO(
1151 			kctx->kbdev, JS_SLOT_PRIO_IS_BLOCKED, kctx, katom,
1152 			katom->jc, js, (unsigned int)katom->sched_priority);
1153 		dev_dbg(kbdev->dev,
1154 			"JS: kctx %pK is blocked from submitting atoms at priority %d and lower (s:%u)\n",
1155 			(void *)kctx, katom->sched_priority, js);
1156 		return false;
1157 	}
1158 	if (atomic_read(&katom->blocked)) {
1159 		dev_dbg(kbdev->dev, "JS: Atom %pK is blocked in js_ctx_pullable\n",
1160 			(void *)katom);
1161 		return false; /* next atom blocked */
1162 	}
1163 	if (kbase_js_atom_blocked_on_x_dep(katom)) {
1164 		if (katom->x_pre_dep->gpu_rb_state ==
1165 				KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB ||
1166 				katom->x_pre_dep->will_fail_event_code) {
1167 			dev_dbg(kbdev->dev,
1168 				"JS: X pre-dep %pK is not present in slot FIFO or will fail\n",
1169 				(void *)katom->x_pre_dep);
1170 			return false;
1171 		}
1172 		if ((katom->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER) &&
1173 			kbase_backend_nr_atoms_on_slot(kctx->kbdev, js)) {
1174 			dev_dbg(kbdev->dev,
1175 				"JS: Atom %pK has cross-slot fail dependency and atoms on slot (s:%u)\n",
1176 				(void *)katom, js);
1177 			return false;
1178 		}
1179 	}
1180 
1181 	dev_dbg(kbdev->dev, "JS: Atom %pK is pullable in kctx %pK (s:%u)\n", (void *)katom,
1182 		(void *)kctx, js);
1183 
1184 	return true;
1185 }
1186 
1187 static bool kbase_js_dep_validate(struct kbase_context *kctx,
1188 				struct kbase_jd_atom *katom)
1189 {
1190 	struct kbase_device *kbdev = kctx->kbdev;
1191 	bool ret = true;
1192 	bool has_dep = false, has_x_dep = false;
1193 	unsigned int js = kbase_js_get_slot(kbdev, katom);
1194 	int prio = katom->sched_priority;
1195 	int i;
1196 
1197 	for (i = 0; i < 2; i++) {
1198 		struct kbase_jd_atom *dep_atom = katom->dep[i].atom;
1199 
1200 		if (dep_atom) {
1201 			unsigned int dep_js = kbase_js_get_slot(kbdev, dep_atom);
1202 			int dep_prio = dep_atom->sched_priority;
1203 
1204 			dev_dbg(kbdev->dev,
1205 				"Checking dep %d of atom %pK (s:%d) on %pK (s:%d)\n",
1206 				i, (void *)katom, js, (void *)dep_atom, dep_js);
1207 
1208 			/* Dependent atom must already have been submitted */
1209 			if (!(dep_atom->atom_flags &
1210 					KBASE_KATOM_FLAG_JSCTX_IN_TREE)) {
1211 				dev_dbg(kbdev->dev,
1212 					"Blocker not submitted yet\n");
1213 				ret = false;
1214 				break;
1215 			}
1216 
1217 			/* Dependencies with different priorities can't
1218 			 * be represented in the ringbuffer
1219 			 */
1220 			if (prio != dep_prio) {
1221 				dev_dbg(kbdev->dev,
1222 					"Different atom priorities\n");
1223 				ret = false;
1224 				break;
1225 			}
1226 
1227 			if (js == dep_js) {
1228 				/* Only one same-slot dependency can be
1229 				 * represented in the ringbuffer
1230 				 */
1231 				if (has_dep) {
1232 					dev_dbg(kbdev->dev,
1233 						"Too many same-slot deps\n");
1234 					ret = false;
1235 					break;
1236 				}
1237 				/* Each dependee atom can only have one
1238 				 * same-slot dependency
1239 				 */
1240 				if (dep_atom->post_dep) {
1241 					dev_dbg(kbdev->dev,
1242 						"Too many same-slot successors\n");
1243 					ret = false;
1244 					break;
1245 				}
1246 				has_dep = true;
1247 			} else {
1248 				/* Only one cross-slot dependency can be
1249 				 * represented in the ringbuffer
1250 				 */
1251 				if (has_x_dep) {
1252 					dev_dbg(kbdev->dev,
1253 						"Too many cross-slot deps\n");
1254 					ret = false;
1255 					break;
1256 				}
1257 				/* Each dependee atom can only have one
1258 				 * cross-slot dependency
1259 				 */
1260 				if (dep_atom->x_post_dep) {
1261 					dev_dbg(kbdev->dev,
1262 						"Too many cross-slot successors\n");
1263 					ret = false;
1264 					break;
1265 				}
1266 				/* The dependee atom can not already be in the
1267 				 * HW access ringbuffer
1268 				 */
1269 				if (dep_atom->gpu_rb_state !=
1270 					KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
1271 					dev_dbg(kbdev->dev,
1272 						"Blocker already in ringbuffer (state:%d)\n",
1273 						dep_atom->gpu_rb_state);
1274 					ret = false;
1275 					break;
1276 				}
1277 				/* The dependee atom can not already have
1278 				 * completed
1279 				 */
1280 				if (dep_atom->status !=
1281 						KBASE_JD_ATOM_STATE_IN_JS) {
1282 					dev_dbg(kbdev->dev,
1283 						"Blocker already completed (status:%d)\n",
1284 						dep_atom->status);
1285 					ret = false;
1286 					break;
1287 				}
1288 
1289 				has_x_dep = true;
1290 			}
1291 
1292 			/* Dependency can be represented in ringbuffers */
1293 		}
1294 	}
1295 
1296 	/* If dependencies can be represented by ringbuffer then clear them from
1297 	 * atom structure
1298 	 */
1299 	if (ret) {
1300 		for (i = 0; i < 2; i++) {
1301 			struct kbase_jd_atom *dep_atom = katom->dep[i].atom;
1302 
1303 			if (dep_atom) {
1304 				int dep_js = kbase_js_get_slot(kbdev, dep_atom);
1305 
1306 				dev_dbg(kbdev->dev,
1307 					"Clearing dep %d of atom %pK (s:%d) on %pK (s:%d)\n",
1308 					i, (void *)katom, js, (void *)dep_atom,
1309 					dep_js);
1310 
1311 				if ((js != dep_js) &&
1312 					(dep_atom->status !=
1313 						KBASE_JD_ATOM_STATE_COMPLETED)
1314 					&& (dep_atom->status !=
1315 					KBASE_JD_ATOM_STATE_HW_COMPLETED)
1316 					&& (dep_atom->status !=
1317 						KBASE_JD_ATOM_STATE_UNUSED)) {
1318 
1319 					katom->atom_flags |=
1320 						KBASE_KATOM_FLAG_X_DEP_BLOCKED;
1321 
1322 					dev_dbg(kbdev->dev, "Set X_DEP flag on atom %pK\n",
1323 						(void *)katom);
1324 
1325 					katom->x_pre_dep = dep_atom;
1326 					dep_atom->x_post_dep = katom;
1327 					if (kbase_jd_katom_dep_type(
1328 							&katom->dep[i]) ==
1329 							BASE_JD_DEP_TYPE_DATA)
1330 						katom->atom_flags |=
1331 						KBASE_KATOM_FLAG_FAIL_BLOCKER;
1332 				}
1333 				if ((kbase_jd_katom_dep_type(&katom->dep[i])
1334 						== BASE_JD_DEP_TYPE_DATA) &&
1335 						(js == dep_js)) {
1336 					katom->pre_dep = dep_atom;
1337 					dep_atom->post_dep = katom;
1338 				}
1339 
1340 				list_del(&katom->dep_item[i]);
1341 				kbase_jd_katom_dep_clear(&katom->dep[i]);
1342 			}
1343 		}
1344 	} else {
1345 		dev_dbg(kbdev->dev,
1346 			"Deps of atom %pK (s:%d) could not be represented\n",
1347 			(void *)katom, js);
1348 	}
1349 
1350 	return ret;
1351 }
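/*
 * Worked example (editor's note): an atom with one same-slot data
 * dependency plus one cross-slot data dependency at the same priority is
 * representable: the same-slot edge becomes pre_dep/post_dep and the
 * cross-slot edge becomes x_pre_dep/x_post_dep with
 * KBASE_KATOM_FLAG_X_DEP_BLOCKED (and KBASE_KATOM_FLAG_FAIL_BLOCKER for
 * data dependencies) set. Two same-slot dependencies, mismatched
 * priorities, or a blocker already in the HW ringbuffer cannot be
 * represented, and kbase_js_dep_validate() returns false so the atom
 * stays queued in the JD layer.
 */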
1352 
1353 void kbase_js_set_ctx_priority(struct kbase_context *kctx, int new_priority)
1354 {
1355 	struct kbase_device *kbdev = kctx->kbdev;
1356 	unsigned int js;
1357 
1358 	lockdep_assert_held(&kbdev->hwaccess_lock);
1359 
1360 	/* Move kctx to the pullable/unpullable list as per the new priority */
1361 	if (new_priority != kctx->priority) {
1362 		for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
1363 			if (kctx->slots_pullable & (1 << js))
1364 				list_move_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
1365 					&kbdev->js_data.ctx_list_pullable[js][new_priority]);
1366 			else
1367 				list_move_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
1368 					&kbdev->js_data.ctx_list_unpullable[js][new_priority]);
1369 		}
1370 
1371 		kctx->priority = new_priority;
1372 	}
1373 }
1374 
1375 void kbase_js_update_ctx_priority(struct kbase_context *kctx)
1376 {
1377 	struct kbase_device *kbdev = kctx->kbdev;
1378 	int new_priority = KBASE_JS_ATOM_SCHED_PRIO_LOW;
1379 	int prio;
1380 
1381 	lockdep_assert_held(&kbdev->hwaccess_lock);
1382 
1383 	if (kbdev->js_ctx_scheduling_mode == KBASE_JS_SYSTEM_PRIORITY_MODE) {
1384 		/* Determine the new priority for context, as per the priority
1385 		 * of currently in-use atoms.
1386 		 */
1387 		for (prio = KBASE_JS_ATOM_SCHED_PRIO_FIRST;
1388 			prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
1389 			if (kctx->atoms_count[prio]) {
1390 				new_priority = prio;
1391 				break;
1392 			}
1393 		}
1394 	}
1395 
1396 	kbase_js_set_ctx_priority(kctx, new_priority);
1397 }
1398 KBASE_EXPORT_TEST_API(kbase_js_update_ctx_priority);
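/*
 * Worked example (editor's note): in KBASE_JS_SYSTEM_PRIORITY_MODE a
 * context inherits the priority of its highest-priority in-flight atom.
 * A context holding one REALTIME atom among many LOW atoms is therefore
 * treated as a REALTIME context; once that atom completes and its count
 * drops to zero, kbase_js_update_ctx_priority() moves the context back
 * to the LOW lists.
 */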
1399 
1400 /**
1401  * js_add_start_rp() - Add an atom that starts a renderpass to the job scheduler
1402  * @start_katom: Pointer to the atom to be added.
1403  * Return: 0 if successful or a negative value on failure.
1404  */
1405 static int js_add_start_rp(struct kbase_jd_atom *const start_katom)
1406 {
1407 	struct kbase_context *const kctx = start_katom->kctx;
1408 	struct kbase_jd_renderpass *rp;
1409 	struct kbase_device *const kbdev = kctx->kbdev;
1410 	unsigned long flags;
1411 
1412 	lockdep_assert_held(&kctx->jctx.lock);
1413 
1414 	if (WARN_ON(!(start_katom->core_req & BASE_JD_REQ_START_RENDERPASS)))
1415 		return -EINVAL;
1416 
1417 	if (start_katom->core_req & BASE_JD_REQ_END_RENDERPASS)
1418 		return -EINVAL;
1419 
1420 	compiletime_assert((1ull << (sizeof(start_katom->renderpass_id) * 8)) <=
1421 			ARRAY_SIZE(kctx->jctx.renderpasses),
1422 			"Should check invalid access to renderpasses");
1423 
1424 	rp = &kctx->jctx.renderpasses[start_katom->renderpass_id];
1425 
1426 	if (rp->state != KBASE_JD_RP_COMPLETE)
1427 		return -EINVAL;
1428 
1429 	dev_dbg(kctx->kbdev->dev, "JS add start atom %pK of RP %d\n",
1430 		(void *)start_katom, start_katom->renderpass_id);
1431 
1432 	/* The following members are read when updating the job slot
1433 	 * ringbuffer/fifo therefore they require additional locking.
1434 	 */
1435 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
1436 
1437 	rp->state = KBASE_JD_RP_START;
1438 	rp->start_katom = start_katom;
1439 	rp->end_katom = NULL;
1440 	INIT_LIST_HEAD(&rp->oom_reg_list);
1441 
1442 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
1443 
1444 	return 0;
1445 }
1446 
1447 /**
1448  * js_add_end_rp() - Add an atom that ends a renderpass to the job scheduler
1449  * @end_katom: Pointer to the atom to be added.
1450  * Return: 0 if successful or a negative value on failure.
1451  */
1452 static int js_add_end_rp(struct kbase_jd_atom *const end_katom)
1453 {
1454 	struct kbase_context *const kctx = end_katom->kctx;
1455 	struct kbase_jd_renderpass *rp;
1456 	struct kbase_device *const kbdev = kctx->kbdev;
1457 
1458 	lockdep_assert_held(&kctx->jctx.lock);
1459 
1460 	if (WARN_ON(!(end_katom->core_req & BASE_JD_REQ_END_RENDERPASS)))
1461 		return -EINVAL;
1462 
1463 	if (end_katom->core_req & BASE_JD_REQ_START_RENDERPASS)
1464 		return -EINVAL;
1465 
1466 	compiletime_assert((1ull << (sizeof(end_katom->renderpass_id) * 8)) <=
1467 			ARRAY_SIZE(kctx->jctx.renderpasses),
1468 			"Should check invalid access to renderpasses");
1469 
1470 	rp = &kctx->jctx.renderpasses[end_katom->renderpass_id];
1471 
1472 	dev_dbg(kbdev->dev, "JS add end atom %pK in state %d of RP %d\n",
1473 		(void *)end_katom, (int)rp->state, end_katom->renderpass_id);
1474 
1475 	if (rp->state == KBASE_JD_RP_COMPLETE)
1476 		return -EINVAL;
1477 
1478 	if (rp->end_katom == NULL) {
1479 		/* We can't be in a retry state until the fragment job chain
1480 		 * has completed.
1481 		 */
1482 		unsigned long flags;
1483 
1484 		WARN_ON(rp->state == KBASE_JD_RP_RETRY);
1485 		WARN_ON(rp->state == KBASE_JD_RP_RETRY_PEND_OOM);
1486 		WARN_ON(rp->state == KBASE_JD_RP_RETRY_OOM);
1487 
1488 		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
1489 		rp->end_katom = end_katom;
1490 		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
1491 	} else
1492 		WARN_ON(rp->end_katom != end_katom);
1493 
1494 	return 0;
1495 }
1496 
1497 bool kbasep_js_add_job(struct kbase_context *kctx,
1498 		struct kbase_jd_atom *atom)
1499 {
1500 	unsigned long flags;
1501 	struct kbasep_js_kctx_info *js_kctx_info;
1502 	struct kbase_device *kbdev;
1503 	struct kbasep_js_device_data *js_devdata;
1504 	int err = 0;
1505 
1506 	bool enqueue_required = false;
1507 	bool timer_sync = false;
1508 
1509 	KBASE_DEBUG_ASSERT(kctx != NULL);
1510 	KBASE_DEBUG_ASSERT(atom != NULL);
1511 	lockdep_assert_held(&kctx->jctx.lock);
1512 
1513 	kbdev = kctx->kbdev;
1514 	js_devdata = &kbdev->js_data;
1515 	js_kctx_info = &kctx->jctx.sched_info;
1516 
1517 	mutex_lock(&js_devdata->queue_mutex);
1518 	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
1519 
1520 	if (atom->core_req & BASE_JD_REQ_START_RENDERPASS)
1521 		err = js_add_start_rp(atom);
1522 	else if (atom->core_req & BASE_JD_REQ_END_RENDERPASS)
1523 		err = js_add_end_rp(atom);
1524 
1525 	if (err < 0) {
1526 		atom->event_code = BASE_JD_EVENT_JOB_INVALID;
1527 		atom->status = KBASE_JD_ATOM_STATE_COMPLETED;
1528 		goto out_unlock;
1529 	}
1530 
1531 	/*
1532 	 * Begin Runpool transaction
1533 	 */
1534 	mutex_lock(&js_devdata->runpool_mutex);
1535 
1536 	/* Refcount ctx.nr_jobs */
1537 	KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs < U32_MAX);
1538 	++(js_kctx_info->ctx.nr_jobs);
1539 	dev_dbg(kbdev->dev, "Add atom %pK to kctx %pK; now %d in ctx\n",
1540 		(void *)atom, (void *)kctx, js_kctx_info->ctx.nr_jobs);
1541 
1542 	/* Lock for state available during IRQ */
1543 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
1544 
1545 	if (++kctx->atoms_count[atom->sched_priority] == 1)
1546 		kbase_js_update_ctx_priority(kctx);
1547 
1548 	if (!kbase_js_dep_validate(kctx, atom)) {
1549 		/* Dependencies could not be represented */
1550 		--(js_kctx_info->ctx.nr_jobs);
1551 		dev_dbg(kbdev->dev,
1552 			"Remove atom %pK from kctx %pK; now %d in ctx\n",
1553 			(void *)atom, (void *)kctx, js_kctx_info->ctx.nr_jobs);
1554 
1555 		/* Setting atom status back to queued as it still has unresolved
1556 		 * dependencies
1557 		 */
1558 		atom->status = KBASE_JD_ATOM_STATE_QUEUED;
1559 		dev_dbg(kbdev->dev, "Atom %pK status to queued\n", (void *)atom);
1560 
1561 		/* Undo the count, as the atom will get added again later, but
1562 		 * leave the context priority adjusted or boosted, in case this
1563 		 * was the first higher-priority atom received for this context.
1564 		 * This prevents a priority-inversion scenario where another
1565 		 * context with medium-priority atoms keeps getting scheduled
1566 		 * over this context, which holds both lower- and higher-priority
1567 		 * atoms but whose higher-priority atoms are blocked by
1568 		 * dependencies on the lower-priority ones. With the priority
1569 		 * boost, the high-priority atom gets to run at the earliest
1570 		 * opportunity.
1571 		 */
1572 		kctx->atoms_count[atom->sched_priority]--;
1573 
1574 		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
1575 		mutex_unlock(&js_devdata->runpool_mutex);
1576 
1577 		goto out_unlock;
1578 	}
1579 
1580 	enqueue_required = kbase_js_dep_resolved_submit(kctx, atom);
1581 
1582 	KBASE_KTRACE_ADD_JM_REFCOUNT(kbdev, JS_ADD_JOB, kctx, atom, atom->jc,
1583 				kbase_ktrace_get_ctx_refcnt(kctx));
1584 
1585 	/* Context Attribute Refcounting */
1586 	kbasep_js_ctx_attr_ctx_retain_atom(kbdev, kctx, atom);
1587 
1588 	if (enqueue_required) {
1589 		if (kbase_js_ctx_pullable(kctx, atom->slot_nr, false))
1590 			timer_sync = kbase_js_ctx_list_add_pullable_nolock(
1591 					kbdev, kctx, atom->slot_nr);
1592 		else
1593 			timer_sync = kbase_js_ctx_list_add_unpullable_nolock(
1594 					kbdev, kctx, atom->slot_nr);
1595 	}
1596 	/* If this context is active and the atom is the first on its slot,
1597 	 * kick the job manager to attempt to fast-start the atom
1598 	 */
1599 	if (enqueue_required && kctx ==
1600 			kbdev->hwaccess.active_kctx[atom->slot_nr])
1601 		kbase_jm_try_kick(kbdev, 1 << atom->slot_nr);
1602 
1603 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
1604 	if (timer_sync)
1605 		kbase_backend_ctx_count_changed(kbdev);
1606 	mutex_unlock(&js_devdata->runpool_mutex);
1607 	/* End runpool transaction */
1608 
1609 	if (!kbase_ctx_flag(kctx, KCTX_SCHEDULED)) {
1610 		if (kbase_ctx_flag(kctx, KCTX_DYING)) {
1611 			/* A job got added while/after kbase_job_zap_context()
1612 			 * was called on a non-scheduled context. Kill that job
1613 			 * by killing the context.
1614 			 */
1615 			kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx,
1616 					false);
1617 		} else if (js_kctx_info->ctx.nr_jobs == 1) {
1618 			/* Handle Refcount going from 0 to 1: schedule the
1619 			 * context on the Queue
1620 			 */
1621 			KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
1622 			dev_dbg(kbdev->dev, "JS: Enqueue Context %pK", kctx);
1623 
1624 			/* Queue was updated - caller must try to schedule the
1625 			 * head context
1626 			 */
1627 			WARN_ON(!enqueue_required);
1628 		}
1629 	}
1630 out_unlock:
1631 	dev_dbg(kbdev->dev, "Enqueue of kctx %pK is %srequired\n",
1632 		kctx, enqueue_required ? "" : "not ");
1633 
1634 	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
1635 
1636 	mutex_unlock(&js_devdata->queue_mutex);
1637 
1638 	return enqueue_required;
1639 }
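
/*
 * Example caller (a minimal sketch, not taken from this driver; the real
 * call site lives in the job dispatch path): a true return means the
 * context queue changed, so the caller should kick the scheduler.
 *
 *	if (kbasep_js_add_job(kctx, katom))
 *		kbase_js_sched_all(kctx->kbdev);
 */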
1640 
1641 void kbasep_js_remove_job(struct kbase_device *kbdev,
1642 		struct kbase_context *kctx, struct kbase_jd_atom *atom)
1643 {
1644 	struct kbasep_js_kctx_info *js_kctx_info;
1645 	unsigned long flags;
1646 
1647 	KBASE_DEBUG_ASSERT(kbdev != NULL);
1648 	KBASE_DEBUG_ASSERT(kctx != NULL);
1649 	KBASE_DEBUG_ASSERT(atom != NULL);
1650 
1651 	js_kctx_info = &kctx->jctx.sched_info;
1652 
1653 	KBASE_KTRACE_ADD_JM_REFCOUNT(kbdev, JS_REMOVE_JOB, kctx, atom, atom->jc,
1654 			kbase_ktrace_get_ctx_refcnt(kctx));
1655 
1656 	/* De-refcount ctx.nr_jobs */
1657 	KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs > 0);
1658 	--(js_kctx_info->ctx.nr_jobs);
1659 	dev_dbg(kbdev->dev,
1660 		"Remove atom %pK from kctx %pK; now %d in ctx\n",
1661 		(void *)atom, (void *)kctx, js_kctx_info->ctx.nr_jobs);
1662 
1663 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
1664 	if (--kctx->atoms_count[atom->sched_priority] == 0)
1665 		kbase_js_update_ctx_priority(kctx);
1666 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
1667 }
1668 
1669 bool kbasep_js_remove_cancelled_job(struct kbase_device *kbdev,
1670 		struct kbase_context *kctx, struct kbase_jd_atom *katom)
1671 {
1672 	unsigned long flags;
1673 	struct kbasep_js_atom_retained_state katom_retained_state;
1674 	bool attr_state_changed;
1675 
1676 	KBASE_DEBUG_ASSERT(kbdev != NULL);
1677 	KBASE_DEBUG_ASSERT(kctx != NULL);
1678 	KBASE_DEBUG_ASSERT(katom != NULL);
1679 
1680 	kbasep_js_atom_retained_state_copy(&katom_retained_state, katom);
1681 	kbasep_js_remove_job(kbdev, kctx, katom);
1682 
1683 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
1684 
1685 	/* The atom has 'finished' (will not be re-run), so no need to call
1686 	 * kbasep_js_has_atom_finished().
1687 	 *
1688 	 * This is because it returns false for soft-stopped atoms, but we
1689 	 * want to override that, because we're cancelling an atom regardless of
1690 	 * whether it was soft-stopped or not
1691 	 */
1692 	attr_state_changed = kbasep_js_ctx_attr_ctx_release_atom(kbdev, kctx,
1693 			&katom_retained_state);
1694 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
1695 
1696 	return attr_state_changed;
1697 }
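
/*
 * Usage sketch (an assumption based on the return value's meaning): a true
 * return indicates a context-attribute change, after which a caller would
 * typically try to schedule all contexts.
 *
 *	if (kbasep_js_remove_cancelled_job(kbdev, kctx, katom))
 *		kbase_js_sched_all(kbdev);
 */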
1698 
1699 /**
1700  * kbasep_js_run_jobs_after_ctx_and_atom_release - Try running more jobs after
1701  *                           releasing a context and/or atom
1702  * @kbdev:                   The kbase_device to operate on
1703  * @kctx:                    The kbase_context to operate on
1704  * @katom_retained_state:    Retained state from the atom
1705  * @runpool_ctx_attr_change: True if the runpool context attributes have changed
1706  *
1707  * This collates a set of actions that must happen whilst hwaccess_lock is held.
1708  *
1709  * This includes running more jobs when:
1710  * - The previously released kctx caused a ctx attribute change,
1711  * - The released atom caused a ctx attribute change,
1712  * - Slots were previously blocked due to affinity restrictions,
1713  * - Submission during IRQ handling failed.
1714  *
1715  * Return: %KBASEP_JS_RELEASE_RESULT_SCHED_ALL if context attributes were
1716  *         changed. The caller should try scheduling all contexts
1717  */
1718 static kbasep_js_release_result kbasep_js_run_jobs_after_ctx_and_atom_release(
1719 		struct kbase_device *kbdev,
1720 		struct kbase_context *kctx,
1721 		struct kbasep_js_atom_retained_state *katom_retained_state,
1722 		bool runpool_ctx_attr_change)
1723 {
1724 	struct kbasep_js_device_data *js_devdata;
1725 	kbasep_js_release_result result = 0;
1726 
1727 	KBASE_DEBUG_ASSERT(kbdev != NULL);
1728 	KBASE_DEBUG_ASSERT(kctx != NULL);
1729 	KBASE_DEBUG_ASSERT(katom_retained_state != NULL);
1730 	js_devdata = &kbdev->js_data;
1731 
1732 	lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
1733 	lockdep_assert_held(&js_devdata->runpool_mutex);
1734 	lockdep_assert_held(&kbdev->hwaccess_lock);
1735 
1736 	if (js_devdata->nr_user_contexts_running != 0 && runpool_ctx_attr_change) {
1737 		/* A change in runpool ctx attributes might mean we can
1738 		 * run more jobs than before
1739 		 */
1740 		result = KBASEP_JS_RELEASE_RESULT_SCHED_ALL;
1741 
1742 		KBASE_KTRACE_ADD_JM_SLOT(kbdev, JD_DONE_TRY_RUN_NEXT_JOB,
1743 					kctx, NULL, 0u, 0);
1744 	}
1745 	return result;
1746 }
1747 
1748 /**
1749  * kbasep_js_runpool_release_ctx_internal - Internal function to release the reference
1750  *                                          on a ctx and an atom's "retained state", only
1751  *                                          taking the runpool and AS transaction mutexes
1752  * @kbdev:                   The kbase_device to operate on
1753  * @kctx:                    The kbase_context to operate on
1754  * @katom_retained_state:    Retained state from the atom
1755  *
1756  * This also starts more jobs running in the case of a ctx-attribute state change.
1757  *
1758  * This does none of the followup actions for scheduling:
1759  * - It does not schedule in a new context
1760  * - It does not requeue or handle dying contexts
1761  *
1762  * For those tasks, just call kbasep_js_runpool_release_ctx() instead
1763  *
1764  * Has the following requirements:
1765  * - Context is scheduled in, and kctx->as_nr matches kctx_as_nr
1766  * - Context has a non-zero refcount
1767  * - Caller holds js_kctx_info->ctx.jsctx_mutex
1768  * - Caller holds js_devdata->runpool_mutex
1769  *
1770  * Return: A bitpattern of KBASEP_JS_RELEASE_RESULT_* flags indicating the
1771  *         result of releasing the context: whether the caller should try
1772  *         scheduling a new context and/or try scheduling all contexts.
1773  */
1774 static kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(
1775 		struct kbase_device *kbdev,
1776 		struct kbase_context *kctx,
1777 		struct kbasep_js_atom_retained_state *katom_retained_state)
1778 {
1779 	unsigned long flags;
1780 	struct kbasep_js_device_data *js_devdata;
1781 	struct kbasep_js_kctx_info *js_kctx_info;
1782 
1783 	kbasep_js_release_result release_result = 0u;
1784 	bool runpool_ctx_attr_change = false;
1785 	int kctx_as_nr;
1786 	int new_ref_count;
1787 	CSTD_UNUSED(kctx_as_nr);
1788 
1789 	KBASE_DEBUG_ASSERT(kbdev != NULL);
1790 	KBASE_DEBUG_ASSERT(kctx != NULL);
1791 	js_kctx_info = &kctx->jctx.sched_info;
1792 	js_devdata = &kbdev->js_data;
1793 
1794 	/* Ensure context really is scheduled in */
1795 	KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
1796 
1797 	kctx_as_nr = kctx->as_nr;
1798 	KBASE_DEBUG_ASSERT(kctx_as_nr != KBASEP_AS_NR_INVALID);
1799 	KBASE_DEBUG_ASSERT(atomic_read(&kctx->refcount) > 0);
1800 
1801 	/*
1802 	 * Transaction begins on AS and runpool_irq
1803 	 *
1804 	 * Assert about our calling contract
1805 	 */
1806 	mutex_lock(&kbdev->pm.lock);
1807 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
1808 
1809 	KBASE_DEBUG_ASSERT(kctx_as_nr == kctx->as_nr);
1810 	KBASE_DEBUG_ASSERT(atomic_read(&kctx->refcount) > 0);
1811 
1812 	/* Update refcount */
1813 	kbase_ctx_sched_release_ctx(kctx);
1814 	new_ref_count = atomic_read(&kctx->refcount);
1815 
1816 	/* Release the atom if it finished (i.e. wasn't soft-stopped) */
1817 	if (kbasep_js_has_atom_finished(katom_retained_state))
1818 		runpool_ctx_attr_change |= kbasep_js_ctx_attr_ctx_release_atom(
1819 				kbdev, kctx, katom_retained_state);
1820 
1821 	if (new_ref_count == 2 && kbase_ctx_flag(kctx, KCTX_PRIVILEGED) &&
1822 #ifdef CONFIG_MALI_ARBITER_SUPPORT
1823 			!kbase_pm_is_gpu_lost(kbdev) &&
1824 #endif
1825 			!kbase_pm_is_suspending(kbdev)) {
1826 		/* Context is kept scheduled into an address space even when
1827 		 * there are no jobs; in this case we have to handle the
1828 		 * situation where all jobs have been evicted from the GPU and
1829 		 * submission is disabled.
1830 		 *
1831 		 * At this point we re-enable submission to allow further jobs
1832 		 * to be executed
1833 		 */
1834 		kbasep_js_set_submit_allowed(js_devdata, kctx);
1835 	}
1836 
1837 	/* Make a set of checks to see if the context should be scheduled out.
1838 	 * Note that there'll always be at least 1 reference to the context
1839 	 * which was previously acquired by kbasep_js_schedule_ctx().
1840 	 */
1841 	if (new_ref_count == 1 &&
1842 		(!kbasep_js_is_submit_allowed(js_devdata, kctx) ||
1843 #ifdef CONFIG_MALI_ARBITER_SUPPORT
1844 			kbase_pm_is_gpu_lost(kbdev) ||
1845 #endif
1846 			kbase_pm_is_suspending(kbdev))) {
1847 		int num_slots = kbdev->gpu_props.num_job_slots;
1848 		int slot;
1849 
1850 		/* Last reference, and we've been told to remove this context
1851 		 * from the Run Pool
1852 		 */
1853 		dev_dbg(kbdev->dev, "JS: RunPool Remove Context %pK because refcount=%d, jobs=%d, allowed=%d",
1854 				kctx, new_ref_count, js_kctx_info->ctx.nr_jobs,
1855 				kbasep_js_is_submit_allowed(js_devdata, kctx));
1856 
1857 		KBASE_TLSTREAM_TL_NRET_AS_CTX(kbdev, &kbdev->as[kctx->as_nr], kctx);
1858 
1859 		kbase_backend_release_ctx_irq(kbdev, kctx);
1860 
1861 		for (slot = 0; slot < num_slots; slot++) {
1862 			if (kbdev->hwaccess.active_kctx[slot] == kctx) {
1863 				dev_dbg(kbdev->dev, "Marking kctx %pK as inactive (s:%d)\n",
1864 					(void *)kctx, slot);
1865 				kbdev->hwaccess.active_kctx[slot] = NULL;
1866 			}
1867 		}
1868 
1869 		/* Ctx Attribute handling
1870 		 *
1871 		 * Releasing atom attributes must happen either before this, or
1872 		 * after the KCTX_SCHEDULED flag is changed; otherwise we
1873 		 * double-decrement the attributes
1874 		 */
1875 		runpool_ctx_attr_change |=
1876 			kbasep_js_ctx_attr_runpool_release_ctx(kbdev, kctx);
1877 
1878 		/* Releasing the context and katom retained state can allow
1879 		 * more jobs to run
1880 		 */
1881 		release_result |=
1882 			kbasep_js_run_jobs_after_ctx_and_atom_release(kbdev,
1883 						kctx, katom_retained_state,
1884 						runpool_ctx_attr_change);
1885 
1886 		/*
1887 		 * Transaction ends on AS and runpool_irq:
1888 		 *
1889 		 * By this point, the AS-related data is now clear and ready
1890 		 * for re-use.
1891 		 *
1892 		 * Since releases only occur once for each previous successful
1893 		 * retain, and no more retains are allowed on this context, no
1894 		 * other thread will be operating in this code
1895 		 * whilst we are.
1896 		 */
1897 
1898 		/* Recalculate pullable status for all slots */
1899 		for (slot = 0; slot < num_slots; slot++) {
1900 			if (kbase_js_ctx_pullable(kctx, slot, false))
1901 				kbase_js_ctx_list_add_pullable_nolock(kbdev,
1902 						kctx, slot);
1903 		}
1904 
1905 		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
1906 
1907 		kbase_backend_release_ctx_noirq(kbdev, kctx);
1908 
1909 		mutex_unlock(&kbdev->pm.lock);
1910 
1911 		/* Note: Don't reuse kctx_as_nr now */
1912 
1913 		/* Synchronize with any timers */
1914 		kbase_backend_ctx_count_changed(kbdev);
1915 
1916 		/* update book-keeping info */
1917 		kbase_ctx_flag_clear(kctx, KCTX_SCHEDULED);
1918 		/* Signal any waiter that the context is not scheduled and so is
1919 		 * safe for termination - once the jsctx_mutex is also dropped,
1920 		 * and jobs have finished.
1921 		 */
1922 		wake_up(&js_kctx_info->ctx.is_scheduled_wait);
1923 
1924 		/* Queue an action to occur after we've dropped the lock */
1925 		release_result |= KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED |
1926 			KBASEP_JS_RELEASE_RESULT_SCHED_ALL;
1927 	} else {
1928 		kbasep_js_run_jobs_after_ctx_and_atom_release(kbdev, kctx,
1929 				katom_retained_state, runpool_ctx_attr_change);
1930 
1931 		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
1932 		mutex_unlock(&kbdev->pm.lock);
1933 	}
1934 
1935 	return release_result;
1936 }
1937 
1938 void kbasep_js_runpool_release_ctx_nolock(struct kbase_device *kbdev,
1939 						struct kbase_context *kctx)
1940 {
1941 	struct kbasep_js_atom_retained_state katom_retained_state;
1942 
1943 	/* Setup a dummy katom_retained_state */
1944 	kbasep_js_atom_retained_state_init_invalid(&katom_retained_state);
1945 
1946 	kbasep_js_runpool_release_ctx_internal(kbdev, kctx,
1947 							&katom_retained_state);
1948 }
1949 
1950 void kbasep_js_runpool_requeue_or_kill_ctx(struct kbase_device *kbdev,
1951 		struct kbase_context *kctx, bool has_pm_ref)
1952 {
1953 	KBASE_DEBUG_ASSERT(kbdev != NULL);
1954 	KBASE_DEBUG_ASSERT(kctx != NULL);
1955 
1956 	/* This is called if and only if you've detached the context from
1957 	 * the Runpool Queue, and not added it back to the Runpool
1958 	 */
1959 	KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
1960 
1961 	if (kbase_ctx_flag(kctx, KCTX_DYING)) {
1962 		/* Dying: don't requeue, but kill all jobs on the context. This
1963 		 * happens asynchronously
1964 		 */
1965 		dev_dbg(kbdev->dev,
1966 			"JS: ** Killing Context %pK on RunPool Remove **", kctx);
1967 		kbase_js_foreach_ctx_job(kctx, &kbase_jd_cancel);
1968 	}
1969 }
1970 
1971 void kbasep_js_runpool_release_ctx_and_katom_retained_state(
1972 		struct kbase_device *kbdev, struct kbase_context *kctx,
1973 		struct kbasep_js_atom_retained_state *katom_retained_state)
1974 {
1975 	struct kbasep_js_device_data *js_devdata;
1976 	struct kbasep_js_kctx_info *js_kctx_info;
1977 	kbasep_js_release_result release_result;
1978 
1979 	KBASE_DEBUG_ASSERT(kbdev != NULL);
1980 	KBASE_DEBUG_ASSERT(kctx != NULL);
1981 	js_kctx_info = &kctx->jctx.sched_info;
1982 	js_devdata = &kbdev->js_data;
1983 
1984 	mutex_lock(&js_devdata->queue_mutex);
1985 	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
1986 	mutex_lock(&js_devdata->runpool_mutex);
1987 
1988 	release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx,
1989 			katom_retained_state);
1990 
1991 	/* Drop the runpool mutex to allow requeuing kctx */
1992 	mutex_unlock(&js_devdata->runpool_mutex);
1993 
1994 	if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u)
1995 		kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, true);
1996 
1997 	/* Drop the jsctx_mutex to allow scheduling in a new context */
1998 
1999 	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
2000 	mutex_unlock(&js_devdata->queue_mutex);
2001 
2002 	if (release_result & KBASEP_JS_RELEASE_RESULT_SCHED_ALL)
2003 		kbase_js_sched_all(kbdev);
2004 }
2005 
2006 void kbasep_js_runpool_release_ctx(struct kbase_device *kbdev,
2007 		struct kbase_context *kctx)
2008 {
2009 	struct kbasep_js_atom_retained_state katom_retained_state;
2010 
2011 	kbasep_js_atom_retained_state_init_invalid(&katom_retained_state);
2012 
2013 	kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx,
2014 			&katom_retained_state);
2015 }
2016 
2017 /* Variant of kbasep_js_runpool_release_ctx() that doesn't call into
2018  * kbase_js_sched_all()
2019  */
2020 static void kbasep_js_runpool_release_ctx_no_schedule(
2021 		struct kbase_device *kbdev, struct kbase_context *kctx)
2022 {
2023 	struct kbasep_js_device_data *js_devdata;
2024 	struct kbasep_js_kctx_info *js_kctx_info;
2025 	kbasep_js_release_result release_result;
2026 	struct kbasep_js_atom_retained_state katom_retained_state_struct;
2027 	struct kbasep_js_atom_retained_state *katom_retained_state =
2028 		&katom_retained_state_struct;
2029 
2030 	KBASE_DEBUG_ASSERT(kbdev != NULL);
2031 	KBASE_DEBUG_ASSERT(kctx != NULL);
2032 	js_kctx_info = &kctx->jctx.sched_info;
2033 	js_devdata = &kbdev->js_data;
2034 	kbasep_js_atom_retained_state_init_invalid(katom_retained_state);
2035 
2036 	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
2037 	mutex_lock(&js_devdata->runpool_mutex);
2038 
2039 	release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx,
2040 			katom_retained_state);
2041 
2042 	/* Drop the runpool mutex to allow requeuing kctx */
2043 	mutex_unlock(&js_devdata->runpool_mutex);
2044 	if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u)
2045 		kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, true);
2046 
2047 	/* Drop the jsctx_mutex to allow scheduling in a new context */
2048 	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
2049 
2050 	/* NOTE: could return release_result if the caller would like to know
2051 	 * whether it should schedule a new context, but currently no callers do
2052 	 */
2053 }
2054 
2055 void kbase_js_set_timeouts(struct kbase_device *kbdev)
2056 {
2057 	lockdep_assert_held(&kbdev->hwaccess_lock);
2058 
2059 	kbase_backend_timeouts_changed(kbdev);
2060 }
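
/*
 * Usage sketch (illustrative): the lockdep assertion above means a caller
 * must hold hwaccess_lock around the call, e.g.
 *
 *	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 *	kbase_js_set_timeouts(kbdev);
 *	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 */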
2061 
2062 static bool kbasep_js_schedule_ctx(struct kbase_device *kbdev, struct kbase_context *kctx,
2063 				   unsigned int js)
2064 {
2065 	struct kbasep_js_device_data *js_devdata;
2066 	struct kbasep_js_kctx_info *js_kctx_info;
2067 	unsigned long flags;
2068 	bool kctx_suspended = false;
2069 	int as_nr;
2070 
2071 	dev_dbg(kbdev->dev, "Scheduling kctx %pK (s:%u)\n", kctx, js);
2072 
2073 	js_devdata = &kbdev->js_data;
2074 	js_kctx_info = &kctx->jctx.sched_info;
2075 
2076 	/* Pick available address space for this context */
2077 	mutex_lock(&kbdev->mmu_hw_mutex);
2078 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2079 	as_nr = kbase_ctx_sched_retain_ctx(kctx);
2080 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2081 	mutex_unlock(&kbdev->mmu_hw_mutex);
2082 	if (as_nr == KBASEP_AS_NR_INVALID) {
2083 		as_nr = kbase_backend_find_and_release_free_address_space(
2084 				kbdev, kctx);
2085 		if (as_nr != KBASEP_AS_NR_INVALID) {
2086 			/* Attempt to retain the context again; this should
2087 			 * succeed
2088 			 */
2089 			mutex_lock(&kbdev->mmu_hw_mutex);
2090 			spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2091 			as_nr = kbase_ctx_sched_retain_ctx(kctx);
2092 			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2093 			mutex_unlock(&kbdev->mmu_hw_mutex);
2094 
2095 			WARN_ON(as_nr == KBASEP_AS_NR_INVALID);
2096 		}
2097 	}
2098 	if ((as_nr < 0) || (as_nr >= BASE_MAX_NR_AS))
2099 		return false; /* No address space currently available */
2100 
2101 	/*
2102 	 * Atomic transaction on the Context and Run Pool begins
2103 	 */
2104 	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
2105 	mutex_lock(&js_devdata->runpool_mutex);
2106 	mutex_lock(&kbdev->mmu_hw_mutex);
2107 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2108 
2109 	/* Check to see if context is dying due to kbase_job_zap_context() */
2110 	if (kbase_ctx_flag(kctx, KCTX_DYING)) {
2111 		/* Roll back the transaction so far and return */
2112 		kbase_ctx_sched_release_ctx(kctx);
2113 
2114 		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2115 		mutex_unlock(&kbdev->mmu_hw_mutex);
2116 		mutex_unlock(&js_devdata->runpool_mutex);
2117 		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
2118 
2119 		return false;
2120 	}
2121 
2122 	KBASE_KTRACE_ADD_JM_REFCOUNT(kbdev, JS_TRY_SCHEDULE_HEAD_CTX, kctx, NULL,
2123 				0u,
2124 				kbase_ktrace_get_ctx_refcnt(kctx));
2125 
2126 	kbase_ctx_flag_set(kctx, KCTX_SCHEDULED);
2127 
2128 	/* Assign context to previously chosen address space */
2129 	if (!kbase_backend_use_ctx(kbdev, kctx, as_nr)) {
2130 		/* Roll back the transaction so far and return */
2131 		kbase_ctx_sched_release_ctx(kctx);
2132 		kbase_ctx_flag_clear(kctx, KCTX_SCHEDULED);
2133 
2134 		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2135 		mutex_unlock(&kbdev->mmu_hw_mutex);
2136 		mutex_unlock(&js_devdata->runpool_mutex);
2137 		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
2138 
2139 		return false;
2140 	}
2141 
2142 	kbdev->hwaccess.active_kctx[js] = kctx;
2143 
2144 	KBASE_TLSTREAM_TL_RET_AS_CTX(kbdev, &kbdev->as[kctx->as_nr], kctx);
2145 
2146 	/* Cause any future waiter-on-termination to wait until the context is
2147 	 * descheduled
2148 	 */
2149 	wake_up(&js_kctx_info->ctx.is_scheduled_wait);
2150 
2151 	/* Re-check for suspending: a suspend could've occurred, and all the
2152 	 * contexts could've been removed from the runpool before we took this
2153 	 * lock. In this case, we don't want to allow this context to run jobs,
2154 	 * we just want it out immediately.
2155 	 *
2156 	 * The DMB required to read the suspend flag was issued recently as part
2157 	 * of the hwaccess_lock locking. If a suspend occurs *after* that lock
2158 	 * was taken (i.e. this condition doesn't execute), then the
2159 	 * kbasep_js_suspend() code will clean up this context instead (by virtue
2160 	 * of it being called strictly after the suspend flag is set, and will
2161 	 * wait for this lock to drop)
2162 	 */
2163 #ifdef CONFIG_MALI_ARBITER_SUPPORT
2164 	if (kbase_pm_is_suspending(kbdev) || kbase_pm_is_gpu_lost(kbdev)) {
2165 #else
2166 	if (kbase_pm_is_suspending(kbdev)) {
2167 #endif
2168 		/* Cause it to leave at some later point */
2169 		bool retained;
2170 		CSTD_UNUSED(retained);
2171 
2172 		retained = kbase_ctx_sched_inc_refcount_nolock(kctx);
2173 		KBASE_DEBUG_ASSERT(retained);
2174 
2175 		kbasep_js_clear_submit_allowed(js_devdata, kctx);
2176 		kctx_suspended = true;
2177 	}
2178 
2179 	kbase_ctx_flag_clear(kctx, KCTX_PULLED_SINCE_ACTIVE_JS0 << js);
2180 
2181 	/* Transaction complete */
2182 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2183 	mutex_unlock(&kbdev->mmu_hw_mutex);
2184 
2185 	/* Synchronize with any timers */
2186 	kbase_backend_ctx_count_changed(kbdev);
2187 
2188 	mutex_unlock(&js_devdata->runpool_mutex);
2189 	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
2190 	/* Note: after this point, the context could potentially get scheduled
2191 	 * out immediately
2192 	 */
2193 
2194 	if (kctx_suspended) {
2195 		/* Finishing forcing out the context due to a suspend. Use a
2196 		 * variant of kbasep_js_runpool_release_ctx() that doesn't
2197 		 * schedule a new context, to prevent a risk of recursion back
2198 		 * into this function
2199 		 */
2200 		kbasep_js_runpool_release_ctx_no_schedule(kbdev, kctx);
2201 		return false;
2202 	}
2203 	return true;
2204 }
2205 
2206 static bool kbase_js_use_ctx(struct kbase_device *kbdev, struct kbase_context *kctx,
2207 			     unsigned int js)
2208 {
2209 	unsigned long flags;
2210 
2211 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2212 
2213 	if (kbase_ctx_flag(kctx, KCTX_SCHEDULED) &&
2214 			kbase_backend_use_ctx_sched(kbdev, kctx, js)) {
2215 		dev_dbg(kbdev->dev, "kctx %pK already has ASID - mark as active (s:%u)\n",
2216 			(void *)kctx, js);
2217 
2218 		if (kbdev->hwaccess.active_kctx[js] != kctx) {
2219 			kbdev->hwaccess.active_kctx[js] = kctx;
2220 			kbase_ctx_flag_clear(kctx,
2221 					KCTX_PULLED_SINCE_ACTIVE_JS0 << js);
2222 		}
2223 		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2224 		return true; /* Context already scheduled */
2225 	}
2226 
2227 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2228 	return kbasep_js_schedule_ctx(kbdev, kctx, js);
2229 }
2230 
2231 void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev,
2232 		struct kbase_context *kctx)
2233 {
2234 	struct kbasep_js_kctx_info *js_kctx_info;
2235 	struct kbasep_js_device_data *js_devdata;
2236 	bool is_scheduled;
2237 
2238 	KBASE_DEBUG_ASSERT(kbdev != NULL);
2239 	KBASE_DEBUG_ASSERT(kctx != NULL);
2240 
2241 	js_devdata = &kbdev->js_data;
2242 	js_kctx_info = &kctx->jctx.sched_info;
2243 
2244 #ifdef CONFIG_MALI_ARBITER_SUPPORT
2245 	/* This should only happen in response to a system call
2246 	 * from a user-space thread.
2247 	 * In a non-arbitrated environment this can never happen
2248 	 * whilst suspending.
2249 	 *
2250 	 * In an arbitrated environment, user-space threads can run
2251 	 * while we are suspended (for example GPU not available
2252 	 * to this VM), however in that case we will block on
2253 	 * the wait event for KCTX_SCHEDULED, since no context
2254 	 * can be scheduled until we have the GPU again.
2255 	 */
2256 	if (kbdev->arb.arb_if == NULL)
2257 		if (WARN_ON(kbase_pm_is_suspending(kbdev)))
2258 			return;
2259 #else
2260 	/* This should only happen in response to a system call
2261 	 * from a user-space thread.
2262 	 * In a non-arbitrated environment this can never happen
2263 	 * whilst suspending.
2264 	 */
2265 	if (WARN_ON(kbase_pm_is_suspending(kbdev)))
2266 		return;
2267 #endif
2268 
2269 	mutex_lock(&js_devdata->queue_mutex);
2270 	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
2271 
2272 	/* Mark the context as privileged */
2273 	kbase_ctx_flag_set(kctx, KCTX_PRIVILEGED);
2274 
2275 	is_scheduled = kbase_ctx_flag(kctx, KCTX_SCHEDULED);
2276 	if (!is_scheduled) {
2277 		/* Add the context to the pullable list */
2278 		if (kbase_js_ctx_list_add_pullable_head(kbdev, kctx, 0))
2279 			kbase_js_sync_timers(kbdev);
2280 
2281 		/* Fast-starting requires the jsctx_mutex to be dropped,
2282 		 * because it works on multiple ctxs
2283 		 */
2284 		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
2285 		mutex_unlock(&js_devdata->queue_mutex);
2286 
2287 		/* Try to schedule the context in */
2288 		kbase_js_sched_all(kbdev);
2289 
2290 		/* Wait for the context to be scheduled in */
2291 		wait_event(kctx->jctx.sched_info.ctx.is_scheduled_wait,
2292 			   kbase_ctx_flag(kctx, KCTX_SCHEDULED));
2293 	} else {
2294 		/* Already scheduled in - We need to retain it to keep the
2295 		 * corresponding address space
2296 		 */
2297 		WARN_ON(!kbase_ctx_sched_inc_refcount(kctx));
2298 		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
2299 		mutex_unlock(&js_devdata->queue_mutex);
2300 	}
2301 }
2302 KBASE_EXPORT_TEST_API(kbasep_js_schedule_privileged_ctx);
2303 
2304 void kbasep_js_release_privileged_ctx(struct kbase_device *kbdev,
2305 		struct kbase_context *kctx)
2306 {
2307 	struct kbasep_js_kctx_info *js_kctx_info;
2308 
2309 	KBASE_DEBUG_ASSERT(kctx != NULL);
2310 	js_kctx_info = &kctx->jctx.sched_info;
2311 
2312 	/* We don't need to use the address space anymore */
2313 	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
2314 	kbase_ctx_flag_clear(kctx, KCTX_PRIVILEGED);
2315 	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
2316 
2317 	/* Release the context - it will be scheduled out */
2318 	kbasep_js_runpool_release_ctx(kbdev, kctx);
2319 
2320 	kbase_js_sched_all(kbdev);
2321 }
2322 KBASE_EXPORT_TEST_API(kbasep_js_release_privileged_ctx);
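
/*
 * Illustrative pairing (a sketch; the exact call sites are elsewhere in the
 * driver): privileged scheduling is a retain/release pair around work that
 * needs the context to stay resident in an address space.
 *
 *	kbasep_js_schedule_privileged_ctx(kbdev, kctx);
 *	... operate on the GPU on behalf of kctx ...
 *	kbasep_js_release_privileged_ctx(kbdev, kctx);
 */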
2323 
2324 void kbasep_js_suspend(struct kbase_device *kbdev)
2325 {
2326 	unsigned long flags;
2327 	struct kbasep_js_device_data *js_devdata;
2328 	int i;
2329 	u16 retained = 0u;
2330 
2331 	KBASE_DEBUG_ASSERT(kbdev);
2332 	KBASE_DEBUG_ASSERT(kbase_pm_is_suspending(kbdev));
2333 	js_devdata = &kbdev->js_data;
2334 
2335 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2336 
2337 	/* Prevent all contexts from submitting */
2338 	js_devdata->runpool_irq.submit_allowed = 0;
2339 
2340 	/* Retain each of the contexts, so we can cause each to leave even if it
2341 	 * had no refcount to begin with
2342 	 */
2343 	for (i = BASE_MAX_NR_AS - 1; i >= 0; --i) {
2344 		struct kbase_context *kctx = kbdev->as_to_kctx[i];
2345 
2346 		retained = retained << 1;
2347 
2348 		if (kctx && !(kbdev->as_free & (1u << i))) {
2349 			kbase_ctx_sched_retain_ctx_refcount(kctx);
2350 			retained |= 1u;
2351 			/* This loop will not have an effect on the privileged
2352 			 * contexts as they would have an extra ref count
2353 			 * compared to the normal contexts, so they will hold
2354 			 * on to their address spaces. The MMU will be re-enabled for
2355 			 * them on resume.
2356 			 */
2357 		}
2358 	}
2359 
2360 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2361 
2362 	/* De-ref the previous retain to ensure each context gets pulled out
2363 	 * sometime later.
2364 	 */
2365 	for (i = 0;
2366 		 i < BASE_MAX_NR_AS;
2367 		 ++i, retained = retained >> 1) {
2368 		struct kbase_context *kctx = kbdev->as_to_kctx[i];
2369 
2370 		if (retained & 1u)
2371 			kbasep_js_runpool_release_ctx(kbdev, kctx);
2372 	}
2373 
2374 	/* Caller must wait for all Power Manager active references to be
2375 	 * dropped
2376 	 */
2377 }
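
/*
 * Suspend ordering sketch (an assumption about the power management core's
 * sequencing, consistent with the asserts above):
 *
 *	// the suspend flag is already set, so kbase_pm_is_suspending() is true
 *	kbasep_js_suspend(kbdev);
 *	// then wait for all PM active references to drop before powering down
 */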
2378 
2379 void kbasep_js_resume(struct kbase_device *kbdev)
2380 {
2381 	struct kbasep_js_device_data *js_devdata;
2382 	int js, prio;
2383 
2384 	KBASE_DEBUG_ASSERT(kbdev);
2385 	js_devdata = &kbdev->js_data;
2386 	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
2387 
2388 	mutex_lock(&js_devdata->queue_mutex);
2389 	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
2390 		for (prio = KBASE_JS_ATOM_SCHED_PRIO_FIRST;
2391 			prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
2392 			struct kbase_context *kctx, *n;
2393 			unsigned long flags;
2394 
2395 #ifndef CONFIG_MALI_ARBITER_SUPPORT
2396 			spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2397 
2398 			list_for_each_entry_safe(kctx, n,
2399 				 &kbdev->js_data.ctx_list_unpullable[js][prio],
2400 				 jctx.sched_info.ctx.ctx_list_entry[js]) {
2401 				struct kbasep_js_kctx_info *js_kctx_info;
2402 				bool timer_sync = false;
2403 
2404 				/* Drop lock so we can take kctx mutexes */
2405 				spin_unlock_irqrestore(&kbdev->hwaccess_lock,
2406 						flags);
2407 
2408 				js_kctx_info = &kctx->jctx.sched_info;
2409 
2410 				mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
2411 				mutex_lock(&js_devdata->runpool_mutex);
2412 				spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2413 
2414 				if (!kbase_ctx_flag(kctx, KCTX_SCHEDULED) &&
2415 					kbase_js_ctx_pullable(kctx, js, false))
2416 					timer_sync =
2417 						kbase_js_ctx_list_add_pullable_nolock(
2418 								kbdev, kctx, js);
2419 
2420 				spin_unlock_irqrestore(&kbdev->hwaccess_lock,
2421 						flags);
2422 
2423 				if (timer_sync)
2424 					kbase_backend_ctx_count_changed(kbdev);
2425 
2426 				mutex_unlock(&js_devdata->runpool_mutex);
2427 				mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
2428 
2429 				/* Take lock before accessing list again */
2430 				spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2431 			}
2432 			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2433 #else
2434 			bool timer_sync = false;
2435 
2436 			spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2437 
2438 			list_for_each_entry_safe(kctx, n,
2439 				 &kbdev->js_data.ctx_list_unpullable[js][prio],
2440 				 jctx.sched_info.ctx.ctx_list_entry[js]) {
2441 
2442 				if (!kbase_ctx_flag(kctx, KCTX_SCHEDULED) &&
2443 					kbase_js_ctx_pullable(kctx, js, false))
2444 					timer_sync |=
2445 						kbase_js_ctx_list_add_pullable_nolock(
2446 							kbdev, kctx, js);
2447 			}
2448 
2449 			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2450 
2451 			if (timer_sync) {
2452 				mutex_lock(&js_devdata->runpool_mutex);
2453 				kbase_backend_ctx_count_changed(kbdev);
2454 				mutex_unlock(&js_devdata->runpool_mutex);
2455 			}
2456 #endif
2457 		}
2458 	}
2459 	mutex_unlock(&js_devdata->queue_mutex);
2460 
2461 	/* Restart atom processing */
2462 	kbase_js_sched_all(kbdev);
2463 
2464 	/* JS Resume complete */
2465 }
2466 
2467 bool kbase_js_is_atom_valid(struct kbase_device *kbdev,
2468 				struct kbase_jd_atom *katom)
2469 {
2470 	if ((katom->core_req & BASE_JD_REQ_FS) &&
2471 	    (katom->core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE |
2472 								BASE_JD_REQ_T)))
2473 		return false;
2474 
2475 	if ((katom->core_req & BASE_JD_REQ_JOB_SLOT) &&
2476 			(katom->jobslot >= BASE_JM_MAX_NR_SLOTS))
2477 		return false;
2478 
2479 	return true;
2480 }
2481 
2482 static unsigned int kbase_js_get_slot(struct kbase_device *kbdev, struct kbase_jd_atom *katom)
2483 {
2484 	if (katom->core_req & BASE_JD_REQ_JOB_SLOT)
2485 		return katom->jobslot;
2486 
2487 	if (katom->core_req & BASE_JD_REQ_FS)
2488 		return 0;
2489 
2490 	if (katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) {
2491 		if (katom->device_nr == 1 &&
2492 				kbdev->gpu_props.num_core_groups == 2)
2493 			return 2;
2494 	}
2495 
2496 	return 1;
2497 }
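
/*
 * Worked example (illustrative, following the checks above in order): an
 * atom with BASE_JD_REQ_JOB_SLOT uses its explicit jobslot; otherwise a
 * fragment atom (BASE_JD_REQ_FS) maps to slot 0; a compute-only atom with
 * device_nr == 1 on a GPU with two core groups maps to slot 2; everything
 * else defaults to slot 1.
 */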
2498 
2499 bool kbase_js_dep_resolved_submit(struct kbase_context *kctx,
2500 					struct kbase_jd_atom *katom)
2501 {
2502 	bool enqueue_required, add_required = true;
2503 
2504 	katom->slot_nr = kbase_js_get_slot(kctx->kbdev, katom);
2505 
2506 	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
2507 	lockdep_assert_held(&kctx->jctx.lock);
2508 
2509 	/* If the slot will transition from unpullable to pullable then add it
2510 	 * to the pullable list
2511 	 */
2512 	if (jsctx_rb_none_to_pull(kctx, katom->slot_nr))
2513 		enqueue_required = true;
2514 	else
2515 		enqueue_required = false;
2516 
2517 	if ((katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED) ||
2518 			(katom->pre_dep && (katom->pre_dep->atom_flags &
2519 			KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST))) {
2520 		int prio = katom->sched_priority;
2521 		unsigned int js = katom->slot_nr;
2522 		struct jsctx_queue *queue = &kctx->jsctx_queue[prio][js];
2523 
2524 		dev_dbg(kctx->kbdev->dev, "Add atom %pK to X_DEP list (s:%u)\n", (void *)katom, js);
2525 
2526 		list_add_tail(&katom->queue, &queue->x_dep_head);
2527 		katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST;
2528 		if (kbase_js_atom_blocked_on_x_dep(katom)) {
2529 			enqueue_required = false;
2530 			add_required = false;
2531 		}
2532 	} else {
2533 		dev_dbg(kctx->kbdev->dev, "Atom %pK not added to X_DEP list\n",
2534 			(void *)katom);
2535 	}
2536 
2537 	if (add_required) {
2538 		/* Check if there are lower priority jobs to soft stop */
2539 		kbase_job_slot_ctx_priority_check_locked(kctx, katom);
2540 
2541 		/* Add the atom to the runnable tree. */
2542 		jsctx_tree_add(kctx, katom);
2543 		katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_IN_TREE;
2544 	}
2545 
2546 	dev_dbg(kctx->kbdev->dev,
2547 		"Enqueue of kctx %pK is %srequired to submit atom %pK\n",
2548 		kctx, enqueue_required ? "" : "not ", katom);
2549 
2550 	return enqueue_required;
2551 }
2552 
2553 /**
2554  * kbase_js_move_to_tree - Move atom (and any dependent atoms) to the
2555  *                         runnable_tree, ready for execution
2556  * @katom: Atom to submit
2557  *
2558  * It is assumed that @katom does not have KBASE_KATOM_FLAG_X_DEP_BLOCKED set,
2559  * but is still present in the x_dep list. If @katom has a same-slot dependent
2560  * atom then that atom (and any dependents) will also be moved.
2561  */
2562 static void kbase_js_move_to_tree(struct kbase_jd_atom *katom)
2563 {
2564 	struct kbase_context *const kctx = katom->kctx;
2565 
2566 	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
2567 
2568 	while (katom) {
2569 		WARN_ON(!(katom->atom_flags &
2570 				KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST));
2571 
2572 		if (!kbase_js_atom_blocked_on_x_dep(katom)) {
2573 			dev_dbg(kctx->kbdev->dev,
2574 				"Del atom %pK from X_DEP list in js_move_to_tree\n",
2575 				(void *)katom);
2576 
2577 			list_del(&katom->queue);
2578 			katom->atom_flags &=
2579 					~KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST;
2580 			/* For incremental rendering, an end-of-renderpass atom
2581 			 * may have had its dependency on start-of-renderpass
2582 			 * ignored and may therefore already be in the tree.
2583 			 */
2584 			if (!(katom->atom_flags &
2585 				KBASE_KATOM_FLAG_JSCTX_IN_TREE)) {
2586 				jsctx_tree_add(kctx, katom);
2587 				katom->atom_flags |=
2588 					KBASE_KATOM_FLAG_JSCTX_IN_TREE;
2589 			}
2590 		} else {
2591 			dev_dbg(kctx->kbdev->dev,
2592 				"Atom %pK blocked on x-dep in js_move_to_tree\n",
2593 				(void *)katom);
2594 			break;
2595 		}
2596 
2597 		katom = katom->post_dep;
2598 	}
2599 }
2600 
2601 
2602 /**
2603  * kbase_js_evict_deps - Evict dependencies of a failed atom.
2604  * @kctx:       Context pointer
2605  * @katom:      Pointer to the atom that has failed.
2606  * @js:         The job slot the katom was run on.
2607  * @prio:       Priority of the katom.
2608  *
2609  * Remove all post dependencies of an atom from the context ringbuffers.
2610  *
2611  * The original atom's event_code will be propogated to all dependent atoms.
2612  * The original atom's event_code will be propagated to all dependent atoms.
2613  * Context: Caller must hold the HW access lock
2614  */
2615 static void kbase_js_evict_deps(struct kbase_context *kctx, struct kbase_jd_atom *katom,
2616 				unsigned int js, int prio)
2617 {
2618 	struct kbase_jd_atom *x_dep = katom->x_post_dep;
2619 	struct kbase_jd_atom *next_katom = katom->post_dep;
2620 
2621 	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
2622 
2623 	if (next_katom) {
2624 		KBASE_DEBUG_ASSERT(next_katom->status !=
2625 				KBASE_JD_ATOM_STATE_HW_COMPLETED);
2626 		next_katom->will_fail_event_code = katom->event_code;
2627 
2628 	}
2629 
2630 	/* Has a cross-slot dependency. */
2631 	if (x_dep && (x_dep->atom_flags & (KBASE_KATOM_FLAG_JSCTX_IN_TREE |
2632 				KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST))) {
2633 		/* Remove the dependency. */
2634 		x_dep->atom_flags &= ~KBASE_KATOM_FLAG_X_DEP_BLOCKED;
2635 
2636 		dev_dbg(kctx->kbdev->dev, "Cleared X_DEP flag on atom %pK\n",
2637 			(void *)x_dep);
2638 
2639 		/* Fail if it had a data dependency. */
2640 		if (x_dep->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER)
2641 			x_dep->will_fail_event_code = katom->event_code;
2642 
2643 		if (x_dep->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST)
2644 			kbase_js_move_to_tree(x_dep);
2645 	}
2646 }
2647 
2648 struct kbase_jd_atom *kbase_js_pull(struct kbase_context *kctx, unsigned int js)
2649 {
2650 	struct kbase_jd_atom *katom;
2651 	struct kbasep_js_device_data *js_devdata;
2652 	struct kbase_device *kbdev;
2653 	int pulled;
2654 
2655 	KBASE_DEBUG_ASSERT(kctx);
2656 
2657 	kbdev = kctx->kbdev;
2658 	dev_dbg(kbdev->dev, "JS: pulling an atom from kctx %pK (s:%u)\n", (void *)kctx, js);
2659 
2660 	js_devdata = &kbdev->js_data;
2661 	lockdep_assert_held(&kbdev->hwaccess_lock);
2662 
2663 	if (!kbasep_js_is_submit_allowed(js_devdata, kctx)) {
2664 		dev_dbg(kbdev->dev, "JS: No submit allowed for kctx %pK\n",
2665 			(void *)kctx);
2666 		return NULL;
2667 	}
2668 #ifdef CONFIG_MALI_ARBITER_SUPPORT
2669 	if (kbase_pm_is_suspending(kbdev) || kbase_pm_is_gpu_lost(kbdev))
2670 #else
2671 	if (kbase_pm_is_suspending(kbdev))
2672 #endif
2673 		return NULL;
2674 
2675 	katom = jsctx_rb_peek(kctx, js);
2676 	if (!katom) {
2677 		dev_dbg(kbdev->dev, "JS: No pullable atom in kctx %pK (s:%u)\n", (void *)kctx, js);
2678 		return NULL;
2679 	}
2680 	if (kbase_jsctx_slot_prio_is_blocked(kctx, js, katom->sched_priority)) {
2681 		dev_dbg(kbdev->dev,
2682 			"JS: kctx %pK is blocked from submitting atoms at priority %d and lower (s:%u)\n",
2683 			(void *)kctx, katom->sched_priority, js);
2684 		return NULL;
2685 	}
2686 	if (atomic_read(&katom->blocked)) {
2687 		dev_dbg(kbdev->dev, "JS: Atom %pK is blocked in js_pull\n",
2688 			(void *)katom);
2689 		return NULL;
2690 	}
2691 
2692 	/* Due to ordering restrictions when unpulling atoms on failure, we do
2693 	 * not allow multiple runs of fail-dep atoms from the same context to be
2694 	 * present on the same slot
2695 	 */
2696 	if (katom->pre_dep && kbase_jsctx_slot_atoms_pulled(kctx, js)) {
2697 		struct kbase_jd_atom *prev_atom =
2698 				kbase_backend_inspect_tail(kbdev, js);
2699 
2700 		if (prev_atom && prev_atom->kctx != kctx)
2701 			return NULL;
2702 	}
2703 
2704 	if (kbase_js_atom_blocked_on_x_dep(katom)) {
2705 		if (katom->x_pre_dep->gpu_rb_state ==
2706 				KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB ||
2707 				katom->x_pre_dep->will_fail_event_code)	{
2708 			dev_dbg(kbdev->dev,
2709 				"JS: X pre-dep %pK is not present in slot FIFO or will fail\n",
2710 				(void *)katom->x_pre_dep);
2711 			return NULL;
2712 		}
2713 		if ((katom->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER) &&
2714 				kbase_backend_nr_atoms_on_slot(kbdev, js)) {
2715 			dev_dbg(kbdev->dev,
2716 				"JS: Atom %pK has cross-slot fail dependency and atoms on slot (s:%u)\n",
2717 				(void *)katom, js);
2718 			return NULL;
2719 		}
2720 	}
2721 
2722 	KBASE_KTRACE_ADD_JM_SLOT_INFO(kbdev, JS_PULL_JOB, kctx, katom,
2723 				      katom->jc, js, katom->sched_priority);
2724 	kbase_ctx_flag_set(kctx, KCTX_PULLED);
2725 	kbase_ctx_flag_set(kctx, (KCTX_PULLED_SINCE_ACTIVE_JS0 << js));
2726 
2727 	pulled = kbase_jsctx_slot_atom_pulled_inc(kctx, katom);
2728 	if (pulled == 1 && !kctx->slots_pullable) {
2729 		WARN_ON(kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
2730 		kbase_ctx_flag_set(kctx, KCTX_RUNNABLE_REF);
2731 		atomic_inc(&kbdev->js_data.nr_contexts_runnable);
2732 	}
2733 	jsctx_rb_pull(kctx, katom);
2734 
2735 	kbase_ctx_sched_retain_ctx_refcount(kctx);
2736 
2737 	katom->ticks = 0;
2738 
2739 	dev_dbg(kbdev->dev, "JS: successfully pulled atom %pK from kctx %pK (s:%u)\n",
2740 		(void *)katom, (void *)kctx, js);
2741 
2742 	return katom;
2743 }
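
/*
 * Consumer sketch (a minimal illustration, assuming hwaccess_lock is held
 * as the lockdep assertion requires; the real consumer is the job manager
 * backend):
 *
 *	struct kbase_jd_atom *katom;
 *
 *	while ((katom = kbase_js_pull(kctx, js)))
 *		... hand katom to the job slot ring buffer ...
 */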
2744 
2745 /**
2746  * js_return_of_start_rp() - Handle soft-stop of an atom that starts a
2747  *                           renderpass
2748  * @start_katom: Pointer to the start-of-renderpass atom that was soft-stopped
2749  *
2750  * This function is called to switch to incremental rendering if the tiler job
2751  * chain at the start of a renderpass has used too much memory. It prevents the
2752  * tiler job being pulled for execution in the job scheduler again until the
2753  * next phase of incremental rendering is complete.
2754  *
2755  * If the end-of-renderpass atom is already in the job scheduler (because a
2756  * previous attempt at tiling used too much memory during the same renderpass)
2757  * then it is unblocked; otherwise, it is run by handing it to the scheduler.
2758  */
2759 static void js_return_of_start_rp(struct kbase_jd_atom *const start_katom)
2760 {
2761 	struct kbase_context *const kctx = start_katom->kctx;
2762 	struct kbase_device *const kbdev = kctx->kbdev;
2763 	struct kbase_jd_renderpass *rp;
2764 	struct kbase_jd_atom *end_katom;
2765 	unsigned long flags;
2766 
2767 	lockdep_assert_held(&kctx->jctx.lock);
2768 
2769 	if (WARN_ON(!(start_katom->core_req & BASE_JD_REQ_START_RENDERPASS)))
2770 		return;
2771 
2772 	compiletime_assert((1ull << (sizeof(start_katom->renderpass_id) * 8)) <=
2773 			ARRAY_SIZE(kctx->jctx.renderpasses),
2774 			"Should check invalid access to renderpasses");
2775 
2776 	rp = &kctx->jctx.renderpasses[start_katom->renderpass_id];
2777 
2778 	if (WARN_ON(rp->start_katom != start_katom))
2779 		return;
2780 
2781 	dev_dbg(kctx->kbdev->dev,
2782 		"JS return start atom %pK in state %d of RP %d\n",
2783 		(void *)start_katom, (int)rp->state,
2784 		start_katom->renderpass_id);
2785 
2786 	if (WARN_ON(rp->state == KBASE_JD_RP_COMPLETE))
2787 		return;
2788 
2789 	/* The tiler job might have been soft-stopped for some reason other
2790 	 * than running out of memory.
2791 	 */
2792 	if (rp->state == KBASE_JD_RP_START || rp->state == KBASE_JD_RP_RETRY) {
2793 		dev_dbg(kctx->kbdev->dev,
2794 			"JS return isn't OOM in state %d of RP %d\n",
2795 			(int)rp->state, start_katom->renderpass_id);
2796 		return;
2797 	}
2798 
2799 	dev_dbg(kctx->kbdev->dev,
2800 		"JS return confirm OOM in state %d of RP %d\n",
2801 		(int)rp->state, start_katom->renderpass_id);
2802 
2803 	if (WARN_ON(rp->state != KBASE_JD_RP_PEND_OOM &&
2804 		rp->state != KBASE_JD_RP_RETRY_PEND_OOM))
2805 		return;
2806 
2807 	/* Prevent the tiler job being pulled for execution in the
2808 	 * job scheduler again.
2809 	 */
2810 	dev_dbg(kbdev->dev, "Blocking start atom %pK\n",
2811 		(void *)start_katom);
2812 	atomic_inc(&start_katom->blocked);
2813 
2814 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2815 
2816 	rp->state = (rp->state == KBASE_JD_RP_PEND_OOM) ?
2817 		KBASE_JD_RP_OOM : KBASE_JD_RP_RETRY_OOM;
2818 
2819 	/* Was the fragment job chain submitted to kbase yet? */
2820 	end_katom = rp->end_katom;
2821 	if (end_katom) {
2822 		dev_dbg(kctx->kbdev->dev, "JS return add end atom %pK\n",
2823 			(void *)end_katom);
2824 
2825 		if (rp->state == KBASE_JD_RP_RETRY_OOM) {
2826 			/* Allow the end of the renderpass to be pulled for
2827 			 * execution again to continue incremental rendering.
2828 			 */
2829 			dev_dbg(kbdev->dev, "Unblocking end atom %pK\n",
2830 				(void *)end_katom);
2831 			atomic_dec(&end_katom->blocked);
2832 			WARN_ON(!(end_katom->atom_flags &
2833 				KBASE_KATOM_FLAG_JSCTX_IN_TREE));
2834 			WARN_ON(end_katom->status != KBASE_JD_ATOM_STATE_IN_JS);
2835 
2836 			kbase_js_ctx_list_add_pullable_nolock(kbdev, kctx,
2837 				end_katom->slot_nr);
2838 
2839 			/* Expect the fragment job chain to be scheduled without
2840 			 * further action because this function is called when
2841 			 * returning an atom to the job scheduler ringbuffer.
2842 			 */
2843 			end_katom = NULL;
2844 		} else {
2845 			WARN_ON(end_katom->status !=
2846 				KBASE_JD_ATOM_STATE_QUEUED &&
2847 				end_katom->status != KBASE_JD_ATOM_STATE_IN_JS);
2848 		}
2849 	}
2850 
2851 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2852 
2853 	if (end_katom)
2854 		kbase_jd_dep_clear_locked(end_katom);
2855 }
2856 
2857 /**
2858  * js_return_of_end_rp() - Handle completion of an atom that ends a renderpass
2859  * @end_katom: Pointer to the end-of-renderpass atom that was completed
2860  *
2861  * This function is called to continue incremental rendering if the tiler job
2862  * chain at the start of a renderpass used too much memory. It resets the
2863  * mechanism for detecting excessive memory usage, then allows the soft-stopped
2864  * tiler job chain to be pulled for execution again.
2865  *
2866  * The start-of-renderpass atom must already been submitted to kbase.
2867  * The start-of-renderpass atom must already have been submitted to kbase.
2868 static void js_return_of_end_rp(struct kbase_jd_atom *const end_katom)
2869 {
2870 	struct kbase_context *const kctx = end_katom->kctx;
2871 	struct kbase_device *const kbdev = kctx->kbdev;
2872 	struct kbase_jd_renderpass *rp;
2873 	struct kbase_jd_atom *start_katom;
2874 	unsigned long flags;
2875 
2876 	lockdep_assert_held(&kctx->jctx.lock);
2877 
2878 	if (WARN_ON(!(end_katom->core_req & BASE_JD_REQ_END_RENDERPASS)))
2879 		return;
2880 
2881 	compiletime_assert((1ull << (sizeof(end_katom->renderpass_id) * 8)) <=
2882 			ARRAY_SIZE(kctx->jctx.renderpasses),
2883 			"Should check invalid access to renderpasses");
2884 
2885 	rp = &kctx->jctx.renderpasses[end_katom->renderpass_id];
2886 
2887 	if (WARN_ON(rp->end_katom != end_katom))
2888 		return;
2889 
2890 	dev_dbg(kctx->kbdev->dev,
2891 		"JS return end atom %pK in state %d of RP %d\n",
2892 		(void *)end_katom, (int)rp->state, end_katom->renderpass_id);
2893 
2894 	if (WARN_ON(rp->state != KBASE_JD_RP_OOM &&
2895 		rp->state != KBASE_JD_RP_RETRY_OOM))
2896 		return;
2897 
2898 	/* Reduce the number of mapped pages in the memory regions that
2899 	 * triggered out-of-memory last time so that we can detect excessive
2900 	 * memory usage again.
2901 	 */
2902 	kbase_gpu_vm_lock(kctx);
2903 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2904 
2905 	while (!list_empty(&rp->oom_reg_list)) {
2906 		struct kbase_va_region *reg =
2907 			list_first_entry(&rp->oom_reg_list,
2908 					 struct kbase_va_region, link);
2909 
2910 		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2911 
2912 		dev_dbg(kbdev->dev,
2913 			"Reset backing to %zu pages for region %pK\n",
2914 			reg->threshold_pages, (void *)reg);
2915 
2916 		if (!WARN_ON(reg->flags & KBASE_REG_VA_FREED))
2917 			kbase_mem_shrink(kctx, reg, reg->threshold_pages);
2918 
2919 		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2920 		dev_dbg(kbdev->dev, "Deleting region %pK from list\n",
2921 			(void *)reg);
2922 		list_del_init(&reg->link);
2923 		kbase_va_region_alloc_put(kctx, reg);
2924 	}
2925 
2926 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2927 	kbase_gpu_vm_unlock(kctx);
2928 
2929 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2930 	rp->state = KBASE_JD_RP_RETRY;
2931 	dev_dbg(kbdev->dev, "Changed state to %d for retry\n", rp->state);
2932 
2933 	/* Allow the start of the renderpass to be pulled for execution again
2934 	 * to begin/continue incremental rendering.
2935 	 */
2936 	start_katom = rp->start_katom;
2937 	if (!WARN_ON(!start_katom)) {
2938 		dev_dbg(kbdev->dev, "Unblocking start atom %pK\n",
2939 			(void *)start_katom);
2940 		atomic_dec(&start_katom->blocked);
2941 		(void)kbase_js_ctx_list_add_pullable_head_nolock(kbdev, kctx,
2942 			start_katom->slot_nr);
2943 	}
2944 
2945 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2946 }
2947 
2948 static void js_return_worker(struct work_struct *data)
2949 {
2950 	struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom,
2951 									work);
2952 	struct kbase_context *kctx = katom->kctx;
2953 	struct kbase_device *kbdev = kctx->kbdev;
2954 	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
2955 	struct kbasep_js_kctx_info *js_kctx_info = &kctx->jctx.sched_info;
2956 	struct kbasep_js_atom_retained_state retained_state;
2957 	int js = katom->slot_nr;
2958 	bool slot_became_unblocked;
2959 	bool timer_sync = false;
2960 	bool context_idle = false;
2961 	unsigned long flags;
2962 	base_jd_core_req core_req = katom->core_req;
2963 	u64 cache_jc = katom->jc;
2964 
2965 	dev_dbg(kbdev->dev, "%s for atom %pK with event code 0x%x\n",
2966 		__func__, (void *)katom, katom->event_code);
2967 
2968 	KBASE_KTRACE_ADD_JM(kbdev, JS_RETURN_WORKER, kctx, katom, katom->jc, 0);
2969 
2970 	if (katom->event_code != BASE_JD_EVENT_END_RP_DONE)
2971 		KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX(kbdev, katom);
2972 
2973 	kbase_backend_complete_wq(kbdev, katom);
2974 
2975 	kbasep_js_atom_retained_state_copy(&retained_state, katom);
2976 
2977 	mutex_lock(&js_devdata->queue_mutex);
2978 	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
2979 
2980 	if (katom->event_code != BASE_JD_EVENT_END_RP_DONE)
2981 		atomic_dec(&katom->blocked);
2982 
2983 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2984 
2985 	slot_became_unblocked = kbase_jsctx_slot_atom_pulled_dec(kctx, katom);
2986 
2987 	if (!kbase_jsctx_slot_atoms_pulled(kctx, js) &&
2988 	    jsctx_rb_none_to_pull(kctx, js))
2989 		timer_sync |= kbase_js_ctx_list_remove_nolock(kbdev, kctx, js);
2990 
2991 	/* If the context is now unblocked on this slot after soft-stopped
2992 	 * atoms, then only mark it as pullable on this slot if it is not
2993 	 * idle
2994 	 */
2995 	if (slot_became_unblocked && kbase_jsctx_atoms_pulled(kctx) &&
2996 	    kbase_js_ctx_pullable(kctx, js, true))
2997 		timer_sync |=
2998 			kbase_js_ctx_list_add_pullable_nolock(kbdev, kctx, js);
2999 
3000 	if (!kbase_jsctx_atoms_pulled(kctx)) {
3001 		dev_dbg(kbdev->dev,
3002 			"No atoms currently pulled from context %pK\n",
3003 			(void *)kctx);
3004 
3005 		if (!kctx->slots_pullable) {
3006 			dev_dbg(kbdev->dev,
3007 				"Context %pK %s counted as runnable\n",
3008 				(void *)kctx,
3009 				kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF) ?
3010 					"is" : "isn't");
3011 
3012 			WARN_ON(!kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
3013 			kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
3014 			atomic_dec(&kbdev->js_data.nr_contexts_runnable);
3015 			timer_sync = true;
3016 		}
3017 
3018 		if (kctx->as_nr != KBASEP_AS_NR_INVALID &&
3019 				!kbase_ctx_flag(kctx, KCTX_DYING)) {
3020 			int num_slots = kbdev->gpu_props.num_job_slots;
3021 			int slot;
3022 
3023 			if (!kbasep_js_is_submit_allowed(js_devdata, kctx))
3024 				kbasep_js_set_submit_allowed(js_devdata, kctx);
3025 
3026 			for (slot = 0; slot < num_slots; slot++) {
3027 				if (kbase_js_ctx_pullable(kctx, slot, true))
3028 					timer_sync |=
3029 					kbase_js_ctx_list_add_pullable_nolock(
3030 							kbdev, kctx, slot);
3031 			}
3032 		}
3033 
3034 		kbase_jm_idle_ctx(kbdev, kctx);
3035 
3036 		context_idle = true;
3037 	}
3038 
3039 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
3040 
3041 	if (context_idle) {
3042 		dev_dbg(kbdev->dev,
3043 			"Context %pK %s counted as active\n",
3044 			(void *)kctx,
3045 			kbase_ctx_flag(kctx, KCTX_ACTIVE) ?
3046 				"is" : "isn't");
3047 		WARN_ON(!kbase_ctx_flag(kctx, KCTX_ACTIVE));
3048 		kbase_ctx_flag_clear(kctx, KCTX_ACTIVE);
3049 		kbase_pm_context_idle(kbdev);
3050 	}
3051 
3052 	if (timer_sync)
3053 		kbase_js_sync_timers(kbdev);
3054 
3055 	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
3056 	mutex_unlock(&js_devdata->queue_mutex);
3057 
3058 	if (katom->core_req & BASE_JD_REQ_START_RENDERPASS) {
3059 		mutex_lock(&kctx->jctx.lock);
3060 		js_return_of_start_rp(katom);
3061 		mutex_unlock(&kctx->jctx.lock);
3062 	} else if (katom->event_code == BASE_JD_EVENT_END_RP_DONE) {
3063 		mutex_lock(&kctx->jctx.lock);
3064 		js_return_of_end_rp(katom);
3065 		mutex_unlock(&kctx->jctx.lock);
3066 	}
3067 
3068 	dev_dbg(kbdev->dev, "JS: retained state %s finished",
3069 		kbasep_js_has_atom_finished(&retained_state) ?
3070 		"has" : "hasn't");
3071 
3072 	WARN_ON(kbasep_js_has_atom_finished(&retained_state));
3073 
3074 	kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx,
3075 							&retained_state);
3076 
3077 	kbase_js_sched_all(kbdev);
3078 
3079 	kbase_backend_complete_wq_post_sched(kbdev, core_req);
3080 
3081 	KBASE_KTRACE_ADD_JM(kbdev, JS_RETURN_WORKER_END, kctx, NULL, cache_jc,
3082 			    0);
3083 
3084 	dev_dbg(kbdev->dev, "Leaving %s for atom %pK\n",
3085 		__func__, (void *)katom);
3086 }
3087 
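/* Return a pulled atom to its slot ringbuffer. The atom is blocked from
 * re-submission until js_return_worker() has run on the context's job_done
 * workqueue.
 */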
3088 void kbase_js_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom)
3089 {
3090 	dev_dbg(kctx->kbdev->dev, "Unpulling atom %pK in kctx %pK\n",
3091 		(void *)katom, (void *)kctx);
3092 
3093 	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
3094 
3095 	jsctx_rb_unpull(kctx, katom);
3096 
3097 	WARN_ON(work_pending(&katom->work));
3098 
3099 	/* Block re-submission until workqueue has run */
3100 	atomic_inc(&katom->blocked);
3101 
3102 	kbase_job_check_leave_disjoint(kctx->kbdev, katom);
3103 
3104 	INIT_WORK(&katom->work, js_return_worker);
3105 	queue_work(kctx->jctx.job_done_wq, &katom->work);
3106 }
3107 
3108 /**
3109  * js_complete_start_rp() - Handle completion of atom that starts a renderpass
3110  * @kctx:        Context pointer
3111  * @start_katom: Pointer to the atom that completed
3112  *
3113  * Put any references to virtual memory regions that might have been added by
3114  * kbase_job_slot_softstop_start_rp() because the tiler job chain completed
3115  * despite any pending soft-stop request.
3116  *
3117  * If the atom that just completed was soft-stopped during a previous attempt to
3118  * run it then there should be a blocked end-of-renderpass atom waiting for it,
3119  * which we must unblock to process the output of the tiler job chain.
3120  *
3121  * Return: true if caller should call kbase_backend_ctx_count_changed()
3122  */
3123 static bool js_complete_start_rp(struct kbase_context *kctx,
3124 	struct kbase_jd_atom *const start_katom)
3125 {
3126 	struct kbase_device *const kbdev = kctx->kbdev;
3127 	struct kbase_jd_renderpass *rp;
3128 	bool timer_sync = false;
3129 
3130 	lockdep_assert_held(&kctx->jctx.lock);
3131 
3132 	if (WARN_ON(!(start_katom->core_req & BASE_JD_REQ_START_RENDERPASS)))
3133 		return false;
3134 
3135 	compiletime_assert((1ull << (sizeof(start_katom->renderpass_id) * 8)) <=
3136 			ARRAY_SIZE(kctx->jctx.renderpasses),
3137 			"Should check invalid access to renderpasses");
3138 
3139 	rp = &kctx->jctx.renderpasses[start_katom->renderpass_id];
3140 
3141 	if (WARN_ON(rp->start_katom != start_katom))
3142 		return false;
3143 
3144 	dev_dbg(kctx->kbdev->dev,
3145 		"Start atom %pK is done in state %d of RP %d\n",
3146 		(void *)start_katom, (int)rp->state,
3147 		start_katom->renderpass_id);
3148 
3149 	if (WARN_ON(rp->state == KBASE_JD_RP_COMPLETE))
3150 		return false;
3151 
3152 	if (rp->state == KBASE_JD_RP_PEND_OOM ||
3153 		rp->state == KBASE_JD_RP_RETRY_PEND_OOM) {
3154 		unsigned long flags;
3155 
3156 		dev_dbg(kctx->kbdev->dev,
3157 			"Start atom %pK completed before soft-stop\n",
3158 			(void *)start_katom);
3159 
3160 		kbase_gpu_vm_lock(kctx);
3161 		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
3162 
3163 		while (!list_empty(&rp->oom_reg_list)) {
3164 			struct kbase_va_region *reg =
3165 				list_first_entry(&rp->oom_reg_list,
3166 						 struct kbase_va_region, link);
3167 
3168 			WARN_ON(reg->flags & KBASE_REG_VA_FREED);
3169 			dev_dbg(kctx->kbdev->dev, "Deleting region %pK from list\n",
3170 				(void *)reg);
3171 			list_del_init(&reg->link);
3172 			kbase_va_region_alloc_put(kctx, reg);
3173 		}
3174 
3175 		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
3176 		kbase_gpu_vm_unlock(kctx);
3177 	} else {
3178 		dev_dbg(kctx->kbdev->dev,
3179 			"Start atom %pK did not exceed memory threshold\n",
3180 			(void *)start_katom);
3181 
3182 		WARN_ON(rp->state != KBASE_JD_RP_START &&
3183 			rp->state != KBASE_JD_RP_RETRY);
3184 	}
3185 
3186 	if (rp->state == KBASE_JD_RP_RETRY ||
3187 		rp->state == KBASE_JD_RP_RETRY_PEND_OOM) {
3188 		struct kbase_jd_atom *const end_katom = rp->end_katom;
3189 
3190 		if (!WARN_ON(!end_katom)) {
3191 			unsigned long flags;
3192 
3193 			/* Allow the end of the renderpass to be pulled for
3194 			 * execution again to continue incremental rendering.
3195 			 */
3196 			dev_dbg(kbdev->dev, "Unblocking end atom %pK!\n",
3197 				(void *)end_katom);
3198 			atomic_dec(&end_katom->blocked);
3199 
3200 			spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
3201 			timer_sync = kbase_js_ctx_list_add_pullable_nolock(
3202 					kbdev, kctx, end_katom->slot_nr);
3203 			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
3204 		}
3205 	}
3206 
3207 	return timer_sync;
3208 }
3209 
3210 /**
3211  * js_complete_end_rp() - Handle final completion of atom that ends a renderpass
3212  * @kctx:      Context pointer
3213  * @end_katom: Pointer to the atom that completed for the last time
3214  *
3215  * This function must only be called if the renderpass actually completed
3216  * without the tiler job chain at the start using too much memory; otherwise
3217  * completion of the end-of-renderpass atom is handled similarly to a soft-stop.
3218  */
3219 static void js_complete_end_rp(struct kbase_context *kctx,
3220 	struct kbase_jd_atom *const end_katom)
3221 {
3222 	struct kbase_device *const kbdev = kctx->kbdev;
3223 	unsigned long flags;
3224 	struct kbase_jd_renderpass *rp;
3225 
3226 	lockdep_assert_held(&kctx->jctx.lock);
3227 
3228 	if (WARN_ON(!(end_katom->core_req & BASE_JD_REQ_END_RENDERPASS)))
3229 		return;
3230 
3231 	compiletime_assert((1ull << (sizeof(end_katom->renderpass_id) * 8)) <=
3232 			ARRAY_SIZE(kctx->jctx.renderpasses),
3233 			"Should check invalid access to renderpasses");
3234 
3235 	rp = &kctx->jctx.renderpasses[end_katom->renderpass_id];
3236 
3237 	if (WARN_ON(rp->end_katom != end_katom))
3238 		return;
3239 
3240 	dev_dbg(kbdev->dev, "End atom %pK is done in state %d of RP %d\n",
3241 		(void *)end_katom, (int)rp->state, end_katom->renderpass_id);
3242 
3243 	if (WARN_ON(rp->state == KBASE_JD_RP_COMPLETE) ||
3244 		WARN_ON(rp->state == KBASE_JD_RP_OOM) ||
3245 		WARN_ON(rp->state == KBASE_JD_RP_RETRY_OOM))
3246 		return;
3247 
3248 	/* Rendering completed without running out of memory.
3249 	 */
3250 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
3251 	WARN_ON(!list_empty(&rp->oom_reg_list));
3252 	rp->state = KBASE_JD_RP_COMPLETE;
3253 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
3254 
3255 	dev_dbg(kbdev->dev, "Renderpass %d is complete\n",
3256 		end_katom->renderpass_id);
3257 }
3258 
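/* Scheduler-side completion of an atom, called with the context's
 * jsctx_mutex held. Updates pulled-atom accounting and the per-slot
 * pullable lists, re-enables submission if it was disabled and no atoms
 * remain in flight, and returns true if the context has become idle (the
 * PM reference is then dropped later, in jd_done_worker()).
 */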
3259 bool kbase_js_complete_atom_wq(struct kbase_context *kctx,
3260 						struct kbase_jd_atom *katom)
3261 {
3262 	struct kbasep_js_kctx_info *js_kctx_info;
3263 	struct kbasep_js_device_data *js_devdata;
3264 	struct kbase_device *kbdev;
3265 	unsigned long flags;
3266 	bool timer_sync = false;
3267 	int atom_slot;
3268 	bool context_idle = false;
3269 	int prio = katom->sched_priority;
3270 
3271 	kbdev = kctx->kbdev;
3272 	atom_slot = katom->slot_nr;
3273 
3274 	dev_dbg(kbdev->dev, "%s for atom %pK (s:%d)\n",
3275 		__func__, (void *)katom, atom_slot);
3276 
3277 	/* Update the incremental rendering state machine.
3278 	 */
3279 	if (katom->core_req & BASE_JD_REQ_START_RENDERPASS)
3280 		timer_sync |= js_complete_start_rp(kctx, katom);
3281 	else if (katom->core_req & BASE_JD_REQ_END_RENDERPASS)
3282 		js_complete_end_rp(kctx, katom);
3283 
3284 	js_kctx_info = &kctx->jctx.sched_info;
3285 	js_devdata = &kbdev->js_data;
3286 
3287 	lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
3288 
3289 	mutex_lock(&js_devdata->runpool_mutex);
3290 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
3291 
3292 	if (katom->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_TREE) {
3293 		bool slot_became_unblocked;
3294 
3295 		dev_dbg(kbdev->dev, "Atom %pK is in runnable_tree\n",
3296 			(void *)katom);
3297 
3298 		slot_became_unblocked =
3299 			kbase_jsctx_slot_atom_pulled_dec(kctx, katom);
3300 		context_idle = !kbase_jsctx_atoms_pulled(kctx);
3301 
3302 		if (!kbase_jsctx_atoms_pulled(kctx) && !kctx->slots_pullable) {
3303 			WARN_ON(!kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
3304 			kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
3305 			atomic_dec(&kbdev->js_data.nr_contexts_runnable);
3306 			timer_sync = true;
3307 		}
3308 
3309 		/* If this slot has been blocked due to soft-stopped atoms, and
3310 		 * all atoms have now been processed at this priority level and
3311 		 * higher, then unblock the slot
3312 		 */
3313 		if (slot_became_unblocked) {
3314 			dev_dbg(kbdev->dev,
3315 				"kctx %pK is no longer blocked from submitting on slot %d at priority %d or higher\n",
3316 				(void *)kctx, atom_slot, prio);
3317 
3318 			if (kbase_js_ctx_pullable(kctx, atom_slot, true))
3319 				timer_sync |=
3320 					kbase_js_ctx_list_add_pullable_nolock(
3321 						kbdev, kctx, atom_slot);
3322 		}
3323 	}
3324 	WARN_ON(!(katom->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_TREE));
3325 
3326 	if (!kbase_jsctx_slot_atoms_pulled(kctx, atom_slot) &&
3327 	    jsctx_rb_none_to_pull(kctx, atom_slot)) {
3328 		if (!list_empty(
3329 			&kctx->jctx.sched_info.ctx.ctx_list_entry[atom_slot]))
3330 			timer_sync |= kbase_js_ctx_list_remove_nolock(
3331 					kctx->kbdev, kctx, atom_slot);
3332 	}
3333 
3334 	/*
3335 	 * If submission is disabled on this context (most likely due to an
3336 	 * atom failure) and there are now no atoms left in the system then
3337 	 * re-enable submission so that context can be scheduled again.
3338 	 */
3339 	if (!kbasep_js_is_submit_allowed(js_devdata, kctx) &&
3340 	    !kbase_jsctx_atoms_pulled(kctx) &&
3341 	    !kbase_ctx_flag(kctx, KCTX_DYING)) {
3342 		unsigned int js;
3343 
3344 		kbasep_js_set_submit_allowed(js_devdata, kctx);
3345 
3346 		for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
3347 			if (kbase_js_ctx_pullable(kctx, js, true))
3348 				timer_sync |=
3349 					kbase_js_ctx_list_add_pullable_nolock(
3350 							kbdev, kctx, js);
3351 		}
3352 	} else if (katom->x_post_dep &&
3353 			kbasep_js_is_submit_allowed(js_devdata, kctx)) {
3354 		unsigned int js;
3355 
3356 		for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
3357 			if (kbase_js_ctx_pullable(kctx, js, true))
3358 				timer_sync |=
3359 					kbase_js_ctx_list_add_pullable_nolock(
3360 							kbdev, kctx, js);
3361 		}
3362 	}
3363 
3364 	/* Mark context as inactive. The pm reference will be dropped later in
3365 	 * jd_done_worker().
3366 	 */
3367 	if (context_idle) {
3368 		dev_dbg(kbdev->dev, "kctx %pK is no longer active\n",
3369 			(void *)kctx);
3370 		kbase_ctx_flag_clear(kctx, KCTX_ACTIVE);
3371 	}
3372 
3373 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
3374 	if (timer_sync)
3375 		kbase_backend_ctx_count_changed(kbdev);
3376 	mutex_unlock(&js_devdata->runpool_mutex);
3377 
3378 	dev_dbg(kbdev->dev, "Leaving %s\n", __func__);
3379 	return context_idle;
3380 }
3381 
3382 /**
3383  * js_end_rp_is_complete() - Check whether an atom that ends a renderpass has
3384  *                           completed for the last time.
3385  *
3386  * @end_katom: Pointer to the atom that completed on the hardware.
3387  *
3388  * An atom that ends a renderpass may be run on the hardware several times
3389  * before notifying userspace or allowing dependent atoms to be executed.
3390  *
3391  * This function is used to decide whether or not to allow end-of-renderpass
3392  * atom completion. It only returns false if the atom at the start of the
3393  * renderpass was soft-stopped because it used too much memory during the most
3394  * recent attempt at tiling.
3395  *
3396  * Return: True if the atom completed for the last time.
3397  */
3398 static bool js_end_rp_is_complete(struct kbase_jd_atom *const end_katom)
3399 {
3400 	struct kbase_context *const kctx = end_katom->kctx;
3401 	struct kbase_device *const kbdev = kctx->kbdev;
3402 	struct kbase_jd_renderpass *rp;
3403 
3404 	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
3405 
3406 	if (WARN_ON(!(end_katom->core_req & BASE_JD_REQ_END_RENDERPASS)))
3407 		return true;
3408 
3409 	compiletime_assert((1ull << (sizeof(end_katom->renderpass_id) * 8)) <=
3410 			ARRAY_SIZE(kctx->jctx.renderpasses),
3411 			"Should check invalid access to renderpasses");
3412 
3413 	rp = &kctx->jctx.renderpasses[end_katom->renderpass_id];
3414 
3415 	if (WARN_ON(rp->end_katom != end_katom))
3416 		return true;
3417 
3418 	dev_dbg(kbdev->dev,
3419 		"JS complete end atom %pK in state %d of RP %d\n",
3420 		(void *)end_katom, (int)rp->state,
3421 		end_katom->renderpass_id);
3422 
3423 	if (WARN_ON(rp->state == KBASE_JD_RP_COMPLETE))
3424 		return true;
3425 
3426 	/* A failed end-of-renderpass atom must not cause a return to the
3427 	 * start of the renderpass.
3428 	 */
3429 	if (end_katom->event_code != BASE_JD_EVENT_DONE)
3430 		return true;
3431 
3432 	if (rp->state != KBASE_JD_RP_OOM &&
3433 		rp->state != KBASE_JD_RP_RETRY_OOM)
3434 		return true;
3435 
3436 	dev_dbg(kbdev->dev, "Suppressing end atom completion\n");
3437 	return false;
3438 }
3439 
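/* Handle completion of an atom on the hardware, with the hwaccess lock held.
 * An end-of-renderpass atom whose renderpass is still waiting on an OOM
 * condition is not completed but is instead returned to the scheduler via
 * kbase_js_unpull(). Otherwise the atom is marked HW-completed, dependents
 * are evicted if it failed, and kbase_jd_done() is called. Returns a
 * cross-slot dependent atom that has become runnable, or NULL.
 */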
3440 struct kbase_jd_atom *kbase_js_complete_atom(struct kbase_jd_atom *katom,
3441 		ktime_t *end_timestamp)
3442 {
3443 	struct kbase_device *kbdev;
3444 	struct kbase_context *kctx = katom->kctx;
3445 	struct kbase_jd_atom *x_dep = katom->x_post_dep;
3446 
3447 	kbdev = kctx->kbdev;
3448 	dev_dbg(kbdev->dev, "Atom %pK complete in kctx %pK (post-dep %pK)\n",
3449 		(void *)katom, (void *)kctx, (void *)x_dep);
3450 
3451 	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
3452 
3453 	if ((katom->core_req & BASE_JD_REQ_END_RENDERPASS) &&
3454 		!js_end_rp_is_complete(katom)) {
3455 		katom->event_code = BASE_JD_EVENT_END_RP_DONE;
3456 		kbase_js_unpull(kctx, katom);
3457 		return NULL;
3458 	}
3459 
3460 	if (katom->will_fail_event_code)
3461 		katom->event_code = katom->will_fail_event_code;
3462 
3463 	katom->status = KBASE_JD_ATOM_STATE_HW_COMPLETED;
3464 	dev_dbg(kbdev->dev, "Atom %pK status to HW completed\n", (void *)katom);
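	/* Track atoms completed on the hardware; quick reset is disabled once
	 * enough of them have completed.
	 */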
3465 	if (kbase_is_quick_reset_enabled(kbdev)) {
3466 		kbdev->num_of_atoms_hw_completed++;
3467 		if (kbdev->num_of_atoms_hw_completed >= 20)
3468 			kbase_disable_quick_reset(kbdev);
3469 	}
3470 
3471 	if (katom->event_code != BASE_JD_EVENT_DONE) {
3472 		kbase_js_evict_deps(kctx, katom, katom->slot_nr,
3473 				katom->sched_priority);
3474 	}
3475 
3476 	KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT(kbdev, NULL,
3477 		katom->slot_nr, 0, TL_JS_EVENT_STOP);
3478 
3479 	trace_sysgraph_gpu(SGR_COMPLETE, kctx->id,
3480 			kbase_jd_atom_id(katom->kctx, katom), katom->slot_nr);
3481 
3482 	KBASE_TLSTREAM_TL_JD_DONE_START(kbdev, katom);
3483 	kbase_jd_done(katom, katom->slot_nr, end_timestamp, 0);
3484 	KBASE_TLSTREAM_TL_JD_DONE_END(kbdev, katom);
3485 
3486 	/* Unblock cross dependency if present */
3487 	if (x_dep && (katom->event_code == BASE_JD_EVENT_DONE ||
3488 		!(x_dep->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER)) &&
3489 		(x_dep->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST)) {
3490 		bool was_pullable = kbase_js_ctx_pullable(kctx, x_dep->slot_nr,
3491 				false);
3492 		x_dep->atom_flags &= ~KBASE_KATOM_FLAG_X_DEP_BLOCKED;
3493 		dev_dbg(kbdev->dev, "Cleared X_DEP flag on atom %pK\n",
3494 			(void *)x_dep);
3495 
3496 		kbase_js_move_to_tree(x_dep);
3497 
3498 		if (!was_pullable && kbase_js_ctx_pullable(kctx, x_dep->slot_nr,
3499 				false))
3500 			kbase_js_ctx_list_add_pullable_nolock(kbdev, kctx,
3501 					x_dep->slot_nr);
3502 
3503 		if (x_dep->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_TREE) {
3504 			dev_dbg(kbdev->dev, "Atom %pK is in runnable tree\n",
3505 				(void *)x_dep);
3506 			return x_dep;
3507 		}
3508 	} else {
3509 		dev_dbg(kbdev->dev,
3510 			"No cross-slot dep to unblock for atom %pK\n",
3511 			(void *)katom);
3512 	}
3513 
3514 	return NULL;
3515 }
3516 
3517 /**
3518  * kbase_js_atom_blocked_on_x_dep - Decide whether to ignore a cross-slot
3519  *                                  dependency
3520  * @katom:	Pointer to an atom in the slot ringbuffer
3521  *
3522  * A cross-slot dependency is ignored if necessary to unblock incremental
3523  * rendering. If the atom at the start of a renderpass used too much memory
3524  * and was soft-stopped then the atom at the end of a renderpass is submitted
3525  * to hardware regardless of its dependency on the start-of-renderpass atom.
3526  * This can happen multiple times for the same pair of atoms.
3527  *
3528  * Return: true to block the atom or false to allow it to be submitted to
3529  *         hardware
3530  */
3531 bool kbase_js_atom_blocked_on_x_dep(struct kbase_jd_atom *const katom)
3532 {
3533 	struct kbase_context *const kctx = katom->kctx;
3534 	struct kbase_device *kbdev = kctx->kbdev;
3535 	struct kbase_jd_renderpass *rp;
3536 
3537 	lockdep_assert_held(&kbdev->hwaccess_lock);
3538 
3539 	if (!(katom->atom_flags &
3540 			KBASE_KATOM_FLAG_X_DEP_BLOCKED)) {
3541 		dev_dbg(kbdev->dev, "Atom %pK is not blocked on a cross-slot dependency",
3542 			(void *)katom);
3543 		return false;
3544 	}
3545 
3546 	if (!(katom->core_req & BASE_JD_REQ_END_RENDERPASS)) {
3547 		dev_dbg(kbdev->dev, "Atom %pK is blocked on a cross-slot dependency",
3548 			(void *)katom);
3549 		return true;
3550 	}
3551 
3552 	compiletime_assert((1ull << (sizeof(katom->renderpass_id) * 8)) <=
3553 			ARRAY_SIZE(kctx->jctx.renderpasses),
3554 			"Should check invalid access to renderpasses");
3555 
3556 	rp = &kctx->jctx.renderpasses[katom->renderpass_id];
3557 	/* We can read a subset of renderpass state without holding
3558 	 * higher-level locks (but not end_katom, for example).
3559 	 */
3560 
3561 	WARN_ON(rp->state == KBASE_JD_RP_COMPLETE);
3562 
3563 	dev_dbg(kbdev->dev, "End atom has cross-slot dep in state %d\n",
3564 		(int)rp->state);
3565 
3566 	if (rp->state != KBASE_JD_RP_OOM && rp->state != KBASE_JD_RP_RETRY_OOM)
3567 		return true;
3568 
3569 	/* Tiler ran out of memory so allow the fragment job chain to run
3570 	 * if it only depends on the tiler job chain.
3571 	 */
3572 	if (katom->x_pre_dep != rp->start_katom) {
3573 		dev_dbg(kbdev->dev, "Dependency is on %pK not start atom %pK\n",
3574 			(void *)katom->x_pre_dep, (void *)rp->start_katom);
3575 		return true;
3576 	}
3577 
3578 	dev_dbg(kbdev->dev, "Ignoring cross-slot dep on atom %pK\n",
3579 		(void *)katom->x_pre_dep);
3580 
3581 	return false;
3582 }
3583 
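/* Main scheduling pass over the slots in js_mask. Contexts are popped from
 * each slot's pullable queue, given a PM active reference and scheduled in
 * via kbase_js_use_ctx() if possible, and atoms are pulled and submitted
 * through kbase_jm_kick(). Each context is then re-queued as pullable or
 * unpullable, and a slot's active-context marker is cleared when a different
 * pullable context has been left waiting behind it.
 */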
3584 void kbase_js_sched(struct kbase_device *kbdev, unsigned int js_mask)
3585 {
3586 	struct kbasep_js_device_data *js_devdata;
3587 	struct kbase_context *last_active[BASE_JM_MAX_NR_SLOTS];
3588 	bool timer_sync = false;
3589 	bool ctx_waiting[BASE_JM_MAX_NR_SLOTS];
3590 	unsigned int js;
3591 
3592 	KBASE_TLSTREAM_TL_JS_SCHED_START(kbdev, 0);
3593 
3594 	dev_dbg(kbdev->dev, "%s kbdev %pK mask 0x%x\n",
3595 		__func__, (void *)kbdev, (unsigned int)js_mask);
3596 
3597 	js_devdata = &kbdev->js_data;
3598 
3599 	down(&js_devdata->schedule_sem);
3600 	mutex_lock(&js_devdata->queue_mutex);
3601 
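	/* Snapshot the context that is currently active on each slot so that,
	 * after scheduling, the active marker can be cleared for any slot
	 * where a different pullable context was left waiting.
	 */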
3602 	for (js = 0; js < BASE_JM_MAX_NR_SLOTS; js++) {
3603 		last_active[js] = kbdev->hwaccess.active_kctx[js];
3604 		ctx_waiting[js] = false;
3605 	}
3606 
3607 	while (js_mask) {
3608 		js = ffs(js_mask) - 1;
3609 
3610 		while (1) {
3611 			struct kbase_context *kctx;
3612 			unsigned long flags;
3613 			bool context_idle = false;
3614 
3615 			kctx = kbase_js_ctx_list_pop_head(kbdev, js);
3616 
3617 			if (!kctx) {
3618 				js_mask &= ~(1 << js);
3619 				dev_dbg(kbdev->dev, "No kctx on pullable list (s:%u)\n", js);
3620 				break;
3621 			}
3622 
3623 			if (!kbase_ctx_flag(kctx, KCTX_ACTIVE)) {
3624 				context_idle = true;
3625 
3626 				dev_dbg(kbdev->dev, "kctx %pK is not active (s:%u)\n", (void *)kctx,
3627 					js);
3628 
3629 				if (kbase_pm_context_active_handle_suspend(
3630 									kbdev,
3631 				      KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE)) {
3632 					dev_dbg(kbdev->dev, "Suspend pending (s:%u)\n", js);
3633 					/* Suspend pending - return context to
3634 					 * queue and stop scheduling
3635 					 */
3636 					mutex_lock(
3637 					&kctx->jctx.sched_info.ctx.jsctx_mutex);
3638 					if (kbase_js_ctx_list_add_pullable_head(
3639 						kctx->kbdev, kctx, js))
3640 						kbase_js_sync_timers(kbdev);
3641 					mutex_unlock(
3642 					&kctx->jctx.sched_info.ctx.jsctx_mutex);
3643 					mutex_unlock(&js_devdata->queue_mutex);
3644 					up(&js_devdata->schedule_sem);
3645 					KBASE_TLSTREAM_TL_JS_SCHED_END(kbdev,
3646 									  0);
3647 					return;
3648 				}
3649 				kbase_ctx_flag_set(kctx, KCTX_ACTIVE);
3650 			}
3651 
3652 			if (!kbase_js_use_ctx(kbdev, kctx, js)) {
3653 				mutex_lock(
3654 					&kctx->jctx.sched_info.ctx.jsctx_mutex);
3655 
3656 				dev_dbg(kbdev->dev,
3657 					"kctx %pK cannot be used at this time\n",
3658 					kctx);
3659 
3660 				spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
3661 				if (kbase_js_ctx_pullable(kctx, js, false)
3662 				    || kbase_ctx_flag(kctx, KCTX_PRIVILEGED))
3663 					timer_sync |=
3664 					kbase_js_ctx_list_add_pullable_head_nolock(
3665 							kctx->kbdev, kctx, js);
3666 				else
3667 					timer_sync |=
3668 					kbase_js_ctx_list_add_unpullable_nolock(
3669 							kctx->kbdev, kctx, js);
3670 				spin_unlock_irqrestore(&kbdev->hwaccess_lock,
3671 						flags);
3672 				mutex_unlock(
3673 					&kctx->jctx.sched_info.ctx.jsctx_mutex);
3674 				if (context_idle) {
3675 					WARN_ON(!kbase_ctx_flag(kctx, KCTX_ACTIVE));
3676 					kbase_ctx_flag_clear(kctx, KCTX_ACTIVE);
3677 					kbase_pm_context_idle(kbdev);
3678 				}
3679 
3680 				/* No more jobs can be submitted on this slot */
3681 				js_mask &= ~(1 << js);
3682 				break;
3683 			}
3684 			mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
3685 			spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
3686 
3687 			kbase_ctx_flag_clear(kctx, KCTX_PULLED);
3688 
3689 			if (!kbase_jm_kick(kbdev, 1 << js)) {
3690 				dev_dbg(kbdev->dev, "No more jobs can be submitted (s:%u)\n", js);
3691 				js_mask &= ~(1 << js);
3692 			}
3693 			if (!kbase_ctx_flag(kctx, KCTX_PULLED)) {
3694 				bool pullable;
3695 
3696 				dev_dbg(kbdev->dev, "No atoms pulled from kctx %pK (s:%u)\n",
3697 					(void *)kctx, js);
3698 
3699 				pullable = kbase_js_ctx_pullable(kctx, js,
3700 						true);
3701 
3702 				/* Failed to pull jobs - push to head of list.
3703 				 * Unless this context is already 'active', in
3704 				 * which case it's effectively already scheduled
3705 				 * so push it to the back of the list.
3706 				 */
3707 				if (pullable && kctx == last_active[js] &&
3708 						kbase_ctx_flag(kctx,
3709 						(KCTX_PULLED_SINCE_ACTIVE_JS0 <<
3710 						js)))
3711 					timer_sync |=
3712 					kbase_js_ctx_list_add_pullable_nolock(
3713 							kctx->kbdev,
3714 							kctx, js);
3715 				else if (pullable)
3716 					timer_sync |=
3717 					kbase_js_ctx_list_add_pullable_head_nolock(
3718 							kctx->kbdev,
3719 							kctx, js);
3720 				else
3721 					timer_sync |=
3722 					kbase_js_ctx_list_add_unpullable_nolock(
3723 								kctx->kbdev,
3724 								kctx, js);
3725 
3726 				/* If this context is not the active context,
3727 				 * but the active context is pullable on this
3728 				 * slot, then we need to remove the active
3729 				 * marker to prevent it from submitting atoms in
3730 				 * the IRQ handler, which would prevent this
3731 				 * context from making progress.
3732 				 */
3733 				if (last_active[js] && kctx != last_active[js]
3734 						&& kbase_js_ctx_pullable(
3735 						last_active[js], js, true))
3736 					ctx_waiting[js] = true;
3737 
3738 				if (context_idle) {
3739 					kbase_jm_idle_ctx(kbdev, kctx);
3740 					spin_unlock_irqrestore(
3741 							&kbdev->hwaccess_lock,
3742 							flags);
3743 					WARN_ON(!kbase_ctx_flag(kctx, KCTX_ACTIVE));
3744 					kbase_ctx_flag_clear(kctx, KCTX_ACTIVE);
3745 					kbase_pm_context_idle(kbdev);
3746 				} else {
3747 					spin_unlock_irqrestore(
3748 							&kbdev->hwaccess_lock,
3749 							flags);
3750 				}
3751 				mutex_unlock(
3752 					&kctx->jctx.sched_info.ctx.jsctx_mutex);
3753 
3754 				js_mask &= ~(1 << js);
3755 				break; /* Could not run atoms on this slot */
3756 			}
3757 
3758 			dev_dbg(kbdev->dev, "Push kctx %pK to back of list\n",
3759 				(void *)kctx);
3760 			if (kbase_js_ctx_pullable(kctx, js, true))
3761 				timer_sync |=
3762 					kbase_js_ctx_list_add_pullable_nolock(
3763 							kctx->kbdev, kctx, js);
3764 			else
3765 				timer_sync |=
3766 					kbase_js_ctx_list_add_unpullable_nolock(
3767 							kctx->kbdev, kctx, js);
3768 
3769 			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
3770 			mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
3771 		}
3772 	}
3773 
3774 	if (timer_sync)
3775 		kbase_js_sync_timers(kbdev);
3776 
3777 	for (js = 0; js < BASE_JM_MAX_NR_SLOTS; js++) {
3778 		if (kbdev->hwaccess.active_kctx[js] == last_active[js] &&
3779 				ctx_waiting[js]) {
3780 			dev_dbg(kbdev->dev, "Marking kctx %pK as inactive (s:%u)\n",
3781 				(void *)last_active[js], js);
3782 			kbdev->hwaccess.active_kctx[js] = NULL;
3783 		}
3784 	}
3785 
3786 	mutex_unlock(&js_devdata->queue_mutex);
3787 	up(&js_devdata->schedule_sem);
3788 	KBASE_TLSTREAM_TL_JS_SCHED_END(kbdev, 0);
3789 }
3790 
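/* Begin killing off all jobs for a context. The context is marked as dying
 * and is either removed from the per-slot queues (if it is not scheduled) or
 * blocked from submitting with its running jobs cancelled, so that it leaves
 * the runpool. kbase_jd_zap_context() then waits for the remaining jobs to
 * be destroyed and for the context to be descheduled.
 */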
3791 void kbase_js_zap_context(struct kbase_context *kctx)
3792 {
3793 	struct kbase_device *kbdev = kctx->kbdev;
3794 	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
3795 	struct kbasep_js_kctx_info *js_kctx_info = &kctx->jctx.sched_info;
3796 
3797 	/*
3798 	 * Critical assumption: No more submission is possible outside of the
3799 	 * workqueue. This is because the OS *must* prevent U/K calls (IOCTLs)
3800 	 * whilst the struct kbase_context is terminating.
3801 	 */
3802 
3803 	/* First, atomically do the following:
3804 	 * - mark the context as dying
3805 	 * - try to evict it from the queue
3806 	 */
3807 	mutex_lock(&kctx->jctx.lock);
3808 	mutex_lock(&js_devdata->queue_mutex);
3809 	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
3810 	kbase_ctx_flag_set(kctx, KCTX_DYING);
3811 
3812 	dev_dbg(kbdev->dev, "Zap: Try Evict Ctx %pK", kctx);
3813 
3814 	/*
3815 	 * At this point we know:
3816 	 * - If eviction succeeded, it was in the queue, but now no
3817 	 *   longer is
3818 	 *  - We must cancel the jobs here. No Power Manager active reference to
3819 	 *    release.
3820 	 *  - This happens asynchronously - kbase_jd_zap_context() will wait for
3821 	 *    those jobs to be killed.
3822 	 * - If eviction failed, then it wasn't in the queue. It is one
3823 	 *   of the following:
3824 	 *  - a. it didn't have any jobs, and so is not in the Queue or
3825 	 *       the Run Pool (not scheduled)
3826 	 *   - Hence, no more work required to cancel jobs. No Power Manager
3827 	 *     active reference to release.
3828 	 *  - b. it was in the middle of a scheduling transaction (and thus must
3829 	 *       have at least 1 job). This can happen from a syscall or a
3830 	 *       kernel thread. We still hold the jsctx_mutex, and so the thread
3831 	 *       must be waiting inside kbasep_js_try_schedule_head_ctx(),
3832 	 *       before checking whether the runpool is full. That thread will
3833 	 *       continue after we drop the mutex, and will notice the context
3834 	 *       is dying. It will rollback the transaction, killing all jobs at
3835 	 *       the same time. kbase_jd_zap_context() will wait for those jobs
3836 	 *       to be killed.
3837 	 *   - Hence, no more work required to cancel jobs, or to release the
3838 	 *     Power Manager active reference.
3839 	 *  - c. it is scheduled, and may or may not be running jobs
3840 	 *   - We must cause it to leave the runpool by stopping it from
3841 	 *     submitting any more jobs. When it finally does leave,
3842 	 *     kbasep_js_runpool_requeue_or_kill_ctx() will kill all remaining
3843 	 *     jobs (because it is dying), release the Power Manager active
3844 	 *     reference, and will not requeue the context in the queue.
3845 	 *     kbase_jd_zap_context() will wait for those jobs to be killed.
3846 	 *   - Hence, work required just to make it leave the runpool. Cancelling
3847 	 *     jobs and releasing the Power Manager active reference will be
3848 	 *     handled when it leaves the runpool.
3849 	 */
3850 	if (!kbase_ctx_flag(kctx, KCTX_SCHEDULED)) {
3851 		unsigned long flags;
3852 		unsigned int js;
3853 
3854 		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
3855 		for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
3856 			if (!list_empty(
3857 				&kctx->jctx.sched_info.ctx.ctx_list_entry[js]))
3858 				list_del_init(
3859 				&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
3860 		}
3861 		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
3862 
3863 		/* The following events require us to kill off remaining jobs
3864 		 * and update PM book-keeping:
3865 		 * - we evicted it correctly (it must have jobs to be in the
3866 		 *   Queue)
3867 		 *
3868 		 * These events need no action, but take this path anyway:
3869 		 * - Case a: it didn't have any jobs, and was never in the Queue
3870 		 * - Case b: scheduling transaction will be partially rolled-
3871 		 *           back (this already cancels the jobs)
3872 		 */
3873 
3874 		KBASE_KTRACE_ADD_JM(kbdev, JM_ZAP_NON_SCHEDULED, kctx, NULL, 0u, kbase_ctx_flag(kctx, KCTX_SCHEDULED));
3875 
3876 		dev_dbg(kbdev->dev, "Zap: Ctx %pK scheduled=0", kctx);
3877 
3878 		/* Only cancel jobs when we evicted the context from the
3879 		 * queue. No Power Manager active reference was held.
3880 		 *
3881 		 * Having is_dying set ensures that this kills the jobs and
3882 		 * doesn't requeue the context.
3883 		 */
3884 		kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, false);
3885 
3886 		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
3887 		mutex_unlock(&js_devdata->queue_mutex);
3888 		mutex_unlock(&kctx->jctx.lock);
3889 	} else {
3890 		unsigned long flags;
3891 		bool was_retained;
3892 		CSTD_UNUSED(was_retained);
3893 
3894 		/* Case c: didn't evict, but it is scheduled - it's in the Run
3895 		 * Pool
3896 		 */
3897 		KBASE_KTRACE_ADD_JM(kbdev, JM_ZAP_SCHEDULED, kctx, NULL, 0u, kbase_ctx_flag(kctx, KCTX_SCHEDULED));
3898 		dev_dbg(kbdev->dev, "Zap: Ctx %pK is in RunPool", kctx);
3899 
3900 		/* Disable the ctx from submitting any more jobs */
3901 		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
3902 
3903 		kbasep_js_clear_submit_allowed(js_devdata, kctx);
3904 
3905 		/* Retain and (later) release the context whilst it is now
3906 		 * disallowed from submitting jobs - ensures that someone
3907 		 * somewhere will be removing the context later on
3908 		 */
3909 		was_retained = kbase_ctx_sched_inc_refcount_nolock(kctx);
3910 
3911 		/* Since it's scheduled and we have the jsctx_mutex, it must be
3912 		 * retained successfully
3913 		 */
3914 		KBASE_DEBUG_ASSERT(was_retained);
3915 
3916 		dev_dbg(kbdev->dev, "Zap: Ctx %pK Kill Any Running jobs", kctx);
3917 
3918 		/* Cancel any remaining running jobs for this kctx - if any.
3919 		 * Submit is disallowed which takes effect immediately, so no
3920 		 * more new jobs will appear after we do this.
3921 		 */
3922 		kbase_backend_jm_kill_running_jobs_from_kctx(kctx);
3923 
3924 		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
3925 		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
3926 		mutex_unlock(&js_devdata->queue_mutex);
3927 		mutex_unlock(&kctx->jctx.lock);
3928 
3929 		dev_dbg(kbdev->dev, "Zap: Ctx %pK Release (may or may not schedule out immediately)",
3930 									kctx);
3931 
3932 		kbasep_js_runpool_release_ctx(kbdev, kctx);
3933 	}
3934 
3935 	KBASE_KTRACE_ADD_JM(kbdev, JM_ZAP_DONE, kctx, NULL, 0u, 0u);
3936 
3937 	/* After this, you must wait on both the
3938 	 * kbase_jd_context::zero_jobs_wait and the
3939 	 * kbasep_js_kctx_info::ctx::is_scheduled_waitq - to wait for the jobs
3940 	 * to be destroyed, and the context to be de-scheduled (if it was on the
3941 	 * runpool).
3942 	 *
3943 	 * kbase_jd_zap_context() will do this.
3944 	 */
3945 }
3946 
3947 static inline int trace_get_refcnt(struct kbase_device *kbdev,
3948 					struct kbase_context *kctx)
3949 {
3950 	return atomic_read(&kctx->refcount);
3951 }
3952 
3953 /**
3954  * kbase_js_foreach_ctx_job() - Call a function on all jobs in context
3955  * @kctx:     Pointer to context.
3956  * @callback: Pointer to function to call for each job.
3957  *
3958  * Call a function on all jobs belonging to a non-queued, non-running
3959  * context, and detach the jobs from the context as it goes.
3960  *
3961  * Due to the locks that might be held at the time of the call, the callback
3962  * may need to defer work on a workqueue to complete its actions (e.g. when
3963  * cancelling jobs)
3964  *
3965  * Atoms will be removed from the queue, so this must only be called when
3966  * cancelling jobs (which occurs as part of context destruction).
3967  *
3968  * The locking conditions on the caller are as follows:
3969  * - it will be holding kbasep_js_kctx_info::ctx::jsctx_mutex.
3970  */
3971 static void kbase_js_foreach_ctx_job(struct kbase_context *kctx,
3972 				     kbasep_js_ctx_job_cb *callback)
3973 {
3974 	struct kbase_device *kbdev;
3975 	unsigned long flags;
3976 	unsigned int js;
3977 
3978 	kbdev = kctx->kbdev;
3979 
3980 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
3981 
3982 	KBASE_KTRACE_ADD_JM_REFCOUNT(kbdev, JS_POLICY_FOREACH_CTX_JOBS, kctx, NULL,
3983 					0u, trace_get_refcnt(kbdev, kctx));
3984 
3985 	/* Invoke callback on jobs on each slot in turn */
3986 	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++)
3987 		jsctx_queue_foreach(kctx, js, callback);
3988 
3989 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
3990 }
3991 
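/* Map a requested base_jd_prio onto the effective priority for the current
 * process. If a priority control manager is registered, it may adjust the
 * requested scheduling priority before it is converted back to a
 * base_jd_prio.
 */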
3992 base_jd_prio kbase_js_priority_check(struct kbase_device *kbdev, base_jd_prio priority)
3993 {
3994 	struct priority_control_manager_device *pcm_device = kbdev->pcm_dev;
3995 	int req_priority, out_priority;
3996 
3997 	req_priority = kbasep_js_atom_prio_to_sched_prio(priority);
3998 	out_priority = req_priority;
3999 	/* Do not use the PCM-defined priority check if no PCM device is
4000 	 * registered, or if kbasep_js_atom_prio_to_sched_prio() returned an
4001 	 * error (KBASE_JS_ATOM_SCHED_PRIO_INVALID).
4002 	 */
4003 	if (pcm_device && (req_priority != KBASE_JS_ATOM_SCHED_PRIO_INVALID))
4004 		out_priority = pcm_device->ops.pcm_scheduler_priority_check(pcm_device, current,
4005 									    req_priority);
4006 	return kbasep_js_sched_prio_to_atom_prio(kbdev, out_priority);
4007 }
4008