/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#ifndef _I915_SCHEDULER_H_
#define _I915_SCHEDULER_H_

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/kernel.h>

#include "i915_scheduler_types.h"

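/*
 * priolist_for_each_request - iterate over every request queued on @plist,
 * walking each priority sub-list in @plist->requests[] in index order.
 * @it is the request cursor, @idx the sub-list index.
 */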
#define priolist_for_each_request(it, plist, idx) \
	for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
		list_for_each_entry(it, &(plist)->requests[idx], sched.link)

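/*
 * priolist_for_each_request_consume - iterate over every request on @plist,
 * draining it as we go: each sub-list is selected via the lowest set bit of
 * @plist->used (__ffs), and that bit is cleared once the sub-list has been
 * walked. The _safe list iterator is used, so @it may be removed from the
 * sub-list (e.g. moved to a submission queue) inside the loop body; @n is
 * the lookahead cursor.
 */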
#define priolist_for_each_request_consume(it, n, plist, idx) \
	for (; \
	     (plist)->used ? (idx = __ffs((plist)->used)), 1 : 0; \
	     (plist)->used &= ~BIT(idx)) \
		list_for_each_entry_safe(it, n, \
					 &(plist)->requests[idx], \
					 sched.link)

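/*
 * Illustrative sketch of draining a priolist with the consuming iterator
 * (the priolist @pl, its locking and the submit_one() helper are
 * assumptions, not part of this header):
 *
 *	struct i915_request *rq, *rn;
 *	int idx;
 *
 *	priolist_for_each_request_consume(rq, rn, pl, idx) {
 *		list_del_init(&rq->sched.link);
 *		submit_one(rq);
 *	}
 *
 *	i915_priolist_free(pl);
 */
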
void i915_sched_node_init(struct i915_sched_node *node);
void i915_sched_node_reinit(struct i915_sched_node *node);

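/*
 * Record that @node must wait upon @signal before it can be executed.
 * __i915_sched_node_add_dependency() uses the caller-provided @dep, while
 * i915_sched_node_add_dependency() allocates the dependency itself.
 */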
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
				      struct i915_sched_node *signal,
				      struct i915_dependency *dep,
				      unsigned long flags);

int i915_sched_node_add_dependency(struct i915_sched_node *node,
				   struct i915_sched_node *signal,
				   unsigned long flags);

void i915_sched_node_fini(struct i915_sched_node *node);

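/*
 * Apply the scheduling attributes in @attr (principally the priority) to
 * @request; a priority raise is propagated along the request's dependency
 * chain so that its signalers are not left running at a lower priority
 * (priority inheritance).
 */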
void i915_schedule(struct i915_request *request,
		   const struct i915_sched_attr *attr);

void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump);

struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);

void __i915_priolist_free(struct i915_priolist *p);
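/*
 * Release @p once its requests have been consumed. The I915_PRIORITY_NORMAL
 * list is skipped: only the non-default priority lists are dynamically
 * allocated (the normal-priority list is presumably embedded in the engine),
 * so only those are handed to __i915_priolist_free().
 */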
static inline void i915_priolist_free(struct i915_priolist *p)
{
	if (p->priority != I915_PRIORITY_NORMAL)
		__i915_priolist_free(p);
}

#endif /* _I915_SCHEDULER_H_ */