/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/debugobjects.h>

#include "gt/intel_context.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"

/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */
static struct i915_global_active {
	struct i915_global base;
	struct kmem_cache *slab_cache;
} global;

struct active_node {
	struct rb_node node;
	struct i915_active_fence base;
	struct i915_active *ref;
	u64 timeline;
};

#define fetch_node(x) rb_entry(READ_ONCE(x), typeof(struct active_node), node)

static inline struct active_node *
node_from_active(struct i915_active_fence *active)
{
	return container_of(active, struct active_node, base);
}

#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)

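/*
 * Idle barriers are "proto-fences": active_node slots reserved on each
 * engine's kernel context before any request exists to back them. While
 * pending, the fence pointer holds ERR_PTR(-EAGAIN) (see is_barrier()),
 * the embedded dma_fence_cb's list_head is reused as an llist_node on
 * engine->barrier_tasks, and its ->prev pointer smuggles the owning
 * engine (see __barrier_to_engine()).
 */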
static inline bool is_barrier(const struct i915_active_fence *active)
{
	return IS_ERR(rcu_access_pointer(active->fence));
}

static inline struct llist_node *barrier_to_ll(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return (struct llist_node *)&node->base.cb.node;
}

static inline struct intel_engine_cs *
__barrier_to_engine(struct active_node *node)
{
	return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
}

static inline struct intel_engine_cs *
barrier_to_engine(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return __barrier_to_engine(node);
}

static inline struct active_node *barrier_from_ll(struct llist_node *x)
{
	return container_of((struct list_head *)x,
			    struct active_node, base.cb.node);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)

static void *active_debug_hint(void *addr)
{
	struct i915_active *ref = addr;

	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
}

static const struct debug_obj_descr active_debug_desc = {
	.name = "i915_active",
	.debug_hint = active_debug_hint,
};

static void debug_active_init(struct i915_active *ref)
{
	debug_object_init(ref, &active_debug_desc);
}

static void debug_active_activate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* before the first inc */
		debug_object_activate(ref, &active_debug_desc);
}

static void debug_active_deactivate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* after the last dec */
		debug_object_deactivate(ref, &active_debug_desc);
}

static void debug_active_fini(struct i915_active *ref)
{
	debug_object_free(ref, &active_debug_desc);
}

static void debug_active_assert(struct i915_active *ref)
{
	debug_object_assert_init(ref, &active_debug_desc);
}

#else

static inline void debug_active_init(struct i915_active *ref) { }
static inline void debug_active_activate(struct i915_active *ref) { }
static inline void debug_active_deactivate(struct i915_active *ref) { }
static inline void debug_active_fini(struct i915_active *ref) { }
static inline void debug_active_assert(struct i915_active *ref) { }

#endif

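/*
 * Called when the last reference is dropped: detach the tree under
 * ref->tree_lock (keeping one MRU node cached for reuse), invoke the
 * optional ->retire() callback, wake any waiters and only then return
 * the discarded nodes to the slab cache.
 */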
static void
__active_retire(struct i915_active *ref)
{
	struct rb_root root = RB_ROOT;
	struct active_node *it, *n;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* return the unused nodes to our slabcache -- flushing the allocator */
	if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
		return;

	GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
	debug_active_deactivate(ref);

	/* Even if we have not used the cache, we may still have a barrier */
	if (!ref->cache)
		ref->cache = fetch_node(ref->tree.rb_node);

	/* Keep the MRU cached node for reuse */
	if (ref->cache) {
		/* Discard all other nodes in the tree */
		rb_erase(&ref->cache->node, &ref->tree);
		root = ref->tree;

		/* Rebuild the tree with only the cached node */
		rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node);
		rb_insert_color(&ref->cache->node, &ref->tree);
		GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);

		/* Make the cached node available for reuse with any timeline */
		if (IS_ENABLED(CONFIG_64BIT))
			ref->cache->timeline = 0; /* needs cmpxchg(u64) */
	}

	spin_unlock_irqrestore(&ref->tree_lock, flags);

	/* After the final retire, the entire struct may be freed */
	if (ref->retire)
		ref->retire(ref);

	/* ... except if you wait on it, you must manage your own references! */
	wake_up_var(ref);

	/* Finally free the discarded timeline tree */
	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
		GEM_BUG_ON(i915_active_fence_isset(&it->base));
		kmem_cache_free(global.slab_cache, it);
	}
}

static void
active_work(struct work_struct *wrk)
{
	struct i915_active *ref = container_of(wrk, typeof(*ref), work);

	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	__active_retire(ref);
}

static void
active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
		queue_work(system_unbound_wq, &ref->work);
		return;
	}

	__active_retire(ref);
}

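/*
 * active->fence is an RCU-protected pointer; strip the __rcu annotation
 * (hence the __force cast) so that the slot can be updated with plain
 * xchg()/cmpxchg() rather than the rcu_assign_pointer() helpers.
 */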
static inline struct dma_fence **
__active_fence_slot(struct i915_active_fence *active)
{
	return (struct dma_fence ** __force)&active->fence;
}

static inline bool
active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_active_fence *active =
		container_of(cb, typeof(*active), cb);

	return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
}

static void
node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct active_node, base.cb)->ref);
}

static void
excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct i915_active, excl.cb));
}

static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
{
	struct active_node *it;

	GEM_BUG_ON(idx == 0); /* 0 is the unordered timeline, rsvd for cache */

	/*
	 * We track the most recently used timeline to skip an rbtree search
	 * in the common case; under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is
	 * after the previous activity has been retired, or if it matches the
	 * current timeline.
	 */
	it = READ_ONCE(ref->cache);
	if (it) {
		u64 cached = READ_ONCE(it->timeline);

		/* Once claimed, this slot will only belong to this idx */
		if (cached == idx)
			return it;

#ifdef CONFIG_64BIT /* for cmpxchg(u64) */
		/*
		 * An unclaimed cache [.timeline=0] can only be claimed once.
		 *
		 * If the value is already non-zero, some other thread has
		 * claimed the cache and we know that it does not match our
		 * idx. If, and only if, the timeline is currently zero is it
		 * worth competing to claim it atomically for ourselves (for
		 * only the winner of that race will cmpxchg return the old
		 * value of 0).
		 */
		if (!cached && !cmpxchg(&it->timeline, 0, idx))
			return it;
#endif
	}

	BUILD_BUG_ON(offsetof(typeof(*it), node));

	/* While active, the tree can only be built; not destroyed */
	GEM_BUG_ON(i915_active_is_idle(ref));

	it = fetch_node(ref->tree.rb_node);
	while (it) {
		if (it->timeline < idx) {
			it = fetch_node(it->node.rb_right);
		} else if (it->timeline > idx) {
			it = fetch_node(it->node.rb_left);
		} else {
			WRITE_ONCE(ref->cache, it);
			break;
		}
	}

	/* NB: If the tree rotated beneath us, we may miss our target. */
	return it;
}

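/*
 * Find or create the node for @idx. The replacement node is allocated
 * speculatively outside ref->tree_lock (we may sleep in the slab
 * allocator) and freed again if another thread inserted the same
 * timeline first.
 */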
static struct i915_active_fence *
active_instance(struct i915_active *ref, u64 idx)
{
	struct active_node *node, *prealloc;
	struct rb_node **p, *parent;

	node = __active_lookup(ref, idx);
	if (likely(node))
		return &node->base;

	/* Preallocate a replacement, just in case */
	prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
	if (!prealloc)
		return NULL;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx) {
			kmem_cache_free(global.slab_cache, prealloc);
			goto out;
		}

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	node = prealloc;
	__i915_active_fence_init(&node->base, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

out:
	WRITE_ONCE(ref->cache, node);
	spin_unlock_irq(&ref->tree_lock);

	return &node->base;
}

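/*
 * Initialise the tracker. @active is called on the first acquire (the
 * idle-to-busy transition) and @retire on the last release. @retire may
 * carry I915_ACTIVE_MAY_SLEEP in its low pointer bits, in which case
 * retirement is deferred to a workqueue.
 */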
void __i915_active_init(struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			struct lock_class_key *mkey,
			struct lock_class_key *wkey)
{
	unsigned long bits;

	debug_active_init(ref);

	ref->flags = 0;
	ref->active = active;
	ref->retire = ptr_unpack_bits(retire, &bits, 2);
	if (bits & I915_ACTIVE_MAY_SLEEP)
		ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;

	spin_lock_init(&ref->tree_lock);
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	init_llist_head(&ref->preallocated_barriers);
	atomic_set(&ref->count, 0);
	__mutex_init(&ref->mutex, "i915_active", mkey);
	__i915_active_fence_init(&ref->excl, NULL, excl_retire);
	INIT_WORK(&ref->work, active_work);
#if IS_ENABLED(CONFIG_LOCKDEP)
	lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
#endif
}

static bool ____active_del_barrier(struct i915_active *ref,
				   struct active_node *node,
				   struct intel_engine_cs *engine)
{
	struct llist_node *head = NULL, *tail = NULL;
	struct llist_node *pos, *next;

	GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);

	/*
	 * Rebuild the llist excluding our node. We may perform this
	 * outside of the kernel_context timeline mutex and so someone
	 * else may be manipulating the engine->barrier_tasks, in
	 * which case either we or they will be upset :)
	 *
	 * A second __active_del_barrier() will report failure to claim
	 * the active_node and the caller will just shrug and know not to
	 * claim ownership of its node.
	 *
	 * A concurrent i915_request_add_active_barriers() will miss adding
	 * any of the tasks, but we will try again on the next -- and since
	 * we are actively using the barrier, we know that there will be
	 * at least another opportunity when we idle.
	 */
	llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
		if (node == barrier_from_ll(pos)) {
			node = NULL;
			continue;
		}

		pos->next = head;
		head = pos;
		if (!tail)
			tail = pos;
	}
	if (head)
		llist_add_batch(head, tail, &engine->barrier_tasks);

	return !node;
}

static bool
__active_del_barrier(struct i915_active *ref, struct active_node *node)
{
	return ____active_del_barrier(ref, node, barrier_to_engine(node));
}

static bool
replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
{
	if (!is_barrier(active)) /* proto-node used by our idle barrier? */
		return false;

	/*
	 * This request is on the kernel_context timeline, and so
	 * we can use it to substitute for the pending idle-barrier
	 * request that we want to emit on the kernel_context.
	 */
	__active_del_barrier(ref, node_from_active(active));
	return true;
}

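/*
 * Track @fence as the most recent activity on timeline @idx. If the slot
 * was holding a pending idle barrier for this timeline, the barrier is
 * cancelled and @fence takes its place.
 */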
int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
{
	struct i915_active_fence *active;
	int err;

	/* Prevent reaping in case we malloc/wait while building the tree */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, idx);
	if (!active) {
		err = -ENOMEM;
		goto out;
	}

	if (replace_barrier(ref, active)) {
		RCU_INIT_POINTER(active->fence, NULL);
		atomic_dec(&ref->count);
	}
	if (!__i915_active_fence_set(active, fence))
		__i915_active_acquire(ref);

out:
	i915_active_release(ref);
	return err;
}

static struct dma_fence *
__i915_active_set_fence(struct i915_active *ref,
			struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;

	if (replace_barrier(ref, active)) {
		RCU_INIT_POINTER(active->fence, fence);
		return NULL;
	}

	rcu_read_lock();
	prev = __i915_active_fence_set(active, fence);
	if (prev)
		prev = dma_fence_get_rcu(prev);
	else
		__i915_active_acquire(ref);
	rcu_read_unlock();

	return prev;
}

static struct i915_active_fence *
__active_fence(struct i915_active *ref, u64 idx)
{
	struct active_node *it;

	it = __active_lookup(ref, idx);
	if (unlikely(!it)) { /* Contention with parallel tree builders! */
		spin_lock_irq(&ref->tree_lock);
		it = __active_lookup(ref, idx);
		spin_unlock_irq(&ref->tree_lock);
	}
	GEM_BUG_ON(!it); /* slot must be preallocated */

	return &it->base;
}

struct dma_fence *
__i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
{
	/* Only valid while active, see i915_active_acquire_for_context() */
	return __i915_active_set_fence(ref, __active_fence(ref, idx), fence);
}

struct dma_fence *
i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{
	/* We expect the caller to manage the exclusive timeline ordering */
	return __i915_active_set_fence(ref, &ref->excl, f);
}

bool i915_active_acquire_if_busy(struct i915_active *ref)
{
	debug_active_assert(ref);
	return atomic_add_unless(&ref->count, 1, 0);
}

static void __i915_active_activate(struct i915_active *ref)
{
	spin_lock_irq(&ref->tree_lock); /* __active_retire() */
	if (!atomic_fetch_inc(&ref->count))
		debug_active_activate(ref);
	spin_unlock_irq(&ref->tree_lock);
}

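/*
 * Take a reference, transitioning from idle to busy if this is the first
 * one. The idle-to-busy edge is serialised on ref->mutex so that the
 * optional ->active() callback runs exactly once per busy cycle.
 */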
int i915_active_acquire(struct i915_active *ref)
{
	int err;

	if (i915_active_acquire_if_busy(ref))
		return 0;

	if (!ref->active) {
		__i915_active_activate(ref);
		return 0;
	}

	err = mutex_lock_interruptible(&ref->mutex);
	if (err)
		return err;

	if (likely(!i915_active_acquire_if_busy(ref))) {
		err = ref->active(ref);
		if (!err)
			__i915_active_activate(ref);
	}

	mutex_unlock(&ref->mutex);

	return err;
}

int i915_active_acquire_for_context(struct i915_active *ref, u64 idx)
{
	struct i915_active_fence *active;
	int err;

	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, idx);
	if (!active) {
		i915_active_release(ref);
		return -ENOMEM;
	}

	return 0; /* return with active ref */
}

void i915_active_release(struct i915_active *ref)
{
	debug_active_assert(ref);
	active_retire(ref);
}

static void enable_signaling(struct i915_active_fence *active)
{
	struct dma_fence *fence;

	if (unlikely(is_barrier(active)))
		return;

	fence = i915_active_fence_get(active);
	if (!fence)
		return;

	dma_fence_enable_sw_signaling(fence);
	dma_fence_put(fence);
}

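/*
 * A barrier has no fence to signal; kick the engine so that its pending
 * barrier tasks are consumed. Re-check after the smp_rmb() as the barrier
 * may have been claimed by add_active_barriers() in the meantime.
 */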
static int flush_barrier(struct active_node *it)
{
	struct intel_engine_cs *engine;

	if (likely(!is_barrier(&it->base)))
		return 0;

	engine = __barrier_to_engine(it);
	smp_rmb(); /* serialise with add_active_barriers */
	if (!is_barrier(&it->base))
		return 0;

	return intel_engine_flush_barriers(engine);
}

static int flush_lazy_signals(struct i915_active *ref)
{
	struct active_node *it, *n;
	int err = 0;

	enable_signaling(&ref->excl);
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		err = flush_barrier(it); /* unconnected idle barrier? */
		if (err)
			break;

		enable_signaling(&it->base);
	}

	return err;
}

int __i915_active_wait(struct i915_active *ref, int state)
{
	might_sleep();

	/* Any fence added after the wait begins will not be auto-signaled */
	if (i915_active_acquire_if_busy(ref)) {
		int err;

		err = flush_lazy_signals(ref);
		i915_active_release(ref);
		if (err)
			return err;

		if (___wait_var_event(ref, i915_active_is_idle(ref),
				      state, 0, 0, schedule()))
			return -EINTR;
	}

	/*
	 * After the wait is complete, the caller may free the active.
	 * We have to flush any concurrent retirement before returning.
	 */
	flush_work(&ref->work);
	return 0;
}

static int __await_active(struct i915_active_fence *active,
			  int (*fn)(void *arg, struct dma_fence *fence),
			  void *arg)
{
	struct dma_fence *fence;

	if (is_barrier(active)) /* XXX flush the barrier? */
		return 0;

	fence = i915_active_fence_get(active);
	if (fence) {
		int err;

		err = fn(arg, fence);
		dma_fence_put(fence);
		if (err < 0)
			return err;
	}

	return 0;
}

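/*
 * To await an i915_active's barriers we piggyback on the wake_up_var()
 * waitqueue used by __active_retire(): a wait_barrier entry completes the
 * given i915_sw_fence once the tracker is observed to be idle.
 */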
struct wait_barrier {
	struct wait_queue_entry base;
	struct i915_active *ref;
};

static int
barrier_wake(wait_queue_entry_t *wq, unsigned int mode, int flags, void *key)
{
	struct wait_barrier *wb = container_of(wq, typeof(*wb), base);

	if (i915_active_is_idle(wb->ref)) {
		list_del(&wq->entry);
		i915_sw_fence_complete(wq->private);
		kfree(wq);
	}

	return 0;
}

static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence)
{
	struct wait_barrier *wb;

	wb = kmalloc(sizeof(*wb), GFP_KERNEL);
	if (unlikely(!wb))
		return -ENOMEM;

	GEM_BUG_ON(i915_active_is_idle(ref));
	if (!i915_sw_fence_await(fence)) {
		kfree(wb);
		return -EINVAL;
	}

	wb->base.flags = 0;
	wb->base.func = barrier_wake;
	wb->base.private = fence;
	wb->ref = ref;

	add_wait_queue(__var_waitqueue(ref), &wb->base);
	return 0;
}

static int await_active(struct i915_active *ref,
			unsigned int flags,
			int (*fn)(void *arg, struct dma_fence *fence),
			void *arg, struct i915_sw_fence *barrier)
{
	int err = 0;

	if (!i915_active_acquire_if_busy(ref))
		return 0;

	if (flags & I915_ACTIVE_AWAIT_EXCL &&
	    rcu_access_pointer(ref->excl.fence)) {
		err = __await_active(&ref->excl, fn, arg);
		if (err)
			goto out;
	}

	if (flags & I915_ACTIVE_AWAIT_ACTIVE) {
		struct active_node *it, *n;

		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
			err = __await_active(&it->base, fn, arg);
			if (err)
				goto out;
		}
	}

	if (flags & I915_ACTIVE_AWAIT_BARRIER) {
		err = flush_lazy_signals(ref);
		if (err)
			goto out;

		err = __await_barrier(ref, barrier);
		if (err)
			goto out;
	}

out:
	i915_active_release(ref);
	return err;
}

static int rq_await_fence(void *arg, struct dma_fence *fence)
{
	return i915_request_await_dma_fence(arg, fence);
}

int i915_request_await_active(struct i915_request *rq,
			      struct i915_active *ref,
			      unsigned int flags)
{
	return await_active(ref, flags, rq_await_fence, rq, &rq->submit);
}

static int sw_await_fence(void *arg, struct dma_fence *fence)
{
	return i915_sw_fence_await_dma_fence(arg, fence, 0,
					     GFP_NOWAIT | __GFP_NOWARN);
}

int i915_sw_fence_await_active(struct i915_sw_fence *fence,
			       struct i915_active *ref,
			       unsigned int flags)
{
	return await_active(ref, flags, sw_await_fence, fence, fence);
}

void i915_active_fini(struct i915_active *ref)
{
	debug_active_fini(ref);
	GEM_BUG_ON(atomic_read(&ref->count));
	GEM_BUG_ON(work_pending(&ref->work));
	mutex_destroy(&ref->mutex);

	if (ref->cache)
		kmem_cache_free(global.slab_cache, ref->cache);
}

static inline bool is_idle_barrier(struct active_node *node, u64 idx)
{
	return node->timeline == idx && !i915_active_fence_isset(&node->base);
}

static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
{
	struct rb_node *prev, *p;

	if (RB_EMPTY_ROOT(&ref->tree))
		return NULL;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Try to reuse any existing barrier nodes already allocated for this
	 * i915_active; due to overlapping active phases there is likely a
	 * node kept alive (as we reuse before parking). We prefer to reuse
	 * completely idle barriers (less hassle in manipulating the llists),
	 * but otherwise any will do.
	 */
	if (ref->cache && is_idle_barrier(ref->cache, idx)) {
		p = &ref->cache->node;
		goto match;
	}

	prev = NULL;
	p = ref->tree.rb_node;
	while (p) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);

		if (is_idle_barrier(node, idx))
			goto match;

		prev = p;
		if (node->timeline < idx)
			p = READ_ONCE(p->rb_right);
		else
			p = READ_ONCE(p->rb_left);
	}

	/*
	 * No quick match, but we did find the leftmost rb_node for the
	 * kernel_context. Walk the rb_tree in-order to see if there were
	 * any idle-barriers on this timeline that we missed, or just use
	 * the first pending barrier.
	 */
	for (p = prev; p; p = rb_next(p)) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);
		struct intel_engine_cs *engine;

		if (node->timeline > idx)
			break;

		if (node->timeline < idx)
			continue;

		if (is_idle_barrier(node, idx))
			goto match;

		/*
		 * The list of pending barriers is protected by the
		 * kernel_context timeline, which notably we do not hold
		 * here. i915_request_add_active_barriers() may consume
		 * the barrier before we claim it, so we have to check
		 * for success.
		 */
		engine = __barrier_to_engine(node);
		smp_rmb(); /* serialise with add_active_barriers */
		if (is_barrier(&node->base) &&
		    ____active_del_barrier(ref, node, engine))
			goto match;
	}

	return NULL;

match:
	spin_lock_irq(&ref->tree_lock);
	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
	if (p == &ref->cache->node)
		WRITE_ONCE(ref->cache, NULL);
	spin_unlock_irq(&ref->tree_lock);

	return rb_entry(p, struct active_node, node);
}

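/*
 * Barriers are installed in two phases: preallocate one proto-node per
 * physical engine here (while we may still block on allocations), then
 * splice them into the rbtree and each engine's barrier_tasks list from
 * i915_active_acquire_barrier(), which must not fail.
 */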
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine)
{
	intel_engine_mask_t tmp, mask = engine->mask;
	struct llist_node *first = NULL, *last = NULL;
	struct intel_gt *gt = engine->gt;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* Wait until the previous preallocation is completed */
	while (!llist_empty(&ref->preallocated_barriers))
		cond_resched();

	/*
	 * Preallocate a node for each physical engine supporting the target
	 * engine (remember virtual engines have more than one sibling).
	 * We can then use the preallocated nodes in
	 * i915_active_acquire_barrier()
	 */
	GEM_BUG_ON(!mask);
	for_each_engine_masked(engine, gt, mask, tmp) {
		u64 idx = engine->kernel_context->timeline->fence_context;
		struct llist_node *prev = first;
		struct active_node *node;

		rcu_read_lock();
		node = reuse_idle_barrier(ref, idx);
		rcu_read_unlock();
		if (!node) {
			node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
			if (!node)
				goto unwind;

			RCU_INIT_POINTER(node->base.fence, NULL);
			node->base.cb.func = node_retire;
			node->timeline = idx;
			node->ref = ref;
		}

		if (!i915_active_fence_isset(&node->base)) {
			/*
			 * Mark this as being *our* unconnected proto-node.
			 *
			 * Since this node is not in any list, and we have
			 * decoupled it from the rbtree, we can reuse the
			 * request to indicate this is an idle-barrier node
			 * and then we can use the rb_node and list pointers
			 * for our tracking of the pending barrier.
			 */
			RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
			node->base.cb.node.prev = (void *)engine;
			__i915_active_acquire(ref);
		}
		GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));

		GEM_BUG_ON(barrier_to_engine(node) != engine);
		first = barrier_to_ll(node);
		first->next = prev;
		if (!last)
			last = first;
		intel_engine_pm_get(engine);
	}

	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
	llist_add_batch(first, last, &ref->preallocated_barriers);

	return 0;

unwind:
	while (first) {
		struct active_node *node = barrier_from_ll(first);

		first = first->next;

		atomic_dec(&ref->count);
		intel_engine_pm_put(barrier_to_engine(node));

		kmem_cache_free(global.slab_cache, node);
	}
	return -ENOMEM;
}

void i915_active_acquire_barrier(struct i915_active *ref)
{
	struct llist_node *pos, *next;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Transfer the list of preallocated barriers into the
	 * i915_active rbtree, but only as proto-nodes. They will be
	 * populated by i915_request_add_active_barriers() to point to the
	 * request that will eventually release them.
	 */
	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
		struct active_node *node = barrier_from_ll(pos);
		struct intel_engine_cs *engine = barrier_to_engine(node);
		struct rb_node **p, *parent;

		spin_lock_irqsave_nested(&ref->tree_lock, flags,
					 SINGLE_DEPTH_NESTING);
		parent = NULL;
		p = &ref->tree.rb_node;
		while (*p) {
			struct active_node *it;

			parent = *p;

			it = rb_entry(parent, struct active_node, node);
			if (it->timeline < node->timeline)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&node->node, parent, p);
		rb_insert_color(&node->node, &ref->tree);
		spin_unlock_irqrestore(&ref->tree_lock, flags);

		GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
		intel_engine_pm_put_delay(engine, 1);
	}
}

static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
{
	return __active_fence_slot(&barrier_from_ll(node)->base);
}

void i915_request_add_active_barriers(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct llist_node *node, *next;
	unsigned long flags;

	GEM_BUG_ON(!intel_context_is_barrier(rq->context));
	GEM_BUG_ON(intel_engine_is_virtual(engine));
	GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);

	node = llist_del_all(&engine->barrier_tasks);
	if (!node)
		return;
	/*
	 * Attach the list of proto-fences to the in-flight request such
	 * that the parent i915_active will be released when this request
	 * is retired.
	 */
	spin_lock_irqsave(&rq->lock, flags);
	llist_for_each_safe(node, next, node) {
		/* serialise with reuse_idle_barrier */
		smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
		list_add_tail((struct list_head *)node, &rq->fence.cb_list);
	}
	spin_unlock_irqrestore(&rq->lock, flags);
}

/*
 * __i915_active_fence_set: Update the last active fence along its timeline
 * @active: the active tracker
 * @fence: the new fence (under construction)
 *
 * Records the new @fence as the last active fence along its timeline in
 * this active tracker, moving the tracking callbacks from the previous
 * fence onto this one. Returns the previous fence (if not already completed),
 * which the caller must ensure is executed before the new fence. To ensure
 * that the order of fences within the timeline of the i915_active_fence is
 * understood, it should be locked by the caller.
 */
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;
	unsigned long flags;

	if (fence == rcu_access_pointer(active->fence))
		return fence;

	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));

	/*
	 * Consider that we have two threads arriving (A and B), with
	 * C already resident as the active->fence.
	 *
	 * A does the xchg first, and so it sees C or NULL depending
	 * on the timing of the interrupt handler. If it is NULL, the
	 * previous fence must have been signaled and we know that
	 * we are first on the timeline. If it is still present,
	 * we acquire the lock on that fence and serialise with the interrupt
	 * handler, in the process removing it from any future interrupt
	 * callback. A will then wait on C before executing (if present).
	 *
	 * As B is second, it sees A as the previous fence and so waits for
	 * it to complete its transition and takes over the occupancy for
	 * itself -- remembering that it needs to wait on A before executing.
	 *
	 * Note the strong ordering of the timeline also provides consistent
	 * nesting rules for the fence->lock; the inner lock is always the
	 * older lock.
	 */
	spin_lock_irqsave(fence->lock, flags);
	prev = xchg(__active_fence_slot(active), fence);
	if (prev) {
		GEM_BUG_ON(prev == fence);
		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
		__list_del_entry(&active->cb.node);
		spin_unlock(prev->lock); /* serialise with prev->cb_list */
	}
	list_add_tail(&active->cb.node, &fence->cb_list);
	spin_unlock_irqrestore(fence->lock, flags);

	return prev;
}

int i915_active_fence_set(struct i915_active_fence *active,
			  struct i915_request *rq)
{
	struct dma_fence *fence;
	int err = 0;

	/* Must maintain timeline ordering wrt previous active requests */
	rcu_read_lock();
	fence = __i915_active_fence_set(active, &rq->fence);
	if (fence) /* but the previous fence may not belong to that timeline! */
		fence = dma_fence_get_rcu(fence);
	rcu_read_unlock();
	if (fence) {
		err = i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
	}

	return err;
}

void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	active_fence_cb(fence, cb);
}

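/*
 * auto_active wraps an i915_active with a kref so that heap-allocated
 * trackers created via i915_active_create() free themselves: each busy
 * phase takes an extra kref via ->active() and drops it via ->retire().
 */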
struct auto_active {
	struct i915_active base;
	struct kref ref;
};

struct i915_active *i915_active_get(struct i915_active *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), base);

	kref_get(&aa->ref);
	return &aa->base;
}

static void auto_release(struct kref *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), ref);

	i915_active_fini(&aa->base);
	kfree(aa);
}

void i915_active_put(struct i915_active *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), base);

	kref_put(&aa->ref, auto_release);
}

static int auto_active(struct i915_active *ref)
{
	i915_active_get(ref);
	return 0;
}

__i915_active_call static void
auto_retire(struct i915_active *ref)
{
	i915_active_put(ref);
}

struct i915_active *i915_active_create(void)
{
	struct auto_active *aa;

	aa = kmalloc(sizeof(*aa), GFP_KERNEL);
	if (!aa)
		return NULL;

	kref_init(&aa->ref);
	i915_active_init(&aa->base, auto_active, auto_retire);

	return &aa->base;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif

static void i915_global_active_shrink(void)
{
	kmem_cache_shrink(global.slab_cache);
}

static void i915_global_active_exit(void)
{
	kmem_cache_destroy(global.slab_cache);
}

static struct i915_global_active global = { {
	.shrink = i915_global_active_shrink,
	.exit = i915_global_active_exit,
} };

int __init i915_global_active_init(void)
{
	global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
	if (!global.slab_cache)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}