/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer.  A reservation object
 * can have one exclusive fence attached (normally associated with
 * write operations) or N shared fences (read operations).  The RCU
 * mechanism is used to protect read access to fences from locked
 * write-side updates.
 */
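
/*
 * A minimal usage sketch, not taken from this file: updates are made with
 * the ww_mutex held, while readers either take the same lock or use the
 * *_rcu() helpers further below.  "obj" and "fence" are placeholder names.
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &reservation_ww_class);
 *	if (dma_resv_lock(obj, &ctx) == -EDEADLK)
 *		dma_resv_lock_slow(obj, &ctx);
 *	if (!dma_resv_reserve_shared(obj, 1))
 *		dma_resv_add_shared_fence(obj, fence);
 *	dma_resv_unlock(obj);
 *	ww_acquire_fini(&ctx);
 */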

DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

/**
 * dma_resv_list_alloc - allocate fence list
 * @shared_max: number of fences we need space for
 *
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * shared_max.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
	struct dma_resv_list *list;

	list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
	if (!list)
		return NULL;

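	/*
	 * kmalloc() may round the request up to the next slab bucket;
	 * derive shared_max from ksize() so that any slack in the
	 * allocation becomes usable fence slots.
	 */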
	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
		sizeof(*list->shared);

	return list;
}

/**
 * dma_resv_list_free - free fence list
 * @list: list to free
 *
 * Free a dma_resv_list and make sure to drop all references.
 */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->shared_count; ++i)
		dma_fence_put(rcu_dereference_protected(list->shared[i], true));

	kfree_rcu(list, rcu);
}

#if IS_ENABLED(CONFIG_LOCKDEP)
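/*
 * Boot-time lockdep priming: the sequence below never contends for real;
 * it only teaches lockdep the intended nesting so that violations are
 * reported early.  Roughly: mmap_lock -> reservation_ww_class ->
 * fs_reclaim -> i_mmap_rwsem, with fence waits allowed under mmu
 * notifier invalidation.
 */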
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	struct address_space mapping;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);
	address_space_init_once(&mapping);

	mmap_read_lock(mm);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	fs_reclaim_acquire(GFP_KERNEL);
	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
	i_mmap_lock_write(&mapping);
	i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	__dma_fence_might_wait();
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
	__dma_fence_might_wait();
#endif
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	mmap_read_unlock(mm);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);
	seqcount_ww_mutex_init(&obj->seq, &obj->lock);

	RCU_INIT_POINTER(obj->fence, NULL);
	RCU_INIT_POINTER(obj->fence_excl, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	struct dma_resv_list *fobj;
	struct dma_fence *excl;

	/*
	 * This object should be dead and all references to it must have
	 * been released, so there is no need for RCU protection.
	 */
	excl = rcu_dereference_protected(obj->fence_excl, 1);
	if (excl)
		dma_fence_put(excl);

	fobj = rcu_dereference_protected(obj->fence, 1);
	dma_resv_list_free(fobj);
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);

/**
 * dma_resv_reserve_shared - Reserve space to add shared fences to
 * a dma_resv.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_shared_fence().  Must
 * be called with obj->lock held.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_get_list(obj);

	if (old && old->shared_max) {
		if ((old->shared_count + num_fences) <= old->shared_max)
			return 0;
		else
			max = max(old->shared_count + num_fences,
				  old->shared_max * 2);
	} else {
		max = max(4ul, roundup_pow_of_two(num_fences));
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(old->shared[i],
						  dma_resv_held(obj));
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->shared[--k], fence);
		else
			RCU_INIT_POINTER(new->shared[j++], fence);
	}
	new->shared_count = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) shared fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fence, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->shared[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_shared);

/**
 * dma_resv_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot.  The obj->lock must be held, and
 * dma_resv_reserve_shared() must have been called beforehand.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	fobj = dma_resv_get_list(obj);
	count = fobj->shared_count;

	write_seqcount_begin(&obj->seq);

	for (i = 0; i < count; ++i) {

		old = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj));
		if (old->context == fence->context ||
		    dma_fence_is_signaled(old))
			goto replace;
	}

	BUG_ON(fobj->shared_count >= fobj->shared_max);
	old = NULL;
	count++;

replace:
	RCU_INIT_POINTER(fobj->shared[i], fence);
	/* pointer update must be visible before we extend the shared_count */
	smp_store_mb(fobj->shared_count, count);

	write_seqcount_end(&obj->seq);
	dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);
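
/*
 * Illustrative sketch (placeholder names, not part of this file): a driver
 * submitting one job per engine reserves all the slots it may need up
 * front and then publishes each fence.  A fence from the same context as
 * an existing slot simply replaces that slot above.
 *
 *	dma_resv_lock(obj, NULL);
 *	if (!dma_resv_reserve_shared(obj, num_engines))
 *		for (i = 0; i < num_engines; i++)
 *			dma_resv_add_shared_fence(obj, job_fence[i]);
 *	dma_resv_unlock(obj);
 */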

/**
 * dma_resv_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot.  The obj->lock must be held.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_fence *old_fence = dma_resv_get_excl(obj);
	struct dma_resv_list *old;
	u32 i = 0;

	dma_resv_assert_held(obj);

	old = dma_resv_get_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		dma_fence_get(fence);

	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);

	/* inplace update, no shared fences */
	while (i--)
		dma_fence_put(rcu_dereference_protected(old->shared[i],
						dma_resv_held(obj)));

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);
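
/*
 * Illustrative sketch (placeholder names): a write or migration that must
 * exclude all other access installs its fence in the exclusive slot, which
 * also drops every shared fence currently attached.
 *
 *	dma_resv_lock(obj, NULL);
 *	dma_resv_add_excl_fence(obj, write_fence);
 *	dma_resv_unlock(obj);
 */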

/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst.  The dst->lock must be held.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_list *src_list, *dst_list;
	struct dma_fence *old, *new;
	unsigned i;

	dma_resv_assert_held(dst);

	rcu_read_lock();
	src_list = rcu_dereference(src->fence);

retry:
	if (src_list) {
		unsigned shared_count = src_list->shared_count;

		rcu_read_unlock();

		dst_list = dma_resv_list_alloc(shared_count);
		if (!dst_list)
			return -ENOMEM;

		rcu_read_lock();
		src_list = rcu_dereference(src->fence);
		if (!src_list || src_list->shared_count > shared_count) {
			kfree(dst_list);
			goto retry;
		}

		dst_list->shared_count = 0;
		for (i = 0; i < src_list->shared_count; ++i) {
			struct dma_fence *fence;

			fence = rcu_dereference(src_list->shared[i]);
			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &fence->flags))
				continue;

			if (!dma_fence_get_rcu(fence)) {
				dma_resv_list_free(dst_list);
				src_list = rcu_dereference(src->fence);
				goto retry;
			}

			if (dma_fence_is_signaled(fence)) {
				dma_fence_put(fence);
				continue;
			}

			rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
		}
	} else {
		dst_list = NULL;
	}

	new = dma_fence_get_rcu_safe(&src->fence_excl);
	rcu_read_unlock();

	src_list = dma_resv_get_list(dst);
	old = dma_resv_get_excl(dst);

	write_seqcount_begin(&dst->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(dst->fence_excl, new);
	RCU_INIT_POINTER(dst->fence, dst_list);
	write_seqcount_end(&dst->seq);

	dma_resv_list_free(src_list);
	dma_fence_put(old);

	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);
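
/*
 * Illustrative sketch (placeholder names): duplicate the fence state of
 * one buffer onto another, e.g. when cloning an object.
 *
 *	dma_resv_lock(dst_obj, NULL);
 *	ret = dma_resv_copy_fences(dst_obj, src_obj);
 *	dma_resv_unlock(dst_obj);
 */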

/**
 * dma_resv_get_fences_rcu - Get an object's shared and exclusive
 * fences without update side lock held
 * @obj: the reservation object
 * @pfence_excl: the returned exclusive fence (or NULL)
 * @pshared_count: the number of shared fences returned
 * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object. If the pointer for the
 * exclusive fence is not specified the fence is put into the array of the
 * shared fences as well. Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences_rcu(struct dma_resv *obj,
			    struct dma_fence **pfence_excl,
			    unsigned *pshared_count,
			    struct dma_fence ***pshared)
{
	struct dma_fence **shared = NULL;
	struct dma_fence *fence_excl;
	unsigned int shared_count;
	int ret = 1;

	do {
		struct dma_resv_list *fobj;
		unsigned int i, seq;
		size_t sz = 0;

		shared_count = i = 0;

		rcu_read_lock();
		seq = read_seqcount_begin(&obj->seq);

		fence_excl = rcu_dereference(obj->fence_excl);
		if (fence_excl && !dma_fence_get_rcu(fence_excl))
			goto unlock;

		fobj = rcu_dereference(obj->fence);
		if (fobj)
			sz += sizeof(*shared) * fobj->shared_max;

		if (!pfence_excl && fence_excl)
			sz += sizeof(*shared);

		if (sz) {
			struct dma_fence **nshared;

			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();

				dma_fence_put(fence_excl);
				fence_excl = NULL;

				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				break;
			}
			shared = nshared;
			shared_count = fobj ? fobj->shared_count : 0;
			for (i = 0; i < shared_count; ++i) {
				shared[i] = rcu_dereference(fobj->shared[i]);
				if (!dma_fence_get_rcu(shared[i]))
					break;
			}
		}

		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
			while (i--)
				dma_fence_put(shared[i]);
			dma_fence_put(fence_excl);
			goto unlock;
		}

		ret = 0;
unlock:
		rcu_read_unlock();
	} while (ret);

	if (pfence_excl)
		*pfence_excl = fence_excl;
	else if (fence_excl)
		shared[shared_count++] = fence_excl;

	if (!shared_count) {
		kfree(shared);
		shared = NULL;
	}

	*pshared_count = shared_count;
	*pshared = shared;
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
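
/*
 * Illustrative sketch (placeholder names): snapshot every fence without
 * taking the ww_mutex, use them, then drop the references and the array.
 *
 *	struct dma_fence *excl, **shared;
 *	unsigned int count, i;
 *
 *	ret = dma_resv_get_fences_rcu(obj, &excl, &count, &shared);
 *	if (!ret) {
 *		for (i = 0; i < count; i++)
 *			dma_fence_put(shared[i]);
 *		kfree(shared);
 *		dma_fence_put(excl);
 *	}
 */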

/**
 * dma_resv_wait_timeout_rcu - Wait on a reservation object's
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
			       bool wait_all, bool intr,
			       unsigned long timeout)
{
	struct dma_fence *fence;
	unsigned seq, shared_count;
	long ret = timeout ? timeout : 1;
	int i;

retry:
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();
	i = -1;

	fence = rcu_dereference(obj->fence_excl);
	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		if (!dma_fence_get_rcu(fence))
			goto unlock_retry;

		if (dma_fence_is_signaled(fence)) {
			dma_fence_put(fence);
			fence = NULL;
		}

	} else {
		fence = NULL;
	}

	if (wait_all) {
		struct dma_resv_list *fobj = rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; !fence && i < shared_count; ++i) {
			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);

			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &lfence->flags))
				continue;

			if (!dma_fence_get_rcu(lfence))
				goto unlock_retry;

			if (dma_fence_is_signaled(lfence)) {
				dma_fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	rcu_read_unlock();
	if (fence) {
		if (read_seqcount_retry(&obj->seq, seq)) {
			dma_fence_put(fence);
			goto retry;
		}

		ret = dma_fence_wait_timeout(fence, intr, ret);
		dma_fence_put(fence);
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
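
/*
 * Illustrative sketch (placeholder names): block until the buffer is idle
 * (all shared and exclusive fences signaled) or 100 ms have passed.
 *
 *	long ret;
 *
 *	ret = dma_resv_wait_timeout_rcu(obj, true, true,
 *					msecs_to_jiffies(100));
 *	if (ret == 0)
 *		return -ETIMEDOUT;
 *	if (ret < 0)
 *		return ret;
 */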

static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
{
	struct dma_fence *fence, *lfence = passed_fence;
	int ret = 1;

	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
		fence = dma_fence_get_rcu(lfence);
		if (!fence)
			return -1;

		ret = !!dma_fence_is_signaled(fence);
		dma_fence_put(fence);
	}
	return ret;
}

/**
 * dma_resv_test_signaled_rcu - Test if a reservation object's
 * fences have been signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * RETURNS
 * true if all fences signaled, else false
 */
bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
{
	unsigned seq, shared_count;
	int ret;

	rcu_read_lock();
retry:
	ret = true;
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);

	if (test_all) {
		unsigned i;

		struct dma_resv_list *fobj = rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			ret = dma_resv_test_signaled_single(fence);
			if (ret < 0)
				goto retry;
			else if (!ret)
				break;
		}

		if (read_seqcount_retry(&obj->seq, seq))
			goto retry;
	}

	if (!shared_count) {
		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (fence_excl) {
			ret = dma_resv_test_signaled_single(fence_excl);
			if (ret < 0)
				goto retry;

			if (read_seqcount_retry(&obj->seq, seq))
				goto retry;
		}
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
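
/*
 * Illustrative sketch (placeholder names): non-blocking busy check, e.g.
 * when implementing a GEM_BUSY style ioctl.
 *
 *	bool idle = dma_resv_test_signaled_rcu(obj, true);
 *
 *	return idle ? 0 : -EBUSY;
 */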