// SPDX-License-Identifier: GPL-2.0-only
/*
 * dma-fence-array: aggregate fences to be waited together
 *
 * Copyright (C) 2016 Collabora Ltd
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 * Authors:
 *	Gustavo Padovan <gustavo@padovan.org>
 *	Christian König <christian.koenig@amd.com>
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-fence-array.h>

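/*
 * PENDING_ERROR is a positive sentinel stored in base.error while the
 * array is still pending. Real fence errors are negative errno values,
 * so the sentinel can never collide with one; it is replaced by the
 * first real error reported and cleared once the array signals.
 */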
#define PENDING_ERROR 1

static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
{
	return "dma_fence_array";
}

static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}

static void dma_fence_array_set_pending_error(struct dma_fence_array *array,
					      int error)
{
	/*
	 * Propagate the first error reported by any of our fences, but only
	 * before we ourselves are signaled.
	 */
	if (error)
		cmpxchg(&array->base.error, PENDING_ERROR, error);
}

static void dma_fence_array_clear_pending_error(struct dma_fence_array *array)
{
	/* Clear the error flag if not actually set. */
	cmpxchg(&array->base.error, PENDING_ERROR, 0);
}

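/*
 * Signaling is deferred to irq_work: dma_fence_array_cb_func() runs
 * from another fence's callback path, under that fence's lock and
 * possibly in hardirq context, so the array cannot safely be signaled
 * there. The worker signals it from a safe context instead and drops
 * the reference taken in dma_fence_array_enable_signaling().
 */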
static void irq_dma_fence_array_work(struct irq_work *wrk)
{
	struct dma_fence_array *array = container_of(wrk, typeof(*array), work);

	dma_fence_array_clear_pending_error(array);

	dma_fence_signal(&array->base);
	dma_fence_put(&array->base);
}

static void dma_fence_array_cb_func(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct dma_fence_array_cb *array_cb =
		container_of(cb, struct dma_fence_array_cb, cb);
	struct dma_fence_array *array = array_cb->array;

	dma_fence_array_set_pending_error(array, f->error);

	if (atomic_dec_and_test(&array->num_pending))
		irq_work_queue(&array->work); /* the worker drops the last reference */
	else
		dma_fence_put(&array->base);
}

static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	/* The callback slots live directly behind the array allocation. */
	struct dma_fence_array_cb *cb = (void *)(&array[1]);
	unsigned i;

	for (i = 0; i < array->num_fences; ++i) {
		cb[i].array = array;
		/*
		 * As we may report that the fence is signaled before all
		 * callbacks are complete, we need to take an additional
		 * reference count on the array so that we do not free it too
		 * early. The core fence handling will only hold the reference
		 * until we signal the array as complete (but that is now
		 * insufficient).
		 */
		dma_fence_get(&array->base);
		if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
					   dma_fence_array_cb_func)) {
			/*
			 * The fence is already signaled: fold in its error
			 * and drop our extra reference right away.
			 */
			int error = array->fences[i]->error;

			dma_fence_array_set_pending_error(array, error);
			dma_fence_put(&array->base);
			if (atomic_dec_and_test(&array->num_pending)) {
				dma_fence_array_clear_pending_error(array);
				/* Report the array itself as already signaled. */
				return false;
			}
		}
	}

	return true;
}

static bool dma_fence_array_signaled(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);

	if (atomic_read(&array->num_pending) > 0)
		return false;

	/*
	 * The array can be polled as signaled without enable_signaling()
	 * ever having run, so clear the pending-error sentinel here too.
	 */
	dma_fence_array_clear_pending_error(array);
	return true;
}

static void dma_fence_array_release(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	unsigned i;

	for (i = 0; i < array->num_fences; ++i)
		dma_fence_put(array->fences[i]);

	kfree(array->fences);
	dma_fence_free(fence);
}

const struct dma_fence_ops dma_fence_array_ops = {
	.get_driver_name = dma_fence_array_get_driver_name,
	.get_timeline_name = dma_fence_array_get_timeline_name,
	.enable_signaling = dma_fence_array_enable_signaling,
	.signaled = dma_fence_array_signaled,
	.release = dma_fence_array_release,
};
EXPORT_SYMBOL(dma_fence_array_ops);

/**
 * dma_fence_array_create - Create a custom fence array
 * @num_fences:		[in]	number of fences to add in the array
 * @fences:		[in]	array containing the fences
 * @context:		[in]	fence context to use
 * @seqno:		[in]	sequence number to use
 * @signal_on_any:	[in]	signal on any fence in the array
 *
 * Allocate a dma_fence_array object and initialize the base fence with
 * dma_fence_init(). Returns NULL on error, in which case the caller
 * retains ownership of @fences.
 *
 * The caller should allocate the @fences array with @num_fences entries
 * and fill it with the fences it wants to add to the object. On success,
 * ownership of the array is taken and dma_fence_put() is called on each
 * fence on release.
 *
 * If @signal_on_any is true the fence array signals if any fence in the
 * array signals, otherwise it signals when all fences in the array signal.
 */
struct dma_fence_array *dma_fence_array_create(int num_fences,
					       struct dma_fence **fences,
					       u64 context, unsigned seqno,
					       bool signal_on_any)
{
	struct dma_fence_array *array;
	size_t size = sizeof(*array);

	/* Allocate the callback structures behind the array. */
	size += num_fences * sizeof(struct dma_fence_array_cb);
	array = kzalloc(size, GFP_KERNEL);
	if (!array)
		return NULL;

	spin_lock_init(&array->lock);
	dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
		       context, seqno);
	init_irq_work(&array->work, irq_dma_fence_array_work);

	array->num_fences = num_fences;
	/* In signal-on-any mode a single completion signals the array. */
	atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
	array->fences = fences;

	array->base.error = PENDING_ERROR;

	return array;
}
EXPORT_SYMBOL(dma_fence_array_create);
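
/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * "wait for all" aggregation. The fence sources (@deps, @n) are
 * hypothetical; kmalloc_array(), dma_fence_context_alloc() and
 * dma_fence_wait() are real kernel helpers.
 *
 *	struct dma_fence **fences;
 *	struct dma_fence_array *array;
 *	int i;
 *
 *	fences = kmalloc_array(n, sizeof(*fences), GFP_KERNEL);
 *	if (!fences)
 *		return -ENOMEM;
 *	for (i = 0; i < n; i++)
 *		fences[i] = dma_fence_get(deps[i]);
 *
 *	array = dma_fence_array_create(n, fences,
 *				       dma_fence_context_alloc(1), 1,
 *				       false);
 *	if (!array) {
 *		// On failure the caller still owns @fences.
 *		for (i = 0; i < n; i++)
 *			dma_fence_put(fences[i]);
 *		kfree(fences);
 *		return -ENOMEM;
 *	}
 *
 *	dma_fence_wait(&array->base, false);
 *	dma_fence_put(&array->base);
 */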

/**
 * dma_fence_match_context - Check if all fences are from the given context
 * @fence:		[in]	fence or fence array
 * @context:		[in]	fence context to check all fences against
 *
 * Checks the provided fence or, for a fence array, all fences in the array
 * against the given context. Returns false if any fence is from a different
 * context.
 */
bool dma_fence_match_context(struct dma_fence *fence, u64 context)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	unsigned i;

	if (!dma_fence_is_array(fence))
		return fence->context == context;

	for (i = 0; i < array->num_fences; i++) {
		if (array->fences[i]->context != context)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(dma_fence_match_context);
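
/*
 * Usage sketch (illustrative): detecting whether a fence, possibly an
 * array, carries only dependencies from the caller's own context. The
 * names @in_fence and @my_context are hypothetical.
 *
 *	if (dma_fence_match_context(in_fence, my_context)) {
 *		// Every underlying fence is from our own timeline, so
 *		// no cross-context wait is required.
 *	}
 */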