/*
 *
 * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun 
17*4882a593Smuzhiyun 
18*4882a593Smuzhiyun #include <linux/atomic.h>
19*4882a593Smuzhiyun #include <linux/list.h>
20*4882a593Smuzhiyun #include <linux/spinlock.h>
21*4882a593Smuzhiyun #include <mali_kbase_fence_defs.h>
22*4882a593Smuzhiyun #include <mali_kbase_fence.h>
23*4882a593Smuzhiyun #include <mali_kbase.h>
24*4882a593Smuzhiyun 
/* Spin lock protecting all Mali fences as fence->lock.
 * Every fence created by kbase_fence_out_new() is initialized with this
 * single lock, so one lock serializes signaling/callback state for all
 * kbase fences.
 */
static DEFINE_SPINLOCK(kbase_fence_lock);
27*4882a593Smuzhiyun 
/**
 * kbase_fence_get_driver_name - .get_driver_name fence-ops callback
 * @fence: Fence being queried (unused; the name is global to the driver)
 *
 * Return: the kbase driver name string shared by all kbase fences.
 */
static const char *
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
kbase_fence_get_driver_name(struct fence *fence)
#else
kbase_fence_get_driver_name(struct dma_fence *fence)
#endif
{
	return kbase_drv_name;
}
37*4882a593Smuzhiyun 
/**
 * kbase_fence_get_timeline_name - .get_timeline_name fence-ops callback
 * @fence: Fence being queried (unused; the name is shared by all kbase fences)
 *
 * Return: the kbase timeline name string.
 */
static const char *
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
kbase_fence_get_timeline_name(struct fence *fence)
#else
kbase_fence_get_timeline_name(struct dma_fence *fence)
#endif
{
	return kbase_timeline_name;
}
47*4882a593Smuzhiyun 
/**
 * kbase_fence_enable_signaling - .enable_signaling fence-ops callback
 * @fence: Fence signaling is being enabled for (unused)
 *
 * kbase fences are always treated as having signaling enabled, so this
 * callback unconditionally reports success.
 *
 * Return: true (signaling is always enabled for kbase fences).
 */
static bool
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
kbase_fence_enable_signaling(struct fence *fence)
#else
kbase_fence_enable_signaling(struct dma_fence *fence)
#endif
{
	return true;
}
57*4882a593Smuzhiyun 
/**
 * kbase_fence_fence_value_str - .fence_value_str fence-ops callback
 * @fence: Fence whose sequence number is to be printed
 * @str:   Output buffer for the formatted value
 * @size:  Size of @str in bytes
 *
 * Formats the fence's sequence number into @str for debug output.
 * The format specifier is version-dependent because fence->seqno was
 * widened from u32 to u64 in kernel 5.1.
 */
static void
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
kbase_fence_fence_value_str(struct fence *fence, char *str, int size)
#else
kbase_fence_fence_value_str(struct dma_fence *fence, char *str, int size)
#endif
{
#if (KERNEL_VERSION(5, 1, 0) > LINUX_VERSION_CODE)
	snprintf(str, size, "%u", fence->seqno);
#else
	snprintf(str, size, "%llu", fence->seqno);
#endif
}
71*4882a593Smuzhiyun 
72*4882a593Smuzhiyun #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
73*4882a593Smuzhiyun const struct fence_ops kbase_fence_ops = {
74*4882a593Smuzhiyun 	.wait = fence_default_wait,
75*4882a593Smuzhiyun #else
76*4882a593Smuzhiyun const struct dma_fence_ops kbase_fence_ops = {
77*4882a593Smuzhiyun 	.wait = dma_fence_default_wait,
78*4882a593Smuzhiyun #endif
79*4882a593Smuzhiyun 	.get_driver_name = kbase_fence_get_driver_name,
80*4882a593Smuzhiyun 	.get_timeline_name = kbase_fence_get_timeline_name,
81*4882a593Smuzhiyun 	.enable_signaling = kbase_fence_enable_signaling,
82*4882a593Smuzhiyun 	.fence_value_str = kbase_fence_fence_value_str
83*4882a593Smuzhiyun };
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
86*4882a593Smuzhiyun struct fence *
kbase_fence_out_new(struct kbase_jd_atom * katom)87*4882a593Smuzhiyun kbase_fence_out_new(struct kbase_jd_atom *katom)
88*4882a593Smuzhiyun #else
89*4882a593Smuzhiyun struct dma_fence *
90*4882a593Smuzhiyun kbase_fence_out_new(struct kbase_jd_atom *katom)
91*4882a593Smuzhiyun #endif
92*4882a593Smuzhiyun {
93*4882a593Smuzhiyun #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
94*4882a593Smuzhiyun 	struct fence *fence;
95*4882a593Smuzhiyun #else
96*4882a593Smuzhiyun 	struct dma_fence *fence;
97*4882a593Smuzhiyun #endif
98*4882a593Smuzhiyun 
99*4882a593Smuzhiyun 	WARN_ON(katom->dma_fence.fence);
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun 	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
102*4882a593Smuzhiyun 	if (!fence)
103*4882a593Smuzhiyun 		return NULL;
104*4882a593Smuzhiyun 
105*4882a593Smuzhiyun 	dma_fence_init(fence,
106*4882a593Smuzhiyun 		       &kbase_fence_ops,
107*4882a593Smuzhiyun 		       &kbase_fence_lock,
108*4882a593Smuzhiyun 		       katom->dma_fence.context,
109*4882a593Smuzhiyun 		       atomic_inc_return(&katom->dma_fence.seqno));
110*4882a593Smuzhiyun 
111*4882a593Smuzhiyun 	katom->dma_fence.fence = fence;
112*4882a593Smuzhiyun 
113*4882a593Smuzhiyun 	return fence;
114*4882a593Smuzhiyun }
115*4882a593Smuzhiyun 
116*4882a593Smuzhiyun bool
kbase_fence_free_callbacks(struct kbase_jd_atom * katom)117*4882a593Smuzhiyun kbase_fence_free_callbacks(struct kbase_jd_atom *katom)
118*4882a593Smuzhiyun {
119*4882a593Smuzhiyun 	struct kbase_fence_cb *cb, *tmp;
120*4882a593Smuzhiyun 	bool res = false;
121*4882a593Smuzhiyun 
122*4882a593Smuzhiyun 	lockdep_assert_held(&katom->kctx->jctx.lock);
123*4882a593Smuzhiyun 
124*4882a593Smuzhiyun 	/* Clean up and free callbacks. */
125*4882a593Smuzhiyun 	list_for_each_entry_safe(cb, tmp, &katom->dma_fence.callbacks, node) {
126*4882a593Smuzhiyun 		bool ret;
127*4882a593Smuzhiyun 
128*4882a593Smuzhiyun 		/* Cancel callbacks that hasn't been called yet. */
129*4882a593Smuzhiyun 		ret = dma_fence_remove_callback(cb->fence, &cb->fence_cb);
130*4882a593Smuzhiyun 		if (ret) {
131*4882a593Smuzhiyun 			int ret;
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun 			/* Fence had not signaled, clean up after
134*4882a593Smuzhiyun 			 * canceling.
135*4882a593Smuzhiyun 			 */
136*4882a593Smuzhiyun 			ret = atomic_dec_return(&katom->dma_fence.dep_count);
137*4882a593Smuzhiyun 
138*4882a593Smuzhiyun 			if (unlikely(ret == 0))
139*4882a593Smuzhiyun 				res = true;
140*4882a593Smuzhiyun 		}
141*4882a593Smuzhiyun 
142*4882a593Smuzhiyun 		/*
143*4882a593Smuzhiyun 		 * Release the reference taken in
144*4882a593Smuzhiyun 		 * kbase_fence_add_callback().
145*4882a593Smuzhiyun 		 */
146*4882a593Smuzhiyun 		dma_fence_put(cb->fence);
147*4882a593Smuzhiyun 		list_del(&cb->node);
148*4882a593Smuzhiyun 		kfree(cb);
149*4882a593Smuzhiyun 	}
150*4882a593Smuzhiyun 
151*4882a593Smuzhiyun 	return res;
152*4882a593Smuzhiyun }
153*4882a593Smuzhiyun 
154*4882a593Smuzhiyun #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
155*4882a593Smuzhiyun int
kbase_fence_add_callback(struct kbase_jd_atom * katom,struct fence * fence,fence_func_t callback)156*4882a593Smuzhiyun kbase_fence_add_callback(struct kbase_jd_atom *katom,
157*4882a593Smuzhiyun 			 struct fence *fence,
158*4882a593Smuzhiyun 			 fence_func_t callback)
159*4882a593Smuzhiyun #else
160*4882a593Smuzhiyun int
161*4882a593Smuzhiyun kbase_fence_add_callback(struct kbase_jd_atom *katom,
162*4882a593Smuzhiyun 			 struct dma_fence *fence,
163*4882a593Smuzhiyun 			 dma_fence_func_t callback)
164*4882a593Smuzhiyun #endif
165*4882a593Smuzhiyun {
166*4882a593Smuzhiyun 	int err = 0;
167*4882a593Smuzhiyun 	struct kbase_fence_cb *kbase_fence_cb;
168*4882a593Smuzhiyun 
169*4882a593Smuzhiyun 	if (!fence)
170*4882a593Smuzhiyun 		return -EINVAL;
171*4882a593Smuzhiyun 
172*4882a593Smuzhiyun 	kbase_fence_cb = kmalloc(sizeof(*kbase_fence_cb), GFP_KERNEL);
173*4882a593Smuzhiyun 	if (!kbase_fence_cb)
174*4882a593Smuzhiyun 		return -ENOMEM;
175*4882a593Smuzhiyun 
176*4882a593Smuzhiyun 	kbase_fence_cb->fence = fence;
177*4882a593Smuzhiyun 	kbase_fence_cb->katom = katom;
178*4882a593Smuzhiyun 	INIT_LIST_HEAD(&kbase_fence_cb->node);
179*4882a593Smuzhiyun 
180*4882a593Smuzhiyun 	err = dma_fence_add_callback(fence, &kbase_fence_cb->fence_cb,
181*4882a593Smuzhiyun 				     callback);
182*4882a593Smuzhiyun 	if (err == -ENOENT) {
183*4882a593Smuzhiyun 		/* Fence signaled, clear the error and return */
184*4882a593Smuzhiyun 		err = 0;
185*4882a593Smuzhiyun 		kfree(kbase_fence_cb);
186*4882a593Smuzhiyun 	} else if (err) {
187*4882a593Smuzhiyun 		kfree(kbase_fence_cb);
188*4882a593Smuzhiyun 	} else {
189*4882a593Smuzhiyun 		/*
190*4882a593Smuzhiyun 		 * Get reference to fence that will be kept until callback gets
191*4882a593Smuzhiyun 		 * cleaned up in kbase_fence_free_callbacks().
192*4882a593Smuzhiyun 		 */
193*4882a593Smuzhiyun 		dma_fence_get(fence);
194*4882a593Smuzhiyun 		atomic_inc(&katom->dma_fence.dep_count);
195*4882a593Smuzhiyun 		/* Add callback to katom's list of callbacks */
196*4882a593Smuzhiyun 		list_add(&kbase_fence_cb->node, &katom->dma_fence.callbacks);
197*4882a593Smuzhiyun 	}
198*4882a593Smuzhiyun 
199*4882a593Smuzhiyun 	return err;
200*4882a593Smuzhiyun }
201