/*
 * Copyright (C) 2012-2017 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/version.h>
#include "mali_osk.h"
#include "mali_kernel_common.h"

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
#include "mali_dma_fence.h"
#include <linux/atomic.h>
#include <linux/workqueue.h>
#endif

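/*
 * All fences created by mali_dma_fence_new() share this spinlock; it is
 * passed to dma_fence_init()/fence_init() and protects each fence's
 * signaling state.
 */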
static DEFINE_SPINLOCK(mali_dma_fence_lock);

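/*
 * Note: struct fence was renamed to struct dma_fence (and the fence_* API to
 * dma_fence_*) in kernel 4.10, hence the version-dependent blocks below. The
 * callbacks themselves are stubs: a Mali fence has no interrupt source of its
 * own, so signaling is always "enabled" and the fence is signaled explicitly
 * through mali_dma_fence_signal_and_put().
 */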
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
static bool mali_dma_fence_enable_signaling(struct dma_fence *fence)
{
	MALI_IGNORE(fence);
	return true;
}

static const char *mali_dma_fence_get_driver_name(struct dma_fence *fence)
{
	MALI_IGNORE(fence);
	return "mali";
}

static const char *mali_dma_fence_get_timeline_name(struct dma_fence *fence)
{
	MALI_IGNORE(fence);
	return "mali_dma_fence";
}

static const struct dma_fence_ops mali_dma_fence_ops = {
	.get_driver_name = mali_dma_fence_get_driver_name,
	.get_timeline_name = mali_dma_fence_get_timeline_name,
	.enable_signaling = mali_dma_fence_enable_signaling,
	.signaled = NULL,
	.wait = dma_fence_default_wait,
	.release = NULL
};
#else
static bool mali_dma_fence_enable_signaling(struct fence *fence)
{
	MALI_IGNORE(fence);
	return true;
}

static const char *mali_dma_fence_get_driver_name(struct fence *fence)
{
	MALI_IGNORE(fence);
	return "mali";
}

static const char *mali_dma_fence_get_timeline_name(struct fence *fence)
{
	MALI_IGNORE(fence);
	return "mali_dma_fence";
}

static const struct fence_ops mali_dma_fence_ops = {
	.get_driver_name = mali_dma_fence_get_driver_name,
	.get_timeline_name = mali_dma_fence_get_timeline_name,
	.enable_signaling = mali_dma_fence_enable_signaling,
	.signaled = NULL,
	.wait = fence_default_wait,
	.release = NULL
};
#endif

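/*
 * Release all waiters attached to this context: remove each pending callback
 * from its fence, drop the fence reference taken in
 * mali_dma_fence_add_callback(), and free the waiter array.
 */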
static void mali_dma_fence_context_cleanup(struct mali_dma_fence_context *dma_fence_context)
{
	u32 i;

	MALI_DEBUG_ASSERT_POINTER(dma_fence_context);

	for (i = 0; i < dma_fence_context->num_dma_fence_waiter; i++) {
		if (dma_fence_context->mali_dma_fence_waiters[i]) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
			dma_fence_remove_callback(dma_fence_context->mali_dma_fence_waiters[i]->fence,
						  &dma_fence_context->mali_dma_fence_waiters[i]->base);
			dma_fence_put(dma_fence_context->mali_dma_fence_waiters[i]->fence);
#else
			fence_remove_callback(dma_fence_context->mali_dma_fence_waiters[i]->fence,
					      &dma_fence_context->mali_dma_fence_waiters[i]->base);
			fence_put(dma_fence_context->mali_dma_fence_waiters[i]->fence);
#endif
			kfree(dma_fence_context->mali_dma_fence_waiters[i]);
			dma_fence_context->mali_dma_fence_waiters[i] = NULL;
		}
	}

	/* kfree(NULL) is a no-op, so no NULL check is needed. */
	kfree(dma_fence_context->mali_dma_fence_waiters);
	dma_fence_context->mali_dma_fence_waiters = NULL;
	dma_fence_context->num_dma_fence_waiter = 0;
}

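/*
 * Deferred completion handler. mali_dma_fence_callback() may run in atomic
 * (interrupt) context, so the job callback is invoked from a workqueue
 * instead of directly from the fence callback.
 */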
static void mali_dma_fence_context_work_func(struct work_struct *work_handle)
{
	struct mali_dma_fence_context *dma_fence_context;

	MALI_DEBUG_ASSERT_POINTER(work_handle);

	dma_fence_context = container_of(work_handle, struct mali_dma_fence_context, work_handle);

	dma_fence_context->cb_func(dma_fence_context->pp_job_ptr);
}

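/*
 * Fence callback, invoked once per waited-on fence when that fence signals.
 * When the last outstanding fence signals (the context count drops to zero),
 * the completion work is scheduled.
 */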
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
static void mali_dma_fence_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
#else
static void mali_dma_fence_callback(struct fence *fence, struct fence_cb *cb)
#endif
{
	struct mali_dma_fence_waiter *dma_fence_waiter = NULL;
	struct mali_dma_fence_context *dma_fence_context = NULL;

	MALI_DEBUG_ASSERT_POINTER(fence);
	MALI_DEBUG_ASSERT_POINTER(cb);

	MALI_IGNORE(fence);

	dma_fence_waiter = container_of(cb, struct mali_dma_fence_waiter, base);
	dma_fence_context = dma_fence_waiter->parent;

	MALI_DEBUG_ASSERT_POINTER(dma_fence_context);

	if (atomic_dec_and_test(&dma_fence_context->count))
		schedule_work(&dma_fence_context->work_handle);
}

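/*
 * Register a waiter on one fence: grow the waiter array, take a reference on
 * the fence, and install mali_dma_fence_callback(). A fence that has already
 * signaled (-ENOENT) is not an error; the waiter is simply discarded.
 */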
142 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
mali_dma_fence_add_callback(struct mali_dma_fence_context * dma_fence_context,struct dma_fence * fence)143 static _mali_osk_errcode_t mali_dma_fence_add_callback(struct mali_dma_fence_context *dma_fence_context, struct dma_fence *fence)
144 #else
145 static _mali_osk_errcode_t mali_dma_fence_add_callback(struct mali_dma_fence_context *dma_fence_context, struct fence *fence)
146 #endif
147 {
148 	int ret = 0;
149 	struct mali_dma_fence_waiter *dma_fence_waiter;
150 	struct mali_dma_fence_waiter **dma_fence_waiters;
151 
152 	MALI_DEBUG_ASSERT_POINTER(dma_fence_context);
153 	MALI_DEBUG_ASSERT_POINTER(fence);
154 
155 	dma_fence_waiters = krealloc(dma_fence_context->mali_dma_fence_waiters,
156 				     (dma_fence_context->num_dma_fence_waiter + 1)
157 				     * sizeof(struct mali_dma_fence_waiter *),
158 				     GFP_KERNEL);
159 
160 	if (NULL == dma_fence_waiters) {
161 		MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to realloc the dma fence waiters.\n"));
162 		return _MALI_OSK_ERR_NOMEM;
163 	}
164 
165 	dma_fence_context->mali_dma_fence_waiters = dma_fence_waiters;
166 
167 	dma_fence_waiter = kzalloc(sizeof(struct mali_dma_fence_waiter), GFP_KERNEL);
168 
169 	if (NULL == dma_fence_waiter) {
170 		MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to create mali dma fence waiter.\n"));
171 		return _MALI_OSK_ERR_NOMEM;
172 	}
173 
174 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
175 	dma_fence_get(fence);
176 #else
177 	fence_get(fence);
178 #endif
179 	dma_fence_waiter->fence = fence;
180 	dma_fence_waiter->parent = dma_fence_context;
181 	atomic_inc(&dma_fence_context->count);
182 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
183 	ret = dma_fence_add_callback(fence, &dma_fence_waiter->base,
184 				     mali_dma_fence_callback);
185 #else
186 	ret = fence_add_callback(fence, &dma_fence_waiter->base,
187 				 mali_dma_fence_callback);
188 #endif
189 	if (0 > ret) {
190 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
191 		dma_fence_put(fence);
192 #else
193 		fence_put(fence);
194 #endif
195 		kfree(dma_fence_waiter);
196 		atomic_dec(&dma_fence_context->count);
197 		if (-ENOENT == ret) {
198 			/*-ENOENT if fence has already been signaled, return _MALI_OSK_ERR_OK*/
199 			return _MALI_OSK_ERR_OK;
200 		}
201 		/* Failed to add the fence callback into fence, return _MALI_OSK_ERR_FAULT*/
202 		MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to add callback into fence.\n"));
203 		return _MALI_OSK_ERR_FAULT;
204 	}
205 
206 	dma_fence_context->mali_dma_fence_waiters[dma_fence_context->num_dma_fence_waiter] = dma_fence_waiter;
207 	dma_fence_context->num_dma_fence_waiter++;
208 
209 	return _MALI_OSK_ERR_OK;
210 }
211 
212 
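/*
 * Allocate and initialize a new Mali dma fence on the shared
 * mali_dma_fence_lock. The caller owns the returned reference and is expected
 * to complete the fence with mali_dma_fence_signal_and_put(). Returns NULL on
 * allocation failure.
 */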
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
struct dma_fence *mali_dma_fence_new(u32 context, u32 seqno)
#else
struct fence *mali_dma_fence_new(u32 context, u32 seqno)
#endif
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
	struct dma_fence *fence = kzalloc(sizeof(struct dma_fence), GFP_KERNEL);
#else
	struct fence *fence = kzalloc(sizeof(struct fence), GFP_KERNEL);
#endif
	if (NULL == fence) {
		MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to create dma fence.\n"));
		return fence;
	}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
	dma_fence_init(fence,
		       &mali_dma_fence_ops,
		       &mali_dma_fence_lock,
		       context, seqno);
#else
	fence_init(fence,
		   &mali_dma_fence_ops,
		   &mali_dma_fence_lock,
		   context, seqno);
#endif
	return fence;
}

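/*
 * Signal the fence, drop the caller's reference, and clear the caller's
 * pointer so the fence cannot be signaled twice through it.
 */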
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
void mali_dma_fence_signal_and_put(struct dma_fence **fence)
#else
void mali_dma_fence_signal_and_put(struct fence **fence)
#endif
{
	MALI_DEBUG_ASSERT_POINTER(fence);
	MALI_DEBUG_ASSERT_POINTER(*fence);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
	dma_fence_signal(*fence);
	dma_fence_put(*fence);
#else
	fence_signal(*fence);
	fence_put(*fence);
#endif
	*fence = NULL;
}

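/*
 * Initialize a fence context that tracks the dma fences a PP job must wait
 * for. The counter starts at 1 so that the completion callback cannot fire
 * while waiters are still being added; the caller drops this initial
 * reference with mali_dma_fence_context_dec_count() once all waiters are in
 * place. A sketch of the expected call sequence (caller names are
 * illustrative, not part of this file):
 *
 *	mali_dma_fence_context_init(&ctx, job_ready_cb, job);
 *	err = mali_dma_fence_context_add_waiters(&ctx, dmabuf->resv);
 *	mali_dma_fence_context_dec_count(&ctx); // cb runs once fences signal
 */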
void mali_dma_fence_context_init(struct mali_dma_fence_context *dma_fence_context,
				 mali_dma_fence_context_callback_func_t cb_func,
				 void *pp_job_ptr)
{
	MALI_DEBUG_ASSERT_POINTER(dma_fence_context);

	INIT_WORK(&dma_fence_context->work_handle, mali_dma_fence_context_work_func);
	atomic_set(&dma_fence_context->count, 1);
	dma_fence_context->num_dma_fence_waiter = 0;
	dma_fence_context->mali_dma_fence_waiters = NULL;
	dma_fence_context->cb_func = cb_func;
	dma_fence_context->pp_job_ptr = pp_job_ptr;
}

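/*
 * Snapshot the exclusive and shared fences of a dma buf's reservation object
 * (via the RCU helper, so no reservation lock is required for the read) and
 * register a waiter on each of them. On failure, waiters added so far are
 * cleaned up. All snapshot references are dropped before returning.
 */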
_mali_osk_errcode_t mali_dma_fence_context_add_waiters(struct mali_dma_fence_context *dma_fence_context,
		struct reservation_object *dma_reservation_object)
{
	_mali_osk_errcode_t ret = _MALI_OSK_ERR_OK;
	u32 shared_count = 0, i;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
	struct dma_fence *exclusive_fence = NULL;
	struct dma_fence **shared_fences = NULL;
#else
	struct fence *exclusive_fence = NULL;
	struct fence **shared_fences = NULL;
#endif
	MALI_DEBUG_ASSERT_POINTER(dma_fence_context);
	MALI_DEBUG_ASSERT_POINTER(dma_reservation_object);

	/* Get all the shared and exclusive fences from the reservation object of the dma buf. */
	ret = reservation_object_get_fences_rcu(dma_reservation_object, &exclusive_fence,
						&shared_count, &shared_fences);
	if (ret < 0) {
		MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to get the shared or exclusive fences from the reservation object of the dma buf.\n"));
		return _MALI_OSK_ERR_FAULT;
	}

	if (exclusive_fence) {
		ret = mali_dma_fence_add_callback(dma_fence_context, exclusive_fence);
		if (_MALI_OSK_ERR_OK != ret) {
			MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to add callback into exclusive fence.\n"));
			mali_dma_fence_context_cleanup(dma_fence_context);
			goto ended;
		}
	}

	for (i = 0; i < shared_count; i++) {
		ret = mali_dma_fence_add_callback(dma_fence_context, shared_fences[i]);
		if (_MALI_OSK_ERR_OK != ret) {
			MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to add callback into shared fence [%d].\n", i));
			mali_dma_fence_context_cleanup(dma_fence_context);
			break;
		}
	}

ended:
	if (exclusive_fence)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
		dma_fence_put(exclusive_fence);
#else
		fence_put(exclusive_fence);
#endif

	if (shared_fences) {
		for (i = 0; i < shared_count; i++) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
			dma_fence_put(shared_fences[i]);
#else
			fence_put(shared_fences[i]);
#endif
		}
		kfree(shared_fences);
	}

	return ret;
}

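/*
 * Terminate a fence context: force the count to zero so a late fence
 * callback can no longer schedule work, cancel any completion work already
 * scheduled, and free the remaining waiters.
 */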
void mali_dma_fence_context_term(struct mali_dma_fence_context *dma_fence_context)
{
	MALI_DEBUG_ASSERT_POINTER(dma_fence_context);
	atomic_set(&dma_fence_context->count, 0);
	if (dma_fence_context->work_handle.func) {
		cancel_work_sync(&dma_fence_context->work_handle);
	}
	mali_dma_fence_context_cleanup(dma_fence_context);
}

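/*
 * Drop the initial reference taken by mali_dma_fence_context_init(). If every
 * fence has already signaled (or none were added), this is what triggers the
 * completion work.
 */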
void mali_dma_fence_context_dec_count(struct mali_dma_fence_context *dma_fence_context)
{
	MALI_DEBUG_ASSERT_POINTER(dma_fence_context);

	if (atomic_dec_and_test(&dma_fence_context->count))
		schedule_work(&dma_fence_context->work_handle);
}

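/*
 * Append a reservation object to the list unless it is already present, so
 * that each object is locked only once. The caller must ensure the array is
 * large enough; no bounds checking is performed here.
 */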
void mali_dma_fence_add_reservation_object_list(struct reservation_object *dma_reservation_object,
		struct reservation_object **dma_reservation_object_list,
		u32 *num_dma_reservation_object)
{
	u32 i;

	MALI_DEBUG_ASSERT_POINTER(dma_reservation_object);
	MALI_DEBUG_ASSERT_POINTER(dma_reservation_object_list);
	MALI_DEBUG_ASSERT_POINTER(num_dma_reservation_object);

	for (i = 0; i < *num_dma_reservation_object; i++) {
		if (dma_reservation_object_list[i] == dma_reservation_object)
			return;
	}

	dma_reservation_object_list[*num_dma_reservation_object] = dma_reservation_object;
	(*num_dma_reservation_object)++;
}

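/*
 * Lock a set of reservation objects using the wound/wait protocol: on
 * -EDEADLK, release everything already held, take the contended lock with
 * ww_mutex_lock_slow(), and retry the whole list. Returns 0 with all locks
 * held (and the acquire context marked done), or a negative errno with
 * nothing held.
 */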
int mali_dma_fence_lock_reservation_object_list(struct reservation_object **dma_reservation_object_list,
		u32 num_dma_reservation_object, struct ww_acquire_ctx *ww_actx)
{
	u32 i;

	struct reservation_object *reservation_object_to_slow_lock = NULL;

	MALI_DEBUG_ASSERT_POINTER(dma_reservation_object_list);
	MALI_DEBUG_ASSERT_POINTER(ww_actx);

	ww_acquire_init(ww_actx, &reservation_ww_class);

again:
	for (i = 0; i < num_dma_reservation_object; i++) {
		int ret;

		if (dma_reservation_object_list[i] == reservation_object_to_slow_lock) {
			reservation_object_to_slow_lock = NULL;
			continue;
		}

		ret = ww_mutex_lock(&dma_reservation_object_list[i]->lock, ww_actx);

		if (ret < 0) {
			u32 slow_lock_index = i;

			/* Unlock all the reservation objects we have already locked. */
			while (i > 0) {
				i--;
				ww_mutex_unlock(&dma_reservation_object_list[i]->lock);
			}

			if (NULL != reservation_object_to_slow_lock)
				ww_mutex_unlock(&reservation_object_to_slow_lock->lock);

			if (ret == -EDEADLK) {
				reservation_object_to_slow_lock = dma_reservation_object_list[slow_lock_index];
				ww_mutex_lock_slow(&reservation_object_to_slow_lock->lock, ww_actx);
				goto again;
			}
			ww_acquire_fini(ww_actx);
			MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to lock all dma reservation objects.\n"));
			return ret;
		}
	}

	ww_acquire_done(ww_actx);
	return 0;
}

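/*
 * Release every lock taken by mali_dma_fence_lock_reservation_object_list()
 * and end the ww_acquire context.
 */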
void mali_dma_fence_unlock_reservation_object_list(struct reservation_object **dma_reservation_object_list,
		u32 num_dma_reservation_object, struct ww_acquire_ctx *ww_actx)
{
	u32 i;

	for (i = 0; i < num_dma_reservation_object; i++)
		ww_mutex_unlock(&dma_reservation_object_list[i]->lock);

	ww_acquire_fini(ww_actx);
}