xref: /OK3568_Linux_fs/kernel/drivers/gpu/arm/mali400/mali/linux/mali_internal_sync.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /*
2  * Copyright (C) 2012-2018 ARM Limited. All rights reserved.
3  *
4  * This program is free software and is provided to you under the terms of the GNU General Public License version 2
5  * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
6  *
7  * A copy of the licence is included with the program, and can also be obtained from Free Software
8  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
9  */
10 
11 #include "mali_internal_sync.h"
12 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
13 #include <linux/ioctl.h>
14 #include <linux/export.h>
15 #include <linux/file.h>
16 #include <linux/fs.h>
17 #include <linux/kernel.h>
18 #include <linux/poll.h>
19 #include <linux/sched.h>
20 #include <linux/seq_file.h>
21 #include <linux/slab.h>
22 #include <linux/uaccess.h>
23 #include <linux/anon_inodes.h>
24 
25 #include "mali_osk.h"
26 #include "mali_kernel_common.h"
27 #if defined(DEBUG)
28 #include "mali_session.h"
29 #include "mali_timeline.h"
30 #endif
31 
32 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
33 static const struct dma_fence_ops fence_ops;
34 #else
35 static const struct fence_ops fence_ops;
36 #endif
37 
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
/* Map a (dma_)fence embedded in a sync point back to its containing
 * mali_internal_sync_point (the fence is the point's 'base' member). */
static struct mali_internal_sync_point *mali_internal_fence_to_sync_pt(struct dma_fence *fence)
#else
static struct mali_internal_sync_point *mali_internal_fence_to_sync_pt(struct fence *fence)
#endif
{
	MALI_DEBUG_ASSERT_POINTER(fence);
	return container_of(fence, struct mali_internal_sync_point, base);
}
47 
/* Recover the owning timeline from a sync point: the point's fence is
 * initialised with the timeline's sync_pt_list_lock as its lock (see
 * mali_internal_sync_point_create), so container_of() on that lock pointer
 * yields the timeline. */
static inline struct mali_internal_sync_timeline *mali_internal_sync_pt_to_sync_timeline(struct mali_internal_sync_point *sync_pt)
{
	MALI_DEBUG_ASSERT_POINTER(sync_pt);
	return container_of(sync_pt->base.lock, struct mali_internal_sync_timeline, sync_pt_list_lock);
}
53 
/* kref release callback for a timeline: runs when the last reference (held
 * by the creator and by each live sync point) is dropped.  Gives the backend
 * a chance to clean up via release_obj before the memory is freed. */
static void mali_internal_sync_timeline_free(struct kref *kref_count)
{
	struct mali_internal_sync_timeline *sync_timeline;

	MALI_DEBUG_ASSERT_POINTER(kref_count);

	sync_timeline = container_of(kref_count, struct mali_internal_sync_timeline, kref_count);

	if (sync_timeline->ops->release_obj)
		sync_timeline->ops->release_obj(sync_timeline);

	kfree(sync_timeline);
}
67 
/* Fence callback fired when a fence attached to a sync fence signals.
 *
 * Pre-4.9 kernels: each attached fence decrements the sync fence's pending
 * counter and the waitqueue is woken when the last one signals.
 * 4.9+ kernels: the sync fence wraps a single (possibly array) fence, so the
 * fence's signalled state is re-queried and waiters are woken once it
 * reports signalled. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
static void mali_internal_fence_check_cb_func(struct fence *fence, struct fence_cb *cb)
#else
static void mali_internal_fence_check_cb_func(struct dma_fence *fence, struct dma_fence_cb *cb)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
	struct mali_internal_sync_fence_cb *check;
#else
	struct mali_internal_sync_fence_waiter *waiter;
#endif
	struct mali_internal_sync_fence *sync_fence;
	int ret;
	MALI_DEBUG_ASSERT_POINTER(cb);
	MALI_IGNORE(fence);
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
	check = container_of(cb, struct mali_internal_sync_fence_cb, cb);
	sync_fence = check->sync_file;
#else
	waiter = container_of(cb, struct mali_internal_sync_fence_waiter, cb);
	/* work.private was set to the sync fence in wait_async */
	sync_fence = (struct mali_internal_sync_fence *)waiter->work.private;
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
	ret = atomic_dec_and_test(&sync_fence->status);
	if (ret)
		wake_up_all(&sync_fence->wq);
#else
	ret = sync_fence->fence->ops->signaled(sync_fence->fence);

	if (0 > ret)
		MALI_PRINT_ERROR(("Mali internal sync:Failed to wait fence  0x%x for sync_fence 0x%x.\n", fence, sync_fence));
	if (1 == ret)
		wake_up_all(&sync_fence->wq);
#endif
}
103 
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
/* Attach @sync_pt to @sync_fence (pre-4.9 kernels only): register a signal
 * callback and, on success, take a fence reference and bump both the fence
 * count and the pending-signal counter.  If the fence has already signalled,
 * fence_add_callback() fails and the fence is deliberately not added.
 * NOTE(review): no bounds check on cbs[] -- assumes the caller guarantees
 * capacity for another entry; verify at call sites. */
static void mali_internal_sync_fence_add_fence(struct mali_internal_sync_fence *sync_fence, struct fence *sync_pt)
{
	int fence_num = 0;
	MALI_DEBUG_ASSERT_POINTER(sync_fence);
	MALI_DEBUG_ASSERT_POINTER(sync_pt);

	fence_num = sync_fence->num_fences;

	sync_fence->cbs[fence_num].fence = sync_pt;
	sync_fence->cbs[fence_num].sync_file = sync_fence;

	if (!fence_add_callback(sync_pt, &sync_fence->cbs[fence_num].cb, mali_internal_fence_check_cb_func)) {
		fence_get(sync_pt);
		sync_fence->num_fences++;
		atomic_inc(&sync_fence->status);
	}
}
#endif
/* Waitqueue wake function for async waiters: dequeue the waiter and invoke
 * its user callback with the sync fence stashed in work.private.  Always
 * returns 1 to tell the waitqueue core the entry was handled. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
static int mali_internal_sync_fence_wake_up_wq(wait_queue_entry_t *curr, unsigned mode,
		int wake_flags, void *key)
#else
static int mali_internal_sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
		int wake_flags, void *key)
#endif
{
	struct mali_internal_sync_fence_waiter *wait;
	MALI_IGNORE(mode);
	MALI_IGNORE(wake_flags);
	MALI_IGNORE(key);

	wait = container_of(curr, struct mali_internal_sync_fence_waiter, work);
	/* 4.13 renamed the wait queue entry's 'task_list' member to 'entry' */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
	list_del_init(&wait->work.entry);
#else
	list_del_init(&wait->work.task_list);
#endif
	wait->callback(wait->work.private, wait);
	return 1;
}
145 
mali_internal_sync_timeline_create(const struct mali_internal_sync_timeline_ops * ops,int size,const char * name)146 struct mali_internal_sync_timeline *mali_internal_sync_timeline_create(const struct mali_internal_sync_timeline_ops *ops,
147 		int size, const char *name)
148 {
149 	struct mali_internal_sync_timeline *sync_timeline = NULL;
150 
151 	MALI_DEBUG_ASSERT_POINTER(ops);
152 
153 	if (size < sizeof(struct mali_internal_sync_timeline)) {
154 		MALI_PRINT_ERROR(("Mali internal sync:Invalid size to create the mali internal sync timeline.\n"));
155 		goto err;
156 	}
157 
158 	sync_timeline = kzalloc(size, GFP_KERNEL);
159 	if (NULL == sync_timeline) {
160 		MALI_PRINT_ERROR(("Mali internal sync:Failed to  allocate buffer  for the mali internal sync timeline.\n"));
161 		goto err;
162 	}
163 	kref_init(&sync_timeline->kref_count);
164 	sync_timeline->ops = ops;
165 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
166 	sync_timeline->fence_context = dma_fence_context_alloc(1);
167 #else
168 	sync_timeline->fence_context = fence_context_alloc(1);
169 #endif
170 	strlcpy(sync_timeline->name, name, sizeof(sync_timeline->name));
171 
172 	INIT_LIST_HEAD(&sync_timeline->sync_pt_list_head);
173 	spin_lock_init(&sync_timeline->sync_pt_list_lock);
174 
175 	return sync_timeline;
176 err:
177 	if (NULL != sync_timeline) {
178 		kfree(sync_timeline);
179 	}
180 	return NULL;
181 }
182 
/* Mark the timeline destroyed, run one signal pass so already-signalled
 * points are unlinked, then drop the creator's reference.  The memory is
 * only freed once every outstanding sync point has also released its ref. */
void mali_internal_sync_timeline_destroy(struct mali_internal_sync_timeline *sync_timeline)
{
	MALI_DEBUG_ASSERT_POINTER(sync_timeline);

	sync_timeline->destroyed = MALI_TRUE;

	/* publish the destroyed flag before signalling/waking anyone */
	smp_wmb();

	mali_internal_sync_timeline_signal(sync_timeline);
	kref_put(&sync_timeline->kref_count, mali_internal_sync_timeline_free);
}
194 
/* Walk the timeline's pending sync point list under the list lock and
 * unlink every point whose fence now reports signalled; the *_locked()
 * helpers also run the fence's callbacks when signalling it. */
void mali_internal_sync_timeline_signal(struct mali_internal_sync_timeline *sync_timeline)
{
	unsigned long flags;
	struct mali_internal_sync_point *sync_pt, *next;

	MALI_DEBUG_ASSERT_POINTER(sync_timeline);

	spin_lock_irqsave(&sync_timeline->sync_pt_list_lock, flags);

	list_for_each_entry_safe(sync_pt, next, &sync_timeline->sync_pt_list_head,
				 sync_pt_list) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
		if (dma_fence_is_signaled_locked(&sync_pt->base))
#else
		if (fence_is_signaled_locked(&sync_pt->base))
#endif
			list_del_init(&sync_pt->sync_pt_list);
	}

	spin_unlock_irqrestore(&sync_timeline->sync_pt_list_lock, flags);
}
216 
/**
 * mali_internal_sync_point_create() - allocate a sync point on a timeline.
 * @sync_timeline: owning timeline; gains a kref held by the point until the
 *                 point's fence is released.
 * @size:          bytes to allocate; must be at least
 *                 sizeof(struct mali_internal_sync_point) (callers may
 *                 over-allocate for private trailing data).
 *
 * The embedded fence is initialised under the timeline's list lock with the
 * timeline's fence context, the next sequence number (++value), and the
 * timeline's list lock as its fence lock (which is what lets
 * mali_internal_sync_pt_to_sync_timeline() recover the timeline later).
 * Returns the new sync point or NULL on error.
 */
struct mali_internal_sync_point *mali_internal_sync_point_create(struct mali_internal_sync_timeline *sync_timeline, int size)
{
	unsigned long flags;
	struct mali_internal_sync_point *sync_pt = NULL;

	MALI_DEBUG_ASSERT_POINTER(sync_timeline);

	if (size < sizeof(struct mali_internal_sync_point)) {
		MALI_PRINT_ERROR(("Mali internal sync:Invalid size to create the mali internal sync point.\n"));
		goto err;
	}

	sync_pt = kzalloc(size, GFP_KERNEL);
	if (NULL == sync_pt) {
		MALI_PRINT_ERROR(("Mali internal sync:Failed to  allocate buffer  for the mali internal sync point.\n"));
		goto err;
	}
	spin_lock_irqsave(&sync_timeline->sync_pt_list_lock, flags);
	kref_get(&sync_timeline->kref_count);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
	dma_fence_init(&sync_pt->base, &fence_ops, &sync_timeline->sync_pt_list_lock,
		       sync_timeline->fence_context, ++sync_timeline->value);
#else
	fence_init(&sync_pt->base, &fence_ops, &sync_timeline->sync_pt_list_lock,
		   sync_timeline->fence_context, ++sync_timeline->value);
#endif
	INIT_LIST_HEAD(&sync_pt->sync_pt_list);
	spin_unlock_irqrestore(&sync_timeline->sync_pt_list_lock, flags);

	return sync_pt;
err:
	if (NULL != sync_pt) {
		kfree(sync_pt);
	}
	return NULL;
}
253 
mali_internal_sync_fence_fdget(int fd)254 struct mali_internal_sync_fence *mali_internal_sync_fence_fdget(int fd)
255 {
256 	struct file *file = fget(fd);
257 
258 	if (NULL == file) {
259 		return NULL;
260 	}
261 
262 	return file->private_data;
263 }
264 
265 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
mali_internal_sync_fence_merge(struct mali_internal_sync_fence * sync_fence1,struct mali_internal_sync_fence * sync_fence2)266 struct mali_internal_sync_fence *mali_internal_sync_fence_merge(
267 	struct mali_internal_sync_fence *sync_fence1, struct mali_internal_sync_fence *sync_fence2)
268 {
269 	struct mali_internal_sync_fence *new_sync_fence;
270 	int i, j, num_fence1, num_fence2, total_fences;
271 	struct fence *fence0 = NULL;
272 
273 	MALI_DEBUG_ASSERT_POINTER(sync_fence1);
274 	MALI_DEBUG_ASSERT_POINTER(sync_fence2);
275 
276 	num_fence1 = sync_fence1->num_fences;
277 	num_fence2 = sync_fence2->num_fences;
278 
279 	total_fences = num_fence1 + num_fence2;
280 
281 	i = 0;
282 	j = 0;
283 
284 	if (num_fence1 > 0) {
285 		fence0 = sync_fence1->cbs[i].fence;
286 		i = 1;
287 	} else if (num_fence2 > 0) {
288 		fence0 = sync_fence2->cbs[i].fence;
289 		j = 1;
290 	}
291 
292 	new_sync_fence = (struct mali_internal_sync_fence *)sync_file_create(fence0);
293 	if (NULL == new_sync_fence) {
294 		MALI_PRINT_ERROR(("Mali internal sync:Failed to  create the mali internal sync fence when merging sync fence.\n"));
295 		return NULL;
296 	}
297 
298 	fence_remove_callback(new_sync_fence->cb[0].fence, &new_sync_fence->cb[0].cb);
299 	new_sync_fence->num_fences = 0;
300 	atomic_dec(&new_sync_fence->status);
301 
302 	for (; i < num_fence1 && j < num_fence2;) {
303 		struct fence *fence1 = sync_fence1->cbs[i].fence;
304 		struct fence *fence2 = sync_fence2->cbs[j].fence;
305 
306 		if (fence1->context < fence2->context) {
307 			mali_internal_sync_fence_add_fence(new_sync_fence, fence1);
308 
309 			i++;
310 		} else if (fence1->context > fence2->context) {
311 			mali_internal_sync_fence_add_fence(new_sync_fence, fence2);
312 
313 			j++;
314 		} else {
315 			if (fence1->seqno - fence2->seqno <= INT_MAX)
316 				mali_internal_sync_fence_add_fence(new_sync_fence, fence1);
317 			else
318 				mali_internal_sync_fence_add_fence(new_sync_fence, fence2);
319 			i++;
320 			j++;
321 		}
322 	}
323 
324 	for (; i < num_fence1; i++)
325 		mali_internal_sync_fence_add_fence(new_sync_fence, sync_fence1->cbs[i].fence);
326 
327 	for (; j < num_fence2; j++)
328 		mali_internal_sync_fence_add_fence(new_sync_fence, sync_fence2->cbs[j].fence);
329 
330 	return new_sync_fence;
331 }
332 #else
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
/* Return the fences backing @sync_fence and store the count in *num_fences.
 * A fence array is unwrapped to its member list; a plain fence yields a
 * one-element "array" pointing at sync_fence->fence itself.  Either way the
 * returned storage is borrowed from the sync fence, never allocated here. */
static struct fence **mali_internal_get_fences(struct mali_internal_sync_fence *sync_fence, int *num_fences)
#else
static struct dma_fence **mali_internal_get_fences(struct mali_internal_sync_fence *sync_fence, int *num_fences)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
	if (sync_fence->fence->ops == &fence_array_ops) {
		struct fence_array *fence_array = container_of(sync_fence->fence, struct fence_array, base);
		*num_fences = fence_array->num_fences;
		return fence_array->fences;
	}
#else
	if (sync_fence->fence->ops == &dma_fence_array_ops) {
		struct dma_fence_array *fence_array = container_of(sync_fence->fence, struct dma_fence_array, base);
		*num_fences = fence_array->num_fences;
		return fence_array->fences;
	}
#endif
	*num_fences = 1;
	return &sync_fence->fence;
}
355 
/* Append @fence to @fences and take a reference, but only advance *num_fences
 * when the fence has not yet signalled -- signalled fences are skipped so a
 * merged sync fence only tracks live fences.  The slot is written before the
 * check; a skipped entry is simply overwritten by the next call. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
static void mali_internal_add_fence_array(struct fence **fences, int *num_fences, struct fence *fence)
#else
static void mali_internal_add_fence_array(struct dma_fence **fences, int *num_fences, struct dma_fence *fence)
#endif
{
	fences[*num_fences] = fence;
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
	if (!fence_is_signaled(fence)) {
		fence_get(fence);
		(*num_fences)++;
	}
#else
	if (!dma_fence_is_signaled(fence)) {
		dma_fence_get(fence);
		(*num_fences)++;
	}
#endif
}
375 
376 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
mali_internal_sync_fence_set_fence_array(struct mali_internal_sync_fence * sync_fence,struct fence ** fences,int num_fences)377 static int mali_internal_sync_fence_set_fence_array(struct mali_internal_sync_fence *sync_fence,
378 		struct fence **fences, int num_fences)
379 #else
380 static int mali_internal_sync_fence_set_fence_array(struct mali_internal_sync_fence *sync_fence,
381 		struct dma_fence **fences, int num_fences)
382 #endif
383 {
384 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
385 	struct fence_array *array;
386 #else
387 	struct dma_fence_array *array;
388 #endif
389 	if(num_fences == 1) {
390 		sync_fence->fence =fences[0];
391 		kfree(fences);
392 	} else {
393 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
394 	array = fence_array_create(num_fences, fences,
395 				   fence_context_alloc(1), 1, false);
396 #else
397 	array = dma_fence_array_create(num_fences, fences,
398 				       dma_fence_context_alloc(1), 1, false);
399 #endif
400 	if (!array){
401 		return -ENOMEM;
402     }
403 	    sync_fence->fence = &array->base;
404 	}
405 	return 0;
406 }
407 
mali_internal_sync_fence_merge(struct mali_internal_sync_fence * sync_fence1,struct mali_internal_sync_fence * sync_fence2)408 struct mali_internal_sync_fence *mali_internal_sync_fence_merge(
409 	struct mali_internal_sync_fence *sync_fence1, struct mali_internal_sync_fence *sync_fence2)
410 {
411 	struct mali_internal_sync_fence *sync_fence;
412 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
413 	struct fence **fences, **nfences, **fences1, **fences2;
414 #else
415 	struct dma_fence **fences, **nfences, **fences1, **fences2;
416 #endif
417 	int real_num_fences, i, j, num_fences, num_fences1, num_fences2;
418 
419 	fences1 = mali_internal_get_fences(sync_fence1, &num_fences1);
420 	fences2 = mali_internal_get_fences(sync_fence2, &num_fences2);
421 
422 	num_fences = num_fences1 + num_fences2;
423 
424 	fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
425 	if (!fences) {
426 		MALI_PRINT_ERROR(("Mali internal sync:Failed to  alloc buffer for fences.\n"));
427 		goto fences_alloc_failed;
428 	}
429 
430 	for (real_num_fences = i = j = 0; i < num_fences1 && j < num_fences2;) {
431 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
432 		struct fence *fence1 = fences1[i];
433 		struct fence *fence2 = fences2[j];
434 #else
435 		struct dma_fence *fence1 = fences1[i];
436 		struct dma_fence *fence2 = fences2[j];
437 #endif
438 		if (fence1->context < fence2->context) {
439 			mali_internal_add_fence_array(fences, &real_num_fences, fence1);
440 
441 			i++;
442 		} else if (fence1->context > fence2->context) {
443 			mali_internal_add_fence_array(fences, &real_num_fences, fence2);
444 
445 			j++;
446 		} else {
447 			if (fence1->seqno - fence2->seqno <= INT_MAX)
448 				mali_internal_add_fence_array(fences, &real_num_fences, fence1);
449 			else
450 				mali_internal_add_fence_array(fences, &real_num_fences, fence2);
451 
452 			i++;
453 			j++;
454 		}
455 	}
456 
457 	for (; i < num_fences1; i++)
458 		mali_internal_add_fence_array(fences, &real_num_fences, fences1[i]);
459 
460 	for (; j < num_fences2; j++)
461 		mali_internal_add_fence_array(fences, &real_num_fences, fences2[j]);
462 
463 	if (0 == real_num_fences)
464 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
465 		fences[real_num_fences++] = fence_get(fences1[0]);
466 #else
467 		fences[real_num_fences++] = dma_fence_get(fences1[0]);
468 #endif
469 
470 	if (num_fences > real_num_fences) {
471 		nfences = krealloc(fences, real_num_fences * sizeof(*fences),
472 				   GFP_KERNEL);
473 		if (!nfences)
474 			goto nfences_alloc_failed;
475 
476 		fences = nfences;
477 	}
478 
479 	sync_fence = (struct mali_internal_sync_fence *)sync_file_create(fences[0]);
480 	if (NULL == sync_fence) {
481 		MALI_PRINT_ERROR(("Mali internal sync:Failed to  create the mali internal sync fence when merging sync fence.\n"));
482 		goto sync_fence_alloc_failed;
483 	}
484 
485 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
486 	fence_put(fences[0]);
487 #else
488 	dma_fence_put(fences[0]);
489 #endif
490 
491 	if (mali_internal_sync_fence_set_fence_array(sync_fence, fences, real_num_fences) < 0) {
492 		MALI_PRINT_ERROR(("Mali internal sync:Failed to  set fence for sync fence.\n"));
493 		goto sync_fence_set_failed;
494 	}
495 
496 	return sync_fence;
497 
498 sync_fence_set_failed:
499 	fput(sync_fence->file);
500 sync_fence_alloc_failed:
501 	for (i = 0; i < real_num_fences; i++)
502 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
503 		fence_put(fences[i]);
504 #else
505 		dma_fence_put(fences[i]);
506 #endif
507 nfences_alloc_failed:
508 	kfree(fences);
509 fences_alloc_failed:
510 	return NULL;
511 }
512 #endif
513 
/* Initialise an async waiter: empty waitqueue linkage plus the user callback
 * that mali_internal_sync_fence_wake_up_wq() invokes when the awaited sync
 * fence signals. */
void mali_internal_sync_fence_waiter_init(struct mali_internal_sync_fence_waiter *waiter,
		mali_internal_sync_callback_t callback)
{
	MALI_DEBUG_ASSERT_POINTER(waiter);
	MALI_DEBUG_ASSERT_POINTER(callback);

	/* 4.13 renamed the wait queue entry's 'task_list' member to 'entry' */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
	INIT_LIST_HEAD(&waiter->work.entry);
#else
	INIT_LIST_HEAD(&waiter->work.task_list);
#endif
	waiter->callback = callback;
}
527 
/**
 * mali_internal_sync_fence_wait_async() - wait on a sync fence without blocking.
 * @sync_fence: sync fence to wait on.
 * @waiter:     waiter set up with mali_internal_sync_fence_waiter_init();
 *              its callback runs from wake-up context when the fence signals.
 *
 * Returns 1 if the fence had already signalled (the callback will NOT run),
 * 0 if the waiter was queued, or a negative error code.
 */
int mali_internal_sync_fence_wait_async(struct mali_internal_sync_fence *sync_fence,
					struct mali_internal_sync_fence_waiter *waiter)
{
	int err;
	unsigned long flags;

	MALI_DEBUG_ASSERT_POINTER(sync_fence);
	MALI_DEBUG_ASSERT_POINTER(waiter);
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
	/* status counts fences still pending: 0 means already signalled */
	err = atomic_read(&sync_fence->status);

	if (0 > err)
		return err;

	if (!err)
		return 1;

	init_waitqueue_func_entry(&waiter->work, mali_internal_sync_fence_wake_up_wq);
	waiter->work.private = sync_fence;

	spin_lock_irqsave(&sync_fence->wq.lock, flags);
	/* re-check under the waitqueue lock to close the signal/queue race */
	err = atomic_read(&sync_fence->status);

	if (0 < err)
		__add_wait_queue_tail(&sync_fence->wq, &waiter->work);
	spin_unlock_irqrestore(&sync_fence->wq.lock, flags);

	if (0 > err)
		return err;

	return !err;
#else
	if ((sync_fence->fence) && (sync_fence->fence->ops) && (sync_fence->fence->ops->signaled))
		err = sync_fence->fence->ops->signaled(sync_fence->fence);
	else
		err = -1;

	if (0 > err)
		return err;

	if (1 == err)
		return err;

	/* attach the signal callback; -ENOENT means it signalled meanwhile */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
	err = dma_fence_add_callback(sync_fence->fence, &waiter->cb, mali_internal_fence_check_cb_func);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
	err = fence_add_callback(sync_fence->fence, &waiter->cb, mali_internal_fence_check_cb_func);
#endif

	if (0 != err) {
		if (-ENOENT == err)
			err = 1;
		return err;
	}
	init_waitqueue_func_entry(&waiter->work, mali_internal_sync_fence_wake_up_wq);
	waiter->work.private = sync_fence;

	spin_lock_irqsave(&sync_fence->wq.lock, flags);
	/* only queue the waiter if the fence is still unsignalled */
	err =  sync_fence->fence->ops->signaled(sync_fence->fence);

	if (0 == err){
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
		__add_wait_queue_entry_tail(&sync_fence->wq, &waiter->work);
#else
		__add_wait_queue_tail(&sync_fence->wq, &waiter->work);
#endif
        }
	spin_unlock_irqrestore(&sync_fence->wq.lock, flags);

	return err;
#endif
}
600 
mali_internal_sync_fence_cancel_async(struct mali_internal_sync_fence * sync_fence,struct mali_internal_sync_fence_waiter * waiter)601 int mali_internal_sync_fence_cancel_async(struct mali_internal_sync_fence *sync_fence,
602 		struct mali_internal_sync_fence_waiter *waiter)
603 {
604 	unsigned long flags;
605 	int ret = 0;
606 
607 	MALI_DEBUG_ASSERT_POINTER(sync_fence);
608 	MALI_DEBUG_ASSERT_POINTER(waiter);
609 
610 	spin_lock_irqsave(&sync_fence->wq.lock, flags);
611 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
612 	if (!list_empty(&waiter->work.entry))
613 		list_del_init(&waiter->work.entry);
614 #else
615 	if (!list_empty(&waiter->work.task_list))
616 		list_del_init(&waiter->work.task_list);
617 #endif
618 	else
619 		ret = -ENOENT;
620 	spin_unlock_irqrestore(&sync_fence->wq.lock, flags);
621 
622 	if (0 == ret) {
623 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
624 		dma_fence_remove_callback(sync_fence->fence, &waiter->cb);
625 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
626 		fence_remove_callback(sync_fence->fence, &waiter->cb);
627 #endif
628 
629 	}
630 
631 	return ret;
632 }
633 
634 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
mali_internal_fence_get_driver_name(struct dma_fence * fence)635 static const char *mali_internal_fence_get_driver_name(struct dma_fence *fence)
636 #else
637 static const char *mali_internal_fence_get_driver_name(struct fence *fence)
638 #endif
639 {
640 	struct mali_internal_sync_point *sync_pt;
641 	struct mali_internal_sync_timeline *parent;
642 
643 	MALI_DEBUG_ASSERT_POINTER(fence);
644 
645 	sync_pt = mali_internal_fence_to_sync_pt(fence);
646 	parent = mali_internal_sync_pt_to_sync_timeline(sync_pt);
647 
648 	return parent->ops->driver_name;
649 }
650 
651 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
mali_internal_fence_get_timeline_name(struct dma_fence * fence)652 static const char *mali_internal_fence_get_timeline_name(struct dma_fence *fence)
653 #else
654 static const char *mali_internal_fence_get_timeline_name(struct fence *fence)
655 #endif
656 {
657 	struct mali_internal_sync_point *sync_pt;
658 	struct mali_internal_sync_timeline *parent;
659 
660 	MALI_DEBUG_ASSERT_POINTER(fence);
661 
662 	sync_pt = mali_internal_fence_to_sync_pt(fence);
663 	parent = mali_internal_sync_pt_to_sync_timeline(sync_pt);
664 
665 	return parent->name;
666 }
667 
/* .release hook: unlink the sync point from its timeline, let the backend
 * free any per-point state, drop the point's timeline reference (possibly
 * freeing the timeline), then free the fence itself. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
static void mali_internal_fence_release(struct dma_fence *fence)
#else
static void mali_internal_fence_release(struct fence *fence)
#endif
{
	unsigned long flags;
	struct mali_internal_sync_point *sync_pt;
	struct mali_internal_sync_timeline *parent;

	MALI_DEBUG_ASSERT_POINTER(fence);

	sync_pt = mali_internal_fence_to_sync_pt(fence);
	parent = mali_internal_sync_pt_to_sync_timeline(sync_pt);


	/* fence->lock is the timeline's sync_pt_list_lock, so this protects
	 * the timeline list; warn (once) if the point is still linked at
	 * release time and unlink it anyway */
	spin_lock_irqsave(fence->lock, flags);
	if (WARN_ON_ONCE(!list_empty(&sync_pt->sync_pt_list)))
		list_del(&sync_pt->sync_pt_list);
	spin_unlock_irqrestore(fence->lock, flags);

	if (parent->ops->free_pt)
		parent->ops->free_pt(sync_pt);

	kref_put(&parent->kref_count, mali_internal_sync_timeline_free);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
	dma_fence_free(&sync_pt->base);
#else
	fence_free(&sync_pt->base);
#endif
}
699 
/* .signaled hook: delegate to the backend's has_signaled(). */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
static bool mali_internal_fence_signaled(struct dma_fence *fence)
#else
static bool mali_internal_fence_signaled(struct fence *fence)
#endif
{
	int ret;
	struct mali_internal_sync_point *sync_pt;
	struct mali_internal_sync_timeline *parent;

	MALI_DEBUG_ASSERT_POINTER(fence);

	sync_pt = mali_internal_fence_to_sync_pt(fence);
	parent = mali_internal_sync_pt_to_sync_timeline(sync_pt);

	ret = parent->ops->has_signaled(sync_pt);
	/* record a backend error in the fence; the field was renamed from
	 * 'status' to 'error' in 4.11 (backported to 4.9.68 stable) */
	if (0 > ret)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) \
                || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 68)))
		fence->error = ret;
#else
		fence->status = ret;
#endif
	/* NOTE(review): the int result is narrowed to bool, so a negative
	 * error also reads as "signalled" to the fence core -- confirm this
	 * treatment of backend errors is intended. */
	return ret;
}
725 
/* .enable_signaling hook, invoked by the fence core when a waiter attaches
 * (per the dma_fence contract this runs with fence->lock -- the timeline's
 * list lock -- held).  Links the point onto the timeline's pending list so
 * mali_internal_sync_timeline_signal() can signal it later; returns false
 * if the point has already signalled, true otherwise. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
static bool mali_internal_fence_enable_signaling(struct dma_fence *fence)
#else
static bool mali_internal_fence_enable_signaling(struct fence *fence)
#endif
{
	struct mali_internal_sync_point *sync_pt;
	struct mali_internal_sync_timeline *parent;

	MALI_DEBUG_ASSERT_POINTER(fence);

	sync_pt = mali_internal_fence_to_sync_pt(fence);
	parent = mali_internal_sync_pt_to_sync_timeline(sync_pt);

	if (mali_internal_fence_signaled(fence))
		return false;

	list_add_tail(&sync_pt->sync_pt_list, &parent->sync_pt_list_head);
	return true;
}
746 
/* .fence_value_str hook: the generic str/size output buffer is ignored;
 * the backend prints the sync point state directly via print_sync_pt(). */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
static void mali_internal_fence_value_str(struct dma_fence *fence, char *str, int size)
#else
static void mali_internal_fence_value_str(struct fence *fence, char *str, int size)
#endif
{
	struct mali_internal_sync_point *sync_pt;
	struct mali_internal_sync_timeline *parent;

	MALI_DEBUG_ASSERT_POINTER(fence);
	MALI_IGNORE(str);
	MALI_IGNORE(size);

	sync_pt = mali_internal_fence_to_sync_pt(fence);
	parent = mali_internal_sync_pt_to_sync_timeline(sync_pt);

	parent->ops->print_sync_pt(sync_pt);
}
765 
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
/* Fence backend for Mali sync points; naming, signalling and lifetime policy
 * are all delegated to the owning timeline's mali_internal_sync_timeline_ops. */
static const struct dma_fence_ops fence_ops = {
#else
static const struct fence_ops fence_ops = {
#endif
	.get_driver_name = mali_internal_fence_get_driver_name,
	.get_timeline_name = mali_internal_fence_get_timeline_name,
	.enable_signaling = mali_internal_fence_enable_signaling,
	.signaled = mali_internal_fence_signaled,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
	.wait = dma_fence_default_wait,
#else
	.wait = fence_default_wait,
#endif
	.release = mali_internal_fence_release,
	.fence_value_str = mali_internal_fence_value_str,
};
783 #endif
784