xref: /OK3568_Linux_fs/kernel/drivers/gpu/arm/midgard/mali_kbase_sync_android.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 *
 * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */

/*
 * Code for supporting explicit Android fences (CONFIG_SYNC)
 * Known to be good for kernels 4.5 and earlier.
 * Replaced with CONFIG_SYNC_FILE for 4.9 and later kernels
 * (see mali_kbase_sync_file.c)
 */

#include <linux/sched.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/anon_inodes.h>
#include <linux/version.h>
#include "sync.h"
#include <mali_kbase.h>
#include <mali_kbase_sync.h>
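
/* Mali wrapper around a struct sync_timeline.
 *
 * @counter:  order number of the most recently allocated sync point;
 *            incremented by kbase_sync_pt_alloc() for each new point.
 * @signaled: order number of the most recently signaled sync point;
 *            a point is complete once @signaled catches up with its order.
 */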
struct mali_sync_timeline {
	struct sync_timeline timeline;
	atomic_t counter;
	atomic_t signaled;
};
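
/* Mali wrapper around a struct sync_pt.
 *
 * @order:  position of this point on its parent timeline, taken from the
 *          timeline's counter at allocation time.
 * @result: negative for failure, zero or positive for success; reported
 *          through timeline_has_signaled() once the point is reached.
 */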
struct mali_sync_pt {
	struct sync_pt pt;
	int order;
	int result;
};

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
/* For backwards compatibility with kernels before 3.17. After 3.17
 * sync_pt_parent is included in the kernel. */
static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt)
{
	return pt->parent;
}
#endif

static struct mali_sync_timeline *to_mali_sync_timeline(
						struct sync_timeline *timeline)
{
	return container_of(timeline, struct mali_sync_timeline, timeline);
}

static struct mali_sync_pt *to_mali_sync_pt(struct sync_pt *pt)
{
	return container_of(pt, struct mali_sync_pt, pt);
}
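
/* sync_timeline_ops.dup callback: duplicate a sync point onto the same
 * parent timeline, copying its order and result (used by the sync
 * framework, e.g. when fences are merged).
 */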
static struct sync_pt *timeline_dup(struct sync_pt *pt)
{
	struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
	struct mali_sync_pt *new_mpt;
	struct sync_pt *new_pt = sync_pt_create(sync_pt_parent(pt),
						sizeof(struct mali_sync_pt));

	if (!new_pt)
		return NULL;

	new_mpt = to_mali_sync_pt(new_pt);
	new_mpt->order = mpt->order;
	new_mpt->result = mpt->result;

	return new_pt;
}
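
/* sync_timeline_ops.has_signaled callback.
 *
 * Returns 1 (or the negative result on error) once the timeline's
 * signaled count has reached this point's order, and 0 while the point
 * is still pending. The signed subtraction keeps the comparison sane
 * if the counters ever wrap.
 */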
static int timeline_has_signaled(struct sync_pt *pt)
{
	struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
	struct mali_sync_timeline *mtl = to_mali_sync_timeline(
							sync_pt_parent(pt));
	int result = mpt->result;

	int diff = atomic_read(&mtl->signaled) - mpt->order;

	if (diff >= 0)
		return (result < 0) ? result : 1;

	return 0;
}
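
/* sync_timeline_ops.compare callback: order two points on the same
 * timeline by their allocation order, returning -1, 0 or 1.
 */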
static int timeline_compare(struct sync_pt *a, struct sync_pt *b)
{
	struct mali_sync_pt *ma = container_of(a, struct mali_sync_pt, pt);
	struct mali_sync_pt *mb = container_of(b, struct mali_sync_pt, pt);

	int diff = ma->order - mb->order;

	if (diff == 0)
		return 0;

	return (diff < 0) ? -1 : 1;
}

static void timeline_value_str(struct sync_timeline *timeline, char *str,
			       int size)
{
	struct mali_sync_timeline *mtl = to_mali_sync_timeline(timeline);

	snprintf(str, size, "%d", atomic_read(&mtl->signaled));
}

static void pt_value_str(struct sync_pt *pt, char *str, int size)
{
	struct mali_sync_pt *mpt = to_mali_sync_pt(pt);

	snprintf(str, size, "%d(%d)", mpt->order, mpt->result);
}

static struct sync_timeline_ops mali_timeline_ops = {
	.driver_name = "Mali",
	.dup = timeline_dup,
	.has_signaled = timeline_has_signaled,
	.compare = timeline_compare,
	.timeline_value_str = timeline_value_str,
	.pt_value_str = pt_value_str,
};

/* Allocates a timeline for Mali
 *
 * One timeline should be allocated per API context.
 */
static struct sync_timeline *mali_sync_timeline_alloc(const char *name)
{
	struct sync_timeline *tl;
	struct mali_sync_timeline *mtl;

	tl = sync_timeline_create(&mali_timeline_ops,
				  sizeof(struct mali_sync_timeline), name);
	if (!tl)
		return NULL;

	/* Set the counter in our private struct */
	mtl = to_mali_sync_timeline(tl);
	atomic_set(&mtl->counter, 0);
	atomic_set(&mtl->signaled, 0);

	return tl;
}

static int kbase_stream_close(struct inode *inode, struct file *file)
{
	struct sync_timeline *tl;

	tl = (struct sync_timeline *)file->private_data;
	sync_timeline_destroy(tl);
	return 0;
}

static const struct file_operations stream_fops = {
	.owner = THIS_MODULE,
	.release = kbase_stream_close,
};
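
/* Create a timeline and expose it to userspace as an anonymous fd.
 *
 * The fd owns the timeline: when the last reference to the fd is
 * dropped, kbase_stream_close() destroys the timeline.
 */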
int kbase_sync_fence_stream_create(const char *name, int *const out_fd)
{
	struct sync_timeline *tl;

	if (!out_fd)
		return -EINVAL;

	tl = mali_sync_timeline_alloc(name);
	if (!tl)
		return -EINVAL;

	*out_fd = anon_inode_getfd(name, &stream_fops, tl, O_RDONLY|O_CLOEXEC);

	if (*out_fd < 0) {
		sync_timeline_destroy(tl);
		return -EINVAL;
	}

	return 0;
}

/* Allocates a sync point within the timeline.
 *
 * The timeline must be the one allocated by mali_sync_timeline_alloc
 *
 * Sync points must be triggered in *exactly* the same order as they are
 * allocated.
 */
static struct sync_pt *kbase_sync_pt_alloc(struct sync_timeline *parent)
{
	struct sync_pt *pt = sync_pt_create(parent,
					    sizeof(struct mali_sync_pt));
	struct mali_sync_timeline *mtl = to_mali_sync_timeline(parent);
	struct mali_sync_pt *mpt;

	if (!pt)
		return NULL;

	mpt = to_mali_sync_pt(pt);
	mpt->order = atomic_inc_return(&mtl->counter);
	mpt->result = 0;

	return pt;
}
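
/* Create the output fence for an atom.
 *
 * @tl_fd must be a stream fd from kbase_sync_fence_stream_create(); the
 * f_op check below rejects anything else. A new sync point is allocated
 * on that timeline, wrapped in a fence, installed in a fresh fd for
 * userspace, and an extra reference is taken for katom->fence.
 *
 * Returns the new fd on success or a negative error code.
 */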
int kbase_sync_fence_out_create(struct kbase_jd_atom *katom, int tl_fd)
{
	struct sync_timeline *tl;
	struct sync_pt *pt;
	struct sync_fence *fence;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
	struct files_struct *files;
	struct fdtable *fdt;
#endif
	int fd;
	struct file *tl_file;

	tl_file = fget(tl_fd);
	if (tl_file == NULL)
		return -EBADF;

	if (tl_file->f_op != &stream_fops) {
		fd = -EBADF;
		goto out;
	}

	tl = tl_file->private_data;

	pt = kbase_sync_pt_alloc(tl);
	if (!pt) {
		fd = -EFAULT;
		goto out;
	}

	fence = sync_fence_create("mali_fence", pt);
	if (!fence) {
		sync_pt_free(pt);
		fd = -EFAULT;
		goto out;
	}

	/* from here the fence owns the sync_pt */

	/* create a fd representing the fence */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
	fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
	if (fd < 0) {
		sync_fence_put(fence);
		goto out;
	}
#else
	fd = get_unused_fd();
	if (fd < 0) {
		sync_fence_put(fence);
		goto out;
	}

	files = current->files;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
	__set_close_on_exec(fd, fdt);
#else
	FD_SET(fd, fdt->close_on_exec);
#endif
	spin_unlock(&files->file_lock);
#endif  /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0) */

	/* bind fence to the new fd */
	sync_fence_install(fence, fd);

	katom->fence = sync_fence_fdget(fd);
	if (katom->fence == NULL) {
		/* The only way the fence can be NULL is if userspace closed it
		 * for us, so we don't need to clean it up */
		fd = -EINVAL;
		goto out;
	}

out:
	fput(tl_file);

	return fd;
}

int kbase_sync_fence_in_from_fd(struct kbase_jd_atom *katom, int fd)
{
	katom->fence = sync_fence_fdget(fd);
	return katom->fence ? 0 : -ENOENT;
}

int kbase_sync_fence_validate(int fd)
{
	struct sync_fence *fence;

	fence = sync_fence_fdget(fd);
	if (!fence)
		return -EINVAL;

	sync_fence_put(fence);
	return 0;
}

/* Returns true if the specified timeline is allocated by Mali */
static int kbase_sync_timeline_is_ours(struct sync_timeline *timeline)
{
	return timeline->ops == &mali_timeline_ops;
}

/* Signals a particular sync point
 *
 * Sync points must be triggered in *exactly* the same order as they are
 * allocated.
 *
 * If they are signaled in the wrong order then a message will be printed
 * in debug builds, and otherwise attempts to signal out-of-order sync_pts
 * will be ignored.
 *
 * result can be negative to indicate error, any other value is interpreted
 * as success.
 */
static void kbase_sync_signal_pt(struct sync_pt *pt, int result)
{
	struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
	struct mali_sync_timeline *mtl = to_mali_sync_timeline(
							sync_pt_parent(pt));
	int signaled;
	int diff;

	mpt->result = result;
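
	/* Advance the timeline's signaled counter to this point's order
	 * with a lock-free cmpxchg loop: re-read and retry if another
	 * signaler races with us, and give up if the timeline is already
	 * at or beyond this point.
	 */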
	do {
		signaled = atomic_read(&mtl->signaled);

		diff = signaled - mpt->order;

		if (diff > 0) {
			/* The timeline is already at or ahead of this point.
			 * This should not happen unless userspace has been
			 * signaling fences out of order, so warn but don't
			 * violate the sync_pt API.
			 * The warning is only in debug builds to prevent
			 * a malicious user being able to spam dmesg.
			 */
#ifdef CONFIG_MALI_DEBUG
			pr_err("Fences were triggered in a different order to allocation!\n");
#endif				/* CONFIG_MALI_DEBUG */
			return;
		}
	} while (atomic_cmpxchg(&mtl->signaled,
				signaled, mpt->order) != signaled);
}
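
/* Signal an atom's output fence with the given result and release it.
 *
 * Returns BASE_JD_EVENT_DONE on success, or BASE_JD_EVENT_JOB_CANCELLED
 * if there is no fence, the fence does not hold exactly one sync point,
 * the point does not belong to a Mali timeline, or result is negative.
 */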
enum base_jd_event_code
kbase_sync_fence_out_trigger(struct kbase_jd_atom *katom, int result)
{
	struct sync_pt *pt;
	struct sync_timeline *timeline;

	if (!katom->fence)
		return BASE_JD_EVENT_JOB_CANCELLED;

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
	if (!list_is_singular(&katom->fence->pt_list_head)) {
#else
	if (katom->fence->num_fences != 1) {
#endif
		/* Not exactly one item in the list - so it didn't (directly)
		 * come from us */
		return BASE_JD_EVENT_JOB_CANCELLED;
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
	pt = list_first_entry(&katom->fence->pt_list_head,
			      struct sync_pt, pt_list);
#else
	pt = container_of(katom->fence->cbs[0].sync_pt, struct sync_pt, base);
#endif
	timeline = sync_pt_parent(pt);

	if (!kbase_sync_timeline_is_ours(timeline)) {
		/* Fence has a sync_pt which isn't ours! */
		return BASE_JD_EVENT_JOB_CANCELLED;
	}

	kbase_sync_signal_pt(pt, result);

	sync_timeline_signal(timeline);

	kbase_sync_fence_out_remove(katom);

	return (result < 0) ? BASE_JD_EVENT_JOB_CANCELLED : BASE_JD_EVENT_DONE;
}
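
/* Read back the fence status; a negative value indicates error and is
 * the only case the callers act on. The status field became an atomic_t
 * in the kernel 3.17 sync framework rework.
 */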
static inline int kbase_fence_get_status(struct sync_fence *fence)
{
	if (!fence)
		return -ENOENT;

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
	return fence->status;
#else
	return atomic_read(&fence->status);
#endif
}

static void kbase_fence_wait_callback(struct sync_fence *fence,
				      struct sync_fence_waiter *waiter)
{
	struct kbase_jd_atom *katom = container_of(waiter,
					struct kbase_jd_atom, sync_waiter);
	struct kbase_context *kctx = katom->kctx;

	/* Propagate the fence status to the atom.
	 * If negative then cancel this atom and its dependencies.
	 */
	if (kbase_fence_get_status(fence) < 0)
		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;

	/* To prevent a potential deadlock we schedule the work onto the
	 * job_done_wq workqueue
	 *
	 * The issue is that we may signal the timeline while holding
	 * kctx->jctx.lock and the callbacks are run synchronously from
	 * sync_timeline_signal. So we simply defer the work.
	 */
	INIT_WORK(&katom->work, kbase_sync_fence_wait_worker);
	queue_work(kctx->jctx.job_done_wq, &katom->work);
}
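
/* Arm an asynchronous wait on the atom's input fence.
 *
 * Returns 0 if the fence had already signaled (the atom can proceed
 * immediately), or 1 if a wait is pending; completion or failure is
 * then delivered via kbase_fence_wait_callback() on job_done_wq.
 */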
int kbase_sync_fence_in_wait(struct kbase_jd_atom *katom)
{
	int ret;

	sync_fence_waiter_init(&katom->sync_waiter, kbase_fence_wait_callback);

	ret = sync_fence_wait_async(katom->fence, &katom->sync_waiter);

	if (ret == 1) {
		/* Already signaled */
		return 0;
	}

	if (ret < 0) {
		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
		/* We should cause the dependent jobs in the bag to be failed;
		 * to do this we schedule the work queue to complete this job */
		INIT_WORK(&katom->work, kbase_sync_fence_wait_worker);
		queue_work(katom->kctx->jctx.job_done_wq, &katom->work);
	}

	return 1;
}

void kbase_sync_fence_in_cancel_wait(struct kbase_jd_atom *katom)
{
	if (sync_fence_cancel_async(katom->fence, &katom->sync_waiter) != 0) {
		/* The wait wasn't cancelled - leave the cleanup for
		 * kbase_fence_wait_callback */
		return;
	}

	/* Wait was cancelled - zap the atom */
	katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;

	kbasep_remove_waiting_soft_job(katom);
	kbase_finish_soft_job(katom);

	if (jd_done_nolock(katom, NULL))
		kbase_js_sched_all(katom->kctx->kbdev);
}

void kbase_sync_fence_out_remove(struct kbase_jd_atom *katom)
{
	if (katom->fence) {
		sync_fence_put(katom->fence);
		katom->fence = NULL;
	}
}

void kbase_sync_fence_in_remove(struct kbase_jd_atom *katom)
{
	if (katom->fence) {
		sync_fence_put(katom->fence);
		katom->fence = NULL;
	}
}
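
/* Copy the atom's fence, its status and its name into @info;
 * returns -ENOENT if no fence is attached to the atom.
 */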
int kbase_sync_fence_in_info_get(struct kbase_jd_atom *katom,
				 struct kbase_sync_fence_info *info)
{
	if (!katom->fence)
		return -ENOENT;

	info->fence = katom->fence;
	info->status = kbase_fence_get_status(katom->fence);
	strlcpy(info->name, katom->fence->name, sizeof(info->name));

	return 0;
}

int kbase_sync_fence_out_info_get(struct kbase_jd_atom *katom,
				  struct kbase_sync_fence_info *info)
{
	if (!katom->fence)
		return -ENOENT;

	info->fence = katom->fence;
	info->status = kbase_fence_get_status(katom->fence);
	strlcpy(info->name, katom->fence->name, sizeof(info->name));

	return 0;
}

#ifdef CONFIG_MALI_FENCE_DEBUG
void kbase_sync_fence_in_dump(struct kbase_jd_atom *katom)
{
	/* Dump out the full state of all the Android sync fences.
	 * The function sync_dump() isn't exported to modules, so force
	 * sync_fence_wait() to time out to trigger sync_dump().
	 */
	if (katom->fence)
		sync_fence_wait(katom->fence, 1);
}
#endif