xref: /OK3568_Linux_fs/kernel/include/drm/task_barrier.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Copyright 2019 Advanced Micro Devices, Inc.
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Permission is hereby granted, free of charge, to any person obtaining a
5*4882a593Smuzhiyun  * copy of this software and associated documentation files (the "Software"),
6*4882a593Smuzhiyun  * to deal in the Software without restriction, including without limitation
7*4882a593Smuzhiyun  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8*4882a593Smuzhiyun  * and/or sell copies of the Software, and to permit persons to whom the
9*4882a593Smuzhiyun  * Software is furnished to do so, subject to the following conditions:
10*4882a593Smuzhiyun  *
11*4882a593Smuzhiyun  * The above copyright notice and this permission notice shall be included in
12*4882a593Smuzhiyun  * all copies or substantial portions of the Software.
13*4882a593Smuzhiyun  *
14*4882a593Smuzhiyun  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15*4882a593Smuzhiyun  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16*4882a593Smuzhiyun  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17*4882a593Smuzhiyun  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18*4882a593Smuzhiyun  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19*4882a593Smuzhiyun  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20*4882a593Smuzhiyun  * OTHER DEALINGS IN THE SOFTWARE.
21*4882a593Smuzhiyun  *
22*4882a593Smuzhiyun  */
23*4882a593Smuzhiyun #include <linux/semaphore.h>
24*4882a593Smuzhiyun #include <linux/atomic.h>
25*4882a593Smuzhiyun 
/*
 * Reusable 2 PHASE task barrier (rendezvous point) implementation for N tasks.
 * Based on The Little Book of Semaphores - https://greenteapress.com/wp/semaphores/
 */
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun #ifndef DRM_TASK_BARRIER_H_
34*4882a593Smuzhiyun #define DRM_TASK_BARRIER_H_
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun /*
37*4882a593Smuzhiyun  * Represents an instance of a task barrier.
38*4882a593Smuzhiyun  */
39*4882a593Smuzhiyun struct task_barrier {
40*4882a593Smuzhiyun 	unsigned int n;
41*4882a593Smuzhiyun 	atomic_t count;
42*4882a593Smuzhiyun 	struct semaphore enter_turnstile;
43*4882a593Smuzhiyun 	struct semaphore exit_turnstile;
44*4882a593Smuzhiyun };
45*4882a593Smuzhiyun 
/* Release n waiters queued on the given turnstile semaphore. */
static inline void task_barrier_signal_turnstile(struct semaphore *turnstile,
						 unsigned int n)
{
	/* n is a by-value copy, so counting it down is safe. */
	while (n--)
		up(turnstile);
}
54*4882a593Smuzhiyun 
task_barrier_init(struct task_barrier * tb)55*4882a593Smuzhiyun static inline void task_barrier_init(struct task_barrier *tb)
56*4882a593Smuzhiyun {
57*4882a593Smuzhiyun 	tb->n = 0;
58*4882a593Smuzhiyun 	atomic_set(&tb->count, 0);
59*4882a593Smuzhiyun 	sema_init(&tb->enter_turnstile, 0);
60*4882a593Smuzhiyun 	sema_init(&tb->exit_turnstile, 0);
61*4882a593Smuzhiyun }
62*4882a593Smuzhiyun 
task_barrier_add_task(struct task_barrier * tb)63*4882a593Smuzhiyun static inline void task_barrier_add_task(struct task_barrier *tb)
64*4882a593Smuzhiyun {
65*4882a593Smuzhiyun 	tb->n++;
66*4882a593Smuzhiyun }
67*4882a593Smuzhiyun 
task_barrier_rem_task(struct task_barrier * tb)68*4882a593Smuzhiyun static inline void task_barrier_rem_task(struct task_barrier *tb)
69*4882a593Smuzhiyun {
70*4882a593Smuzhiyun 	tb->n--;
71*4882a593Smuzhiyun }
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun /*
74*4882a593Smuzhiyun  * Lines up all the threads BEFORE the critical point.
75*4882a593Smuzhiyun  *
76*4882a593Smuzhiyun  * When all thread passed this code the entry barrier is back to locked state.
77*4882a593Smuzhiyun  */
task_barrier_enter(struct task_barrier * tb)78*4882a593Smuzhiyun static inline void task_barrier_enter(struct task_barrier *tb)
79*4882a593Smuzhiyun {
80*4882a593Smuzhiyun 	if (atomic_inc_return(&tb->count) == tb->n)
81*4882a593Smuzhiyun 		task_barrier_signal_turnstile(&tb->enter_turnstile, tb->n);
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun 	down(&tb->enter_turnstile);
84*4882a593Smuzhiyun }
85*4882a593Smuzhiyun 
86*4882a593Smuzhiyun /*
87*4882a593Smuzhiyun  * Lines up all the threads AFTER the critical point.
88*4882a593Smuzhiyun  *
89*4882a593Smuzhiyun  * This function is used to avoid any one thread running ahead if the barrier is
90*4882a593Smuzhiyun  *  used repeatedly .
91*4882a593Smuzhiyun  */
task_barrier_exit(struct task_barrier * tb)92*4882a593Smuzhiyun static inline void task_barrier_exit(struct task_barrier *tb)
93*4882a593Smuzhiyun {
94*4882a593Smuzhiyun 	if (atomic_dec_return(&tb->count) == 0)
95*4882a593Smuzhiyun 		task_barrier_signal_turnstile(&tb->exit_turnstile, tb->n);
96*4882a593Smuzhiyun 
97*4882a593Smuzhiyun 	down(&tb->exit_turnstile);
98*4882a593Smuzhiyun }
99*4882a593Smuzhiyun 
/*
 * Convenience helper for when there is nothing to be done between the
 * entry and exit phases: run both back to back.
 */
static inline void task_barrier_full(struct task_barrier *tb)
{
	task_barrier_enter(tb);
	task_barrier_exit(tb);
}
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun #endif
108