xref: /OK3568_Linux_fs/kernel/drivers/infiniband/sw/rxe/rxe_task.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
4*4882a593Smuzhiyun  * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #include <linux/kernel.h>
8*4882a593Smuzhiyun #include <linux/interrupt.h>
9*4882a593Smuzhiyun #include <linux/hardirq.h>
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #include "rxe.h"
12*4882a593Smuzhiyun 
__rxe_do_task(struct rxe_task * task)13*4882a593Smuzhiyun int __rxe_do_task(struct rxe_task *task)
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun {
16*4882a593Smuzhiyun 	int ret;
17*4882a593Smuzhiyun 
18*4882a593Smuzhiyun 	while ((ret = task->func(task->arg)) == 0)
19*4882a593Smuzhiyun 		;
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun 	task->ret = ret;
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun 	return ret;
24*4882a593Smuzhiyun }
25*4882a593Smuzhiyun 
/*
 * this locking is due to a potential race where
 * a second caller finds the task already running
 * but looks just after the last call to func
 */
/* Tasklet callback: drive task->func() until it reports "no more work"
 * (non-zero return) or the iteration budget is spent. The state_lock
 * serializes the START/BUSY/ARMED transitions against concurrent
 * invocations (e.g. rxe_run_task() racing with the tasklet itself).
 */
void rxe_do_task(struct tasklet_struct *t)
{
	int cont;
	int ret;
	unsigned long flags;
	struct rxe_task *task = from_tasklet(task, t, tasklet);
	/* cap consecutive calls to func() so one task cannot hog the cpu */
	unsigned int iterations = RXE_MAX_ITERATIONS;

	spin_lock_irqsave(&task->state_lock, flags);
	switch (task->state) {
	case TASK_STATE_START:
		/* idle -> busy: we own the work loop below */
		task->state = TASK_STATE_BUSY;
		spin_unlock_irqrestore(&task->state_lock, flags);
		break;

	case TASK_STATE_BUSY:
		/* another invocation is already running the loop; arm it so
		 * it makes one more pass after its current call to func
		 */
		task->state = TASK_STATE_ARMED;
		fallthrough;
	case TASK_STATE_ARMED:
		spin_unlock_irqrestore(&task->state_lock, flags);
		return;

	default:
		spin_unlock_irqrestore(&task->state_lock, flags);
		pr_warn("%s failed with bad state %d\n", __func__, task->state);
		return;
	}

	do {
		cont = 0;
		/* func() is called without the lock held; it returns 0 while
		 * there is more work to do
		 */
		ret = task->func(task->arg);

		spin_lock_irqsave(&task->state_lock, flags);
		switch (task->state) {
		case TASK_STATE_BUSY:
			if (ret) {
				/* work drained: go back to idle */
				task->state = TASK_STATE_START;
			} else if (iterations--) {
				cont = 1;
			} else {
				/* reschedule the tasklet and exit
				 * the loop to give up the cpu
				 */
				tasklet_schedule(&task->tasklet);
				task->state = TASK_STATE_START;
			}
			break;

		/* someone tried to run the task since the last time we called
		 * func, so we will call one more time regardless of the
		 * return value
		 */
		case TASK_STATE_ARMED:
			task->state = TASK_STATE_BUSY;
			cont = 1;
			break;

		default:
			pr_warn("%s failed with bad state %d\n", __func__,
				task->state);
		}
		spin_unlock_irqrestore(&task->state_lock, flags);
	} while (cont);

	/* NOTE(review): written outside the lock; last writer wins if two
	 * invocations race here
	 */
	task->ret = ret;
}
97*4882a593Smuzhiyun 
rxe_init_task(void * obj,struct rxe_task * task,void * arg,int (* func)(void *),char * name)98*4882a593Smuzhiyun int rxe_init_task(void *obj, struct rxe_task *task,
99*4882a593Smuzhiyun 		  void *arg, int (*func)(void *), char *name)
100*4882a593Smuzhiyun {
101*4882a593Smuzhiyun 	task->obj	= obj;
102*4882a593Smuzhiyun 	task->arg	= arg;
103*4882a593Smuzhiyun 	task->func	= func;
104*4882a593Smuzhiyun 	snprintf(task->name, sizeof(task->name), "%s", name);
105*4882a593Smuzhiyun 	task->destroyed	= false;
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun 	tasklet_setup(&task->tasklet, rxe_do_task);
108*4882a593Smuzhiyun 
109*4882a593Smuzhiyun 	task->state = TASK_STATE_START;
110*4882a593Smuzhiyun 	spin_lock_init(&task->state_lock);
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun 	return 0;
113*4882a593Smuzhiyun }
114*4882a593Smuzhiyun 
rxe_cleanup_task(struct rxe_task * task)115*4882a593Smuzhiyun void rxe_cleanup_task(struct rxe_task *task)
116*4882a593Smuzhiyun {
117*4882a593Smuzhiyun 	unsigned long flags;
118*4882a593Smuzhiyun 	bool idle;
119*4882a593Smuzhiyun 
120*4882a593Smuzhiyun 	/*
121*4882a593Smuzhiyun 	 * Mark the task, then wait for it to finish. It might be
122*4882a593Smuzhiyun 	 * running in a non-tasklet (direct call) context.
123*4882a593Smuzhiyun 	 */
124*4882a593Smuzhiyun 	task->destroyed = true;
125*4882a593Smuzhiyun 
126*4882a593Smuzhiyun 	do {
127*4882a593Smuzhiyun 		spin_lock_irqsave(&task->state_lock, flags);
128*4882a593Smuzhiyun 		idle = (task->state == TASK_STATE_START);
129*4882a593Smuzhiyun 		spin_unlock_irqrestore(&task->state_lock, flags);
130*4882a593Smuzhiyun 	} while (!idle);
131*4882a593Smuzhiyun 
132*4882a593Smuzhiyun 	tasklet_kill(&task->tasklet);
133*4882a593Smuzhiyun }
134*4882a593Smuzhiyun 
rxe_run_task(struct rxe_task * task,int sched)135*4882a593Smuzhiyun void rxe_run_task(struct rxe_task *task, int sched)
136*4882a593Smuzhiyun {
137*4882a593Smuzhiyun 	if (task->destroyed)
138*4882a593Smuzhiyun 		return;
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun 	if (sched)
141*4882a593Smuzhiyun 		tasklet_schedule(&task->tasklet);
142*4882a593Smuzhiyun 	else
143*4882a593Smuzhiyun 		rxe_do_task(&task->tasklet);
144*4882a593Smuzhiyun }
145*4882a593Smuzhiyun 
/* Disable the task's tasklet; tasklet_disable() waits for a currently
 * running instance to finish before returning.
 */
void rxe_disable_task(struct rxe_task *task)
{
	tasklet_disable(&task->tasklet);
}
150*4882a593Smuzhiyun 
/* Re-enable the task's tasklet; pairs with rxe_disable_task(). */
void rxe_enable_task(struct rxe_task *task)
{
	tasklet_enable(&task->tasklet);
}
155