/*
 * Broadcom Dongle Host Driver (DHD), Generic work queue framework
 * Generic interface to handle dhd deferred work events
 *
 * Copyright (C) 2020, Broadcom.
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id$
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/ip.h>
#include <linux/kfifo.h>

#include <linuxver.h>
#include <osl.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmdevs.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_dbg.h>
#include <dhd_linux_wq.h>

/*
 * XXX: always make sure that the size of this structure is a power of
 * two (2^n); if a new member has to be added, adjust the padding
 * accordingly.
 */
typedef struct dhd_deferred_event {
	u8 event;		/* holds the event */
	void *event_data;	/* holds event specific data */
	event_handler_t event_handler;
	unsigned long pad;	/* for memory alignment to power of 2 */
} dhd_deferred_event_t;

#define DEFRD_EVT_SIZE	(sizeof(dhd_deferred_event_t))
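
/*
 * A compile-time guard for the power-of-2 requirement above: a sketch,
 * assuming the kernel provides BUILD_BUG_ON_NOT_POWER_OF_2(); it must
 * sit in function scope, e.g. at the top of dhd_deferred_work_init():
 *
 *	BUILD_BUG_ON_NOT_POWER_OF_2(DEFRD_EVT_SIZE);
 */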

/*
 * Work events may occur simultaneously.
 * The fifos can hold up to 64 low priority and 16 high priority events.
 */
#define DHD_PRIO_WORK_FIFO_SIZE	(16 * DEFRD_EVT_SIZE)
#define DHD_WORK_FIFO_SIZE	(64 * DEFRD_EVT_SIZE)
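
/*
 * For reference (not enforced here): on an LP64 build the compiler lays
 * dhd_deferred_event_t out as one byte of event id, 7 bytes of implicit
 * padding and three 8-byte members, i.e. 32 bytes in total, making the
 * fifos above 512 bytes and 2 KiB respectively.
 */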

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 32))
#define kfifo_avail(fifo) (fifo->size - kfifo_len(fifo))
#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 32)) */

#define DHD_FIFO_HAS_FREE_SPACE(fifo) \
	((fifo) && (kfifo_avail(fifo) >= DEFRD_EVT_SIZE))
#define DHD_FIFO_HAS_ENOUGH_DATA(fifo) \
	((fifo) && (kfifo_len(fifo) >= DEFRD_EVT_SIZE))

struct dhd_deferred_wq {
	struct work_struct	deferred_work; /* must be the first member */

	struct kfifo		*prio_fifo;
	struct kfifo		*work_fifo;
	u8			*prio_fifo_buf;
	u8			*work_fifo_buf;
	spinlock_t		work_lock;
	void			*dhd_info; /* review: is this member needed? */
	u32			event_skip_mask;
};
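
/*
 * Wrapper over the two kfifo generations: before 2.6.33 kfifo_init()
 * allocated and returned the struct kfifo itself, whereas newer kernels
 * expect the caller to provide it, so it is kzalloc'ed here.
 */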
static inline struct kfifo*
dhd_kfifo_init(u8 *buf, int size, spinlock_t *lock)
{
	struct kfifo *fifo;
	gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
	fifo = kfifo_init(buf, size, flags, lock);
#else
	fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags);
	if (!fifo) {
		return NULL;
	}
	kfifo_init(fifo, buf, size);
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
	return fifo;
}
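
/*
 * Counterpart of dhd_kfifo_init(): kfifo_free() releases the event
 * buffer registered with the fifo; on 2.6.33+ kernels the fifo object
 * kzalloc'ed in dhd_kfifo_init() must be freed separately.
 */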
static inline void
dhd_kfifo_free(struct kfifo *fifo)
{
	kfifo_free(fifo);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33))
	/* pre-2.6.33 kfifo_free() already kfree'd the fifo object itself */
	kfree(fifo);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33)) */
}

/* deferred work functions */
static void dhd_deferred_work_handler(struct work_struct *data);

void*
dhd_deferred_work_init(void *dhd_info)
{
	struct dhd_deferred_wq	*work = NULL;
	u8	*buf;
	unsigned long	fifo_size = 0;
	gfp_t	flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;

	if (!dhd_info) {
		DHD_ERROR(("%s: dhd info not initialized\n", __FUNCTION__));
		goto return_null;
	}

	work = (struct dhd_deferred_wq *)kzalloc(sizeof(struct dhd_deferred_wq),
		flags);
	if (!work) {
		DHD_ERROR(("%s: work queue creation failed\n", __FUNCTION__));
		goto return_null;
	}

	INIT_WORK((struct work_struct *)work, dhd_deferred_work_handler);

	/* initialize event fifo */
	spin_lock_init(&work->work_lock);

	/* allocate buffer to hold prio events */
	fifo_size = DHD_PRIO_WORK_FIFO_SIZE;
	fifo_size = is_power_of_2(fifo_size) ? fifo_size :
			roundup_pow_of_two(fifo_size);
	buf = (u8 *)kzalloc(fifo_size, flags);
	if (!buf) {
		DHD_ERROR(("%s: prio work fifo allocation failed\n",
			__FUNCTION__));
		goto return_null;
	}

	/* initialize prio event fifo */
	work->prio_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
	if (!work->prio_fifo) {
		kfree(buf);
		goto return_null;
	}

	/* allocate buffer to hold work events */
	fifo_size = DHD_WORK_FIFO_SIZE;
	fifo_size = is_power_of_2(fifo_size) ? fifo_size :
			roundup_pow_of_two(fifo_size);
	buf = (u8 *)kzalloc(fifo_size, flags);
	if (!buf) {
		DHD_ERROR(("%s: work fifo allocation failed\n", __FUNCTION__));
		goto return_null;
	}

	/* initialize event fifo */
	work->work_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
	if (!work->work_fifo) {
		kfree(buf);
		goto return_null;
	}

	work->dhd_info = dhd_info;
	work->event_skip_mask = 0;
	DHD_ERROR(("%s: work queue initialized\n", __FUNCTION__));
	return work;

return_null:
	if (work) {
		dhd_deferred_work_deinit(work);
	}

	return NULL;
}
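
/*
 * Usage sketch (illustrative only; every name except the two functions
 * below is hypothetical): the attach path stores the returned handle in
 * the driver context and the detach path tears it down.
 *
 *	dhd->deferred_wq = dhd_deferred_work_init(dhd);
 *	if (!dhd->deferred_wq)
 *		goto fail;
 *	...
 *	dhd_deferred_work_deinit(dhd->deferred_wq);
 */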

void
dhd_deferred_work_deinit(void *work)
{
	struct dhd_deferred_wq *deferred_work = work;

	if (!deferred_work) {
		DHD_ERROR(("%s: deferred work has been freed already\n",
			__FUNCTION__));
		return;
	}

	/* cancel the deferred work handling */
	cancel_work_sync((struct work_struct *)deferred_work);

	/*
	 * free work event fifo.
	 * kfifo_free frees locally allocated fifo buffer
	 */
	if (deferred_work->prio_fifo) {
		dhd_kfifo_free(deferred_work->prio_fifo);
	}

	if (deferred_work->work_fifo) {
		dhd_kfifo_free(deferred_work->work_fifo);
	}

	kfree(deferred_work);
}

/* select kfifo according to priority */
static inline struct kfifo *
dhd_deferred_work_select_kfifo(struct dhd_deferred_wq *deferred_wq,
	u8 priority)
{
	if (priority == DHD_WQ_WORK_PRIORITY_HIGH) {
		return deferred_wq->prio_fifo;
	} else if (priority == DHD_WQ_WORK_PRIORITY_LOW) {
		return deferred_wq->work_fifo;
	} else {
		return NULL;
	}
}

/*
 * Prepares the event to be queued and schedules the deferred work.
 */
int
dhd_deferred_schedule_work(void *workq, void *event_data, u8 event,
	event_handler_t event_handler, u8 priority)
{
	struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)workq;
	struct kfifo *fifo;
	dhd_deferred_event_t deferred_event;
	int bytes_copied = 0;

	if (!deferred_wq) {
		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
		ASSERT(0);
		return DHD_WQ_STS_UNINITIALIZED;
	}

	if (!event || (event >= DHD_MAX_WQ_EVENTS)) {
		DHD_ERROR(("%s: unknown event, event=%d\n", __FUNCTION__,
			event));
		return DHD_WQ_STS_UNKNOWN_EVENT;
	}

	if (!priority || (priority >= DHD_WQ_MAX_PRIORITY)) {
		DHD_ERROR(("%s: unknown priority, priority=%d\n",
			__FUNCTION__, priority));
		return DHD_WQ_STS_UNKNOWN_PRIORITY;
	}

	if (deferred_wq->event_skip_mask & (1 << event)) {
		DHD_ERROR(("%s: skip requested for this event, mask=0x%x\n",
			__FUNCTION__, deferred_wq->event_skip_mask));
		return DHD_WQ_STS_EVENT_SKIPPED;
	}

	/*
	 * The default kfifo element size is 1 byte and can be changed with
	 * kfifo_esize(). Older kernels (e.g. FC11) do not support changing
	 * the element size, so for compatibility it is left at 1 here.
	 */
	ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
	ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);

	deferred_event.event = event;
	deferred_event.event_data = event_data;
	deferred_event.event_handler = event_handler;

	fifo = dhd_deferred_work_select_kfifo(deferred_wq, priority);
	if (DHD_FIFO_HAS_FREE_SPACE(fifo)) {
		bytes_copied = kfifo_in_spinlocked(fifo, &deferred_event,
			DEFRD_EVT_SIZE, &deferred_wq->work_lock);
	}
	if (bytes_copied != DEFRD_EVT_SIZE) {
		DHD_ERROR(("%s: failed to schedule deferred work, "
			"priority=%d, bytes_copied=%d\n", __FUNCTION__,
			priority, bytes_copied));
		return DHD_WQ_STS_SCHED_FAILED;
	}
	schedule_work((struct work_struct *)deferred_wq);
	return DHD_WQ_STS_OK;
}
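
/*
 * Scheduling sketch (illustrative; the handler and the event id are
 * hypothetical, not defined in this file). The handler runs later in
 * process context from the shared worker:
 *
 *	static void my_evt_handler(void *dhd_info, void *data, u8 event);
 *
 *	dhd_deferred_schedule_work(dhd->deferred_wq, NULL,
 *		DHD_WQ_WORK_SOME_EVENT, my_evt_handler,
 *		DHD_WQ_WORK_PRIORITY_LOW);
 */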

/*
 * Dequeues one event for the worker: the priority fifo is drained
 * first; the normal fifo is read only when no complete priority event
 * is available. Returns TRUE when a complete event was copied out.
 */
static bool
dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq,
	dhd_deferred_event_t *event)
{
	int bytes_copied = 0;

	if (!deferred_wq) {
		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
		return FALSE;
	}

	/*
	 * The default kfifo element size is 1 byte and can be changed with
	 * kfifo_esize(). Older kernels (e.g. FC11) do not support changing
	 * the element size, so for compatibility it is left at 1 here.
	 */
	ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
	ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);

	/* handle priority work */
	if (DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->prio_fifo)) {
		bytes_copied = kfifo_out_spinlocked(deferred_wq->prio_fifo,
			event, DEFRD_EVT_SIZE, &deferred_wq->work_lock);
	}

	/* handle normal work if priority work doesn't have enough data */
	if ((bytes_copied != DEFRD_EVT_SIZE) &&
		DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->work_fifo)) {
		bytes_copied = kfifo_out_spinlocked(deferred_wq->work_fifo,
			event, DEFRD_EVT_SIZE, &deferred_wq->work_lock);
	}

	return (bytes_copied == DEFRD_EVT_SIZE);
}

static inline void
dhd_deferred_dump_work_event(dhd_deferred_event_t *work_event)
{
	if (!work_event) {
		DHD_ERROR(("%s: work_event is null\n", __FUNCTION__));
		return;
	}

	DHD_ERROR(("%s: work_event->event = %d\n", __FUNCTION__,
		work_event->event));
	DHD_ERROR(("%s: work_event->event_data = %p\n", __FUNCTION__,
		work_event->event_data));
	DHD_ERROR(("%s: work_event->event_handler = %p\n", __FUNCTION__,
		work_event->event_handler));
}

/*
 * Worker callback, invoked when the deferred work is scheduled;
 * drains and dispatches all queued events.
 */
static void
dhd_deferred_work_handler(struct work_struct *work)
{
	struct dhd_deferred_wq *deferred_work = (struct dhd_deferred_wq *)work;
	dhd_deferred_event_t work_event;

	if (!deferred_work) {
		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
		return;
	}

	do {
		if (!dhd_get_scheduled_work(deferred_work, &work_event)) {
			DHD_TRACE(("%s: no event to handle\n", __FUNCTION__));
			break;
		}

		if (work_event.event >= DHD_MAX_WQ_EVENTS) {
			DHD_ERROR(("%s: unknown event\n", __FUNCTION__));
			dhd_deferred_dump_work_event(&work_event);
			ASSERT(work_event.event < DHD_MAX_WQ_EVENTS);
			continue;
		}

		/*
		 * XXX: don't do a NULL check on 'work_event.event_data':
		 * for some events, e.g. DHD_WQ_WORK_DHD_LOG_DUMP, the
		 * event data is always NULL even though the rest of the
		 * event parameters are valid.
		 */

		if (work_event.event_handler) {
			work_event.event_handler(deferred_work->dhd_info,
				work_event.event_data, work_event.event);
		} else {
			DHD_ERROR(("%s: event handler is null\n",
				__FUNCTION__));
			dhd_deferred_dump_work_event(&work_event);
			ASSERT(work_event.event_handler != NULL);
		}
	} while (1);
}
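
/*
 * Sets or clears the per-event skip bit consulted by
 * dhd_deferred_schedule_work(); while a bit is set, new requests for
 * that event are rejected with DHD_WQ_STS_EVENT_SKIPPED.
 */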
void
dhd_deferred_work_set_skip(void *work, u8 event, bool set)
{
	struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)work;

	if (!deferred_wq || !event || (event >= DHD_MAX_WQ_EVENTS)) {
		DHD_ERROR(("%s: invalid arguments, event=%d\n",
			__FUNCTION__, event));
		return;
	}

	if (set) {
		/* set the skip bit */
		deferred_wq->event_skip_mask |= (1 << event);
	} else {
		/* clear the skip bit */
		deferred_wq->event_skip_mask &= ~(1 << event);
	}
}
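
/*
 * Usage sketch (illustrative; the event id is hypothetical): callers
 * typically mask an event across a critical window, e.g. bus suspend,
 * and unmask it afterwards:
 *
 *	dhd_deferred_work_set_skip(dhd->deferred_wq,
 *		DHD_WQ_WORK_SOME_EVENT, TRUE);
 *	...
 *	dhd_deferred_work_set_skip(dhd->deferred_wq,
 *		DHD_WQ_WORK_SOME_EVENT, FALSE);
 */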