/*
 * Broadcom Dongle Host Driver (DHD), Generic work queue framework
 * Generic interface to handle dhd deferred work events
 *
 * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
 *
 * Copyright (C) 1999-2017, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_linux_wq.c 675839 2016-12-19 03:07:26Z $
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/ip.h>
#include <linux/kfifo.h>

#include <linuxver.h>
#include <osl.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmdevs.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_dbg.h>
#include <dhd_linux_wq.h>

typedef struct dhd_deferred_event {
	u8 event;			/* holds the event */
	void *event_data;		/* holds event specific data */
	event_handler_t event_handler;
	unsigned long pad;		/* for memory alignment to power of 2 */
} dhd_deferred_event_t;

#define DEFRD_EVT_SIZE	(sizeof(dhd_deferred_event_t))

/*
 * work events may occur simultaneously.
 * can hold up to 64 low priority events and 16 high priority events
 */
#define DHD_PRIO_WORK_FIFO_SIZE	(16 * DEFRD_EVT_SIZE)
#define DHD_WORK_FIFO_SIZE	(64 * DEFRD_EVT_SIZE)

#define DHD_FIFO_HAS_FREE_SPACE(fifo) \
	((fifo) && (kfifo_avail(fifo) >= DEFRD_EVT_SIZE))
#define DHD_FIFO_HAS_ENOUGH_DATA(fifo) \
	((fifo) && (kfifo_len(fifo) >= DEFRD_EVT_SIZE))
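
/*
 * Sizing sketch (assumes a typical LP64 build; not guaranteed by this
 * file): dhd_deferred_event_t is one u8 padded up plus three 8-byte
 * members, so DEFRD_EVT_SIZE == 32. The fifo sizes above then come out
 * to 16 * 32 = 512 and 64 * 32 = 2048 bytes, both already powers of
 * two, so the roundup_pow_of_two() calls in dhd_deferred_work_init()
 * pass them through unchanged.
 */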

struct dhd_deferred_wq {
	struct work_struct deferred_work;	/* should be the first member */

	struct kfifo *prio_fifo;
	struct kfifo *work_fifo;
	u8 *prio_fifo_buf;
	u8 *work_fifo_buf;
	spinlock_t work_lock;
	void *dhd_info;			/* review: is this still needed? */
	u32 event_skip_mask;
};

/* 'lock' is currently unused; presumably kept for older kfifo APIs */
static inline struct kfifo*
dhd_kfifo_init(u8 *buf, int size, spinlock_t *lock)
{
	struct kfifo *fifo;
	gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;

	fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags);
	if (!fifo) {
		return NULL;
	}
	kfifo_init(fifo, buf, size);
	return fifo;
}

static inline void
dhd_kfifo_free(struct kfifo *fifo)
{
	kfifo_free(fifo);
	kfree(fifo);
}

/* deferred work functions */
static void dhd_deferred_work_handler(struct work_struct *data);

void*
dhd_deferred_work_init(void *dhd_info)
{
	struct dhd_deferred_wq *work = NULL;
	u8 *buf;
	unsigned long fifo_size = 0;
	gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;

	if (!dhd_info) {
		DHD_ERROR(("%s: dhd info not initialized\n", __FUNCTION__));
		goto return_null;
	}

	work = (struct dhd_deferred_wq *)kzalloc(sizeof(struct dhd_deferred_wq),
		flags);
	if (!work) {
		DHD_ERROR(("%s: work queue creation failed\n", __FUNCTION__));
		goto return_null;
	}

	INIT_WORK((struct work_struct *)work, dhd_deferred_work_handler);

	/* initialize event fifo */
	spin_lock_init(&work->work_lock);

	/* allocate buffer to hold prio events */
	fifo_size = DHD_PRIO_WORK_FIFO_SIZE;
	fifo_size = is_power_of_2(fifo_size) ? fifo_size :
		roundup_pow_of_two(fifo_size);
	buf = (u8 *)kzalloc(fifo_size, flags);
	if (!buf) {
		DHD_ERROR(("%s: prio work fifo allocation failed\n",
			__FUNCTION__));
		goto return_null;
	}

	/* Initialize prio event fifo */
	work->prio_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
	if (!work->prio_fifo) {
		kfree(buf);
		goto return_null;
	}

	/* allocate buffer to hold work events */
	fifo_size = DHD_WORK_FIFO_SIZE;
	fifo_size = is_power_of_2(fifo_size) ? fifo_size :
		roundup_pow_of_two(fifo_size);
	buf = (u8 *)kzalloc(fifo_size, flags);
	if (!buf) {
		DHD_ERROR(("%s: work fifo allocation failed\n", __FUNCTION__));
		goto return_null;
	}

	/* Initialize event fifo */
	work->work_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
	if (!work->work_fifo) {
		kfree(buf);
		goto return_null;
	}

	work->dhd_info = dhd_info;
	work->event_skip_mask = 0;
	DHD_ERROR(("%s: work queue initialized\n", __FUNCTION__));
	return work;

return_null:
	if (work) {
		dhd_deferred_work_deinit(work);
	}

	return NULL;
}
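
/*
 * Lifecycle sketch (illustrative only; 'dhdinfo' is a hypothetical
 * driver-private pointer, not defined in this file):
 *
 *	void *wq = dhd_deferred_work_init(dhdinfo);
 *	if (!wq)
 *		return -ENOMEM;
 *	... schedule events via dhd_deferred_schedule_work() ...
 *	dhd_deferred_work_deinit(wq);
 */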

void
dhd_deferred_work_deinit(void *work)
{
	struct dhd_deferred_wq *deferred_work = work;

	if (!deferred_work) {
		DHD_ERROR(("%s: deferred work has been freed already\n",
			__FUNCTION__));
		return;
	}

	/* cancel the deferred work handling */
	cancel_work_sync((struct work_struct *)deferred_work);

	/*
	 * free work event fifo.
	 * kfifo_free frees locally allocated fifo buffer
	 */
	if (deferred_work->prio_fifo) {
		dhd_kfifo_free(deferred_work->prio_fifo);
	}

	if (deferred_work->work_fifo) {
		dhd_kfifo_free(deferred_work->work_fifo);
	}

	kfree(deferred_work);
}

/* select kfifo according to priority */
static inline struct kfifo *
dhd_deferred_work_select_kfifo(struct dhd_deferred_wq *deferred_wq,
	u8 priority)
{
	if (priority == DHD_WQ_WORK_PRIORITY_HIGH) {
		return deferred_wq->prio_fifo;
	} else if (priority == DHD_WQ_WORK_PRIORITY_LOW) {
		return deferred_wq->work_fifo;
	} else {
		return NULL;
	}
}

/*
 * Prepares an event and queues it on the fifo that matches its
 * priority, then schedules the deferred work handler.
 */
int
dhd_deferred_schedule_work(void *workq, void *event_data, u8 event,
	event_handler_t event_handler, u8 priority)
{
	struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)workq;
	struct kfifo *fifo;
	dhd_deferred_event_t deferred_event;
	int bytes_copied = 0;

	if (!deferred_wq) {
		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
		ASSERT(0);
		return DHD_WQ_STS_UNINITIALIZED;
	}

	if (!event || (event >= DHD_MAX_WQ_EVENTS)) {
		DHD_ERROR(("%s: unknown event, event=%d\n", __FUNCTION__,
			event));
		return DHD_WQ_STS_UNKNOWN_EVENT;
	}

	if (!priority || (priority >= DHD_WQ_MAX_PRIORITY)) {
		DHD_ERROR(("%s: unknown priority, priority=%d\n",
			__FUNCTION__, priority));
		return DHD_WQ_STS_UNKNOWN_PRIORITY;
	}

	if ((deferred_wq->event_skip_mask & (1 << event))) {
		DHD_ERROR(("%s: Skip event requested. Mask = 0x%x\n",
			__FUNCTION__, deferred_wq->event_skip_mask));
		return DHD_WQ_STS_EVENT_SKIPPED;
	}

	/*
	 * The default kfifo element size is 1 byte (see kfifo_esize()).
	 * Older kernels (e.g. FC11) don't support a different element
	 * size, so for compatibility the default is kept.
	 */
	ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
	ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);

	deferred_event.event = event;
	deferred_event.event_data = event_data;
	deferred_event.event_handler = event_handler;

	fifo = dhd_deferred_work_select_kfifo(deferred_wq, priority);
	if (DHD_FIFO_HAS_FREE_SPACE(fifo)) {
		bytes_copied = kfifo_in_spinlocked(fifo, &deferred_event,
			DEFRD_EVT_SIZE, &deferred_wq->work_lock);
	}
	if (bytes_copied != DEFRD_EVT_SIZE) {
		DHD_ERROR(("%s: failed to schedule deferred work, "
			"priority=%d, bytes_copied=%d\n", __FUNCTION__,
			priority, bytes_copied));
		return DHD_WQ_STS_SCHED_FAILED;
	}
	schedule_work((struct work_struct *)deferred_wq);
	return DHD_WQ_STS_OK;
}
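
/*
 * Usage sketch (illustrative only; the event id and handler below are
 * hypothetical, not defined in this file; the handler signature matches
 * the invocation in dhd_deferred_work_handler()):
 *
 *	static void wl_evt_handler(void *dhd_info, void *event_data, u8 event)
 *	{
 *		... process event_data, freeing it if it was allocated ...
 *	}
 *
 *	dhd_deferred_schedule_work(wq, data, DHD_WQ_WORK_SOME_EVT,
 *		wl_evt_handler, DHD_WQ_WORK_PRIORITY_LOW);
 */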

/*
 * Dequeues one scheduled event, draining the high priority fifo before
 * the normal one. Returns TRUE if an event was copied into 'event'.
 */
static bool
dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq,
	dhd_deferred_event_t *event)
{
	int bytes_copied = 0;

	if (!deferred_wq) {
		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
		return FALSE;
	}

	/*
	 * The default kfifo element size is 1 byte (see kfifo_esize()).
	 * Older kernels (e.g. FC11) don't support a different element
	 * size, so for compatibility the default is kept.
	 */
	ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
	ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);

	/* handle priority work */
	if (DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->prio_fifo)) {
		bytes_copied = kfifo_out_spinlocked(deferred_wq->prio_fifo,
			event, DEFRD_EVT_SIZE, &deferred_wq->work_lock);
	}

	/* handle normal work if priority work doesn't have enough data */
	if ((bytes_copied != DEFRD_EVT_SIZE) &&
		DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->work_fifo)) {
		bytes_copied = kfifo_out_spinlocked(deferred_wq->work_fifo,
			event, DEFRD_EVT_SIZE, &deferred_wq->work_lock);
	}

	return (bytes_copied == DEFRD_EVT_SIZE);
}

static inline void
dhd_deferred_dump_work_event(dhd_deferred_event_t *work_event)
{
	if (!work_event) {
		DHD_ERROR(("%s: work_event is null\n", __FUNCTION__));
		return;
	}

	DHD_ERROR(("%s: work_event->event = %d\n", __FUNCTION__,
		work_event->event));
	DHD_ERROR(("%s: work_event->event_data = %p\n", __FUNCTION__,
		work_event->event_data));
	DHD_ERROR(("%s: work_event->event_handler = %p\n", __FUNCTION__,
		work_event->event_handler));
}

/*
 * Called when work is scheduled; drains both fifos and runs each
 * event's handler.
 */
static void
dhd_deferred_work_handler(struct work_struct *work)
{
	struct dhd_deferred_wq *deferred_work = (struct dhd_deferred_wq *)work;
	dhd_deferred_event_t work_event;

	if (!deferred_work) {
		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
		return;
	}

	do {
		if (!dhd_get_scheduled_work(deferred_work, &work_event)) {
			DHD_TRACE(("%s: no event to handle\n", __FUNCTION__));
			break;
		}

		if (work_event.event >= DHD_MAX_WQ_EVENTS) {
			DHD_ERROR(("%s: unknown event\n", __FUNCTION__));
			dhd_deferred_dump_work_event(&work_event);
			ASSERT(work_event.event < DHD_MAX_WQ_EVENTS);
			continue;
		}

		if (work_event.event_handler) {
			work_event.event_handler(deferred_work->dhd_info,
				work_event.event_data, work_event.event);
		} else {
			DHD_ERROR(("%s: event handler is null\n",
				__FUNCTION__));
			dhd_deferred_dump_work_event(&work_event);
			ASSERT(work_event.event_handler != NULL);
		}
	} while (1);

	return;
}

void
dhd_deferred_work_set_skip(void *work, u8 event, bool set)
{
	struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)work;

	if (!deferred_wq || !event || (event >= DHD_MAX_WQ_EVENTS)) {
		DHD_ERROR(("%s: invalid argument\n", __FUNCTION__));
		return;
	}

	if (set) {
		/* Set */
		deferred_wq->event_skip_mask |= (1 << event);
	} else {
		/* Clear */
		deferred_wq->event_skip_mask &= ~(1 << event);
	}
}
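
/*
 * Example (hypothetical event id): suppress an event while tearing
 * down, then re-enable it:
 *
 *	dhd_deferred_work_set_skip(wq, DHD_WQ_WORK_SOME_EVT, TRUE);
 *	... teardown that must not race with the handler ...
 *	dhd_deferred_work_set_skip(wq, DHD_WQ_WORK_SOME_EVT, FALSE);
 */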