/*
 * Broadcom Dongle Host Driver (DHD), Generic work queue framework
 * Generic interface to handle dhd deferred work events
 *
 * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
 *
 * Copyright (C) 1999-2017, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_linux_wq.c 675839 2016-12-19 03:07:26Z $
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/ip.h>
#include <linux/kfifo.h>

#include <linuxver.h>
#include <osl.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmdevs.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_dbg.h>
#include <dhd_linux_wq.h>

typedef struct dhd_deferred_event {
	u8 event;			/* holds the event */
	void *event_data;		/* holds event specific data */
	event_handler_t event_handler;
	unsigned long pad;		/* for memory alignment to power of 2 */
} dhd_deferred_event_t;

#define DEFRD_EVT_SIZE	(sizeof(dhd_deferred_event_t))

/*
 * Work events may occur simultaneously.
 * The fifos can hold up to 64 low priority events and
 * 16 high priority events.
 */
#define DHD_PRIO_WORK_FIFO_SIZE	(16 * DEFRD_EVT_SIZE)
#define DHD_WORK_FIFO_SIZE	(64 * DEFRD_EVT_SIZE)

#define DHD_FIFO_HAS_FREE_SPACE(fifo) \
	((fifo) && (kfifo_avail(fifo) >= DEFRD_EVT_SIZE))
#define DHD_FIFO_HAS_ENOUGH_DATA(fifo) \
	((fifo) && (kfifo_len(fifo) >= DEFRD_EVT_SIZE))

struct dhd_deferred_wq {
	struct work_struct deferred_work; /* must be the first member:
					   * the handler casts work_struct *
					   * back to dhd_deferred_wq *
					   */

	struct kfifo *prio_fifo;
	struct kfifo *work_fifo;
	u8 *prio_fifo_buf;
	u8 *work_fifo_buf;
	spinlock_t work_lock;
	void *dhd_info;		/* back pointer; review: is this required? */
	u32 event_skip_mask;
};

/*
 * Note: 'lock' is unused here; fifo access is serialized later via
 * kfifo_in_spinlocked()/kfifo_out_spinlocked() using the same lock.
 */
static inline struct kfifo *
dhd_kfifo_init(u8 *buf, int size, spinlock_t *lock)
{
	struct kfifo *fifo;
	gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;

	fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags);
	if (!fifo) {
		return NULL;
	}
	kfifo_init(fifo, buf, size);
	return fifo;
}

static inline void
dhd_kfifo_free(struct kfifo *fifo)
{
	kfifo_free(fifo);
	kfree(fifo);
}

/* deferred work functions */
static void dhd_deferred_work_handler(struct work_struct *data);

void *
dhd_deferred_work_init(void *dhd_info)
{
	struct dhd_deferred_wq *work = NULL;
	u8 *buf;
	unsigned long fifo_size = 0;
	gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;

	if (!dhd_info) {
		DHD_ERROR(("%s: dhd info not initialized\n", __FUNCTION__));
		goto return_null;
	}

	work = (struct dhd_deferred_wq *)kzalloc(sizeof(struct dhd_deferred_wq),
		flags);
	if (!work) {
		DHD_ERROR(("%s: work queue creation failed\n", __FUNCTION__));
		goto return_null;
	}

	INIT_WORK((struct work_struct *)work, dhd_deferred_work_handler);

	/* initialize event fifo */
	spin_lock_init(&work->work_lock);

	/* allocate buffer to hold prio events */
	fifo_size = DHD_PRIO_WORK_FIFO_SIZE;
	fifo_size = is_power_of_2(fifo_size) ? fifo_size :
		roundup_pow_of_two(fifo_size);
	buf = (u8 *)kzalloc(fifo_size, flags);
	if (!buf) {
		DHD_ERROR(("%s: prio work fifo allocation failed\n",
			__FUNCTION__));
		goto return_null;
	}

	/* initialize prio event fifo */
	work->prio_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
	if (!work->prio_fifo) {
		kfree(buf);
		goto return_null;
	}

	/* allocate buffer to hold work events */
	fifo_size = DHD_WORK_FIFO_SIZE;
	fifo_size = is_power_of_2(fifo_size) ? fifo_size :
		roundup_pow_of_two(fifo_size);
	buf = (u8 *)kzalloc(fifo_size, flags);
	if (!buf) {
		DHD_ERROR(("%s: work fifo allocation failed\n", __FUNCTION__));
		goto return_null;
	}

	/* initialize event fifo */
	work->work_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
	if (!work->work_fifo) {
		kfree(buf);
		goto return_null;
	}

	work->dhd_info = dhd_info;
	work->event_skip_mask = 0;
	DHD_ERROR(("%s: work queue initialized\n", __FUNCTION__));
	return work;

return_null:
	if (work) {
		dhd_deferred_work_deinit(work);
	}

	return NULL;
}
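
/*
 * Usage sketch (illustrative only, not compiled): the work queue is
 * typically created once when the driver attaches and destroyed on
 * detach. The attach/detach context here is an assumption for
 * illustration; the real callers live elsewhere in the DHD core.
 *
 *	void *deferred_wq;
 *
 *	deferred_wq = dhd_deferred_work_init(dhd_info);
 *	if (!deferred_wq) {
 *		// handle allocation failure at attach time
 *	}
 *	...
 *	dhd_deferred_work_deinit(deferred_wq);	// cancels pending work
 *	deferred_wq = NULL;
 */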

void
dhd_deferred_work_deinit(void *work)
{
	struct dhd_deferred_wq *deferred_work = work;

	if (!deferred_work) {
		DHD_ERROR(("%s: deferred work has been freed already\n",
			__FUNCTION__));
		return;
	}

	/* cancel the deferred work handling */
	cancel_work_sync((struct work_struct *)deferred_work);

	/*
	 * free the work event fifos.
	 * kfifo_free() also releases the fifo buffer that was
	 * allocated locally in dhd_deferred_work_init()
	 */
	if (deferred_work->prio_fifo) {
		dhd_kfifo_free(deferred_work->prio_fifo);
	}

	if (deferred_work->work_fifo) {
		dhd_kfifo_free(deferred_work->work_fifo);
	}

	kfree(deferred_work);
}

/* select kfifo according to priority */
static inline struct kfifo *
dhd_deferred_work_select_kfifo(struct dhd_deferred_wq *deferred_wq,
	u8 priority)
{
	if (priority == DHD_WQ_WORK_PRIORITY_HIGH) {
		return deferred_wq->prio_fifo;
	} else if (priority == DHD_WQ_WORK_PRIORITY_LOW) {
		return deferred_wq->work_fifo;
	} else {
		return NULL;
	}
}

/*
 * Prepares the event and queues it into the fifo matching its
 * priority, then schedules the work handler.
 */
int
dhd_deferred_schedule_work(void *workq, void *event_data, u8 event,
	event_handler_t event_handler, u8 priority)
{
	struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)workq;
	struct kfifo *fifo;
	dhd_deferred_event_t deferred_event;
	int bytes_copied = 0;

	if (!deferred_wq) {
		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
		ASSERT(0);
		return DHD_WQ_STS_UNINITIALIZED;
	}

	if (!event || (event >= DHD_MAX_WQ_EVENTS)) {
		DHD_ERROR(("%s: unknown event, event=%d\n", __FUNCTION__,
			event));
		return DHD_WQ_STS_UNKNOWN_EVENT;
	}

	if (!priority || (priority >= DHD_WQ_MAX_PRIORITY)) {
		DHD_ERROR(("%s: unknown priority, priority=%d\n",
			__FUNCTION__, priority));
		return DHD_WQ_STS_UNKNOWN_PRIORITY;
	}

	if ((deferred_wq->event_skip_mask & (1 << event))) {
		DHD_ERROR(("%s: skip event requested. mask = 0x%x\n",
			__FUNCTION__, deferred_wq->event_skip_mask));
		return DHD_WQ_STS_EVENT_SKIPPED;
	}

	/*
	 * The default kfifo element size is 1 byte (as reported by
	 * kfifo_esize()). Older kernels (e.g. FC11) do not support a
	 * different element size, so for compatibility the default is
	 * kept and whole events are copied in as raw byte streams.
	 */
	ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
	ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);

	deferred_event.event = event;
	deferred_event.event_data = event_data;
	deferred_event.event_handler = event_handler;

	fifo = dhd_deferred_work_select_kfifo(deferred_wq, priority);
	if (DHD_FIFO_HAS_FREE_SPACE(fifo)) {
		bytes_copied = kfifo_in_spinlocked(fifo, &deferred_event,
			DEFRD_EVT_SIZE, &deferred_wq->work_lock);
	}
	if (bytes_copied != DEFRD_EVT_SIZE) {
		DHD_ERROR(("%s: failed to schedule deferred work, "
			"priority=%d, bytes_copied=%d\n", __FUNCTION__,
			priority, bytes_copied));
		return DHD_WQ_STS_SCHED_FAILED;
	}
	schedule_work((struct work_struct *)deferred_wq);
	return DHD_WQ_STS_OK;
}
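
/*
 * Usage sketch (illustrative only, not compiled): queueing a
 * low-priority event. DHD_WQ_WORK_EXAMPLE and example_handler() are
 * hypothetical names invented here; real event ids come from
 * dhd_linux_wq.h and handlers are supplied by the caller.
 *
 *	static void example_handler(void *dhd_info, void *event_data,
 *		u8 event)
 *	{
 *		// runs later in process context via the work handler
 *	}
 *
 *	ret = dhd_deferred_schedule_work(deferred_wq, event_data,
 *		DHD_WQ_WORK_EXAMPLE, example_handler,
 *		DHD_WQ_WORK_PRIORITY_LOW);
 *	if (ret != DHD_WQ_STS_OK) {
 *		// not queued; caller keeps ownership of event_data
 *	}
 */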

static bool
dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq,
	dhd_deferred_event_t *event)
{
	int bytes_copied = 0;

	if (!deferred_wq) {
		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
		return false;
	}

	/*
	 * The default kfifo element size is 1 byte (as reported by
	 * kfifo_esize()); whole events are copied out as raw byte
	 * streams, so both fifos must use the 1-byte element size.
	 */
	ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
	ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);

	/* handle priority work first */
	if (DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->prio_fifo)) {
		bytes_copied = kfifo_out_spinlocked(deferred_wq->prio_fifo,
			event, DEFRD_EVT_SIZE, &deferred_wq->work_lock);
	}

	/* fall back to normal work if no complete priority event is queued */
	if ((bytes_copied != DEFRD_EVT_SIZE) &&
		DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->work_fifo)) {
		bytes_copied = kfifo_out_spinlocked(deferred_wq->work_fifo,
			event, DEFRD_EVT_SIZE, &deferred_wq->work_lock);
	}

	return (bytes_copied == DEFRD_EVT_SIZE);
}

static inline void
dhd_deferred_dump_work_event(dhd_deferred_event_t *work_event)
{
	if (!work_event) {
		DHD_ERROR(("%s: work_event is null\n", __FUNCTION__));
		return;
	}

	DHD_ERROR(("%s: work_event->event = %d\n", __FUNCTION__,
		work_event->event));
	DHD_ERROR(("%s: work_event->event_data = %p\n", __FUNCTION__,
		work_event->event_data));
	DHD_ERROR(("%s: work_event->event_handler = %p\n", __FUNCTION__,
		work_event->event_handler));
}

/*
 * Called by the kernel workqueue when work is scheduled.
 * Drains both fifos, dispatching high priority events first.
 */
static void
dhd_deferred_work_handler(struct work_struct *work)
{
	struct dhd_deferred_wq *deferred_work = (struct dhd_deferred_wq *)work;
	dhd_deferred_event_t work_event;

	if (!deferred_work) {
		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
		return;
	}

	do {
		if (!dhd_get_scheduled_work(deferred_work, &work_event)) {
			DHD_TRACE(("%s: no event to handle\n", __FUNCTION__));
			break;
		}

		if (work_event.event >= DHD_MAX_WQ_EVENTS) {
			DHD_ERROR(("%s: unknown event\n", __FUNCTION__));
			dhd_deferred_dump_work_event(&work_event);
			ASSERT(work_event.event < DHD_MAX_WQ_EVENTS);
			continue;
		}

		if (work_event.event_handler) {
			work_event.event_handler(deferred_work->dhd_info,
				work_event.event_data, work_event.event);
		} else {
			DHD_ERROR(("%s: event handler is null\n",
				__FUNCTION__));
			dhd_deferred_dump_work_event(&work_event);
			ASSERT(work_event.event_handler != NULL);
		}
	} while (1);
}

void
dhd_deferred_work_set_skip(void *work, u8 event, bool set)
{
	struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)work;

	if (!deferred_wq || !event || (event >= DHD_MAX_WQ_EVENTS)) {
		DHD_ERROR(("%s: invalid arguments\n", __FUNCTION__));
		return;
	}

	if (set) {
		/* set the skip bit for this event */
		deferred_wq->event_skip_mask |= (1 << event);
	} else {
		/* clear the skip bit for this event */
		deferred_wq->event_skip_mask &= ~(1 << event);
	}
}
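
/*
 * Usage sketch (illustrative only, not compiled): masking an event so
 * that new occurrences are rejected at dhd_deferred_schedule_work()
 * time, e.g. across a teardown window. DHD_WQ_WORK_EXAMPLE is a
 * hypothetical event id.
 *
 *	dhd_deferred_work_set_skip(deferred_wq, DHD_WQ_WORK_EXAMPLE, true);
 *	// scheduling now returns DHD_WQ_STS_EVENT_SKIPPED for this event
 *	dhd_deferred_work_set_skip(deferred_wq, DHD_WQ_WORK_EXAMPLE, false);
 */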