/*
 * Broadcom Dongle Host Driver (DHD), Generic work queue framework
 * Generic interface to handle dhd deferred work events
 *
 * Copyright (C) 2020, Broadcom.
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id$
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/ip.h>
#include <linux/kfifo.h>

#include <linuxver.h>
#include <osl.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmdevs.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_dbg.h>
#include <dhd_linux_wq.h>

/*
 * XXX: always make sure that the size of this structure is a power of
 * 2 (2^n), i.e., if a new member has to be added, adjust the padding
 * accordingly.
 */
typedef struct dhd_deferred_event {
	u8 event;		/* holds the event */
	void *event_data;	/* holds event specific data */
	event_handler_t event_handler;
	unsigned long pad;	/* for memory alignment to power of 2 */
} dhd_deferred_event_t;

#define DEFRD_EVT_SIZE	(sizeof(dhd_deferred_event_t))
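
/*
 * Illustrative guard (an assumption, not from the original source): kernels
 * that provide BUILD_BUG_ON_NOT_POWER_OF_2() in <linux/bug.h> can enforce
 * the power-of-2 rule above at compile time, e.g. by placing
 *
 *	BUILD_BUG_ON_NOT_POWER_OF_2(DEFRD_EVT_SIZE);
 *
 * inside any function in this file.
 */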

/*
 * Work events may occur simultaneously.
 * The FIFOs can hold up to 64 low-priority events and
 * 16 high-priority events.
 */
#define DHD_PRIO_WORK_FIFO_SIZE	(16 * DEFRD_EVT_SIZE)
#define DHD_WORK_FIFO_SIZE	(64 * DEFRD_EVT_SIZE)

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 32))
#define kfifo_avail(fifo) (fifo->size - kfifo_len(fifo))
#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 32)) */
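/*
 * (Kernels <= 2.6.32 have no kfifo_avail(); the old struct kfifo exposes
 * its buffer size directly, so free space is derived above as
 * size - kfifo_len().)
 */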

#define DHD_FIFO_HAS_FREE_SPACE(fifo) \
	((fifo) && (kfifo_avail(fifo) >= DEFRD_EVT_SIZE))
#define DHD_FIFO_HAS_ENOUGH_DATA(fifo) \
	((fifo) && (kfifo_len(fifo) >= DEFRD_EVT_SIZE))

struct dhd_deferred_wq {
	struct work_struct	deferred_work; /* must be the first member */

	struct kfifo		*prio_fifo;
	struct kfifo		*work_fifo;
	u8			*prio_fifo_buf;
	u8			*work_fifo_buf;
	spinlock_t		work_lock;
	void			*dhd_info; /* review: is this member required? */
	u32			event_skip_mask;
};
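
/*
 * deferred_work must remain the first member: dhd_deferred_work_handler()
 * casts the struct work_struct * it is handed straight back to a
 * struct dhd_deferred_wq *. The layout-independent equivalent would be
 * container_of(work, struct dhd_deferred_wq, deferred_work).
 */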

static inline struct kfifo*
dhd_kfifo_init(u8 *buf, int size, spinlock_t *lock)
{
	struct kfifo *fifo;
	gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
	fifo = kfifo_init(buf, size, flags, lock);
#else
	fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags);
	if (!fifo) {
		return NULL;
	}
	kfifo_init(fifo, buf, size);
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
	return fifo;
}

static inline void
dhd_kfifo_free(struct kfifo *fifo)
{
	kfifo_free(fifo);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33))
	/*
	 * On kernels >= 2.6.33 the struct kfifo was kzalloc'ed in
	 * dhd_kfifo_init() and must be freed here; on older kernels
	 * kfifo_free() already frees the struct itself.
	 */
	kfree(fifo);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33)) */
}

/* deferred work functions */
static void dhd_deferred_work_handler(struct work_struct *data);

void*
dhd_deferred_work_init(void *dhd_info)
{
	struct dhd_deferred_wq	*work = NULL;
	u8	*buf;
	unsigned long	fifo_size = 0;
	gfp_t	flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;

	if (!dhd_info) {
		DHD_ERROR(("%s: dhd info not initialized\n", __FUNCTION__));
		goto return_null;
	}

	work = (struct dhd_deferred_wq *)kzalloc(sizeof(struct dhd_deferred_wq),
		flags);
	if (!work) {
		DHD_ERROR(("%s: work queue creation failed\n", __FUNCTION__));
		goto return_null;
	}

	INIT_WORK((struct work_struct *)work, dhd_deferred_work_handler);

	/* initialize event fifo */
	spin_lock_init(&work->work_lock);

	/* allocate buffer to hold prio events */
	fifo_size = DHD_PRIO_WORK_FIFO_SIZE;
	fifo_size = is_power_of_2(fifo_size) ? fifo_size :
			roundup_pow_of_two(fifo_size);
	buf = (u8 *)kzalloc(fifo_size, flags);
	if (!buf) {
		DHD_ERROR(("%s: prio work fifo allocation failed\n",
			__FUNCTION__));
		goto return_null;
	}

	/* initialize prio event fifo */
	work->prio_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
	if (!work->prio_fifo) {
		kfree(buf);
		goto return_null;
	}

	/* allocate buffer to hold work events */
	fifo_size = DHD_WORK_FIFO_SIZE;
	fifo_size = is_power_of_2(fifo_size) ? fifo_size :
			roundup_pow_of_two(fifo_size);
	buf = (u8 *)kzalloc(fifo_size, flags);
	if (!buf) {
		DHD_ERROR(("%s: work fifo allocation failed\n", __FUNCTION__));
		goto return_null;
	}

	/* initialize event fifo */
	work->work_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
	if (!work->work_fifo) {
		kfree(buf);
		goto return_null;
	}

	work->dhd_info = dhd_info;
	work->event_skip_mask = 0;
	DHD_ERROR(("%s: work queue initialized\n", __FUNCTION__));
	return work;

return_null:
	if (work) {
		dhd_deferred_work_deinit(work);
	}

	return NULL;
}

void
dhd_deferred_work_deinit(void *work)
{
	struct dhd_deferred_wq *deferred_work = work;

	if (!deferred_work) {
		DHD_ERROR(("%s: deferred work has been freed already\n",
			__FUNCTION__));
		return;
	}

	/* cancel the deferred work handling */
	cancel_work_sync((struct work_struct *)deferred_work);

	/*
	 * free work event fifo.
	 * kfifo_free frees locally allocated fifo buffer
	 */
	if (deferred_work->prio_fifo) {
		dhd_kfifo_free(deferred_work->prio_fifo);
	}

	if (deferred_work->work_fifo) {
		dhd_kfifo_free(deferred_work->work_fifo);
	}

	kfree(deferred_work);
}

/* select kfifo according to priority */
static inline struct kfifo *
dhd_deferred_work_select_kfifo(struct dhd_deferred_wq *deferred_wq,
	u8 priority)
{
	if (priority == DHD_WQ_WORK_PRIORITY_HIGH) {
		return deferred_wq->prio_fifo;
	} else if (priority == DHD_WQ_WORK_PRIORITY_LOW) {
		return deferred_wq->work_fifo;
	} else {
		return NULL;
	}
}

/*
 * Prepares the event to be queued and
 * schedules the deferred work.
 */
int
dhd_deferred_schedule_work(void *workq, void *event_data, u8 event,
	event_handler_t event_handler, u8 priority)
{
	struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)workq;
	struct kfifo *fifo;
	dhd_deferred_event_t deferred_event;
	int bytes_copied = 0;

	if (!deferred_wq) {
		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
		ASSERT(0);
		return DHD_WQ_STS_UNINITIALIZED;
	}

	if (!event || (event >= DHD_MAX_WQ_EVENTS)) {
		DHD_ERROR(("%s: unknown event, event=%d\n", __FUNCTION__,
			event));
		return DHD_WQ_STS_UNKNOWN_EVENT;
	}

	if (!priority || (priority >= DHD_WQ_MAX_PRIORITY)) {
		DHD_ERROR(("%s: unknown priority, priority=%d\n",
			__FUNCTION__, priority));
		return DHD_WQ_STS_UNKNOWN_PRIORITY;
	}

	if ((deferred_wq->event_skip_mask & (1 << event))) {
		DHD_ERROR(("%s: skip event requested, mask=0x%x\n",
			__FUNCTION__, deferred_wq->event_skip_mask));
		return DHD_WQ_STS_EVENT_SKIPPED;
	}

	/*
	 * The default kfifo element size is 1 byte, as reported by
	 * kfifo_esize(). Older kernels (FC11) don't support changing the
	 * element size, so for compatibility changing the element size
	 * is not preferred.
	 */
	ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
	ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);

	deferred_event.event = event;
	deferred_event.event_data = event_data;
	deferred_event.event_handler = event_handler;

	fifo = dhd_deferred_work_select_kfifo(deferred_wq, priority);
	if (DHD_FIFO_HAS_FREE_SPACE(fifo)) {
		bytes_copied = kfifo_in_spinlocked(fifo, &deferred_event,
			DEFRD_EVT_SIZE, &deferred_wq->work_lock);
	}
	if (bytes_copied != DEFRD_EVT_SIZE) {
		DHD_ERROR(("%s: failed to schedule deferred work, "
			"priority=%d, bytes_copied=%d\n", __FUNCTION__,
			priority, bytes_copied));
		return DHD_WQ_STS_SCHED_FAILED;
	}
	schedule_work((struct work_struct *)deferred_wq);
	return DHD_WQ_STS_OK;
}
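
/*
 * Usage sketch (hypothetical caller: the event ID, handler name, and wq
 * pointer are illustrative, and the event_handler_t signature is inferred
 * from the call in dhd_deferred_work_handler() below):
 *
 *	static void example_event_handler(void *dhd_info, void *event_data,
 *		u8 event)
 *	{
 *		// runs later in process context, so it may sleep
 *	}
 *
 *	dhd_deferred_schedule_work(wq, event_data, DHD_WQ_WORK_EXAMPLE,
 *		example_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
 */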

static bool
dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq,
	dhd_deferred_event_t *event)
{
	int bytes_copied = 0;

	if (!deferred_wq) {
		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
		return FALSE;
	}

	/*
	 * The default kfifo element size is 1 byte, as reported by
	 * kfifo_esize(). Older kernels (FC11) don't support changing the
	 * element size, so for compatibility changing the element size
	 * is not preferred.
	 */
	ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
	ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);

	/* handle priority work */
	if (DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->prio_fifo)) {
		bytes_copied = kfifo_out_spinlocked(deferred_wq->prio_fifo,
			event, DEFRD_EVT_SIZE, &deferred_wq->work_lock);
	}

	/* handle normal work if priority work doesn't have enough data */
	if ((bytes_copied != DEFRD_EVT_SIZE) &&
		DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->work_fifo)) {
		bytes_copied = kfifo_out_spinlocked(deferred_wq->work_fifo,
			event, DEFRD_EVT_SIZE, &deferred_wq->work_lock);
	}

	return (bytes_copied == DEFRD_EVT_SIZE);
}

static inline void
dhd_deferred_dump_work_event(dhd_deferred_event_t *work_event)
{
	if (!work_event) {
		DHD_ERROR(("%s: work_event is null\n", __FUNCTION__));
		return;
	}

	DHD_ERROR(("%s: work_event->event = %d\n", __FUNCTION__,
		work_event->event));
	DHD_ERROR(("%s: work_event->event_data = %p\n", __FUNCTION__,
		work_event->event_data));
	DHD_ERROR(("%s: work_event->event_handler = %p\n", __FUNCTION__,
		work_event->event_handler));
}

/*
 * Called when work is scheduled
 */
static void
dhd_deferred_work_handler(struct work_struct *work)
{
	struct dhd_deferred_wq *deferred_work = (struct dhd_deferred_wq *)work;
	dhd_deferred_event_t work_event;

	if (!deferred_work) {
		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
		return;
	}

	do {
		if (!dhd_get_scheduled_work(deferred_work, &work_event)) {
			DHD_TRACE(("%s: no event to handle\n", __FUNCTION__));
			break;
		}

		if (work_event.event >= DHD_MAX_WQ_EVENTS) {
			DHD_ERROR(("%s: unknown event\n", __FUNCTION__));
			dhd_deferred_dump_work_event(&work_event);
			ASSERT(work_event.event < DHD_MAX_WQ_EVENTS);
			continue;
		}

		/*
		 * XXX: don't NULL-check 'work_event.event_data': for some
		 * events, like DHD_WQ_WORK_DHD_LOG_DUMP, the event data is
		 * always NULL even though the rest of the event parameters
		 * are valid.
		 */

		if (work_event.event_handler) {
			work_event.event_handler(deferred_work->dhd_info,
				work_event.event_data, work_event.event);
		} else {
			DHD_ERROR(("%s: event handler is null\n",
				__FUNCTION__));
			dhd_deferred_dump_work_event(&work_event);
			ASSERT(work_event.event_handler != NULL);
		}
	} while (1);
}

void
dhd_deferred_work_set_skip(void *work, u8 event, bool set)
{
	struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)work;

	if (!deferred_wq || !event || (event >= DHD_MAX_WQ_EVENTS)) {
		DHD_ERROR(("%s: invalid arguments\n", __FUNCTION__));
		return;
	}

	if (set) {
		/* set */
		deferred_wq->event_skip_mask |= (1 << event);
	} else {
		/* clear */
		deferred_wq->event_skip_mask &= ~(1 << event);
	}
}
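
/*
 * Usage sketch (the event ID is illustrative): a caller can temporarily
 * stop a given event type from being queued, e.g. around teardown:
 *
 *	dhd_deferred_work_set_skip(wq, DHD_WQ_WORK_EXAMPLE, TRUE);
 *	...
 *	dhd_deferred_work_set_skip(wq, DHD_WQ_WORK_EXAMPLE, FALSE);
 *
 * While the bit is set, dhd_deferred_schedule_work() returns
 * DHD_WQ_STS_EVENT_SKIPPED for that event.
 */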
414