/* xref: /OK3568_Linux_fs/external/rkwifibt/drivers/bcmdhd/wl_event.c
 * (revision 4882a59341e53eb6f0b4789bf948001014eff981)
 */

#include <wl_android.h>
#ifdef WL_EVENT
#include <bcmendian.h>
#include <dhd_config.h>

#define EVENT_ERROR(name, arg1, args...) \
	do { \
		if (android_msg_level & ANDROID_ERROR_LEVEL) { \
			printf("[%s] EVENT-ERROR) %s : " arg1, name, __func__, ## args); \
		} \
	} while (0)
#define EVENT_TRACE(name, arg1, args...) \
	do { \
		if (android_msg_level & ANDROID_TRACE_LEVEL) { \
			printf("[%s] EVENT-TRACE) %s : " arg1, name, __func__, ## args); \
		} \
	} while (0)
#define EVENT_DBG(name, arg1, args...) \
	do { \
		if (android_msg_level & ANDROID_DBG_LEVEL) { \
			printf("[%s] EVENT-DBG) %s : " arg1, name, __func__, ## args); \
		} \
	} while (0)

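/*
 * list_first_entry()/container_of() perform casts that trip -Wcast-qual;
 * when STRICT_GCC_WARNINGS is enabled on GCC >= 4.6, wrap them in
 * diagnostic push/pop pragmas so the warning is suppressed locally.
 */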
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
	4 && __GNUC_MINOR__ >= 6))
#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
(entry) = list_first_entry((ptr), type, member); \
_Pragma("GCC diagnostic pop") \

#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
entry = container_of((ptr), type, member); \
_Pragma("GCC diagnostic pop") \

#else
#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
(entry) = list_first_entry((ptr), type, member); \

#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
entry = container_of((ptr), type, member); \

#endif /* STRICT_GCC_WARNINGS */

/* event queue for cfg80211 main event */
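/*
 * Each queued entry is a single allocation: the struct below plus the
 * event payload appended at edata[] (an old-style variable-length
 * trailing array; its real size is set in wl_ext_event_enq_event()).
 */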
struct wl_event_q {
	struct list_head eq_list;
	u32 etype;
	wl_event_msg_t emsg;
	s8 edata[1];
};

typedef void(*EXT_EVENT_HANDLER) (struct net_device *dev, void *cb_argu,
	const wl_event_msg_t *e, void *data);

typedef struct event_handler_list {
	struct event_handler_list *next;
	struct net_device *dev;
	uint32 etype;
	EXT_EVENT_HANDLER cb_func;
	void *cb_argu;
	wl_event_prio_t prio;
} event_handler_list_t;
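
/*
 * Handler nodes form a singly linked list kept sorted by descending prio
 * (see wl_ext_event_register()). A node registered with
 * etype == WLC_E_LAST acts as a wildcard and receives every event type
 * for its net_device.
 */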

typedef struct event_handler_head {
	event_handler_list_t *evt_head;
} event_handler_head_t;

typedef struct wl_event_params {
	dhd_pub_t *pub;
	struct net_device *dev[DHD_MAX_IFS];
	struct event_handler_head evt_head;
	struct list_head eq_list;	/* used for event queue */
	spinlock_t eq_lock;	/* for event queue synchronization */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
	tsk_ctl_t thr_event_ctl;
#else
	struct workqueue_struct *event_workq;   /* workqueue for event */
	struct work_struct event_work;		/* work item for event */
#endif
	struct mutex event_sync;
} wl_event_params_t;

static unsigned long
wl_ext_event_lock_eq(struct wl_event_params *event_params)
{
	unsigned long flags;

	spin_lock_irqsave(&event_params->eq_lock, flags);
	return flags;
}

static void
wl_ext_event_unlock_eq(struct wl_event_params *event_params, unsigned long flags)
{
	spin_unlock_irqrestore(&event_params->eq_lock, flags);
}

static void
wl_ext_event_init_eq_lock(struct wl_event_params *event_params)
{
	spin_lock_init(&event_params->eq_lock);
}

static void
wl_ext_event_init_eq(struct wl_event_params *event_params)
{
	wl_ext_event_init_eq_lock(event_params);
	INIT_LIST_HEAD(&event_params->eq_list);
}

static void
wl_ext_event_flush_eq(struct wl_event_params *event_params)
{
	struct wl_event_q *e;
	unsigned long flags;

	flags = wl_ext_event_lock_eq(event_params);
	while (!list_empty_careful(&event_params->eq_list)) {
		BCM_SET_LIST_FIRST_ENTRY(e, &event_params->eq_list, struct wl_event_q, eq_list);
		list_del(&e->eq_list);
		kfree(e);
	}
	wl_ext_event_unlock_eq(event_params, flags);
}

/*
 * retrieve first queued event from head
 */

static struct wl_event_q *
wl_ext_event_deq_event(struct wl_event_params *event_params)
{
	struct wl_event_q *e = NULL;
	unsigned long flags;

	flags = wl_ext_event_lock_eq(event_params);
	if (likely(!list_empty(&event_params->eq_list))) {
		BCM_SET_LIST_FIRST_ENTRY(e, &event_params->eq_list, struct wl_event_q, eq_list);
		list_del(&e->eq_list);
	}
	wl_ext_event_unlock_eq(event_params, flags);

	return e;
}

/*
 * push event to tail of the queue
 */
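/*
 * The entry is allocated with GFP_ATOMIC when the caller is in atomic
 * context (checked via in_atomic()), so events can be queued from
 * non-sleeping paths as well.
 */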

static s32
wl_ext_event_enq_event(struct wl_event_params *event_params, u32 event,
	const wl_event_msg_t *msg, void *data)
{
	struct wl_event_q *e;
	s32 err = 0;
	uint32 evtq_size;
	uint32 data_len;
	unsigned long flags;
	gfp_t aflags;

	data_len = 0;
	if (data)
		data_len = ntoh32(msg->datalen);
	evtq_size = sizeof(struct wl_event_q) + data_len;
	aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
	e = kzalloc(evtq_size, aflags);
	if (unlikely(!e)) {
		EVENT_ERROR("wlan", "event alloc failed\n");
		return -ENOMEM;
	}
	e->etype = event;
	memcpy(&e->emsg, msg, sizeof(wl_event_msg_t));
	if (data)
		memcpy(e->edata, data, data_len);
	flags = wl_ext_event_lock_eq(event_params);
	list_add_tail(&e->eq_list, &event_params->eq_list);
	wl_ext_event_unlock_eq(event_params, flags);

	return err;
}

static void
wl_ext_event_put_event(struct wl_event_q *e)
{
	kfree(e);
}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
static int wl_ext_event_handler(void *data);
#define WL_EXT_EVENT_HANDLER() static int wl_ext_event_handler(void *data)
#else
static void wl_ext_event_handler(struct work_struct *data);
#define WL_EXT_EVENT_HANDLER() static void wl_ext_event_handler(struct work_struct *data)
#endif

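/*
 * Drain the queue and dispatch each event to every handler registered on
 * the matching net_device whose etype equals the event (or WLC_E_LAST).
 * On pre-3.0 kernels this body runs in a dedicated thread woken via a
 * semaphore; on newer kernels it is the work item queued on event_workq.
 */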
WL_EXT_EVENT_HANDLER()
{
	struct wl_event_params *event_params = NULL;
	struct wl_event_q *e;
	struct net_device *dev = NULL;
	struct event_handler_list *evt_node;
	dhd_pub_t *dhd;
	unsigned long flags = 0;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	event_params = (struct wl_event_params *)tsk->parent;
#else
	BCM_SET_CONTAINER_OF(event_params, data, struct wl_event_params, event_work);
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
	while (1) {
	if (down_interruptible(&tsk->sema) == 0) {
		SMP_RD_BARRIER_DEPENDS();
		if (tsk->terminated) {
			break;
		}
#endif
	DHD_EVENT_WAKE_LOCK(event_params->pub);
	while ((e = wl_ext_event_deq_event(event_params))) {
		if (e->emsg.ifidx >= DHD_MAX_IFS) {
			EVENT_ERROR("wlan", "ifidx=%d not in range\n", e->emsg.ifidx);
			goto fail;
		}
		dev = event_params->dev[e->emsg.ifidx];
		if (!dev) {
			EVENT_DBG("wlan", "ifidx=%d dev not ready\n", e->emsg.ifidx);
			goto fail;
		}
		dhd = dhd_get_pub(dev);
		if (e->etype > WLC_E_LAST) {
			EVENT_TRACE(dev->name, "Unknown Event (%d): ignoring\n", e->etype);
			goto fail;
		}
		DHD_GENERAL_LOCK(dhd, flags);
		if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd)) {
			EVENT_ERROR(dev->name, "BUS is DOWN.\n");
			DHD_GENERAL_UNLOCK(dhd, flags);
			goto fail;
		}
		DHD_GENERAL_UNLOCK(dhd, flags);
		EVENT_DBG(dev->name, "event type (%d)\n", e->etype);
		mutex_lock(&event_params->event_sync);
		evt_node = event_params->evt_head.evt_head;
		while (evt_node) {
			if (evt_node->dev == dev &&
					(evt_node->etype == e->etype || evt_node->etype == WLC_E_LAST))
				evt_node->cb_func(dev, evt_node->cb_argu, &e->emsg, e->edata);
			evt_node = evt_node->next;
		}
		mutex_unlock(&event_params->event_sync);
fail:
		wl_ext_event_put_event(e);
	}
	DHD_EVENT_WAKE_UNLOCK(event_params->pub);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
	} else {
		break;
	}
	}
	complete_and_exit(&tsk->completed, 0);
#endif
}

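/*
 * Entry point from the driver's event path: queue the firmware event and
 * kick the worker (or, on pre-3.0 kernels, wake the handler thread).
 */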
void
wl_ext_event_send(void *params, const wl_event_msg_t *e, void *data)
{
	struct wl_event_params *event_params = params;
	u32 event_type = ntoh32(e->event_type);

	if (event_params == NULL) {
		EVENT_ERROR("wlan", "Stale event %d(%s) ignored\n",
			event_type, bcmevent_get_name(event_type));
		return;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	if (event_params->event_workq == NULL) {
		EVENT_ERROR("wlan", "Event handler is not created %d(%s)\n",
			event_type, bcmevent_get_name(event_type));
		return;
	}
#endif

	if (likely(!wl_ext_event_enq_event(event_params, event_type, e, data))) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
		if (event_params->thr_event_ctl.thr_pid >= 0) {
			up(&event_params->thr_event_ctl.sema);
		}
#else
		queue_work(event_params->event_workq, &event_params->event_work);
#endif
	}
}

static s32
wl_ext_event_create_handler(struct wl_event_params *event_params)
{
	int ret = 0;
	EVENT_TRACE("wlan", "Enter\n");

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
	PROC_START(wl_ext_event_handler, event_params, &event_params->thr_event_ctl, 0, "ext_eventd");
	if (event_params->thr_event_ctl.thr_pid < 0) {
		ret = -ENOMEM;
	}
#else
	/* Allocate workqueue for event */
	if (!event_params->event_workq) {
		event_params->event_workq = alloc_workqueue("ext_eventd",
			WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 0);
	}

	if (!event_params->event_workq) {
		EVENT_ERROR("wlan", "event_workq alloc_workqueue failed\n");
		ret = -ENOMEM;
	} else {
		INIT_WORK(&event_params->event_work, wl_ext_event_handler);
	}
#endif

	return ret;
}

static void
wl_ext_event_free(struct wl_event_params *event_params)
{
	struct event_handler_list *node, *cur, **evt_head;

	evt_head = &event_params->evt_head.evt_head;
	node = *evt_head;

	while (node) {
		EVENT_TRACE(node->dev->name, "Free etype=%d\n", node->etype);
		cur = node;
		node = cur->next;
		kfree(cur);
	}
	*evt_head = NULL;
}

static void
wl_ext_event_destroy_handler(struct wl_event_params *event_params)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
	if (event_params->thr_event_ctl.thr_pid >= 0) {
		PROC_STOP(&event_params->thr_event_ctl);
	}
#else
	if (event_params && event_params->event_workq) {
		cancel_work_sync(&event_params->event_work);
		destroy_workqueue(event_params->event_workq);
		event_params->event_workq = NULL;
	}
#endif
}

int
wl_ext_event_register(struct net_device *dev, dhd_pub_t *dhd, uint32 event,
	void *cb_func, void *data, wl_event_prio_t prio)
{
	struct wl_event_params *event_params = dhd->event_params;
	struct event_handler_list *node, *leaf, *node_prev, **evt_head;
	int ret = 0;

	if (event_params) {
		mutex_lock(&event_params->event_sync);
		evt_head = &event_params->evt_head.evt_head;
		node = *evt_head;
		while (node) {
			if (node->dev == dev && node->etype == event && node->cb_func == cb_func) {
				EVENT_TRACE(dev->name, "skip event %d\n", event);
				mutex_unlock(&event_params->event_sync);
				return 0;
			}
			node = node->next;
		}
		leaf = kzalloc(sizeof(event_handler_list_t), GFP_KERNEL);
		if (!leaf) {
			EVENT_ERROR(dev->name, "Memory alloc failure %d for event %d\n",
				(int)sizeof(event_handler_list_t), event);
			mutex_unlock(&event_params->event_sync);
			return -ENOMEM;
		}
		leaf->next = NULL;
		leaf->dev = dev;
		leaf->etype = event;
		leaf->cb_func = cb_func;
		leaf->cb_argu = data;
		leaf->prio = prio;
		if (*evt_head == NULL) {
			*evt_head = leaf;
		} else {
			node = *evt_head;
			node_prev = NULL;
			while (node) {
				if (node->prio <= prio) {
					leaf->next = node;
					if (node_prev)
						node_prev->next = leaf;
					else
						*evt_head = leaf;
					break;
				} else if (node->next == NULL) {
					node->next = leaf;
					break;
				}
				node_prev = node;
				node = node->next;
			}
		}
		EVENT_TRACE(dev->name, "event %d registered\n", event);
		mutex_unlock(&event_params->event_sync);
	} else {
		EVENT_ERROR(dev->name, "event_params not ready %d\n", event);
		ret = -ENODEV;
	}

	return ret;
}
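
/*
 * Minimal usage sketch (hypothetical caller: my_handler, my_ctx and prio
 * are illustrative, not part of this file):
 *
 *	static void my_handler(struct net_device *dev, void *cb_argu,
 *		const wl_event_msg_t *e, void *data)
 *	{
 *		struct my_ctx *ctx = cb_argu;	// argument passed at registration
 *		...
 *	}
 *
 *	wl_ext_event_register(dev, dhd, WLC_E_ESCAN_RESULT, my_handler,
 *		my_ctx, prio);
 *	...
 *	wl_ext_event_deregister(dev, dhd, WLC_E_ESCAN_RESULT, my_handler);
 */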

void
wl_ext_event_deregister(struct net_device *dev, dhd_pub_t *dhd,
	uint32 event, void *cb_func)
{
	struct wl_event_params *event_params = dhd->event_params;
	struct event_handler_list *node, *prev, **evt_head;
	int removed_head = 0;

	if (event_params) {
		mutex_lock(&event_params->event_sync);
		evt_head = &event_params->evt_head.evt_head;
		node = *evt_head;
		prev = node;
		while (node) {
			if (node->dev == dev && node->etype == event && node->cb_func == cb_func) {
				if (node == *evt_head) {
					removed_head = 1;
					*evt_head = node->next;
				} else {
					removed_head = 0;
					prev->next = node->next;
				}
				EVENT_TRACE(dev->name, "event %d deregistered\n", event);
				kfree(node);
				if (removed_head) {
					node = *evt_head;
					prev = node;
				} else {
					node = prev->next;
				}
				continue;
			}
			prev = node;
			node = node->next;
		}
		mutex_unlock(&event_params->event_sync);
	} else {
		EVENT_ERROR(dev->name, "event_params not ready %d\n", event);
	}
}

static s32
wl_ext_event_init_priv(struct wl_event_params *event_params)
{
	s32 err = 0;

	mutex_init(&event_params->event_sync);
	wl_ext_event_init_eq(event_params);
	if (wl_ext_event_create_handler(event_params))
		return -ENOMEM;

	return err;
}

static void
wl_ext_event_deinit_priv(struct wl_event_params *event_params)
{
	wl_ext_event_destroy_handler(event_params);
	wl_ext_event_flush_eq(event_params);
	wl_ext_event_free(event_params);
}

int
wl_ext_event_attach_netdev(struct net_device *net, int ifidx, uint8 bssidx)
{
	struct dhd_pub *dhd = dhd_get_pub(net);
	struct wl_event_params *event_params = dhd->event_params;

	if (event_params && ifidx < DHD_MAX_IFS) {
		EVENT_TRACE(net->name, "ifidx=%d, bssidx=%d\n", ifidx, bssidx);
		event_params->dev[ifidx] = net;
	}

	return 0;
}

int
wl_ext_event_dettach_netdev(struct net_device *net, int ifidx)
{
	struct dhd_pub *dhd = dhd_get_pub(net);
	struct wl_event_params *event_params = dhd->event_params;

	if (event_params && ifidx < DHD_MAX_IFS) {
		EVENT_TRACE(net->name, "ifidx=%d\n", ifidx);
		event_params->dev[ifidx] = NULL;
	}

	return 0;
}

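/*
 * Allocate and initialize the per-adapter event dispatcher;
 * wl_ext_event_dettach() releases it.
 */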
s32
wl_ext_event_attach(struct net_device *net)
{
	struct dhd_pub *dhdp = dhd_get_pub(net);
	struct wl_event_params *event_params = NULL;
	s32 err = 0;

	event_params = kzalloc(sizeof(wl_event_params_t), GFP_KERNEL);
	if (!event_params) {
		EVENT_ERROR(net->name, "Failed to allocate memory (%zu)\n",
			sizeof(wl_event_params_t));
		return -ENOMEM;
	}
	event_params->pub = dhdp;
	dhdp->event_params = event_params;

	err = wl_ext_event_init_priv(event_params);
	if (err) {
		EVENT_ERROR(net->name, "Failed to wl_ext_event_init_priv (%d)\n", err);
		goto ext_attach_out;
	}

	return err;
ext_attach_out:
	wl_ext_event_dettach(dhdp);
	return err;
}

void
wl_ext_event_dettach(dhd_pub_t *dhdp)
{
	struct wl_event_params *event_params = dhdp->event_params;

	if (event_params) {
		wl_ext_event_deinit_priv(event_params);
		kfree(event_params);
		dhdp->event_params = NULL;
	}
}
#endif /* WL_EVENT */