/* kernel/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/wl_event.c */

#include <wl_android.h>
#ifdef WL_EVENT
#include <bcmendian.h>

#define EVENT_ERROR(name, arg1, args...) \
	do { \
		if (android_msg_level & ANDROID_ERROR_LEVEL) { \
			printf("[%s] EVENT-ERROR) %s : " arg1, name, __func__, ## args); \
		} \
	} while (0)
#define EVENT_TRACE(name, arg1, args...) \
	do { \
		if (android_msg_level & ANDROID_TRACE_LEVEL) { \
			printf("[%s] EVENT-TRACE) %s : " arg1, name, __func__, ## args); \
		} \
	} while (0)
#define EVENT_DBG(name, arg1, args...) \
	do { \
		if (android_msg_level & ANDROID_DBG_LEVEL) { \
			printf("[%s] EVENT-DBG) %s : " arg1, name, __func__, ## args); \
		} \
	} while (0)

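/*
 * Wrappers around list_first_entry()/container_of() that suppress GCC's
 * -Wcast-qual warning when STRICT_GCC_WARNINGS is enabled on GCC >= 4.6;
 * otherwise they expand to the plain macros.
 */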
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
	4 && __GNUC_MINOR__ >= 6))
#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
(entry) = list_first_entry((ptr), type, member); \
_Pragma("GCC diagnostic pop") \

#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
entry = container_of((ptr), type, member); \
_Pragma("GCC diagnostic pop") \

#else
#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
(entry) = list_first_entry((ptr), type, member); \

#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
entry = container_of((ptr), type, member); \

#endif /* STRICT_GCC_WARNINGS */

/* event queue for cfg80211 main event */
struct wl_event_q {
	struct list_head eq_list;
	u32 etype;
	wl_event_msg_t emsg;
	s8 edata[1];
};

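/*
 * Callback signature for externally registered event handlers; each
 * registration is kept as a node on a singly linked, priority-sorted list.
 */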
typedef void(*EXT_EVENT_HANDLER) (struct net_device *dev, void *cb_argu,
	const wl_event_msg_t *e, void *data);

typedef struct event_handler_list {
	struct event_handler_list *next;
	struct net_device *dev;
	uint32 etype;
	EXT_EVENT_HANDLER cb_func;
	void *cb_argu;
	wl_event_prio_t prio;
} event_handler_list_t;

typedef struct event_handler_head {
	event_handler_list_t *evt_head;
} event_handler_head_t;

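/*
 * Per-adapter event dispatch state: the per-ifidx net_device table, the
 * registered-handler list, the spinlock-protected event queue, the worker
 * context (a DHD thread on pre-3.0 kernels, a workqueue otherwise) and a
 * mutex serializing handler (de)registration against dispatch.
 */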
typedef struct wl_event_params {
	dhd_pub_t *pub;
	struct net_device *dev[DHD_MAX_IFS];
	struct event_handler_head evt_head;
	struct list_head eq_list;	/* used for event queue */
	spinlock_t eq_lock;	/* for event queue synchronization */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
	tsk_ctl_t thr_event_ctl;
#else
	struct workqueue_struct *event_workq;   /* workqueue for event */
	struct work_struct event_work;		/* work item for event */
#endif
	struct mutex event_sync;
} wl_event_params_t;

static unsigned long
wl_ext_event_lock_eq(struct wl_event_params *event_params)
{
	unsigned long flags;

	spin_lock_irqsave(&event_params->eq_lock, flags);
	return flags;
}

static void
wl_ext_event_unlock_eq(struct wl_event_params *event_params, unsigned long flags)
{
	spin_unlock_irqrestore(&event_params->eq_lock, flags);
}

static void
wl_ext_event_init_eq_lock(struct wl_event_params *event_params)
{
	spin_lock_init(&event_params->eq_lock);
}

static void
wl_ext_event_init_eq(struct wl_event_params *event_params)
{
	wl_ext_event_init_eq_lock(event_params);
	INIT_LIST_HEAD(&event_params->eq_list);
}

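/* drain the event queue and free any events still pending */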
static void
wl_ext_event_flush_eq(struct wl_event_params *event_params)
{
	struct wl_event_q *e;
	unsigned long flags;

	flags = wl_ext_event_lock_eq(event_params);
	while (!list_empty_careful(&event_params->eq_list)) {
		BCM_SET_LIST_FIRST_ENTRY(e, &event_params->eq_list, struct wl_event_q, eq_list);
		list_del(&e->eq_list);
		kfree(e);
	}
	wl_ext_event_unlock_eq(event_params, flags);
}

/*
 * retrieve first queued event from head
 */

static struct wl_event_q *
wl_ext_event_deq_event(struct wl_event_params *event_params)
{
	struct wl_event_q *e = NULL;
	unsigned long flags;

	flags = wl_ext_event_lock_eq(event_params);
	if (likely(!list_empty(&event_params->eq_list))) {
		BCM_SET_LIST_FIRST_ENTRY(e, &event_params->eq_list, struct wl_event_q, eq_list);
		list_del(&e->eq_list);
	}
	wl_ext_event_unlock_eq(event_params, flags);

	return e;
}

/*
 * push event to tail of the queue
 */

static s32
wl_ext_event_enq_event(struct wl_event_params *event_params, u32 event,
	const wl_event_msg_t *msg, void *data)
{
	struct wl_event_q *e;
	s32 err = 0;
	uint32 evtq_size;
	uint32 data_len;
	unsigned long flags;
	gfp_t aflags;

	data_len = 0;
	if (data)
		data_len = ntoh32(msg->datalen);
	evtq_size = sizeof(struct wl_event_q) + data_len;
	aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
	e = kzalloc(evtq_size, aflags);
	if (unlikely(!e)) {
		EVENT_ERROR("wlan", "event alloc failed\n");
		return -ENOMEM;
	}
	e->etype = event;
	memcpy(&e->emsg, msg, sizeof(wl_event_msg_t));
	if (data)
		memcpy(e->edata, data, data_len);
	flags = wl_ext_event_lock_eq(event_params);
	list_add_tail(&e->eq_list, &event_params->eq_list);
	wl_ext_event_unlock_eq(event_params, flags);

	return err;
}

static void
wl_ext_event_put_event(struct wl_event_q *e)
{
	kfree(e);
}

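/*
 * Two worker models: pre-3.0 kernels run the handler as a DHD kernel
 * thread woken via a semaphore, newer kernels run it as a work item on a
 * dedicated workqueue. WL_EXT_EVENT_HANDLER() hides the differing
 * function signatures.
 */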
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
static int wl_ext_event_handler(void *data);
#define WL_EXT_EVENT_HANDLER() static int wl_ext_event_handler(void *data)
#else
static void wl_ext_event_handler(struct work_struct *data);
#define WL_EXT_EVENT_HANDLER() static void wl_ext_event_handler(struct work_struct *data)
#endif

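/*
 * Main dispatch loop: drain the queue, validate ifidx/etype and bus state
 * for each event, then invoke every registered handler whose dev matches
 * and whose etype matches (a handler registered with WLC_E_LAST receives
 * all events), all under event_sync. A wake lock is held while events
 * are being processed.
 */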
WL_EXT_EVENT_HANDLER()
{
	struct wl_event_params *event_params = NULL;
	struct wl_event_q *e;
	struct net_device *dev = NULL;
	struct event_handler_list *evt_node;
	dhd_pub_t *dhd;
	unsigned long flags = 0;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	event_params = (struct wl_event_params *)tsk->parent;
#else
	BCM_SET_CONTAINER_OF(event_params, data, struct wl_event_params, event_work);
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
	while (1) {
	if (down_interruptible(&tsk->sema) == 0) {
		SMP_RD_BARRIER_DEPENDS();
		if (tsk->terminated) {
			break;
		}
#endif
	DHD_EVENT_WAKE_LOCK(event_params->pub);
	while ((e = wl_ext_event_deq_event(event_params))) {
		if (e->emsg.ifidx >= DHD_MAX_IFS) {
			EVENT_ERROR("wlan", "ifidx=%d not in range\n", e->emsg.ifidx);
			goto fail;
		}
		dev = event_params->dev[e->emsg.ifidx];
		if (!dev) {
			EVENT_DBG("wlan", "ifidx=%d dev not ready\n", e->emsg.ifidx);
			goto fail;
		}
		dhd = dhd_get_pub(dev);
		if (e->etype > WLC_E_LAST) {
			EVENT_TRACE(dev->name, "Unknown Event (%d): ignoring\n", e->etype);
			goto fail;
		}
		DHD_GENERAL_LOCK(dhd, flags);
		if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd)) {
			EVENT_ERROR(dev->name, "BUS is DOWN.\n");
			DHD_GENERAL_UNLOCK(dhd, flags);
			goto fail;
		}
		DHD_GENERAL_UNLOCK(dhd, flags);
		EVENT_DBG(dev->name, "event type (%d)\n", e->etype);
		mutex_lock(&event_params->event_sync);
		evt_node = event_params->evt_head.evt_head;
		while (evt_node) {
			if (evt_node->dev == dev &&
					(evt_node->etype == e->etype || evt_node->etype == WLC_E_LAST))
				evt_node->cb_func(dev, evt_node->cb_argu, &e->emsg, e->edata);
			evt_node = evt_node->next;
		}
		mutex_unlock(&event_params->event_sync);
fail:
		wl_ext_event_put_event(e);
	}
	DHD_EVENT_WAKE_UNLOCK(event_params->pub);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
	} else {
		break;
	}
	}
	complete_and_exit(&tsk->completed, 0);
#endif
}

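/*
 * Entry point from the DHD event receive path: queue a copy of the event
 * (plus payload) and kick the worker thread/workqueue.
 */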
void
wl_ext_event_send(void *params, const wl_event_msg_t *e, void *data)
{
	struct wl_event_params *event_params = params;
	u32 event_type = ntoh32(e->event_type);

	if (event_params == NULL) {
		EVENT_ERROR("wlan", "Stale event %d(%s) ignored\n",
			event_type, bcmevent_get_name(event_type));
		return;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	if (event_params->event_workq == NULL) {
		EVENT_ERROR("wlan", "Event handler is not created %d(%s)\n",
			event_type, bcmevent_get_name(event_type));
		return;
	}
#endif

	if (likely(!wl_ext_event_enq_event(event_params, event_type, e, data))) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
		if (event_params->thr_event_ctl.thr_pid >= 0) {
			up(&event_params->thr_event_ctl.sema);
		}
#else
		queue_work(event_params->event_workq, &event_params->event_work);
#endif
	}
}

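/* start the worker: a DHD thread on pre-3.0 kernels, else a high-priority workqueue */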
static s32
wl_ext_event_create_handler(struct wl_event_params *event_params)
{
	int ret = 0;
	EVENT_TRACE("wlan", "Enter\n");

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
	PROC_START(wl_ext_event_handler, event_params, &event_params->thr_event_ctl, 0, "ext_eventd");
	if (event_params->thr_event_ctl.thr_pid < 0) {
		ret = -ENOMEM;
	}
#else
	/* Allocate workqueue for event */
	if (!event_params->event_workq) {
		event_params->event_workq = alloc_workqueue("ext_eventd", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	}

	if (!event_params->event_workq) {
		EVENT_ERROR("wlan", "event_workq alloc_workqueue failed\n");
		ret = -ENOMEM;
	} else {
		INIT_WORK(&event_params->event_work, wl_ext_event_handler);
	}
#endif

	return ret;
}

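/* free every node left on the registered-handler list */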
static void
wl_ext_event_free(struct wl_event_params *event_params)
{
	struct event_handler_list *node, *cur, **evt_head;

	evt_head = &event_params->evt_head.evt_head;
	node = *evt_head;

	while (node) {
		EVENT_TRACE(node->dev->name, "Free etype=%d\n", node->etype);
		cur = node;
		node = cur->next;
		kfree(cur);
	}
	*evt_head = NULL;
}

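/* stop the worker thread/workqueue; any pending work is cancelled first */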
static void
wl_ext_event_destroy_handler(struct wl_event_params *event_params)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
	if (event_params->thr_event_ctl.thr_pid >= 0) {
		PROC_STOP(&event_params->thr_event_ctl);
	}
#else
	if (event_params && event_params->event_workq) {
		cancel_work_sync(&event_params->event_work);
		destroy_workqueue(event_params->event_workq);
		event_params->event_workq = NULL;
	}
#endif
}

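/*
 * Register cb_func for (dev, event). Duplicate registrations are skipped;
 * otherwise the node is inserted before the first entry whose prio is <=
 * the new one, keeping the list sorted so higher-prio handlers run first.
 * Registering WLC_E_LAST makes the handler a wildcard for that dev.
 */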
int
wl_ext_event_register(struct net_device *dev, dhd_pub_t *dhd, uint32 event,
	void *cb_func, void *data, wl_event_prio_t prio)
{
	struct wl_event_params *event_params = dhd->event_params;
	struct event_handler_list *node, *leaf, *node_prev, **evt_head;
	int ret = 0;

	if (event_params) {
		mutex_lock(&event_params->event_sync);
		evt_head = &event_params->evt_head.evt_head;
		node = *evt_head;
		while (node) {
			if (node->dev == dev && node->etype == event && node->cb_func == cb_func) {
				EVENT_TRACE(dev->name, "skip event %d\n", event);
				mutex_unlock(&event_params->event_sync);
				return 0;
			}
			node = node->next;
		}
		leaf = kmalloc(sizeof(event_handler_list_t), GFP_KERNEL);
		if (!leaf) {
			EVENT_ERROR(dev->name, "Memory alloc failure %d for event %d\n",
				(int)sizeof(event_handler_list_t), event);
			mutex_unlock(&event_params->event_sync);
			return -ENOMEM;
		}
		leaf->next = NULL;
		leaf->dev = dev;
		leaf->etype = event;
		leaf->cb_func = cb_func;
		leaf->cb_argu = data;
		leaf->prio = prio;
		if (*evt_head == NULL) {
			*evt_head = leaf;
		} else {
			node = *evt_head;
			node_prev = NULL;
			while (node) {
				if (node->prio <= prio) {
					leaf->next = node;
					if (node_prev)
						node_prev->next = leaf;
					else
						*evt_head = leaf;
					break;
				} else if (node->next == NULL) {
					node->next = leaf;
					break;
				}
				node_prev = node;
				node = node->next;
			}
		}
		EVENT_TRACE(dev->name, "event %d registered\n", event);
		mutex_unlock(&event_params->event_sync);
	} else {
		EVENT_ERROR(dev->name, "event_params not ready %d\n", event);
		ret = -ENODEV;
	}

	return ret;
}

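/*
 * Remove every node matching (dev, event, cb_func) from the handler list;
 * the scan restarts from the head when the head node itself is removed.
 */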
void
wl_ext_event_deregister(struct net_device *dev, dhd_pub_t *dhd,
	uint32 event, void *cb_func)
{
	struct wl_event_params *event_params = dhd->event_params;
	struct event_handler_list *node, *prev, **evt_head;
	int tmp = 0;

	if (event_params) {
		mutex_lock(&event_params->event_sync);
		evt_head = &event_params->evt_head.evt_head;
		node = *evt_head;
		prev = node;
		while (node) {
			if (node->dev == dev && node->etype == event && node->cb_func == cb_func) {
				if (node == *evt_head) {
					tmp = 1;
					*evt_head = node->next;
				} else {
					tmp = 0;
					prev->next = node->next;
				}
				EVENT_TRACE(dev->name, "event %d deregistered\n", event);
				kfree(node);
				if (tmp == 1) {
					node = *evt_head;
					prev = node;
				} else {
					node = prev->next;
				}
				continue;
			}
			prev = node;
			node = node->next;
		}
		mutex_unlock(&event_params->event_sync);
	} else {
		EVENT_ERROR(dev->name, "event_params not ready %d\n", event);
	}
}

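/* one-time init/teardown of the queue, its lock, the mutex and the worker */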
static s32
wl_ext_event_init_priv(struct wl_event_params *event_params)
{
	s32 err = 0;

	mutex_init(&event_params->event_sync);
	wl_ext_event_init_eq(event_params);
	if (wl_ext_event_create_handler(event_params))
		return -ENOMEM;

	return err;
}

static void
wl_ext_event_deinit_priv(struct wl_event_params *event_params)
{
	wl_ext_event_destroy_handler(event_params);
	wl_ext_event_flush_eq(event_params);
	wl_ext_event_free(event_params);
}

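/*
 * Hook/unhook a net_device into the per-ifidx table consulted by the
 * dispatcher; events for an ifidx with no attached netdev are dropped.
 */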
int
wl_ext_event_attach_netdev(struct net_device *net, int ifidx, uint8 bssidx)
{
	struct dhd_pub *dhd = dhd_get_pub(net);
	struct wl_event_params *event_params = dhd->event_params;

	if (event_params && ifidx < DHD_MAX_IFS) {
		EVENT_TRACE(net->name, "ifidx=%d, bssidx=%d\n", ifidx, bssidx);
		event_params->dev[ifidx] = net;
	}

	return 0;
}

int
wl_ext_event_dettach_netdev(struct net_device *net, int ifidx)
{
	struct dhd_pub *dhd = dhd_get_pub(net);
	struct wl_event_params *event_params = dhd->event_params;

	if (event_params && ifidx < DHD_MAX_IFS) {
		EVENT_TRACE(net->name, "ifidx=%d\n", ifidx);
		event_params->dev[ifidx] = NULL;
	}

	return 0;
}

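/* allocate and initialize the event machinery for this adapter */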
s32
wl_ext_event_attach(struct net_device *net)
{
	struct dhd_pub *dhdp = dhd_get_pub(net);
	struct wl_event_params *event_params = NULL;
	s32 err = 0;

	event_params = kzalloc(sizeof(wl_event_params_t), GFP_KERNEL);
	if (!event_params) {
		EVENT_ERROR(net->name, "Failed to allocate memory (%zu)\n",
			sizeof(wl_event_params_t));
		return -ENOMEM;
	}
	dhdp->event_params = event_params;
	event_params->pub = dhdp;

	err = wl_ext_event_init_priv(event_params);
	if (err) {
		EVENT_ERROR(net->name, "wl_ext_event_init_priv failed (%d)\n", err);
		goto ext_attach_out;
	}

	return err;
ext_attach_out:
	wl_ext_event_dettach(dhdp);
	return err;
}

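/* tear down and free the event machinery; counterpart of wl_ext_event_attach */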
void
wl_ext_event_dettach(dhd_pub_t *dhdp)
{
	struct wl_event_params *event_params = dhdp->event_params;

	if (event_params) {
		wl_ext_event_deinit_priv(event_params);
		kfree(event_params);
		dhdp->event_params = NULL;
	}
}
#endif /* WL_EVENT */