xref: /OK3568_Linux_fs/external/rkwifibt/drivers/bcmdhd/wl_event.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun 
2*4882a593Smuzhiyun #include <wl_android.h>
3*4882a593Smuzhiyun #ifdef WL_EVENT
4*4882a593Smuzhiyun #include <bcmendian.h>
5*4882a593Smuzhiyun #include <dhd_config.h>
6*4882a593Smuzhiyun 
/* Conditional logging macros, gated on the global android_msg_level mask.
 * "name" is a tag for the log line (typically the netdev name or "wlan"),
 * arg1 is a printf-style format string followed by its arguments.
 * do-while(0) keeps the macros safe in unbraced if/else bodies. */
#define EVENT_ERROR(name, arg1, args...) \
	do { \
		if (android_msg_level & ANDROID_ERROR_LEVEL) { \
			printf("[%s] EVENT-ERROR) %s : " arg1, name, __func__, ## args); \
		} \
	} while (0)
#define EVENT_TRACE(name, arg1, args...) \
	do { \
		if (android_msg_level & ANDROID_TRACE_LEVEL) { \
			printf("[%s] EVENT-TRACE) %s : " arg1, name, __func__, ## args); \
		} \
	} while (0)
#define EVENT_DBG(name, arg1, args...) \
	do { \
		if (android_msg_level & ANDROID_DBG_LEVEL) { \
			printf("[%s] EVENT-DBG) %s : " arg1, name, __func__, ## args); \
		} \
	} while (0)
25*4882a593Smuzhiyun 
/* list_first_entry()/container_of() cast away const internally; on GCC >= 4.6
 * with STRICT_GCC_WARNINGS these wrappers suppress the resulting -Wcast-qual
 * warning around the single assignment. Note the trailing backslash after
 * "GCC diagnostic pop": the macro deliberately swallows the following blank
 * line so the pragma sequence stays inside the expansion. */
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
	4 && __GNUC_MINOR__ >= 6))
#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
(entry) = list_first_entry((ptr), type, member); \
_Pragma("GCC diagnostic pop") \

#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
entry = container_of((ptr), type, member); \
_Pragma("GCC diagnostic pop") \

#else
/* No warning suppression needed: plain assignments. */
#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
(entry) = list_first_entry((ptr), type, member); \

#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
entry = container_of((ptr), type, member); \

#endif /* STRICT_GCC_WARNINGS */
48*4882a593Smuzhiyun 
/* event queue for cfg80211 main event */
struct wl_event_q {
	struct list_head eq_list;	/* linkage on wl_event_params::eq_list */
	u32 etype;			/* event type (WLC_E_*) */
	wl_event_msg_t emsg;		/* copy of the firmware event header */
	s8 edata[1];			/* start of the variable-length payload; the
					 * node is over-allocated by datalen bytes in
					 * wl_ext_event_enq_event() */
};
56*4882a593Smuzhiyun 
/* Callback signature invoked by the event worker for each matching event. */
typedef void(*EXT_EVENT_HANDLER) (struct net_device *dev, void *cb_argu,
	const wl_event_msg_t *e, void *data);

/* Singly linked node describing one registered event handler. */
typedef struct event_handler_list {
	struct event_handler_list *next;
	struct net_device *dev;		/* interface the handler is bound to */
	uint32 etype;			/* event type; WLC_E_LAST acts as a wildcard
					 * (see the dispatch loop in the worker) */
	EXT_EVENT_HANDLER cb_func;	/* callback to invoke */
	void *cb_argu;			/* opaque argument forwarded to cb_func */
	wl_event_prio_t prio;		/* list is kept sorted, higher prio first */
} event_handler_list_t;

/* Head of the registered-handler list (protected by event_sync). */
typedef struct event_handler_head {
	event_handler_list_t *evt_head;
} event_handler_head_t;
72*4882a593Smuzhiyun 
/* Per-driver context for the extended event dispatcher. */
typedef struct wl_event_params {
	dhd_pub_t *pub;				/* owning dhd instance */
	struct net_device *dev[DHD_MAX_IFS];	/* netdev per interface index */
	struct event_handler_head evt_head;	/* registered handler list */
	struct list_head eq_list;	/* used for event queue */
	spinlock_t eq_lock;	/* for event queue synchronization */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
	tsk_ctl_t thr_event_ctl;	/* dedicated event thread on old kernels */
#else
	struct workqueue_struct *event_workq;   /* workqueue for event */
	struct work_struct event_work;		/* work item for event */
#endif
	struct mutex event_sync;	/* protects the handler list */
} wl_event_params_t;
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun static unsigned long
wl_ext_event_lock_eq(struct wl_event_params * event_params)89*4882a593Smuzhiyun wl_ext_event_lock_eq(struct wl_event_params *event_params)
90*4882a593Smuzhiyun {
91*4882a593Smuzhiyun 	unsigned long flags;
92*4882a593Smuzhiyun 
93*4882a593Smuzhiyun 	spin_lock_irqsave(&event_params->eq_lock, flags);
94*4882a593Smuzhiyun 	return flags;
95*4882a593Smuzhiyun }
96*4882a593Smuzhiyun 
97*4882a593Smuzhiyun static void
wl_ext_event_unlock_eq(struct wl_event_params * event_params,unsigned long flags)98*4882a593Smuzhiyun wl_ext_event_unlock_eq(struct wl_event_params *event_params, unsigned long flags)
99*4882a593Smuzhiyun {
100*4882a593Smuzhiyun 	spin_unlock_irqrestore(&event_params->eq_lock, flags);
101*4882a593Smuzhiyun }
102*4882a593Smuzhiyun 
103*4882a593Smuzhiyun static void
wl_ext_event_init_eq_lock(struct wl_event_params * event_params)104*4882a593Smuzhiyun wl_ext_event_init_eq_lock(struct wl_event_params *event_params)
105*4882a593Smuzhiyun {
106*4882a593Smuzhiyun 	spin_lock_init(&event_params->eq_lock);
107*4882a593Smuzhiyun }
108*4882a593Smuzhiyun 
109*4882a593Smuzhiyun static void
wl_ext_event_init_eq(struct wl_event_params * event_params)110*4882a593Smuzhiyun wl_ext_event_init_eq(struct wl_event_params *event_params)
111*4882a593Smuzhiyun {
112*4882a593Smuzhiyun 	wl_ext_event_init_eq_lock(event_params);
113*4882a593Smuzhiyun 	INIT_LIST_HEAD(&event_params->eq_list);
114*4882a593Smuzhiyun }
115*4882a593Smuzhiyun 
116*4882a593Smuzhiyun static void
wl_ext_event_flush_eq(struct wl_event_params * event_params)117*4882a593Smuzhiyun wl_ext_event_flush_eq(struct wl_event_params *event_params)
118*4882a593Smuzhiyun {
119*4882a593Smuzhiyun 	struct wl_event_q *e;
120*4882a593Smuzhiyun 	unsigned long flags;
121*4882a593Smuzhiyun 
122*4882a593Smuzhiyun 	flags = wl_ext_event_lock_eq(event_params);
123*4882a593Smuzhiyun 	while (!list_empty_careful(&event_params->eq_list)) {
124*4882a593Smuzhiyun 		BCM_SET_LIST_FIRST_ENTRY(e, &event_params->eq_list, struct wl_event_q, eq_list);
125*4882a593Smuzhiyun 		list_del(&e->eq_list);
126*4882a593Smuzhiyun 		kfree(e);
127*4882a593Smuzhiyun 	}
128*4882a593Smuzhiyun 	wl_ext_event_unlock_eq(event_params, flags);
129*4882a593Smuzhiyun }
130*4882a593Smuzhiyun 
131*4882a593Smuzhiyun /*
132*4882a593Smuzhiyun * retrieve first queued event from head
133*4882a593Smuzhiyun */
134*4882a593Smuzhiyun 
135*4882a593Smuzhiyun static struct wl_event_q *
wl_ext_event_deq_event(struct wl_event_params * event_params)136*4882a593Smuzhiyun wl_ext_event_deq_event(struct wl_event_params *event_params)
137*4882a593Smuzhiyun {
138*4882a593Smuzhiyun 	struct wl_event_q *e = NULL;
139*4882a593Smuzhiyun 	unsigned long flags;
140*4882a593Smuzhiyun 
141*4882a593Smuzhiyun 	flags = wl_ext_event_lock_eq(event_params);
142*4882a593Smuzhiyun 	if (likely(!list_empty(&event_params->eq_list))) {
143*4882a593Smuzhiyun 		BCM_SET_LIST_FIRST_ENTRY(e, &event_params->eq_list, struct wl_event_q, eq_list);
144*4882a593Smuzhiyun 		list_del(&e->eq_list);
145*4882a593Smuzhiyun 	}
146*4882a593Smuzhiyun 	wl_ext_event_unlock_eq(event_params, flags);
147*4882a593Smuzhiyun 
148*4882a593Smuzhiyun 	return e;
149*4882a593Smuzhiyun }
150*4882a593Smuzhiyun 
151*4882a593Smuzhiyun /*
152*4882a593Smuzhiyun  * push event to tail of the queue
153*4882a593Smuzhiyun  */
154*4882a593Smuzhiyun 
static s32
wl_ext_event_enq_event(struct wl_event_params *event_params, u32 event,
	const wl_event_msg_t *msg, void *data)
{
	struct wl_event_q *e;
	s32 err = 0;
	uint32 evtq_size;
	uint32 data_len;
	unsigned long flags;
	gfp_t aflags;

	/* Payload length is taken from the event header, but only when the
	 * caller actually supplied a data buffer.
	 * NOTE(review): msg->datalen is trusted as-is here — assumes the RX
	 * path validated it against the received frame; confirm. */
	data_len = 0;
	if (data)
		data_len = ntoh32(msg->datalen);
	evtq_size = sizeof(struct wl_event_q) + data_len;
	/* May be called from atomic context (e.g. the RX path), so choose
	 * the allocation flags accordingly. */
	aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
	e = kzalloc(evtq_size, aflags);
	if (unlikely(!e)) {
		EVENT_ERROR("wlan", "event alloc failed\n");
		return -ENOMEM;
	}
	e->etype = event;
	memcpy(&e->emsg, msg, sizeof(wl_event_msg_t));
	if (data)
		memcpy(e->edata, data, data_len);
	/* Append at the tail under the queue lock; the worker pops from the
	 * head, giving FIFO dispatch order. */
	flags = wl_ext_event_lock_eq(event_params);
	list_add_tail(&e->eq_list, &event_params->eq_list);
	wl_ext_event_unlock_eq(event_params, flags);

	return err;
}
186*4882a593Smuzhiyun 
/* Release an event node obtained from wl_ext_event_deq_event(). */
static void
wl_ext_event_put_event(struct wl_event_q *e)
{
	kfree(e);
}
192*4882a593Smuzhiyun 
/* The event worker's entry point differs by kernel version: a dedicated
 * kernel thread (< 3.0, semaphore driven) versus a workqueue work item
 * (>= 3.0). This macro lets one function body serve both signatures. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
static int wl_ext_event_handler(void *data);
#define WL_EXT_EVENT_HANDLER() static int wl_ext_event_handler(void *data)
#else
static void wl_ext_event_handler(struct work_struct *data);
#define WL_EXT_EVENT_HANDLER() static void wl_ext_event_handler(struct work_struct *data)
#endif
200*4882a593Smuzhiyun 
/*
 * Event worker: drain the event queue and invoke every registered handler
 * whose (dev, etype) matches each dequeued event. On kernels < 3.0 this
 * runs as a dedicated thread woken via a semaphore; on newer kernels it
 * is a workqueue work item. Note: the pre-3.0 while/if braces opened in
 * the first #if block are closed in the matching #if block at the bottom.
 */
WL_EXT_EVENT_HANDLER()
{
	struct wl_event_params *event_params = NULL;
	struct wl_event_q *e;
	struct net_device *dev = NULL;
	struct event_handler_list *evt_node;
	dhd_pub_t *dhd;
	unsigned long flags = 0;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	event_params = (struct wl_event_params *)tsk->parent;
#else
	BCM_SET_CONTAINER_OF(event_params, data, struct wl_event_params, event_work);
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
	/* Thread main loop: block until woken, exit once terminated. */
	while (1) {
	if (down_interruptible(&tsk->sema) == 0) {
		SMP_RD_BARRIER_DEPENDS();
		if (tsk->terminated) {
			break;
		}
#endif
	/* Hold a wake lock for the whole drain pass. */
	DHD_EVENT_WAKE_LOCK(event_params->pub);
	while ((e = wl_ext_event_deq_event(event_params))) {
		if (e->emsg.ifidx >= DHD_MAX_IFS) {
			EVENT_ERROR("wlan", "ifidx=%d not in range\n", e->emsg.ifidx);
			goto fail;
		}
		dev = event_params->dev[e->emsg.ifidx];
		if (!dev) {
			/* Interface not attached (yet); drop the event. */
			EVENT_DBG("wlan", "ifidx=%d dev not ready\n", e->emsg.ifidx);
			goto fail;
		}
		dhd = dhd_get_pub(dev);
		if (e->etype > WLC_E_LAST) {
			EVENT_TRACE(dev->name, "Unknown Event (%d): ignoring\n", e->etype);
			goto fail;
		}
		/* Do not dispatch while the bus is down or going down. */
		DHD_GENERAL_LOCK(dhd, flags);
		if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd)) {
			EVENT_ERROR(dev->name, "BUS is DOWN.\n");
			DHD_GENERAL_UNLOCK(dhd, flags);
			goto fail;
		}
		DHD_GENERAL_UNLOCK(dhd, flags);
		EVENT_DBG(dev->name, "event type (%d)\n", e->etype);
		/* Walk the handler list under event_sync; a handler registered
		 * with WLC_E_LAST acts as a wildcard for every event type. */
		mutex_lock(&event_params->event_sync);
		evt_node = event_params->evt_head.evt_head;
		for (;evt_node;) {
			if (evt_node->dev == dev &&
					(evt_node->etype == e->etype || evt_node->etype == WLC_E_LAST))
				evt_node->cb_func(dev, evt_node->cb_argu, &e->emsg, e->edata);
			evt_node = evt_node->next;
		}
		mutex_unlock(&event_params->event_sync);
fail:
		/* Dropped or dispatched, the event node is always freed. */
		wl_ext_event_put_event(e);
	}
	DHD_EVENT_WAKE_UNLOCK(event_params->pub);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
	} else {
		break;
	}
	}
	complete_and_exit(&tsk->completed, 0);
#endif
}
269*4882a593Smuzhiyun 
/*
 * Entry point called from the DHD event path: queue event e (plus its
 * optional payload data) and kick the worker. The event is dropped with
 * an error log when the dispatcher is not (or no longer) attached.
 */
void
wl_ext_event_send(void *params, const wl_event_msg_t * e, void *data)
{
	struct wl_event_params *event_params = params;
	u32 event_type = ntoh32(e->event_type);

	if (event_params == NULL) {
		/* Event arrived before attach or after detach. */
		EVENT_ERROR("wlan", "Stale event %d(%s) ignored\n",
			event_type, bcmevent_get_name(event_type));
		return;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	if (event_params->event_workq == NULL) {
		EVENT_ERROR("wlan", "Event handler is not created %d(%s)\n",
			event_type, bcmevent_get_name(event_type));
		return;
	}
#endif

	/* Enqueue, then wake the event thread (old kernels) or queue the
	 * work item so the worker drains the queue. */
	if (likely(!wl_ext_event_enq_event(event_params, event_type, e, data))) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
		if (event_params->thr_event_ctl.thr_pid >= 0) {
			up(&event_params->thr_event_ctl.sema);
		}
#else
		queue_work(event_params->event_workq, &event_params->event_work);
#endif
	}
}
300*4882a593Smuzhiyun 
/* Create the event worker: a kernel thread on kernels < 3.0, otherwise a
 * dedicated high-priority unbound workqueue plus its work item.
 * Returns 0 on success, -ENOMEM on failure. */
static s32
wl_ext_event_create_handler(struct wl_event_params *event_params)
{
	int ret = 0;
	EVENT_TRACE("wlan", "Enter\n");

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
	PROC_START(wl_ext_event_handler, event_params, &event_params->thr_event_ctl, 0, "ext_eventd");
	if (event_params->thr_event_ctl.thr_pid < 0) {
		ret = -ENOMEM;
	}
#else
	/* Allocate workqueue for event */
	if (!event_params->event_workq) {
		event_params->event_workq = alloc_workqueue("ext_eventd",
			WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 0);
	}

	if (!event_params->event_workq) {
		EVENT_ERROR("wlan", "event_workq alloc_workqueue failed\n");
		ret = -ENOMEM;
	} else {
		INIT_WORK(&event_params->event_work, wl_ext_event_handler);
	}
#endif

	return ret;
}
329*4882a593Smuzhiyun 
330*4882a593Smuzhiyun static void
wl_ext_event_free(struct wl_event_params * event_params)331*4882a593Smuzhiyun wl_ext_event_free(struct wl_event_params *event_params)
332*4882a593Smuzhiyun {
333*4882a593Smuzhiyun 	struct event_handler_list *node, *cur, **evt_head;
334*4882a593Smuzhiyun 
335*4882a593Smuzhiyun 	evt_head = &event_params->evt_head.evt_head;
336*4882a593Smuzhiyun 	node = *evt_head;
337*4882a593Smuzhiyun 
338*4882a593Smuzhiyun 	for (;node;) {
339*4882a593Smuzhiyun 		EVENT_TRACE(node->dev->name, "Free etype=%d\n", node->etype);
340*4882a593Smuzhiyun 		cur = node;
341*4882a593Smuzhiyun 		node = cur->next;
342*4882a593Smuzhiyun 		kfree(cur);
343*4882a593Smuzhiyun 	}
344*4882a593Smuzhiyun 	*evt_head = NULL;
345*4882a593Smuzhiyun }
346*4882a593Smuzhiyun 
/* Stop the event thread (old kernels) or cancel any in-flight work and
 * destroy the event workqueue, so no handler runs after this returns. */
static void
wl_ext_event_destroy_handler(struct wl_event_params *event_params)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
	if (event_params->thr_event_ctl.thr_pid >= 0) {
		PROC_STOP(&event_params->thr_event_ctl);
	}
#else
	if (event_params && event_params->event_workq) {
		cancel_work_sync(&event_params->event_work);
		destroy_workqueue(event_params->event_workq);
		event_params->event_workq = NULL;
	}
#endif
}
362*4882a593Smuzhiyun 
363*4882a593Smuzhiyun int
wl_ext_event_register(struct net_device * dev,dhd_pub_t * dhd,uint32 event,void * cb_func,void * data,wl_event_prio_t prio)364*4882a593Smuzhiyun wl_ext_event_register(struct net_device *dev, dhd_pub_t *dhd, uint32 event,
365*4882a593Smuzhiyun 	void *cb_func, void *data, wl_event_prio_t prio)
366*4882a593Smuzhiyun {
367*4882a593Smuzhiyun 	struct wl_event_params *event_params = dhd->event_params;
368*4882a593Smuzhiyun 	struct event_handler_list *node, *leaf, *node_prev, **evt_head;
369*4882a593Smuzhiyun 	int ret = 0;
370*4882a593Smuzhiyun 
371*4882a593Smuzhiyun 	if (event_params) {
372*4882a593Smuzhiyun 		mutex_lock(&event_params->event_sync);
373*4882a593Smuzhiyun 		evt_head = &event_params->evt_head.evt_head;
374*4882a593Smuzhiyun 		node = *evt_head;
375*4882a593Smuzhiyun 		for (;node;) {
376*4882a593Smuzhiyun 			if (node->dev == dev && node->etype == event && node->cb_func == cb_func) {
377*4882a593Smuzhiyun 				EVENT_TRACE(dev->name, "skip event %d\n", event);
378*4882a593Smuzhiyun 				mutex_unlock(&event_params->event_sync);
379*4882a593Smuzhiyun 				return 0;
380*4882a593Smuzhiyun 			}
381*4882a593Smuzhiyun 			node = node->next;
382*4882a593Smuzhiyun 		}
383*4882a593Smuzhiyun 		leaf = kmalloc(sizeof(event_handler_list_t), GFP_KERNEL);
384*4882a593Smuzhiyun 		if (!leaf) {
385*4882a593Smuzhiyun 			EVENT_ERROR(dev->name, "Memory alloc failure %d for event %d\n",
386*4882a593Smuzhiyun 				(int)sizeof(event_handler_list_t), event);
387*4882a593Smuzhiyun 			mutex_unlock(&event_params->event_sync);
388*4882a593Smuzhiyun 			return -ENOMEM;
389*4882a593Smuzhiyun 		}
390*4882a593Smuzhiyun 		memset(leaf, 0, sizeof(event_handler_list_t));
391*4882a593Smuzhiyun 		leaf->next = NULL;
392*4882a593Smuzhiyun 		leaf->dev = dev;
393*4882a593Smuzhiyun 		leaf->etype = event;
394*4882a593Smuzhiyun 		leaf->cb_func = cb_func;
395*4882a593Smuzhiyun 		leaf->cb_argu = data;
396*4882a593Smuzhiyun 		leaf->prio = prio;
397*4882a593Smuzhiyun 		if (*evt_head == NULL) {
398*4882a593Smuzhiyun 			*evt_head = leaf;
399*4882a593Smuzhiyun 		} else {
400*4882a593Smuzhiyun 			node = *evt_head;
401*4882a593Smuzhiyun 			node_prev = NULL;
402*4882a593Smuzhiyun 			for (;node;) {
403*4882a593Smuzhiyun 				if (node->prio <= prio) {
404*4882a593Smuzhiyun 					leaf->next = node;
405*4882a593Smuzhiyun 					if (node_prev)
406*4882a593Smuzhiyun 						node_prev->next = leaf;
407*4882a593Smuzhiyun 					else
408*4882a593Smuzhiyun 						*evt_head = leaf;
409*4882a593Smuzhiyun 					break;
410*4882a593Smuzhiyun 				} else if (node->next == NULL) {
411*4882a593Smuzhiyun 					node->next = leaf;
412*4882a593Smuzhiyun 					break;
413*4882a593Smuzhiyun 				}
414*4882a593Smuzhiyun 				node_prev = node;
415*4882a593Smuzhiyun 				node = node->next;
416*4882a593Smuzhiyun 			}
417*4882a593Smuzhiyun 		}
418*4882a593Smuzhiyun 		EVENT_TRACE(dev->name, "event %d registered\n", event);
419*4882a593Smuzhiyun 		mutex_unlock(&event_params->event_sync);
420*4882a593Smuzhiyun 	} else {
421*4882a593Smuzhiyun 		EVENT_ERROR(dev->name, "event_params not ready %d\n", event);
422*4882a593Smuzhiyun 		ret = -ENODEV;
423*4882a593Smuzhiyun 	}
424*4882a593Smuzhiyun 
425*4882a593Smuzhiyun 	return ret;
426*4882a593Smuzhiyun }
427*4882a593Smuzhiyun 
/*
 * Unlink and free every handler node matching (dev, event, cb_func).
 * After removing a node the walk is re-anchored: from the new head when
 * the head itself was removed, otherwise from prev->next, so "prev"
 * always points at a live node.
 */
void
wl_ext_event_deregister(struct net_device *dev, dhd_pub_t *dhd,
	uint32 event, void *cb_func)
{
	struct wl_event_params *event_params = dhd->event_params;
	struct event_handler_list *node, *prev, **evt_head;
	int tmp = 0;	/* 1 when the node just removed was the list head */

	if (event_params) {
		mutex_lock(&event_params->event_sync);
		evt_head = &event_params->evt_head.evt_head;
		node = *evt_head;
		prev = node;
		for (;node;) {
			if (node->dev == dev && node->etype == event && node->cb_func == cb_func) {
				if (node == *evt_head) {
					tmp = 1;
					*evt_head = node->next;
				} else {
					tmp = 0;
					prev->next = node->next;
				}
				EVENT_TRACE(dev->name, "event %d deregistered\n", event);
				kfree(node);
				/* Re-anchor the walk after unlinking. */
				if (tmp == 1) {
					node = *evt_head;
					prev = node;
				} else {
					node = prev->next;
				}
				continue;
			}
			prev = node;
			node = node->next;
		}
		mutex_unlock(&event_params->event_sync);
	} else {
		EVENT_ERROR(dev->name, "event_params not ready %d\n", event);
	}
}
468*4882a593Smuzhiyun 
469*4882a593Smuzhiyun static s32
wl_ext_event_init_priv(struct wl_event_params * event_params)470*4882a593Smuzhiyun wl_ext_event_init_priv(struct wl_event_params *event_params)
471*4882a593Smuzhiyun {
472*4882a593Smuzhiyun 	s32 err = 0;
473*4882a593Smuzhiyun 
474*4882a593Smuzhiyun 	mutex_init(&event_params->event_sync);
475*4882a593Smuzhiyun 	wl_ext_event_init_eq(event_params);
476*4882a593Smuzhiyun 	if (wl_ext_event_create_handler(event_params))
477*4882a593Smuzhiyun 		return -ENOMEM;
478*4882a593Smuzhiyun 
479*4882a593Smuzhiyun 	return err;
480*4882a593Smuzhiyun }
481*4882a593Smuzhiyun 
/* Tear down the dispatcher: quiesce the worker first, then drop any
 * queued events, and finally free the registered-handler list. */
static void
wl_ext_event_deinit_priv(struct wl_event_params *event_params)
{
	wl_ext_event_destroy_handler(event_params);
	wl_ext_event_flush_eq(event_params);
	wl_ext_event_free(event_params);
}
489*4882a593Smuzhiyun 
490*4882a593Smuzhiyun int
wl_ext_event_attach_netdev(struct net_device * net,int ifidx,uint8 bssidx)491*4882a593Smuzhiyun wl_ext_event_attach_netdev(struct net_device *net, int ifidx, uint8 bssidx)
492*4882a593Smuzhiyun {
493*4882a593Smuzhiyun 	struct dhd_pub *dhd = dhd_get_pub(net);
494*4882a593Smuzhiyun 	struct wl_event_params *event_params = dhd->event_params;
495*4882a593Smuzhiyun 
496*4882a593Smuzhiyun 	if (event_params && ifidx < DHD_MAX_IFS) {
497*4882a593Smuzhiyun 		EVENT_TRACE(net->name, "ifidx=%d, bssidx=%d\n", ifidx, bssidx);
498*4882a593Smuzhiyun 		event_params->dev[ifidx] = net;
499*4882a593Smuzhiyun 	}
500*4882a593Smuzhiyun 
501*4882a593Smuzhiyun 	return 0;
502*4882a593Smuzhiyun }
503*4882a593Smuzhiyun 
504*4882a593Smuzhiyun int
wl_ext_event_dettach_netdev(struct net_device * net,int ifidx)505*4882a593Smuzhiyun wl_ext_event_dettach_netdev(struct net_device *net, int ifidx)
506*4882a593Smuzhiyun {
507*4882a593Smuzhiyun 	struct dhd_pub *dhd = dhd_get_pub(net);
508*4882a593Smuzhiyun 	struct wl_event_params *event_params = dhd->event_params;
509*4882a593Smuzhiyun 
510*4882a593Smuzhiyun 	if (event_params && ifidx < DHD_MAX_IFS) {
511*4882a593Smuzhiyun 		EVENT_TRACE(net->name, "ifidx=%d\n", ifidx);
512*4882a593Smuzhiyun 		event_params->dev[ifidx] = NULL;
513*4882a593Smuzhiyun 	}
514*4882a593Smuzhiyun 
515*4882a593Smuzhiyun 	return 0;
516*4882a593Smuzhiyun }
517*4882a593Smuzhiyun 
518*4882a593Smuzhiyun s32
wl_ext_event_attach(struct net_device * net)519*4882a593Smuzhiyun wl_ext_event_attach(struct net_device *net)
520*4882a593Smuzhiyun {
521*4882a593Smuzhiyun 	struct dhd_pub *dhdp = dhd_get_pub(net);
522*4882a593Smuzhiyun 	struct wl_event_params *event_params = NULL;
523*4882a593Smuzhiyun 	s32 err = 0;
524*4882a593Smuzhiyun 
525*4882a593Smuzhiyun 	event_params = kmalloc(sizeof(wl_event_params_t), GFP_KERNEL);
526*4882a593Smuzhiyun 	if (!event_params) {
527*4882a593Smuzhiyun 		EVENT_ERROR(net->name, "Failed to allocate memory (%zu)\n",
528*4882a593Smuzhiyun 			sizeof(wl_event_params_t));
529*4882a593Smuzhiyun 		return -ENOMEM;
530*4882a593Smuzhiyun 	}
531*4882a593Smuzhiyun 	dhdp->event_params = event_params;
532*4882a593Smuzhiyun 	memset(event_params, 0, sizeof(wl_event_params_t));
533*4882a593Smuzhiyun 	event_params->pub = dhdp;
534*4882a593Smuzhiyun 
535*4882a593Smuzhiyun 	err = wl_ext_event_init_priv(event_params);
536*4882a593Smuzhiyun 	if (err) {
537*4882a593Smuzhiyun 		EVENT_ERROR(net->name, "Failed to wl_ext_event_init_priv (%d)\n", err);
538*4882a593Smuzhiyun 		goto ext_attach_out;
539*4882a593Smuzhiyun 	}
540*4882a593Smuzhiyun 
541*4882a593Smuzhiyun 	return err;
542*4882a593Smuzhiyun ext_attach_out:
543*4882a593Smuzhiyun 	wl_ext_event_dettach(dhdp);
544*4882a593Smuzhiyun 	return err;
545*4882a593Smuzhiyun }
546*4882a593Smuzhiyun 
547*4882a593Smuzhiyun void
wl_ext_event_dettach(dhd_pub_t * dhdp)548*4882a593Smuzhiyun wl_ext_event_dettach(dhd_pub_t *dhdp)
549*4882a593Smuzhiyun {
550*4882a593Smuzhiyun 	struct wl_event_params *event_params = dhdp->event_params;
551*4882a593Smuzhiyun 
552*4882a593Smuzhiyun 	if (event_params) {
553*4882a593Smuzhiyun 		wl_ext_event_deinit_priv(event_params);
554*4882a593Smuzhiyun 		kfree(event_params);
555*4882a593Smuzhiyun 		dhdp->event_params = NULL;
556*4882a593Smuzhiyun 	}
557*4882a593Smuzhiyun }
558*4882a593Smuzhiyun #endif
559