
/******************************************************************************
 *
 * Copyright(c) 2019 Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 *****************************************************************************/
#define _PHL_CMD_SER_C_
#include "phl_headers.h"
#include "phl_api.h"

#define CMD_SER_L0 0x00000001
#define CMD_SER_L1 0x00000002
#define CMD_SER_L2 0x00000004

enum _CMD_SER_EVENT_SOURCE {
	CMD_SER_SRC_UNKNOWN = 0,
	CMD_SER_SRC_INT = BIT0, /* SER event from interrupt */
	CMD_SER_SRC_POLL = BIT1, /* SER event by polling IO */
	CMD_SER_SRC_INT_NOTIFY = BIT2, /* SER event from interrupt and SER state from MAC */
};

enum _CMD_SER_TIMER_STATE {
	CMD_SER_NOT_OCCUR = 0,
	CMD_SER_M1 = BIT0, /* POLL_IO */
	CMD_SER_M2 = BIT1, /* POLL_FW */
	CMD_SER_M3 = BIT2,
	CMD_SER_M4 = BIT3,
	CMD_SER_M5 = BIT4,
	CMD_SER_M9 = BIT5,
};

#define CMD_SER_FW_TIMEOUT 1000 /* ms */
#define CMD_SER_POLLING_INTERVAL 10 /* ms */
#define CMD_SER_USB_POLLING_INTERVAL_IDL 1000 /* ms */
#define CMD_SER_USB_POLLING_INTERVAL_ACT 10 /* ms */

#define CMD_SER_POLL_IO_TIMES 200
#define CMD_SER_USB_POLL_IO_TIMES 300

#define CMD_SER_LOG_SIZE 10

struct sts_l2 {
	_os_list list;
	u8 idx;
	u8 ser_log;
};

struct cmd_ser {
	struct phl_info_t *phl_info;
	void *dispr;
	u8 state;
	_os_lock _lock;

	u8 evtsrc;
	int poll_cnt;
	_os_timer poll_timer;

	/* L2 log:
	 * if L2 is triggered, set ser_log to the current cmd_ser state.
	 */
	struct phl_queue stslist;
	struct sts_l2 stsl2[CMD_SER_LOG_SIZE];
	u8 bserl2;
	u8 (*ser_l2_hdlr)(void *drv);
};

static void _ser_int_ntfy_ctrl(struct phl_info_t *phl_info,
                               enum rtw_phl_config_int int_type)
{
	if (RTW_PHL_CLR_HCI_INT == int_type)
		rtw_hal_clear_interrupt(phl_info->hal);
	else
		rtw_hal_config_interrupt(phl_info->hal, int_type);
}

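/*
 * _ser_event_notify - query the SER error status from HAL and dispatch it.
 * L0 resets and log-only events are consumed here; a FW-log dump request is
 * forwarded to the message hub as MSG_EVT_DUMP_PLE_BUFFER; everything else
 * is sent to the SER module through phl_ser_send_msg().
 */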
static enum rtw_phl_status
_ser_event_notify(void *phl, u8 *p_ntfy)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	enum RTW_PHL_SER_NOTIFY_EVENT notify = RTW_PHL_SER_L2_RESET;
	struct phl_msg msg = {0};
	u32 err = 0;

	notify = rtw_hal_ser_get_error_status(phl_info->hal, &err);

	if (p_ntfy != NULL)
		*p_ntfy = notify;

	phl_info->phl_com->phl_stats.ser_event[notify]++;

	PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "_ser_event_notify, error 0x%x, notify 0x%x\n", err, notify);

	if (notify == RTW_PHL_SER_L0_RESET) {
		PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "_ser_event_notify, hit L0 Reset\n");
		return RTW_PHL_STATUS_SUCCESS;
	}

	if (notify == RTW_PHL_SER_LOG_ONLY) {
		PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "_ser_event_notify, RTW_PHL_SER_LOG_ONLY\n");
		return RTW_PHL_STATUS_SUCCESS;
	}

	if (notify == RTW_PHL_SER_DUMP_FW_LOG) {
		PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "_ser_event_notify, RTW_PHL_SER_DUMP_FW_LOG\n");

		SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_PHY_MGNT);
		SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_DUMP_PLE_BUFFER);
		phl_msg_hub_send(phl_info, NULL, &msg);

		return RTW_PHL_STATUS_SUCCESS;
	}

	return phl_ser_send_msg(phl, notify);
}

void _ser_dump_stsl2(struct cmd_ser *cser)
{
	u8 idx = 0;

	for (idx = 0; idx < CMD_SER_LOG_SIZE; idx++) {
		if (cser->stsl2[idx].ser_log || cser->stsl2[idx].idx >= CMD_SER_LOG_SIZE) {
			PHL_ERR("%s :: [%d] %d - ser_log = 0x%X \n", __func__,
			        idx, cser->stsl2[idx].idx, cser->stsl2[idx].ser_log);
		}
	}
}

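/*
 * _ser_reset_status - clear the SER state back to CMD_SER_NOT_OCCUR and,
 * depending on the event source, re-arm the idle polling timer (polling
 * mode) or re-enable the HCI interrupt (interrupt-notify mode).
 */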
void _ser_reset_status(struct cmd_ser *cser)
{
	void *drv = phl_to_drvpriv(cser->phl_info);

	_os_spinlock(drv, &cser->_lock, _bh, NULL);
	cser->state = CMD_SER_NOT_OCCUR;
	_os_spinunlock(drv, &cser->_lock, _bh, NULL);

	if (CMD_SER_SRC_POLL == cser->evtsrc) {
		cser->poll_cnt = 0;
		_os_set_timer(drv,
		              &cser->poll_timer,
		              CMD_SER_USB_POLLING_INTERVAL_IDL);
	} else if (CMD_SER_SRC_INT_NOTIFY == cser->evtsrc) {
		_ser_int_ntfy_ctrl(cser->phl_info, RTW_PHL_EN_HCI_INT);
	}
}

void _ser_set_status(struct cmd_ser *cser, u8 serstatus)
{
	void *drv = phl_to_drvpriv(cser->phl_info);

	_os_spinlock(drv, &cser->_lock, _bh, NULL);
	cser->state |= serstatus;
	_os_spinunlock(drv, &cser->_lock, _bh, NULL);
}

void _ser_clear_status(struct cmd_ser *cser, u8 serstatus)
{
	void *drv = phl_to_drvpriv(cser->phl_info);

	_os_spinlock(drv, &cser->_lock, _bh, NULL);
	cser->state &= ~(serstatus);
	_os_spinunlock(drv, &cser->_lock, _bh, NULL);
}

static void _ser_l1_notify(struct cmd_ser *cser)
{
	struct phl_msg nextmsg = {0};

	SET_MSG_MDL_ID_FIELD(nextmsg.msg_id, PHL_MDL_SER);
	SET_MSG_EVT_ID_FIELD(nextmsg.msg_id, MSG_EVT_SER_L1);
	phl_msg_hub_send(cser->phl_info, NULL, &nextmsg);
}

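/*
 * _ser_l2_notify - record the current SER state in the L2 log ring, mark the
 * module as stuck in L2 (bserl2), invoke the registered L2 handler if any,
 * clear pending messages on band 0 and broadcast MSG_EVT_SER_L2.
 */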
static void _ser_l2_notify(struct cmd_ser *cser)
{
	struct phl_info_t *phl_info = cser->phl_info;
	void *drv = phl_to_drvpriv(phl_info);
	struct sts_l2 *stsl2 = NULL;
	struct phl_msg nextmsg = {0};
	_os_list *obj = NULL;

	rtw_hal_ser_reset_wdt_intr(phl_info->hal);

	if (pq_pop(drv, &cser->stslist, &obj, _first, _ps)) {
		stsl2 = (struct sts_l2 *)obj;

		/* Rotate stslist: indexes 0 ~ (CMD_SER_LOG_SIZE - 1) are unused */
		stsl2->idx += CMD_SER_LOG_SIZE;
		stsl2->ser_log = cser->state;
		pq_push(drv, &cser->stslist, &stsl2->list, _tail, _ps);
	}
	_ser_dump_stsl2(cser);

	/* L2 cannot be rescued, so bserl2 is never reset. */
	/* comment out: wait for the new SER flow to handle L2 */
	cser->bserl2 = true;

	if (cser->ser_l2_hdlr)
		cser->ser_l2_hdlr(phl_to_drvpriv(phl_info));

	phl_disp_eng_clr_pending_msg(cser->phl_info, HW_BAND_0);

	SET_MSG_MDL_ID_FIELD(nextmsg.msg_id, PHL_MDL_SER);
	SET_MSG_EVT_ID_FIELD(nextmsg.msg_id, MSG_EVT_SER_L2);
	phl_msg_hub_send(cser->phl_info, NULL, &nextmsg);
}

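/*
 * _ser_m2_notify - move the recovery state from M1 to M2 and send the M2
 * event (RTW_PHL_SER_L1_DISABLE_EN) to the firmware. In interrupt-notify
 * mode the HCI interrupt is switched to handshake mode first.
 */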
static void _ser_m2_notify(struct cmd_ser *cser)
{
	struct phl_info_t *phl_info = cser->phl_info;
	enum rtw_hal_status status = RTW_HAL_STATUS_FAILURE;

	if (CMD_SER_SRC_INT_NOTIFY == cser->evtsrc)
		_ser_int_ntfy_ctrl(phl_info, RTW_PHL_SER_HANDSHAKE_MODE);

	_ser_clear_status(cser, CMD_SER_M1);
	_ser_set_status(cser, CMD_SER_M2);

	/* send M2 event to fw */
	status = rtw_hal_ser_set_error_status(phl_info->hal, RTW_PHL_SER_L1_DISABLE_EN);
	PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_, "_ser_m2_notify:: RTW_PHL_SER_L1_DISABLE_EN, status 0x%x\n", status);
}

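/*
 * _ser_m3_m5_waiting - arm the poll timer to wait for the firmware M3/M5
 * response: one CMD_SER_FW_TIMEOUT shot by default, or repeated short polls
 * when the event source is the polling path.
 */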
static void _ser_m3_m5_waiting(struct cmd_ser *cser)
{
	struct phl_info_t *phl_info = cser->phl_info;
	void *drv = phl_to_drvpriv(phl_info);
	int poll_cnt = 0, intvl = CMD_SER_FW_TIMEOUT;

	if (cser->evtsrc == CMD_SER_SRC_POLL) {
		/* CMD_SER_POLLING_INTERVAL = CMD_SER_FW_TIMEOUT / CMD_SER_USB_POLLING_INTERVAL_ACT */
		poll_cnt = CMD_SER_POLLING_INTERVAL;
		intvl = CMD_SER_USB_POLLING_INTERVAL_ACT;
	}

	cser->poll_cnt = poll_cnt;
	/* wait M3 or M5 */
	_os_set_timer(drv, &cser->poll_timer, intvl);
}

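/*
 * _ser_m4_notify - move the recovery state from M3 to M4 and send the M4
 * event (RTW_PHL_SER_L1_RCVY_EN) to the firmware.
 */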
static void _ser_m4_notify(struct cmd_ser *cser)
{
	struct phl_info_t *phl_info = cser->phl_info;
	enum rtw_hal_status status = RTW_HAL_STATUS_FAILURE;

	if (CMD_SER_SRC_INT_NOTIFY == cser->evtsrc)
		_ser_int_ntfy_ctrl(phl_info, RTW_PHL_SER_HANDSHAKE_MODE);

	_ser_clear_status(cser, CMD_SER_M3);
	_ser_set_status(cser, CMD_SER_M4);

	/* send M4 event */
	status = rtw_hal_ser_set_error_status(phl_info->hal, RTW_PHL_SER_L1_RCVY_EN);
	PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_, "_ser_m4_notify:: RTW_PHL_SER_L1_RCVY_EN, status 0x%x\n", status);
}

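/*
 * _ser_poll_timer_cb - poll timer callback. Translates the current SER state
 * into a dispatcher message: IO-timer expiry while in M1, a polling check or
 * FW-timer expiry while waiting in M2/M4, and a plain polling check otherwise.
 */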
static void _ser_poll_timer_cb(void *priv)
{
	struct cmd_ser *cser = (struct cmd_ser *)priv;
	struct phl_msg nextmsg = {0};
	struct phl_msg_attribute attr = {0};
	enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;

	SET_MSG_MDL_ID_FIELD(nextmsg.msg_id, PHL_MDL_SER);

	if (TEST_STATUS_FLAG(cser->state, CMD_SER_M1)) {
		SET_MSG_EVT_ID_FIELD(nextmsg.msg_id, MSG_EVT_SER_IO_TIMER_EXPIRE);
	} else if (TEST_STATUS_FLAG(cser->state, CMD_SER_M2)) {
		if (cser->poll_cnt > 0) /* polling mode */
			SET_MSG_EVT_ID_FIELD(nextmsg.msg_id, MSG_EVT_SER_POLLING_CHK);
		else
			SET_MSG_EVT_ID_FIELD(nextmsg.msg_id, MSG_EVT_SER_FW_TIMER_EXPIRE);
	} else if (TEST_STATUS_FLAG(cser->state, CMD_SER_M4)) {
		if (cser->poll_cnt > 0) /* polling mode */
			SET_MSG_EVT_ID_FIELD(nextmsg.msg_id, MSG_EVT_SER_POLLING_CHK);
		else
			SET_MSG_EVT_ID_FIELD(nextmsg.msg_id, MSG_EVT_SER_FW_TIMER_EXPIRE);
	} else {
		SET_MSG_EVT_ID_FIELD(nextmsg.msg_id, MSG_EVT_SER_POLLING_CHK);
	}

	nextmsg.band_idx = HW_BAND_0;

	if (MSG_EVT_ID_FIELD(nextmsg.msg_id)) {
		PHL_INFO("%s :: nextmsg->msg_id= 0x%X\n", __func__, MSG_EVT_ID_FIELD(nextmsg.msg_id));
		pstatus = phl_disp_eng_send_msg(cser->phl_info, &nextmsg, &attr, NULL);
		if (pstatus != RTW_PHL_STATUS_SUCCESS)
			PHL_ERR("%s :: [SER_TIMER_CB] dispr_send_msg failed\n", __func__);
	}
}

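/*
 * _ser_m1_pause_trx - M1 handling: pause SW TX, HW TX/RX and SW RX, then
 * reset the SW TX/RX paths. On success, notify M2 to the firmware and start
 * waiting for M3/M5; on any failure, fall through to L2 handling.
 */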
static void _ser_m1_pause_trx(struct cmd_ser *cser)
{
	struct phl_info_t *phl_info = cser->phl_info;
	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
	struct phl_data_ctl_t ctl = {0};

	ctl.id = PHL_MDL_SER;

	ctl.cmd = PHL_DATA_CTL_SW_TX_PAUSE;
	sts = phl_data_ctrler(phl_info, &ctl, NULL);
	if (RTW_PHL_STATUS_SUCCESS != sts) {
		PHL_WARN("%s(): pause sw tx failure\n", __func__);
		goto err;
	}

	ctl.cmd = PHL_DATA_CTL_HW_TRX_PAUSE;
	sts = phl_data_ctrler(phl_info, &ctl, NULL);
	if (RTW_PHL_STATUS_SUCCESS != sts) {
		PHL_WARN("%s(): pause hw trx failure\n", __func__);
		goto err;
	}

	ctl.cmd = PHL_DATA_CTL_SW_RX_PAUSE;
	sts = phl_data_ctrler(phl_info, &ctl, NULL);
#ifdef RTW_WKARD_SER_L1_EXPIRE
	if (RTW_PHL_STATUS_SUCCESS != sts && RTW_PHL_STATUS_CMD_TIMEOUT != sts) {
		PHL_WARN("%s(): pause sw rx failure\n", __func__);
		goto err;
	}
#else
	if (RTW_PHL_STATUS_SUCCESS != sts) {
		PHL_WARN("%s(): pause sw rx failure\n", __func__);
		goto err;
	}
#endif

	ctl.cmd = PHL_DATA_CTL_SW_TX_RESET;
	sts = phl_data_ctrler(phl_info, &ctl, NULL);
	if (RTW_PHL_STATUS_SUCCESS != sts) {
		PHL_WARN("%s(): reset sw tx failure\n", __func__);
		goto err;
	}

	ctl.cmd = PHL_DATA_CTL_SW_RX_RESET;
	sts = phl_data_ctrler(phl_info, &ctl, NULL);
	if (RTW_PHL_STATUS_SUCCESS != sts) {
		PHL_WARN("%s(): reset sw rx failure\n", __func__);
		goto err;
	}

	_ser_m2_notify(cser);
	_ser_m3_m5_waiting(cser);

	return;
err:
	_ser_l2_notify(cser);
	_ser_reset_status(cser);

	return;
}

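/*
 * _ser_m3_reset_hw_trx - M3 handling: reset/resume HW TX/RX and resume SW RX,
 * then notify M4 to the firmware and wait for M5. Any failure falls through
 * to L2 handling.
 */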
static void _ser_m3_reset_hw_trx(struct cmd_ser *cser)
{
	struct phl_info_t *phl_info = cser->phl_info;
	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
	struct phl_data_ctl_t ctl = {0};

	ctl.id = PHL_MDL_SER;

	ctl.cmd = PHL_DATA_CTL_HW_TRX_RST_RESUME;
	sts = phl_data_ctrler(phl_info, &ctl, NULL);
	if (RTW_PHL_STATUS_SUCCESS != sts) {
		PHL_WARN("%s(): resume hw trx failure\n", __func__);
		goto err;
	}

	ctl.cmd = PHL_DATA_CTL_SW_RX_RESUME;
	sts = phl_data_ctrler(phl_info, &ctl, NULL);
	if (RTW_PHL_STATUS_SUCCESS != sts) {
		PHL_WARN("%s(): resume sw rx failure\n", __func__);
		goto err;
	}

	_ser_m4_notify(cser);
	_ser_m3_m5_waiting(cser);

	return;
err:
	_ser_l2_notify(cser);
	_ser_reset_status(cser);

	return;
}

enum phl_mdl_ret_code
_ser_fail_ev_hdlr(void *dispr, void *priv, struct phl_msg *msg)
{
	struct cmd_ser *cser = (struct cmd_ser *)priv;
	struct phl_info_t *phl_info = cser->phl_info;
	void *drv = phl_to_drvpriv(phl_info);

	PHL_INFO("%s :: [MSG_FAIL] MDL =%d , EVT_ID=%d\n", __func__,
	         MSG_MDL_ID_FIELD(msg->msg_id), MSG_EVT_ID_FIELD(msg->msg_id));

	if (cser->evtsrc != CMD_SER_SRC_POLL)
		return MDL_RET_SUCCESS;

	_os_set_timer(drv, &cser->poll_timer, CMD_SER_USB_POLLING_INTERVAL_IDL);

	return MDL_RET_SUCCESS;
}

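/*
 * _ser_hdl_external_evt - gate messages from other modules while SER is
 * active: fail them outright after an unrecoverable L2 (except the L2
 * diagnose event), pend them while a recovery is in progress, and ignore
 * them when no SER has occurred.
 */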
enum phl_mdl_ret_code
_ser_hdl_external_evt(void *dispr, void *priv, struct phl_msg *msg)
{
	struct cmd_ser *cser = (struct cmd_ser *)priv;

	/**
	 * 1. SER in progress: pend msgs from other modules
	 * 2. SER recovery fail: clear pending events from MDL_SER & return fail for msgs from other modules
	 * 3. SER recovery done: clear pending events & return ignore for msgs from other modules
	 * 4. SER not occurred: MDL_RET_IGNORE
	 */
	if (cser->bserl2) {
		/* allow MSG_EVT_DBG_L2_DIAGNOSE when SER L2 occurred */
		if (MSG_EVT_ID_FIELD(msg->msg_id) == MSG_EVT_DBG_L2_DIAGNOSE)
			return MDL_RET_IGNORE;
		PHL_ERR("%s: L2 occurred!! From other MDL=%d, EVT_ID=%d\n", __func__,
		        MSG_MDL_ID_FIELD(msg->msg_id), MSG_EVT_ID_FIELD(msg->msg_id));
		return MDL_RET_FAIL;
	} else if (cser->state) { /* non-CMD_SER_NOT_OCCUR */
		PHL_WARN("%s: Within SER!! From other MDL=%d, EVT_ID=%d\n", __func__,
		         MSG_MDL_ID_FIELD(msg->msg_id), MSG_EVT_ID_FIELD(msg->msg_id));
		return MDL_RET_PENDING;
	}

	return MDL_RET_IGNORE;
}

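/*
 * _ser_msg_hdl_polling_chk - polling path: if the HAL reports a pending
 * interrupt, run the interrupt handler; otherwise re-arm the poll timer,
 * using the short interval while still waiting for M3/M5 and the idle
 * interval when no SER is in progress.
 */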
static void _ser_msg_hdl_polling_chk(struct cmd_ser *cser)
{
	struct phl_info_t *phl_info = cser->phl_info;
	void *drv = phl_to_drvpriv(phl_info);

	if (CMD_SER_SRC_POLL != cser->evtsrc)
		return;

	if (true == rtw_hal_recognize_interrupt(phl_info->hal)) {
		rtw_phl_interrupt_handler(phl_info);
	} else {
		if (cser->poll_cnt > 0) {
			/* wait for M3/M5, keep polling at the short interval */
			cser->poll_cnt--;
			_os_set_timer(drv, &cser->poll_timer, CMD_SER_USB_POLLING_INTERVAL_ACT); /* 10 ms */
		} else {
			/* no SER occurred, set the next idle polling timer */
			_os_set_timer(drv, &cser->poll_timer, CMD_SER_USB_POLLING_INTERVAL_IDL); /* 1000 ms */
		}
	}
}

static enum rtw_phl_status
_ser_msg_hdl_evt_chk(struct cmd_ser *cser)
{
	struct phl_info_t *phl_info = cser->phl_info;
	enum rtw_phl_status psts = RTW_PHL_STATUS_FAILURE;

	if (CMD_SER_SRC_INT == cser->evtsrc) {
		psts = _ser_event_notify(phl_info, NULL);
	} else if (CMD_SER_SRC_POLL == cser->evtsrc) {
		psts = _ser_event_notify(phl_info, NULL);
	} else if (CMD_SER_SRC_INT_NOTIFY == cser->evtsrc) {
		u8 notify = RTW_PHL_SER_L0_RESET;

		/* disable interrupt notification and read the SER value */
		if (true == rtw_hal_recognize_halt_c2h_interrupt(phl_info->hal)) {
			psts = _ser_event_notify(phl_info, &notify);

			if ((notify == RTW_PHL_SER_L0_RESET) || (notify == RTW_PHL_SER_L2_RESET))
				_ser_int_ntfy_ctrl(phl_info, RTW_PHL_EN_HCI_INT);
		} else {
			_ser_int_ntfy_ctrl(phl_info, RTW_PHL_EN_HCI_INT);
		}
	}

	return psts;
}

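/*
 * _ser_msg_hdl_m1 - entry point for M1: stop the poll timer (polling mode) or
 * disable the HCI interrupt (interrupt-notify mode), mark M1, broadcast the
 * L1 event and start pausing TX/RX.
 */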
static void _ser_msg_hdl_m1(struct cmd_ser *cser)
{
	struct phl_info_t *phl_info = cser->phl_info;
	void *drv = phl_to_drvpriv(phl_info);

	if (CMD_SER_SRC_POLL == cser->evtsrc)
		_os_cancel_timer(drv, &cser->poll_timer);
	else if (CMD_SER_SRC_INT_NOTIFY == cser->evtsrc)
		_ser_int_ntfy_ctrl(phl_info, RTW_PHL_DIS_HCI_INT);

	_ser_set_status(cser, CMD_SER_M1);
	_ser_l1_notify(cser);

	_ser_m1_pause_trx(cser);
}

static void _ser_msg_hdl_fw_expire(struct cmd_ser *cser)
{
	struct phl_info_t *phl_info = cser->phl_info;
	void *drv = phl_to_drvpriv(phl_info);

	if (CMD_SER_SRC_POLL == cser->evtsrc) {
		_ser_set_status(cser, CMD_SER_M9);

		_os_set_timer(drv,
		              &cser->poll_timer,
		              CMD_SER_USB_POLLING_INTERVAL_IDL);
	}

	cser->bserl2 = true;
}

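/*
 * _ser_msg_hdl_m3 - firmware signalled M3: stop the wait timer, move the
 * state from M2 to M3 and start resetting the HW TX/RX paths.
 */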
static void _ser_msg_hdl_m3(struct cmd_ser *cser)
{
	struct phl_info_t *phl_info = cser->phl_info;
	void *drv = phl_to_drvpriv(phl_info);

	if (CMD_SER_SRC_INT_NOTIFY == cser->evtsrc)
		_ser_int_ntfy_ctrl(phl_info, RTW_PHL_CLR_HCI_INT);

	_os_cancel_timer(drv, &cser->poll_timer);
	_ser_clear_status(cser, CMD_SER_M2);
	_ser_set_status(cser, CMD_SER_M3);

	_ser_m3_reset_hw_trx(cser);
}

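/*
 * _ser_msg_hdl_m5 - firmware signalled M5 (recovery done): stop the wait
 * timer, move the state from M4 to M5, resume SW TX, reset the SER state and
 * release any messages pended during recovery.
 */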
static void _ser_msg_hdl_m5(struct cmd_ser *cser)
{
	struct phl_info_t *phl_info = cser->phl_info;
	void *drv = phl_to_drvpriv(phl_info);
	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
	struct phl_data_ctl_t ctl = {0};

	_os_cancel_timer(drv, &cser->poll_timer);
	_ser_clear_status(cser, CMD_SER_M4);
	_ser_set_status(cser, CMD_SER_M5);

	ctl.id = PHL_MDL_SER;
	ctl.cmd = PHL_DATA_CTL_SW_TX_RESUME;
	sts = phl_data_ctrler(phl_info, &ctl, NULL);
	if (RTW_PHL_STATUS_SUCCESS != sts)
		PHL_WARN("%s(): resume sw tx failure\n", __func__);

	_ser_reset_status(cser);

	phl_disp_eng_clr_pending_msg(cser->phl_info, HW_BAND_0);
}

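/*
 * _ser_msg_hdl_m9 - L2 reset request (M9): stop the wait timer, mark M9,
 * resume the SW TX/RX data paths, then run the L2 notification and reset the
 * SER state.
 */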
static void _ser_msg_hdl_m9(struct cmd_ser *cser)
{
	struct phl_info_t *phl_info = cser->phl_info;
	void *drv = phl_to_drvpriv(phl_info);
	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
	struct phl_data_ctl_t ctl = {0};

	_os_cancel_timer(drv, &cser->poll_timer);
	_ser_set_status(cser, CMD_SER_M9);

	if (cser->state > CMD_SER_NOT_OCCUR) {
		ctl.id = PHL_MDL_SER;
		ctl.cmd = PHL_DATA_CTL_SW_TX_RESUME;
		sts = phl_data_ctrler(phl_info, &ctl, NULL);
		if (RTW_PHL_STATUS_SUCCESS != sts)
			PHL_WARN("%s(): resume sw tx failure\n", __func__);

		ctl.cmd = PHL_DATA_CTL_SW_RX_RESUME;
		sts = phl_data_ctrler(phl_info, &ctl, NULL);
		if (RTW_PHL_STATUS_SUCCESS != sts)
			PHL_WARN("%s(): resume sw rx failure\n", __func__);
	}

	_ser_l2_notify(cser);
	_ser_reset_status(cser);
}

enum phl_mdl_ret_code
_ser_hdl_internal_evt(void *dispr, void *priv, struct phl_msg *msg)
{
	struct cmd_ser *cser = (struct cmd_ser *)priv;
	enum phl_mdl_ret_code ret = MDL_RET_SUCCESS;

	switch (MSG_EVT_ID_FIELD(msg->msg_id)) {
	case MSG_EVT_SER_POLLING_CHK:
		PHL_INFO("MSG_EVT_SER_POLLING_CHK\n");
		_ser_msg_hdl_polling_chk(cser);
		break;

	case MSG_EVT_SER_EVENT_CHK:
		PHL_INFO("MSG_EVT_SER_EVENT_CHK\n");
		_ser_msg_hdl_evt_chk(cser);
		break;

	case MSG_EVT_SER_M1_PAUSE_TRX:
		PHL_WARN("MSG_EVT_SER_M1_PAUSE_TRX\n");
		_ser_msg_hdl_m1(cser);
		break;

	case MSG_EVT_SER_IO_TIMER_EXPIRE:
		PHL_INFO("MSG_EVT_SER_IO_TIMER_EXPIRE\n");
		break;

	case MSG_EVT_SER_FW_TIMER_EXPIRE:
		PHL_WARN("MSG_EVT_SER_FW_TIMER_EXPIRE (state %d), do nothing and wait FW WDT\n",
		         cser->state);
		_ser_msg_hdl_fw_expire(cser);
		break;

	case MSG_EVT_SER_M3_DO_RECOV:
		PHL_INFO("MSG_EVT_SER_M3_DO_RECOV\n");
		_ser_msg_hdl_m3(cser);
		break;

	case MSG_EVT_SER_M5_READY:
		PHL_INFO("MSG_EVT_SER_M5_READY\n");
		_ser_msg_hdl_m5(cser);
		break;

	case MSG_EVT_SER_M9_L2_RESET:
		PHL_WARN("MSG_EVT_SER_M9_L2_RESET\n");
		_ser_msg_hdl_m9(cser);
		break;
	}

	return ret;
}

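/*
 * _phl_ser_mdl_init - allocate and initialize the cmd_ser module instance:
 * lock, poll timer and L2 log queue, then pick the SER event source from the
 * HCI type (PCIe: interrupt, USB: polling, SDIO: interrupt + notify).
 */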
static enum phl_mdl_ret_code
_phl_ser_mdl_init(void *phl, void *dispr, void **priv)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	void *drv = phl_to_drvpriv(phl_info);
	struct cmd_ser *cser = NULL;
	u8 idx = 0;

	FUNCIN();
	if (priv == NULL)
		return MDL_RET_FAIL;

	(*priv) = NULL;
	cser = (struct cmd_ser *)_os_mem_alloc(drv, sizeof(struct cmd_ser));
	if (cser == NULL) {
		PHL_ERR(" %s, alloc fail\n", __FUNCTION__);
		return MDL_RET_FAIL;
	}

	_os_mem_set(drv, cser, 0, sizeof(struct cmd_ser));
	_os_spinlock_init(drv, &cser->_lock);
	_os_init_timer(drv,
	               &cser->poll_timer,
	               _ser_poll_timer_cb,
	               cser,
	               "cmd_ser_poll_timer");

	INIT_LIST_HEAD(&cser->stslist.queue);
	for (idx = 0; idx < CMD_SER_LOG_SIZE; idx++) {
		INIT_LIST_HEAD(&cser->stsl2[idx].list);
		cser->stsl2[idx].idx = idx;
		pq_push(drv, &cser->stslist, &cser->stsl2[idx].list, _tail, _ps);
	}

	cser->phl_info = phl_info;
	cser->dispr = dispr;
	(*priv) = (void *)cser;

	if (phl_info->phl_com->hci_type == RTW_HCI_PCIE)
		cser->evtsrc = CMD_SER_SRC_INT;
	else if (phl_info->phl_com->hci_type == RTW_HCI_USB)
		cser->evtsrc = CMD_SER_SRC_POLL;
	else if (phl_info->phl_com->hci_type == RTW_HCI_SDIO)
		cser->evtsrc = CMD_SER_SRC_INT_NOTIFY;

	PHL_INFO("%s:: cser->evtsrc = %d\n", __func__, cser->evtsrc);
	FUNCOUT();

	return MDL_RET_SUCCESS;
}

static void _phl_ser_mdl_deinit(void *dispr, void *priv)
{
	struct cmd_ser *cser = (struct cmd_ser *)priv;
	void *drv = phl_to_drvpriv(cser->phl_info);

	FUNCIN();

	_os_cancel_timer(drv, &cser->poll_timer);
	_os_release_timer(drv, &cser->poll_timer);
	_os_spinlock_free(drv, &cser->_lock);
	_os_mem_free(drv, cser, sizeof(struct cmd_ser));
	PHL_INFO(" %s\n", __FUNCTION__);
}

static enum phl_mdl_ret_code
_phl_ser_mdl_start(void *dispr, void *priv)
{
	struct cmd_ser *cser = (struct cmd_ser *)priv;
	struct phl_info_t *phl_info = cser->phl_info;

	if (CMD_SER_SRC_POLL == cser->evtsrc) {
		/* Disable L0 Reset Notify from FW to driver */
		rtw_hal_ser_set_error_status(phl_info->hal, RTW_PHL_SER_L0_CFG_DIS_NOTIFY);
	}

	_ser_reset_status(cser);

	return MDL_RET_SUCCESS;
}

static enum phl_mdl_ret_code
_phl_ser_mdl_stop(void *dispr, void *priv)
{
	struct cmd_ser *cser = (struct cmd_ser *)priv;
	void *drv = phl_to_drvpriv(cser->phl_info);

	_os_cancel_timer(drv, &cser->poll_timer);

	return MDL_RET_SUCCESS;
}

static enum phl_mdl_ret_code
_phl_ser_mdl_msg_hdlr(void *dispr,
                      void *priv,
                      struct phl_msg *msg)
{
	enum phl_mdl_ret_code ret = MDL_RET_IGNORE;

	if (IS_MSG_FAIL(msg->msg_id)) {
		PHL_INFO("%s :: MSG(%d)_FAIL - EVT_ID=%d \n", __func__,
		         MSG_MDL_ID_FIELD(msg->msg_id), MSG_EVT_ID_FIELD(msg->msg_id));

		return _ser_fail_ev_hdlr(dispr, priv, msg);
	}

	switch (MSG_MDL_ID_FIELD(msg->msg_id)) {
		case PHL_MDL_SER:
			if (IS_MSG_IN_PRE_PHASE(msg->msg_id))
				ret = _ser_hdl_internal_evt(dispr, priv, msg);
			break;

		default:
			ret = _ser_hdl_external_evt(dispr, priv, msg);
			break;
	}

	return ret;
}

static enum phl_mdl_ret_code
_phl_ser_mdl_set_info(void *dispr,
                      void *priv,
                      struct phl_module_op_info *info)
{
	struct cmd_ser *cser = (struct cmd_ser *)priv;
	enum phl_mdl_ret_code ret = MDL_RET_IGNORE;
	/* PHL_INFO(" %s :: info->op_code=%d \n", __func__, info->op_code); */

	switch (info->op_code) {
		case BK_MODL_OP_INPUT_CMD:
			if (info->inbuf) {
				cser->ser_l2_hdlr = (u8 (*)(void *))info->inbuf;
			}
			ret = MDL_RET_SUCCESS;
			break;
	}

	return ret;
}

static enum phl_mdl_ret_code
_phl_ser_mdl_query_info(void *dispr,
                        void *priv,
                        struct phl_module_op_info *info)
{
	struct cmd_ser *cser = (struct cmd_ser *)priv;
	void *drv = phl_to_drvpriv(cser->phl_info);
	enum phl_mdl_ret_code ret = MDL_RET_IGNORE;
	/* PHL_INFO(" %s :: info->op_code=%d \n", __func__, info->op_code); */

	switch (info->op_code) {
		case BK_MODL_OP_STATE:
			_os_mem_cpy(drv, (void *)info->inbuf, &cser->state, 1);
			ret = MDL_RET_SUCCESS;
			break;
	}
	return ret;
}

static struct phl_bk_module_ops ser_ops = {
	.init = _phl_ser_mdl_init,
	.deinit = _phl_ser_mdl_deinit,
	.start = _phl_ser_mdl_start,
	.stop = _phl_ser_mdl_stop,
	.msg_hdlr = _phl_ser_mdl_msg_hdlr,
	.set_info = _phl_ser_mdl_set_info,
	.query_info = _phl_ser_mdl_query_info,
};

enum rtw_phl_status
phl_register_ser_module(struct phl_info_t *phl_info)
{
	enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;

	phl_status = phl_disp_eng_register_module(phl_info,
	                                          HW_BAND_0,
	                                          PHL_MDL_SER,
	                                          &ser_ops);
	if (RTW_PHL_STATUS_SUCCESS != phl_status) {
		PHL_ERR("%s register SER module in cmd disp failed! \n", __func__);
	}

	return phl_status;
}

#ifdef CONFIG_PHL_CMD_SER
u8 phl_ser_inprogress(void *phl)
{
	struct phl_module_op_info op_info = {0};
	u8 state = 0;

	op_info.op_code = BK_MODL_OP_STATE;
	op_info.inbuf = (u8 *)&state;
	op_info.inlen = 1;

	if (rtw_phl_query_bk_module_info(phl,
	                                 HW_BAND_0,
	                                 PHL_MDL_SER,
	                                 &op_info) == RTW_PHL_STATUS_SUCCESS) {
		if (state) /* non-CMD_SER_NOT_OCCUR */
			return true;
	}
	return false;
}

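/*
 * phl_ser_send_msg - translate an SER notify event (M1/M3/M5/M9 or event
 * check) into the corresponding PHL_MDL_SER message and hand it to the
 * dispatcher on band 0.
 */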
enum rtw_phl_status
phl_ser_send_msg(void *phl, enum RTW_PHL_SER_NOTIFY_EVENT notify)
{
	enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
	struct phl_msg nextmsg = {0};
	struct phl_msg_attribute attr = {0};
	u16 event = 0;

	switch (notify) {
	case RTW_PHL_SER_PAUSE_TRX: /* M1 */
		event = MSG_EVT_SER_M1_PAUSE_TRX;
		break;
	case RTW_PHL_SER_DO_RECOVERY: /* M3 */
		event = MSG_EVT_SER_M3_DO_RECOV;
		break;
	case RTW_PHL_SER_READY: /* M5 */
		event = MSG_EVT_SER_M5_READY;
		break;
	case RTW_PHL_SER_L2_RESET: /* M9 */
		event = MSG_EVT_SER_M9_L2_RESET;
		break;
	case RTW_PHL_SER_EVENT_CHK:
		event = MSG_EVT_SER_EVENT_CHK;
		break;
	case RTW_PHL_SER_L0_RESET:
	default:
		PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "phl_ser_send_msg(): unsupported case %d\n",
		          notify);
		return RTW_PHL_STATUS_FAILURE;
	}
	PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "phl_ser_send_msg(): event %d\n", event);

	SET_MSG_MDL_ID_FIELD(nextmsg.msg_id, PHL_MDL_SER);
	SET_MSG_EVT_ID_FIELD(nextmsg.msg_id, event);
	nextmsg.band_idx = HW_BAND_0;

	phl_status = rtw_phl_send_msg_to_dispr(phl,
	                                       &nextmsg,
	                                       &attr,
	                                       NULL);
	if (phl_status != RTW_PHL_STATUS_SUCCESS) {
		PHL_ERR("[CMD_SER] send_msg_to_dispr fail! (%d)\n", event);
	}

	return phl_status;
}

void phl_ser_send_check(void *context)
{
	struct rtw_phl_handler *phl_handler
		= (struct rtw_phl_handler *)phl_container_of(context,
							struct rtw_phl_handler,
							os_handler);
	struct phl_info_t *phl_info = (struct phl_info_t *)phl_handler->context;

	phl_ser_send_msg(phl_info, RTW_PHL_SER_EVENT_CHK);
}
#endif

#ifndef CONFIG_FSM
/* The same as phl_fw_watchdog_timeout_notify of fsm-ser */
enum rtw_phl_status
phl_fw_watchdog_timeout_notify(void *phl)
{
	enum RTW_PHL_SER_NOTIFY_EVENT notify = RTW_PHL_SER_L2_RESET;

	PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_, "phl_fw_watchdog_timeout_notify triggers L2 Reset !!!\n");

	return phl_ser_send_msg(phl, notify);
}

enum rtw_phl_status rtw_phl_ser_l2_notify(struct rtw_phl_com_t *phl_com)
{
	enum RTW_PHL_SER_NOTIFY_EVENT notify = RTW_PHL_SER_L2_RESET;

	PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_, "rtw_phl_ser_l2_notify triggers L2 Reset !!!\n");

	return phl_ser_send_msg(phl_com->phl_priv, notify);
}

/* The same as rtw_phl_ser_dump_ple_buffer of fsm-ser */
enum rtw_phl_status
rtw_phl_ser_dump_ple_buffer(void *phl)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;

	PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "rtw_phl_ser_dump_ple_buffer\n");

	rtw_hal_dump_fw_rsvd_ple(phl_info->hal);

	return RTW_PHL_STATUS_SUCCESS;
}
#endif /* !CONFIG_FSM */