xref: /OK3568_Linux_fs/external/rkwifibt/drivers/rtl8852be/phl/phl_cmd_ps.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /******************************************************************************
2  *
3  * Copyright(c) 2021 Realtek Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of version 2 of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12  * more details.
13  *
14  *****************************************************************************/
15 #define _PHL_CMD_PS_C_
16 #include "phl_headers.h"
17 #ifdef CONFIG_POWER_SAVE
18 /* structure of a power request */
/* structure of a power request */
struct pwr_req {
	_os_list list;  /* queue linkage; node lives in cmd_ps::req_pool */
	u16 evt_id;     /* MSG_EVT_* that holds the power, MSG_EVT_NONE when idle */
};
23 
/* runtime power-save request: allow/deny ps for a given runtime reason */
struct rt_ps {
	enum phl_ps_rt_rson rt_rson; /* which runtime reason this request refers to */
	bool ps_allow;               /* true: reason cleared, ps allowed again */
};
28 
29 /**
30  * +----------------------+-------------------+-----------------------+
31  * |       ps_mode        |      ps_state     |        pwr_lvl        |
32  * +----------------------+-------------------+-----------------------+
33  * |     PS_MODE_LPS      |  PS_STATE_LEAVED  |  PS_PWR_LVL_PWRON     |
34  * |                      +-------------------+-----------------------+
35  * |                      |  PS_STATE_PROTO   |  PS_PWR_LVL_PWRON     |
36  * |                      +-------------------+-----------------------+
37  * |                      |  PS_STATE_ENTERED |  PS_PWR_LVL_PWR_GATED |
38  * |                      |                   |  PS_PWR_LVL_CLK_GATED |
39  * |                      |                   |  PS_PWR_LVL_RF_OFF    |
40  * +----------------------+-------------------+-----------------------+
41  * |     PS_MODE_IPS      |  PS_STATE_LEAVED  |  PS_PWR_LVL_PWRON     |
42  * |                      +-------------------+-----------------------+
43  * |                      |  PS_STATE_ENTERED |  PS_PWR_LVL_PWROFF    |
44  * |                      |                   |  PS_PWR_LVL_PWR_GATED |
45  * |                      |                   |  PS_PWR_LVL_CLK_GATED |
46  * |                      |                   |  PS_PWR_LVL_RF_OFF    |
47  * +----------------------+-------------------+-----------------------+
48  */
49 
/* coarse power-save progress; see the mode/state/pwr_lvl table above */
enum {
	PS_STATE_NONE = 0,
	PS_STATE_LEAVED,  /* fully awake, no ps config applied */
	/**
	 * lps: protocol only
	 * ips: won't in this state
	 */
	PS_STATE_PROTO,
	/**
	 * lps: protocol + power
	 * ips: power
	 */
	PS_STATE_ENTERED
};
64 
/* Map a PS_STATE_* value to a printable name for trace output. */
static const char *_ps_state_to_str(u8 ps_state)
{
	if (ps_state == PS_STATE_ENTERED)
		return "PS_ENTERED";
	if (ps_state == PS_STATE_PROTO)
		return "PS_PROTOCOL";
	if (ps_state == PS_STATE_LEAVED)
		return "PS_LEAVED";
	return "NONE";
}
78 
/* multi-role snapshot: roles whose presence forbids entering ps */
struct _ps_mr_info {
	bool ap_active; /* an AP-category role is up */
	bool gc_active; /* a GC (p2p client) role is up */
};
83 
#define CMD_PS_TIMER_PERIOD 100
#define MAX_PWE_REQ_NUM 16
/* per-dispatcher power-save module state */
struct cmd_ps {
	struct phl_info_t *phl_info;
	void *dispr;
	_os_timer ps_timer;            /* fires MSG_EVT_PS_PERIOD_CHK every CMD_PS_TIMER_PERIOD ms */

	/* fixed pool of pwr_req nodes shuttled between the two queues */
	struct phl_queue req_busy_q;   /* outstanding power requests */
	struct phl_queue req_idle_q;   /* free nodes */
	struct pwr_req req_pool[MAX_PWE_REQ_NUM];

	bool rej_pwr_req; /* reject all pwr request */
	bool btc_req_pwr; /* btc request pwr or not */
	bool stop_datapath;            /* sw tx was paused on ps entry; resume on leave */

	/* current state */
	u8 cur_pwr_lvl;                /* PS_PWR_LVL_* actually applied */
	u8 ps_state;                   /* PS_STATE_* (see enum above) */
	u8 ps_mode;                    /* PS_MODE_NONE / PS_MODE_LPS / PS_MODE_IPS */
	char enter_rson[MAX_CMD_PS_RSON_LENGTH]; /* last reason ps was entered */
	char leave_rson[MAX_CMD_PS_RSON_LENGTH]; /* last reason ps was left */

	struct rtw_phl_stainfo_t *sta; /* sta entering/leaving ps */
	u16 macid;                     /* macid in ps, PS_MACID_NONE when none */

	/* lps */
	u32 null_token;                /* token for the null-data exchange */
	bool wdg_leave_ps;             /* watchdog decided to leave ps */

	/* refs. enum "phl_ps_rt_rson" */
	enum phl_ps_rt_rson rt_stop_rson;

	struct _ps_mr_info mr_info;

	/* rssi */
	u8 rssi_bcn_min;               /* criteria for the rssi-diff leave check */
};
121 
122 /**
123  * determine leave lps or not
124  * return true if rssi variation reach threshold
125  * @ps: see cmd_ps
126  */
_chk_rssi_diff_reach_thld(struct cmd_ps * ps)127 static bool _chk_rssi_diff_reach_thld(struct cmd_ps *ps)
128 {
129 	struct phl_info_t *phl_info = ps->phl_info;
130 	struct rtw_ps_cap_t *ps_cap = _get_ps_cap(ps->phl_info);
131 	u8 cur_rssi_bcn_min = 0;
132 	u8 *rssi_bcn_min = &ps->rssi_bcn_min;
133 	bool leave_ps = false;
134 
135 	cur_rssi_bcn_min = phl_get_min_rssi_bcn(phl_info);
136 
137 	do {
138 		if (*rssi_bcn_min == 0 || *rssi_bcn_min == 0xFF) {
139 			PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): (criteria = %d, cur = %d) criteria invalid, set criteria to cur\n",
140 					__func__, *rssi_bcn_min, cur_rssi_bcn_min);
141 			*rssi_bcn_min = cur_rssi_bcn_min; /* update with current_rssi */
142 			break;
143 		}
144 
145 		if (DIFF(*rssi_bcn_min, cur_rssi_bcn_min) < ps_cap->lps_rssi_diff_threshold) {
146 			PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): (criteria = %d, cur = %d) RSSI diff < %d, do nothing\n",
147 					__func__, *rssi_bcn_min, cur_rssi_bcn_min, ps_cap->lps_rssi_diff_threshold);
148 			break;
149 		}
150 
151 		PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): leave ps and update criteria from %d to %d\n", __func__, *rssi_bcn_min, cur_rssi_bcn_min);
152 
153 		leave_ps = true;
154 		*rssi_bcn_min = cur_rssi_bcn_min;
155 	} while (0);
156 
157 	return leave_ps;
158 }
159 
160 /**
161  * determine leave lps or not
162  * return true if beacon offset changed
163  * @ps: see cmd_ps
164  */
_chk_bcn_offset_changed(struct cmd_ps * ps)165 bool _chk_bcn_offset_changed(struct cmd_ps *ps)
166 {
167 	struct phl_info_t *phl_info = ps->phl_info;
168 	u8 ridx = MAX_WIFI_ROLE_NUMBER;
169 	struct rtw_wifi_role_t *wrole = NULL;
170 	struct rtw_bcn_offset *b_ofst_i = NULL;
171 	bool leave_ps = false;
172 
173 	for (ridx = 0; ridx < MAX_WIFI_ROLE_NUMBER; ridx++) {
174 		wrole = rtw_phl_get_wrole_by_ridx(phl_info->phl_com, ridx);
175 		if (wrole == NULL)
176 			continue;
177 
178 		if (rtw_phl_role_is_client_category(wrole) && wrole->mstate == MLME_LINKED) {
179 			b_ofst_i = phl_get_sta_bcn_offset_info(phl_info, wrole);
180 
181 			if (b_ofst_i->conf_lvl >= CONF_LVL_MID &&
182 				b_ofst_i->offset != b_ofst_i->cr_tbtt_shift) {
183 				PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): update ridx(%d) bcn offset from %d to %d TU\n",
184 							__func__, ridx, b_ofst_i->cr_tbtt_shift, b_ofst_i->offset);
185 				leave_ps = true;
186 				break;
187 			}
188 		}
189 	}
190 	return leave_ps;
191 }
192 
193 /**
194  * check whether to leave/enter lps
195  * return true if ps need to change to the target state
196  * @ps: see cmd_ps
197  * @mac_id: macid of corresponding sta
198  * @cur_state: currently lps state
199  * @target_state: the target ps state
200  */
/**
 * check whether to leave/enter lps
 * return true if ps need to change to the target state
 * @ps: see cmd_ps
 * @macid: macid of corresponding sta
 * @cur_state: currently lps state
 * @target_state: the target ps state
 */
static bool
_lps_state_judge_changed(struct cmd_ps *ps, u16 macid, u8 cur_state, u8 target_state)
{
	struct phl_info_t *phl_info = ps->phl_info;
	struct rtw_ps_cap_t *ps_cap = _get_ps_cap(ps->phl_info);
	struct rtw_stats *phl_stats = &phl_info->phl_com->phl_stats;
	struct rtw_phl_stainfo_t *sta = NULL;
	bool change_state = false;
	u8 rssi = 0;

	sta = rtw_phl_get_stainfo_by_macid(phl_info, macid);
	if (sta == NULL) {
		PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): cannot get sta of macid %d.\n", __func__, macid);
		return false;
	}

	rssi = rtw_hal_get_sta_rssi(sta);

	if (target_state == PS_STATE_ENTERED) {
		/* enter only from a lighter state, with good rssi and idle tx/rx */
		if (cur_state == PS_STATE_LEAVED || cur_state == PS_STATE_PROTO) {
			if (rssi > ps_cap->lps_rssi_enter_threshold &&
				phl_stats->tx_traffic.lvl == RTW_TFC_IDLE &&
				phl_stats->rx_traffic.lvl == RTW_TFC_IDLE) {
				change_state = true;
			}
		}
	} else {
		/* leave on any of: rssi swing, bcn offset drift, weak rssi,
		 * or non-idle traffic in either direction */
		if (cur_state == PS_STATE_ENTERED || cur_state == PS_STATE_PROTO) {
			if (_chk_rssi_diff_reach_thld(ps) ||
				_chk_bcn_offset_changed(ps) ||
				rssi < ps_cap->lps_rssi_leave_threshold ||
				phl_stats->tx_traffic.lvl != RTW_TFC_IDLE ||
				phl_stats->rx_traffic.lvl != RTW_TFC_IDLE) {
				change_state = true;
			}
		}
	}

	if (change_state) {
		PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): state %s -> %s, Tx(%s), Rx(%s), RSSI(%d)\n",
				  __func__, _ps_state_to_str(cur_state), _ps_state_to_str(target_state),
				  phl_tfc_lvl_to_str(phl_stats->tx_traffic.lvl),
				  phl_tfc_lvl_to_str(phl_stats->rx_traffic.lvl), rssi);
	}

	return change_state;
}
248 
/* Record the reason string for the last enter (enter != 0) or leave. */
static void _set_ps_rson(struct cmd_ps *ps, u8 enter, char *rson)
{
	void *d = phl_to_drvpriv(ps->phl_info);
	char *dst = enter ? ps->enter_rson : ps->leave_rson;

	/* clear first so the stored reason is always NUL-padded;
	 * NOTE(review): copies MAX_CMD_PS_RSON_LENGTH bytes from rson,
	 * which presumably points at a buffer of at least that size */
	_os_mem_set(d, dst, 0, MAX_CMD_PS_RSON_LENGTH);
	_os_mem_cpy(d, dst, rson, MAX_CMD_PS_RSON_LENGTH);
}
259 
260 /**
261  * Leave power saving
262  * return RTW_PHL_STATUS_SUCCESS if leave ps ok
263  * @ps: see cmd_ps
264  * @leave_proto: whether to leave protocol
265  * @rson: the reason of leaving power saving
266  */
/**
 * Leave power saving
 * return RTW_PHL_STATUS_SUCCESS if leave ps ok
 * @ps: see cmd_ps
 * @leave_proto: whether to leave protocol (false keeps lps protocol armed)
 * @rson: the reason of leaving power saving
 */
enum rtw_phl_status _leave_ps(struct cmd_ps *ps, bool leave_proto, char *rson)
{
	struct phl_info_t *phl_info = ps->phl_info;
	enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
	struct ps_cfg cfg = {0};

	if (ps->ps_state == PS_STATE_LEAVED) {
		PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): not in power saving.\n", __func__);
		return RTW_PHL_STATUS_SUCCESS;
	}

	cfg.cur_pwr_lvl = ps->cur_pwr_lvl;

	PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): leave %s (macid %d).\n",
			  __func__, phl_ps_ps_mode_to_str(ps->ps_mode), ps->macid);

	if (ps->ps_mode == PS_MODE_LPS) {
		if (!leave_proto) {
			/* power-only leave: nothing to undo when only protocol is active */
			if (ps->ps_state == PS_STATE_PROTO) {
				PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): in lps protocal only.\n", __func__);
				return RTW_PHL_STATUS_SUCCESS;
			}
			cfg.pwr_cfg = true;
			cfg.proto_cfg = false;
		} else {
			if (ps->ps_state == PS_STATE_PROTO) {
				cfg.pwr_cfg = false;
				cfg.proto_cfg = true;
			} else {
				cfg.pwr_cfg = true;
				cfg.proto_cfg = true;
			}
		}
		cfg.macid = ps->macid;
		cfg.token = &ps->null_token;
		cfg.pwr_lvl = PS_PWR_LVL_PWRON;
		cfg.ps_mode = ps->ps_mode;
	} else if (ps->ps_mode == PS_MODE_IPS) {
		cfg.macid = ps->macid;
		cfg.pwr_lvl = PS_PWR_LVL_PWRON;
		/* when fully powered off there is no protocol state to restore */
		cfg.proto_cfg = (cfg.cur_pwr_lvl == PS_PWR_LVL_PWROFF) ? false : true;
		cfg.pwr_cfg = true;
		cfg.ps_mode = ps->ps_mode;
	} else {
		PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS_CMD], %s(): unknown ps mode!\n", __func__);
		/* fix: previously fell through and called phl_ps_leave_ps()
		 * with a zero-initialized cfg; bail out instead */
		return RTW_PHL_STATUS_FAILURE;
	}

	_set_ps_rson(ps, false, rson);

	status = phl_ps_leave_ps(phl_info, &cfg);

	if (status == RTW_PHL_STATUS_SUCCESS) {
		ps->cur_pwr_lvl = cfg.pwr_lvl;
		if (ps->ps_mode == PS_MODE_LPS) {
			/* power-only leave keeps lps protocol armed (PS_STATE_PROTO) */
			ps->ps_mode = (leave_proto != true) ? PS_MODE_LPS : PS_MODE_NONE;
			ps->ps_state = (leave_proto != true) ? PS_STATE_PROTO : PS_STATE_LEAVED;
			ps->macid = (leave_proto != true) ? ps->macid : PS_MACID_NONE;
		} else {
			ps->ps_mode = PS_MODE_NONE;
			ps->ps_state = PS_STATE_LEAVED;
			ps->macid = PS_MACID_NONE;
		}
		PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): macid %d leave ps success, reason(%s).\n", __func__, cfg.macid, rson);
		PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): ps mode(%s), pwr lvl(%s), ps state(%s)\n",
				__func__, phl_ps_ps_mode_to_str(ps->ps_mode),
				phl_ps_pwr_lvl_to_str(ps->cur_pwr_lvl), _ps_state_to_str(ps->ps_state));
	} else {
		PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS_CMD], %s(): leave ps fail! reason(%s).\n", __func__, rson);
	}

	return status;
}
339 
340 /**
341  * Enter power saving
342  * return RTW_PHL_STATUS_SUCCESS if enter ps ok
343  * @ps: see cmd_ps
344  * @ps_mode: target ps mode to enter
345  * @macid: target macid to enter lps
346  * @rson: the reason of entering power saving
347  */
/**
 * Enter power saving
 * return RTW_PHL_STATUS_SUCCESS if enter ps ok
 * @ps: see cmd_ps
 * @ps_mode: target ps mode to enter (PS_MODE_LPS or PS_MODE_IPS)
 * @macid: target macid to enter lps (PS_MACID_NONE for full power-off ips)
 * @rson: the reason of entering power saving
 */
enum rtw_phl_status _enter_ps(struct cmd_ps *ps, u8 ps_mode, u16 macid, char *rson)
{
	enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
	struct ps_cfg cfg = {0};
	struct rtw_ps_cap_t *ps_cap = _get_ps_cap(ps->phl_info);

	if (ps->ps_state == PS_STATE_ENTERED) {
		PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): already in power saving.\n", __func__);
		return RTW_PHL_STATUS_SUCCESS;
	}

	cfg.cur_pwr_lvl = ps->cur_pwr_lvl;

	PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): enter %s (macid %d).\n",
			  __func__, phl_ps_ps_mode_to_str(ps_mode), macid);

	if (ps_mode == PS_MODE_LPS) {
		/* protocol may already be armed from a previous power-only leave */
		cfg.proto_cfg = (ps->ps_state == PS_STATE_PROTO) ? false : true;
		cfg.pwr_cfg = true;
		cfg.macid = macid;
		cfg.token = &ps->null_token;
		cfg.pwr_lvl = phl_ps_judge_pwr_lvl(ps_cap->lps_cap, ps_mode, true);
		cfg.ps_mode = ps_mode;
		cfg.awake_interval = ps_cap->lps_awake_interval;
		cfg.listen_bcn_mode = ps_cap->lps_listen_bcn_mode;
		cfg.smart_ps_mode = ps_cap->lps_smart_ps_mode;
	} else if (ps_mode == PS_MODE_IPS) {
		cfg.macid = macid;
		/* no sta at all: full power-off, otherwise judge from ips cap */
		if (macid == PS_MACID_NONE)
			cfg.pwr_lvl = PS_PWR_LVL_PWROFF;
		else
			cfg.pwr_lvl = phl_ps_judge_pwr_lvl(ps_cap->ips_cap, ps_mode, true);
		cfg.pwr_cfg = true;
		cfg.proto_cfg = (cfg.pwr_lvl == PS_PWR_LVL_PWROFF) ? false : true;
		cfg.ps_mode = ps_mode;
	} else {
		PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS_CMD], %s(): unknown ps mode!\n", __func__);
		/* fix: previously fell through and called phl_ps_enter_ps()
		 * with a zero-initialized cfg; bail out instead */
		return RTW_PHL_STATUS_FAILURE;
	}

	_set_ps_rson(ps, true, rson);

	status = phl_ps_enter_ps(ps->phl_info, &cfg);

	if (status == RTW_PHL_STATUS_SUCCESS) {
		ps->cur_pwr_lvl = cfg.pwr_lvl;
		ps->ps_mode = cfg.ps_mode;
		ps->macid = cfg.macid;
		ps->ps_state = PS_STATE_ENTERED;
		PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): macid %d enter ps success, reason(%s).\n", __func__, ps->macid, rson);
		PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): ps mode(%s), pwr lvl(%s), ps state(%s)\n",
					__func__, phl_ps_ps_mode_to_str(ps->ps_mode),
					phl_ps_pwr_lvl_to_str(ps->cur_pwr_lvl), _ps_state_to_str(ps->ps_state));
	} else {
		PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS_CMD], %s(): enter ps fail! reason(%s).\n", __func__, rson);
	}

	return status;
}
406 
_pop_idle_req(struct cmd_ps * ps,struct pwr_req ** req)407 static bool _pop_idle_req(struct cmd_ps *ps, struct pwr_req **req)
408 {
409 	void *d = phl_to_drvpriv(ps->phl_info);
410 	_os_list *new_req = NULL;
411 	bool ret = false;
412 
413 	(*req) = NULL;
414 	if (pq_pop(d, &(ps->req_idle_q), &new_req, _first, _bh)) {
415 		(*req) = (struct pwr_req *)new_req;
416 		ret = true;
417 	} else {
418 		ret = false;
419 	}
420 
421 	PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): remaining idle req cnt %d.\n", __func__, ps->req_idle_q.cnt);
422 
423 	return ret;
424 }
425 
_push_idle_req(struct cmd_ps * ps,struct pwr_req * req)426 static void _push_idle_req(struct cmd_ps *ps, struct pwr_req *req)
427 {
428 	void *d = phl_to_drvpriv(ps->phl_info);
429 
430 	pq_push(d, &(ps->req_idle_q), &(req->list), _tail, _bh);
431 
432 	PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): remaining idle req cnt %d.\n", __func__, ps->req_idle_q.cnt);
433 }
434 
_pop_busy_req(struct cmd_ps * ps,struct pwr_req ** req)435 static bool _pop_busy_req(struct cmd_ps *ps, struct pwr_req **req)
436 {
437 	void *d = phl_to_drvpriv(ps->phl_info);
438 	_os_list *new_req = NULL;
439 	bool ret = false;
440 
441 	(*req) = NULL;
442 	if (pq_pop(d, &(ps->req_busy_q), &new_req, _tail, _bh)) {
443 		(*req) = (struct pwr_req *)new_req;
444 		ret = true;
445 	} else {
446 		ret = false;
447 	}
448 
449 	PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): current busy req cnt %d.\n", __func__, ps->req_busy_q.cnt);
450 
451 	return ret;
452 }
453 
_push_busy_req(struct cmd_ps * ps,struct pwr_req * req)454 static void _push_busy_req(struct cmd_ps *ps, struct pwr_req *req)
455 {
456 	void *d = phl_to_drvpriv(ps->phl_info);
457 
458 	pq_push(d, &(ps->req_busy_q), &(req->list), _tail, _bh);
459 
460 	PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): current busy req cnt %d.\n", __func__, ps->req_busy_q.cnt);
461 }
462 
/* Retire the most recent power request and recycle its node.
 * A mismatch between the popped request and evt_id is only warned about. */
static void _cancel_pwr_req(struct cmd_ps *ps, u16 evt_id)
{
	struct pwr_req *req = NULL;

	if (!_pop_busy_req(ps, &req)) {
		PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): busy queue is empty.\n", __func__);
		return;
	}

	PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): evt_id %d\n", __func__, req->evt_id);

	/* MSG_EVT_PHY_IDLE cancels whatever is outstanding: skip the match check */
	if (evt_id != MSG_EVT_PHY_IDLE && evt_id != req->evt_id)
		PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): evt_id mismatch.\n", __func__);

	req->evt_id = MSG_EVT_NONE;
	_push_idle_req(ps, req);
}
481 
/* Register a new power request for evt_id.
 * Returns RTW_PHL_STATUS_RESOURCE when the request pool is exhausted. */
static enum rtw_phl_status _add_pwr_req(struct cmd_ps *ps, u16 evt_id)
{
	struct pwr_req *req = NULL;

	if (!_pop_idle_req(ps, &req)) {
		PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS_CMD], %s(): idle queue is empty.\n", __func__);
		return RTW_PHL_STATUS_RESOURCE;
	}

	req->evt_id = evt_id;

	PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): evt_id %d\n", __func__, evt_id);

	_push_busy_req(ps, req);

	return RTW_PHL_STATUS_SUCCESS;
}
500 
_init_pwr_req_q(struct cmd_ps * ps)501 static void _init_pwr_req_q(struct cmd_ps *ps)
502 {
503 	void *d = phl_to_drvpriv(ps->phl_info);
504 	u8 i = 0;
505 
506 	pq_init(d, &ps->req_busy_q);
507 	pq_init(d, &ps->req_idle_q);
508 
509 	_os_mem_set(d, ps->req_pool, 0,
510 		    sizeof(struct pwr_req) * MAX_PWE_REQ_NUM);
511 
512 	for (i = 0; i < MAX_PWE_REQ_NUM; i++) {
513 		pq_push(d, &(ps->req_idle_q),
514 			&(ps->req_pool[i].list), _tail, _bh);
515 	}
516 }
517 
_reset_pwr_req_q(struct cmd_ps * ps)518 static void _reset_pwr_req_q(struct cmd_ps *ps)
519 {
520 	void *d = phl_to_drvpriv(ps->phl_info);
521 	u8 i = 0;
522 
523 	pq_reset(d, &ps->req_busy_q, _bh);
524 	pq_reset(d, &ps->req_idle_q, _bh);
525 
526 	_os_mem_set(d, ps->req_pool, 0,
527 		    sizeof(struct pwr_req) * MAX_PWE_REQ_NUM);
528 
529 	for (i = 0; i < MAX_PWE_REQ_NUM; i++) {
530 		pq_push(d, &(ps->req_idle_q),
531 			&(ps->req_pool[i].list), _tail, _bh);
532 	}
533 }
534 
_deinit_pwr_req_q(struct cmd_ps * ps)535 static void _deinit_pwr_req_q(struct cmd_ps *ps)
536 {
537 	void *d = phl_to_drvpriv(ps->phl_info);
538 
539 	pq_deinit(d, &ps->req_busy_q);
540 	pq_deinit(d, &ps->req_idle_q);
541 }
542 
/* Populate the software ps capability with conservative defaults:
 * everything disabled, rf on, stock lps thresholds. */
static void _init_ps_dflt_sw_cap(struct cmd_ps *ps)
{
	struct rtw_ps_cap_t *sw_cap = _get_ps_sw_cap(ps->phl_info);

	sw_cap->init_rf_state = RTW_RF_ON;
	sw_cap->init_rt_stop_rson = PS_RT_RSON_NONE;
	sw_cap->leave_fail_act = PS_LEAVE_FAIL_ACT_NONE;

	/* inactive ps (normal and wowlan) off by default */
	sw_cap->ips_en = PS_OP_MODE_DISABLED;
	sw_cap->ips_cap = 0;
	sw_cap->ips_wow_en = PS_OP_MODE_DISABLED;
	sw_cap->ips_wow_cap = 0;

	/* leisure ps off by default; rssi gates: enter above 40, leave below 35,
	 * re-evaluate on a swing of 5 */
	sw_cap->lps_en = PS_OP_MODE_DISABLED;
	sw_cap->lps_cap = 0;
	sw_cap->lps_awake_interval = 0;
	sw_cap->lps_listen_bcn_mode = RTW_LPS_RLBM_MIN;
	sw_cap->lps_smart_ps_mode = RTW_LPS_TRX_PWR0;
	sw_cap->lps_rssi_enter_threshold = 40;
	sw_cap->lps_rssi_leave_threshold = 35;
	sw_cap->lps_rssi_diff_threshold = 5;

	sw_cap->lps_wow_en = PS_OP_MODE_DISABLED;
	sw_cap->lps_wow_cap = 0;
	sw_cap->lps_wow_awake_interval = 0;
	sw_cap->lps_wow_listen_bcn_mode = RTW_LPS_RLBM_MAX;
	sw_cap->lps_wow_smart_ps_mode = RTW_LPS_TRX_PWR0;
}
571 
_cmd_ps_timer_cb(void * context)572 static void _cmd_ps_timer_cb(void *context)
573 {
574 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
575 	struct cmd_ps *ps = (struct cmd_ps *)context;
576 	struct phl_msg msg = {0};
577 	struct phl_msg_attribute attr = {0};
578 
579 	PHL_DBG("[PS_CMD], %s(): \n", __func__);
580 
581 	SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_POWER_MGNT);
582 	SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_PS_PERIOD_CHK);
583 	msg.band_idx = HW_BAND_0;
584 
585 	pstatus = phl_disp_eng_send_msg(ps->phl_info, &msg, &attr, NULL);
586 	if (pstatus != RTW_PHL_STATUS_SUCCESS) {
587 		PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS_CMD], %s(): fail to send MSG_EVT_PS_PERIOD_CHK.\n", __func__);
588 	}
589 }
590 
_ps_common_info_init(struct phl_info_t * phl_info)591 static void _ps_common_info_init(struct phl_info_t *phl_info)
592 {
593 	struct phl_ps_info *ps_info = &phl_info->ps_info;
594 
595 	if (!ps_info->init) {
596 		PHL_INFO("[PS_CMD], %s(): \n", __func__);
597 
598 		_os_atomic_set(phl_to_drvpriv(phl_info),
599 					   &phl_info->ps_info.tx_ntfy,
600 					   0);
601 		ps_info->init = true;
602 	}
603 }
604 
_ps_common_info_deinit(struct phl_info_t * phl_info)605 static void _ps_common_info_deinit(struct phl_info_t *phl_info)
606 {
607 	struct phl_ps_info *ps_info = &phl_info->ps_info;
608 
609 	if (ps_info->init) {
610 		PHL_INFO("[PS_CMD], %s(): \n", __func__);
611 
612 		_os_atomic_set(phl_to_drvpriv(phl_info),
613 				   	   &phl_info->ps_info.tx_ntfy,
614 					   0);
615 		ps_info->init = false;
616 	}
617 }
618 
_ps_mdl_init(void * phl,void * dispr,void ** priv)619 static enum phl_mdl_ret_code _ps_mdl_init(void *phl, void *dispr, void **priv)
620 {
621 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
622 	struct cmd_ps *ps = NULL;
623 
624 	PHL_INFO("[PS_CMD], %s(): \n", __func__);
625 
626 	if (priv == NULL)
627 		return MDL_RET_FAIL;
628 
629 	(*priv) = NULL;
630 	ps = (struct cmd_ps *)_os_mem_alloc(phl_to_drvpriv(phl_info),
631 					       sizeof(struct cmd_ps));
632 	if (ps == NULL) {
633 		PHL_ERR("[PS_CMD], %s(): allocate cmd ps fail.\n", __func__);
634 		return MDL_RET_FAIL;
635 	}
636 
637 	_os_mem_set(phl_to_drvpriv(phl_info), ps, 0, sizeof(struct cmd_ps));
638 
639 	ps->phl_info = phl_info;
640 	ps->dispr = dispr;
641 	(*priv) = (void *)ps;
642 	ps->cur_pwr_lvl = PS_PWR_LVL_PWRON;
643 	ps->ps_state = PS_STATE_LEAVED;
644 	ps->ps_mode = PS_MODE_NONE;
645 	ps->rej_pwr_req = false;
646 	ps->rt_stop_rson = PS_RT_RSON_NONE;
647 
648 	_os_init_timer(phl_to_drvpriv(phl_info),
649 	               &ps->ps_timer,
650 	               _cmd_ps_timer_cb,
651 	               ps,
652 	               "phl_cmd_ps_timer");
653 
654 	_init_pwr_req_q(ps);
655 
656 	_init_ps_dflt_sw_cap(ps);
657 
658 	_ps_common_info_init(phl_info);
659 
660 	return MDL_RET_SUCCESS;
661 }
662 
_ps_mdl_deinit(void * dispr,void * priv)663 static void _ps_mdl_deinit(void *dispr, void *priv)
664 {
665 	struct cmd_ps *ps = (struct cmd_ps *)priv;
666 
667 	PHL_INFO("[PS_CMD], %s(): \n", __func__);
668 
669 	_os_release_timer(phl_to_drvpriv(ps->phl_info), &ps->ps_timer);
670 
671 	_deinit_pwr_req_q(ps);
672 
673 	_ps_common_info_deinit(ps->phl_info);
674 
675 	if (ps != NULL)
676 		_os_mem_free(phl_to_drvpriv(ps->phl_info), ps, sizeof(struct cmd_ps));
677 }
678 
/* Dump the effective ps capability to the info log (diagnostics only). */
static void _dump_ps_cap(struct cmd_ps *ps)
{
	struct rtw_ps_cap_t *ps_cap = NULL;

	ps_cap = _get_ps_cap(ps->phl_info);

	PHL_INFO("[PS_CMD], %s(): \n", __func__);
	PHL_INFO("[PS_CMD], init_rf_state: %d\n", ps_cap->init_rf_state);
	PHL_INFO("[PS_CMD], init_rt_stop_rson: 0x%x\n", ps_cap->init_rt_stop_rson);
	PHL_INFO("[PS_CMD], leave_fail_act: 0x%x\n", ps_cap->leave_fail_act);
	PHL_INFO("[PS_CMD], ips_en: %d\n", ps_cap->ips_en);
	PHL_INFO("[PS_CMD], ips_cap: %d\n", ps_cap->ips_cap);
	PHL_INFO("[PS_CMD], ips_wow_en: %d\n", ps_cap->ips_wow_en);
	PHL_INFO("[PS_CMD], ips_wow_cap: %d\n", ps_cap->ips_wow_cap);
	PHL_INFO("[PS_CMD], lps_en: %d\n", ps_cap->lps_en);
	PHL_INFO("[PS_CMD], lps_cap: %d\n", ps_cap->lps_cap);
	PHL_INFO("[PS_CMD], lps_awake_interval: %d\n", ps_cap->lps_awake_interval);
	PHL_INFO("[PS_CMD], lps_listen_bcn_mode: %d\n", ps_cap->lps_listen_bcn_mode);
	PHL_INFO("[PS_CMD], lps_smart_ps_mode: %d\n", ps_cap->lps_smart_ps_mode);
	PHL_INFO("[PS_CMD], lps_rssi_enter_threshold: %d\n", ps_cap->lps_rssi_enter_threshold);
	PHL_INFO("[PS_CMD], lps_rssi_leave_threshold: %d\n", ps_cap->lps_rssi_leave_threshold);
	PHL_INFO("[PS_CMD], lps_rssi_diff_threshold: %d\n", ps_cap->lps_rssi_diff_threshold);
	PHL_INFO("[PS_CMD], lps_wow_en: %d\n", ps_cap->lps_wow_en);
	PHL_INFO("[PS_CMD], lps_wow_cap: %d\n", ps_cap->lps_wow_cap);
	PHL_INFO("[PS_CMD], lps_wow_awake_interval: %d\n", ps_cap->lps_wow_awake_interval);
	PHL_INFO("[PS_CMD], lps_wow_listen_bcn_mode: %d\n", ps_cap->lps_wow_listen_bcn_mode);
	PHL_INFO("[PS_CMD], lps_wow_smart_ps_mode: %d\n", ps_cap->lps_wow_smart_ps_mode);
	PHL_INFO("[PS_CMD], lps_pause_tx: %d\n", ps_cap->lps_pause_tx);
}
708 
_leave_success_hdlr(struct cmd_ps * ps)709 static void _leave_success_hdlr(struct cmd_ps *ps)
710 {
711 	struct phl_data_ctl_t ctl = {0};
712 
713 	if (ps->stop_datapath) {
714 		/* resume tx datapath */
715 		ctl.id = PHL_MDL_POWER_MGNT;
716 		ctl.cmd = PHL_DATA_CTL_SW_TX_RESUME;
717 		phl_data_ctrler(ps->phl_info, &ctl, NULL);
718 		ps->stop_datapath = false;
719 	}
720 }
721 
_leave_fail_hdlr(struct cmd_ps * ps)722 static void _leave_fail_hdlr(struct cmd_ps *ps)
723 {
724 	struct rtw_ps_cap_t *ps_cap = _get_ps_cap(ps->phl_info);
725 
726 	PHL_WARN("[PS_CMD], %s(): action 0x%x\n", __func__, ps_cap->leave_fail_act);
727 
728 	/* reject all power operation */
729 	if (ps_cap->leave_fail_act & PS_LEAVE_FAIL_ACT_REJ_PWR)
730 		ps->rej_pwr_req = true;
731 
732 	/* L2 should be the last one */
733 	if (ps_cap->leave_fail_act & PS_LEAVE_FAIL_ACT_L2)
734 		phl_ser_send_msg(ps->phl_info, RTW_PHL_SER_L2_RESET);
735 }
736 
_ps_mdl_start(void * dispr,void * priv)737 static enum phl_mdl_ret_code _ps_mdl_start(void *dispr, void *priv)
738 {
739 	enum phl_mdl_ret_code ret = MDL_RET_SUCCESS;
740 	struct cmd_ps *ps = (struct cmd_ps *)priv;
741 	struct rtw_ps_cap_t *ps_cap = _get_ps_cap(ps->phl_info);
742 	u8 idx = 0;
743 
744 	ps->rt_stop_rson = ps_cap->init_rt_stop_rson;
745 
746 	_reset_pwr_req_q(ps);
747 
748 	phl_dispr_get_idx(dispr, &idx);
749 
750 	if (idx == 0) {
751 		_dump_ps_cap(ps);
752 		PHL_INFO("[PS_CMD], %s(): init rf state %d, reject pwr req %d\n",
753 				__func__, ps_cap->init_rf_state, ps->rej_pwr_req);
754 		if (ps_cap->init_rf_state == RTW_RF_OFF)
755 			ps->rej_pwr_req = true;
756 		if (ps->rej_pwr_req == true)
757 			_enter_ps(ps, PS_MODE_IPS, PS_MACID_NONE, "mdl start with rf off");
758 		_os_set_timer(phl_to_drvpriv(ps->phl_info), &ps->ps_timer, CMD_PS_TIMER_PERIOD);
759 	}
760 	return ret;
761 }
762 
_ps_mdl_stop(void * dispr,void * priv)763 static enum phl_mdl_ret_code _ps_mdl_stop(void *dispr, void *priv)
764 {
765 	enum phl_mdl_ret_code ret = MDL_RET_SUCCESS;
766 	struct cmd_ps *ps = (struct cmd_ps *)priv;
767 
768 	if (ps->ps_state == PS_STATE_ENTERED) {
769 		PHL_WARN("[PS_CMD], %s(): module stop in power saving!\n", __func__);
770 		_leave_ps(ps, true, "mdl stop");
771 	}
772 
773 	_os_cancel_timer(phl_to_drvpriv(ps->phl_info), &ps->ps_timer);
774 
775 	PHL_INFO("[PS_CMD], %s(): \n", __func__);
776 
777 	return ret;
778 }
779 
_chk_role_all_no_link(struct cmd_ps * ps)780 static bool _chk_role_all_no_link(struct cmd_ps *ps)
781 {
782 	struct phl_info_t *phl_info = ps->phl_info;
783 	u8 role_idx = 0;
784 	bool ret = true;
785 
786 	for (role_idx = 0; role_idx < MAX_WIFI_ROLE_NUMBER; role_idx++) {
787 		if (phl_info->phl_com->wifi_roles[role_idx].active == false)
788 			continue;
789 		if (phl_info->phl_com->wifi_roles[role_idx].mstate != MLME_NO_LINK) {
790 			ret = false;
791 			break;
792 		}
793 	}
794 
795 	return ret;
796 }
797 
/* Find the first active STATION role eligible for target_mode:
 * a linked role for lps, an unlinked one for ips. NULL if none. */
static struct rtw_wifi_role_t *_get_role_of_ps_permitted(struct cmd_ps *ps, u8 target_mode)
{
	struct rtw_phl_com_t *phl_com = ps->phl_info->phl_com;
	struct rtw_wifi_role_t *role;
	u8 i;

	for (i = 0; i < MAX_WIFI_ROLE_NUMBER; i++) {
		role = &phl_com->wifi_roles[i];
		if (!role->active || role->type != PHL_RTYPE_STATION)
			continue;
		if (target_mode == PS_MODE_LPS && role->mstate == MLME_LINKED)
			return role;
		if (target_mode == PS_MODE_IPS && role->mstate == MLME_NO_LINK)
			return role;
	}

	return NULL;
}
819 
820 /**
821  * go through all wifi roles and check whether input
822  * ps mode desired is allowed with existing wroles
823  * @ps: see cmd_ps
824  * @target_mode: the desired ps mode (lps or ips)
825  * @macid: target macid to enter lps
826  */
/**
 * go through all wifi roles and check whether input
 * ps mode desired is allowed with existing wroles
 * On success also caches the chosen sta in ps->sta.
 * @ps: see cmd_ps
 * @target_mode: the desired ps mode (lps or ips)
 * @macid: out - target macid to enter ps with
 */
static bool _chk_wrole_with_ps_mode(struct cmd_ps *ps,
		u8 target_mode, u16 *macid)
{
	struct rtw_wifi_role_t *role = NULL;
	bool ret = false;
	struct rtw_phl_stainfo_t *sta = NULL;

	/* any active AP or GC role vetoes ps entirely */
	if (ps->mr_info.ap_active || ps->mr_info.gc_active) {
		PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): ap %d or gc %d, is active.\n", __func__,
				ps->mr_info.ap_active, ps->mr_info.gc_active);
		return false;
	}

	if (target_mode == PS_MODE_IPS) {
		/* ips additionally requires every active role to be unlinked */
		if (_chk_role_all_no_link(ps)) {
			role = _get_role_of_ps_permitted(ps, PS_MODE_IPS);
			if (role == NULL) {
				PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): there is no suitable role to enter ips.\n", __func__);
				return false;
			}
			sta = rtw_phl_get_stainfo_self(ps->phl_info, role);
			if (sta == NULL) {
				PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): cannot get sta info.\n", __func__);
				return false;
			}
			PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): role id to enter ips (%d).\n", __func__, role->id);
			ps->sta = sta;
			*macid = sta->macid;
			ret = true;
		}
	} else if (target_mode == PS_MODE_LPS) {
		/* lps needs one linked station role; mirror of the ips arm above */
		role = _get_role_of_ps_permitted(ps, PS_MODE_LPS);
		if (role == NULL) {
			PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): there is no suitable role to enter lps.\n", __func__);
			return false;
		}
		sta = rtw_phl_get_stainfo_self(ps->phl_info, role);
		if (sta == NULL) {
			PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): cannot get sta info.\n", __func__);
			return false;
		}
		PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): role id to enter lps (%d).\n", __func__, role->id);
		ps->sta = sta;
		*macid = sta->macid;
		ret = true;
	}

	PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): decide enter ps(%s), target mode(%s).\n",
				__func__, (ret) ? "Yes" : "No", phl_ps_ps_mode_to_str(target_mode));

	return ret;
}
879 
_stop_datapath(struct cmd_ps * ps)880 static enum rtw_phl_status _stop_datapath(struct cmd_ps *ps)
881 {
882 	struct phl_data_ctl_t ctl = {0};
883 
884 	/* stop tx datapath */
885 	ctl.id = PHL_MDL_POWER_MGNT;
886 	ctl.cmd = PHL_DATA_CTL_SW_TX_PAUSE;
887 
888 	if (phl_data_ctrler(ps->phl_info, &ctl, NULL) == RTW_PHL_STATUS_SUCCESS) {
889 		ps->stop_datapath = true;
890 		return RTW_PHL_STATUS_SUCCESS;
891 	}
892 
893 	return RTW_PHL_STATUS_FAILURE;
894 }
895 
_is_datapath_active(struct cmd_ps * ps)896 static bool _is_datapath_active(struct cmd_ps *ps)
897 {
898 	struct phl_info_t *phl_info = ps->phl_info;
899 
900 	return (_os_atomic_read(phl_to_drvpriv(phl_info), &phl_info->phl_sw_tx_sts)
901 			!= PHL_TX_STATUS_SW_PAUSE) ? true : false;
902 }
903 
904 /**
905  * check current capability of power saving
906  * return able to enter ps or not
907  * @ps: see cmd_ps
908  * @mode: the target ps mode to be check
909  */
static bool _chk_ps_cap(struct cmd_ps *ps, u8 mode)
{
	struct rtw_ps_cap_t *ps_cap = _get_ps_cap(ps->phl_info);
	u8 op_mode;

	PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): mode(%s), lps_en(%d), ips_en(%d), runtime stop reason(0x%x).\n",
			__func__, phl_ps_ps_mode_to_str(mode), ps_cap->lps_en, ps_cap->ips_en, ps->rt_stop_rson);

	/* pick the operating mode that matches the requested ps mode */
	switch (mode) {
	case PS_MODE_LPS:
		op_mode = ps_cap->lps_en;
		break;
	case PS_MODE_IPS:
		op_mode = ps_cap->ips_en;
		break;
	default:
		PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): unknown ps mode.\n", __func__);
		return false;
	}

	/* force-enabled always wins; AUTO only when no runtime stop reason */
	if (op_mode == PS_OP_MODE_FORCE_ENABLED)
		return true;
	if (op_mode == PS_OP_MODE_AUTO && ps->rt_stop_rson == PS_RT_RSON_NONE)
		return true;

	/* PS_OP_MODE_DISABLED (or AUTO with stop reason) falls through here */
	return false;
}
947 
948 /**
949  * check the condition of ips
950  * return whether to enter ips or not
951  * @ps: see cmd_ps
952  */
static bool _chk_ips_enter(struct cmd_ps *ps, u16 *macid)
{
	/* never enter ps while the device is resuming */
	if (TEST_STATUS_FLAG(ps->phl_info->phl_com->dev_state, RTW_DEV_RESUMING)) {
		PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): resume in progress.\n", __func__);
		return false;
	}

	/* power requests globally rejected (e.g. rf off) */
	if (ps->rej_pwr_req) {
		PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): reject pwr req.\n", __func__);
		return false;
	}

	/* ips capability check */
	if (!_chk_ps_cap(ps, PS_MODE_IPS)) {
		PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): ips is not allowed.\n", __func__);
		return false;
	}

	/* a foreground module is running */
	if (!phl_disp_eng_is_fg_empty(ps->phl_info, HW_BAND_0)) {
		PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): fg exist.\n", __func__);
		return false;
	}

	/* outstanding power requests still pending */
	if (ps->req_busy_q.cnt) {
		PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): req q is not empty.\n", __func__);
		return false;
	}

	/* btc is holding power */
	if (ps->btc_req_pwr) {
		PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): btc req pwr.\n", __func__);
		return false;
	}

	/* finally ask the role layer whether ips makes sense; fills *macid */
	if (!_chk_wrole_with_ps_mode(ps, PS_MODE_IPS, macid)) {
		PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): no need to enter ips.\n", __func__);
		return false;
	}

	return true;
}
992 
993 /**
994  * check the condition of lps
995  * return whether to enter lps or not
996  * @ps: see cmd_ps
997  */
static bool _chk_lps_enter(struct cmd_ps *ps, u16 *macid)
{
	struct rtw_ps_cap_t *ps_cap = _get_ps_cap(ps->phl_info);

	/* check enter lps or not */

	/* never enter ps while the device is resuming from suspend */
	if (TEST_STATUS_FLAG(ps->phl_info->phl_com->dev_state, RTW_DEV_RESUMING)) {
		PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): resume in progress.\n", __func__);
		return false;
	}

	/* all power requests are currently rejected (e.g. after rf off) */
	if (ps->rej_pwr_req == true) {
		PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): reject pwr req.\n", __func__);
		return false;
	}

	/* check capability */
	if (!_chk_ps_cap(ps, PS_MODE_LPS)) {
		PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): lps is not allowed.\n", __func__);
		return false;
	}

	/* check fg module */
	if (!phl_disp_eng_is_fg_empty(ps->phl_info, HW_BAND_0)) {
		PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): fg exist.\n", __func__);
		return false;
	}

	/* ref cnt */
	if (ps->req_busy_q.cnt) {
		PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): req q is not empty.\n", __func__);
		return false;
	}

	/* btc */
	if (ps->btc_req_pwr) {
		PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): btc req pwr.\n", __func__);
		return false;
	}

	/* avoid bouncing: ps was just left in this watchdog's pre-phase */
	if (ps->wdg_leave_ps == true) {
		PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): just leave ps in watchdog prephase.\n", __func__);
		return false;
	}

	/* check wifi role; on success *macid is the sta to enter lps with */
	if (!_chk_wrole_with_ps_mode(ps, PS_MODE_LPS, macid)) {
		PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): no need to enter lps.\n", __func__);
		return false;
	}

	/* lps */
	if (_lps_state_judge_changed(ps, *macid, ps->ps_state, PS_STATE_ENTERED)) {
		PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): lps state changed, going to enter...\n", __func__);
		/* check data path stop or not; when lps_pause_tx is set, tx
		 * must be SW-paused (pausing it here if needed) before entry */
		if (ps_cap->lps_pause_tx) {
			if (!_is_datapath_active(ps)) {
				return true;
			} else {
				if (_stop_datapath(ps) == RTW_PHL_STATUS_SUCCESS)
					return true;
			}
		} else {
			return true;
		}
	}

	return false;
}
1067 
/* Dump a snapshot of the cmd-ps state to the trace log (watchdog post-phase). */
static void _ps_watchdog_info_dump(struct cmd_ps *ps)
{
	PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "========== CMD PS Info ========== \n");
	PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "ps mode(%s), pwr lvl(%s), ps state(%s)\n",
			phl_ps_ps_mode_to_str(ps->ps_mode),
			phl_ps_pwr_lvl_to_str(ps->cur_pwr_lvl),
			_ps_state_to_str(ps->ps_state));
	PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "req idle cnt(%d), req busy cnt(%d)\n",
			ps->req_idle_q.cnt, ps->req_busy_q.cnt);
	PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "ap active(%s), gc active(%s)\n",
			(ps->mr_info.ap_active ? "yes" : "no"), (ps->mr_info.gc_active ? "yes" : "no"));
	PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "reject all pwr req(%s), btc req pwr(%s), runtime stop reason(0x%x)\n",
			(ps->rej_pwr_req ? "yes" : "no"), (ps->btc_req_pwr ? "yes" : "no"), ps->rt_stop_rson);
	PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "========== CMD PS Info ========== \n");
}
1083 
1084 /**
1085  * pre-phase handler of watchdog
1086  * will check whether to leave lps or not
1087  * @ps: see cmd_ps
1088  */
static enum phl_mdl_ret_code
_ps_watchdog_pre_hdlr(struct cmd_ps *ps)
{
	/* check leave lps or not */

	PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): pwr lvl(%s).\n", __func__, phl_ps_pwr_lvl_to_str(ps->cur_pwr_lvl));

	/* reset the "just left ps" marker; set again below if we leave here */
	ps->wdg_leave_ps = false;

	if (ps->rej_pwr_req == true) {
		PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): reject pwr req.\n", __func__);
		return MDL_RET_CANNOT_IO;
	}

	/* under IPS the watchdog must not touch HW at all */
	if (ps->ps_mode == PS_MODE_IPS) {
		PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): under inactive ps.\n", __func__);
		return MDL_RET_CANNOT_IO;
	}

	if (ps->ps_state == PS_STATE_LEAVED) {
		PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): not in power saving.\n", __func__);
		return MDL_RET_SUCCESS;
	} else {
		/* in lps: leave it if the judge says the state should change */
		if (_lps_state_judge_changed(ps, ps->macid, ps->ps_state, PS_STATE_LEAVED)) {
			PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): lps state changed, going to leave...\n", __func__);
			if (_leave_ps(ps, true, "watchdog") == RTW_PHL_STATUS_SUCCESS) {
				ps->wdg_leave_ps = true;
				_leave_success_hdlr(ps);
				return MDL_RET_SUCCESS;
			} else {
				PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS_CMD], %s(): going to L2 reset!\n", __func__);
				_leave_fail_hdlr(ps);
				return MDL_RET_CANNOT_IO;
			}
		}
		/**
		 * if _lps_state_judge_changed decide not to leave lps and current lps
		 * state is in protocol only, i/o operation is allowable.
		 */
		if (ps->ps_state == PS_STATE_PROTO)
			return MDL_RET_SUCCESS;
		else
			return MDL_RET_CANNOT_IO;
	}
}
1134 
1135 /**
1136  * post-phase handler of watchdog
1137  * will check whether to enter lps or not
1138  * @ps: see cmd_ps
1139  */
1140 static enum phl_mdl_ret_code
_ps_watchdog_post_hdlr(struct cmd_ps * ps)1141 _ps_watchdog_post_hdlr(struct cmd_ps *ps)
1142 {
1143 	u16 macid = PS_MACID_NONE;
1144 
1145 	PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): pwr lvl(%s).\n", __func__, phl_ps_pwr_lvl_to_str(ps->cur_pwr_lvl));
1146 
1147 	if (_chk_lps_enter(ps, &macid))
1148 		_enter_ps(ps, PS_MODE_LPS, macid, "watchdog");
1149 	else if (_chk_ips_enter(ps, &macid))
1150 		_enter_ps(ps, PS_MODE_IPS, macid, "watchdog");
1151 
1152 	_ps_watchdog_info_dump(ps);
1153 
1154 	return MDL_RET_SUCCESS;
1155 }
1156 
_phy_on_msg_hdlr(struct cmd_ps * ps,struct phl_msg * msg)1157 static enum phl_mdl_ret_code _phy_on_msg_hdlr(struct cmd_ps *ps, struct phl_msg *msg)
1158 {
1159 	enum phl_mdl_ret_code ret = MDL_RET_CANNOT_IO;
1160 
1161 	if (ps->rej_pwr_req == true) {
1162 		PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): reject pwr req.\n", __func__);
1163 		return MDL_RET_CANNOT_IO;
1164 	}
1165 
1166 	if (IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
1167 		PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], MSG_EVT_PHY_ON\n");
1168 		if (_leave_ps(ps, true, "phy on") == RTW_PHL_STATUS_SUCCESS) {
1169 			_add_pwr_req(ps, MSG_EVT_ID_FIELD(msg->msg_id));
1170 			_leave_success_hdlr(ps);
1171 			ret = MDL_RET_SUCCESS;
1172 		} else {
1173 			_leave_fail_hdlr(ps);
1174 		}
1175 	} else {
1176 		ret = MDL_RET_SUCCESS;
1177 	}
1178 
1179 	return ret;
1180 }
1181 
_tx_pwr_req_msg_hdlr(struct cmd_ps * ps,struct phl_msg * msg)1182 static enum phl_mdl_ret_code _tx_pwr_req_msg_hdlr(struct cmd_ps *ps, struct phl_msg *msg)
1183 {
1184 	enum phl_mdl_ret_code ret = MDL_RET_CANNOT_IO;
1185 
1186 	if (ps->rej_pwr_req == true) {
1187 		PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): reject pwr req.\n", __func__);
1188 		return MDL_RET_CANNOT_IO;
1189 	}
1190 
1191 	if (IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
1192 		PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], MSG_EVT_TRX_PWR_REQ\n");
1193 		if (_leave_ps(ps, false, "tx req") == RTW_PHL_STATUS_SUCCESS) {
1194 			_leave_success_hdlr(ps);
1195 			ret = MDL_RET_SUCCESS;
1196 		} else {
1197 			_leave_fail_hdlr(ps);
1198 		}
1199 	} else {
1200 		ret = MDL_RET_SUCCESS;
1201 	}
1202 
1203 	return ret;
1204 }
1205 
_phy_idle_msg_hdlr(struct cmd_ps * ps,struct phl_msg * msg)1206 static enum phl_mdl_ret_code _phy_idle_msg_hdlr(struct cmd_ps *ps, struct phl_msg *msg)
1207 {
1208 	enum phl_mdl_ret_code ret = MDL_RET_CANNOT_IO;
1209 	u16 macid = PS_MACID_NONE;
1210 
1211 	if (!IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
1212 		PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], MSG_EVT_PHY_IDLE\n");
1213 		_cancel_pwr_req(ps, MSG_EVT_ID_FIELD(msg->msg_id));
1214 		/* check enter ips or not */
1215 		PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): try enter ips.\n", __func__);
1216 		if (_chk_ips_enter(ps, &macid)) {
1217 			_enter_ps(ps, PS_MODE_IPS, macid, "phy idle");
1218 		}
1219 		ret = MDL_RET_SUCCESS;
1220 	} else {
1221 		ret = MDL_RET_SUCCESS;
1222 	}
1223 
1224 	return ret;
1225 }
1226 
1227 /**
1228  * pre-phase handler of msg
1229  * leave ps and return corresponding status
1230  * @ps: see cmd_ps
1231  * @evt_id: evt id of msg
1232  */
1233 static enum phl_mdl_ret_code
_ext_msg_pre_hdlr(struct cmd_ps * ps,u16 evt_id)1234 _ext_msg_pre_hdlr(struct cmd_ps *ps, u16 evt_id)
1235 {
1236 	if (ps->rej_pwr_req == true) {
1237 		PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): reject pwr req.\n", __func__);
1238 		return MDL_RET_CANNOT_IO;
1239 	}
1240 
1241 	/* power request */
1242 	if (_leave_ps(ps, true, "ext msg req") == RTW_PHL_STATUS_SUCCESS) {
1243 		_add_pwr_req(ps, evt_id);
1244 		_leave_success_hdlr(ps);
1245 		return MDL_RET_SUCCESS;
1246 	} else {
1247 		_leave_fail_hdlr(ps);
1248 	}
1249 
1250 	return MDL_RET_CANNOT_IO;
1251 }
1252 
1253 /**
1254  * post-phase handler of msg
1255  * cancel power req and chk enter ips or not
1256  * @ps: see cmd_ps
1257  * @evt_id: evt id of msg
1258  */
1259 static enum phl_mdl_ret_code
_ext_msg_post_hdlr(struct cmd_ps * ps,u16 evt_id)1260 _ext_msg_post_hdlr(struct cmd_ps *ps, u16 evt_id)
1261 {
1262 	u16 macid = PS_MACID_NONE;
1263 
1264 	/* cancel power request */
1265 	_cancel_pwr_req(ps, evt_id);
1266 
1267 	PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): try enter ips.\n", __func__);
1268 	if (_chk_ips_enter(ps, &macid)) {
1269 		_enter_ps(ps, PS_MODE_IPS, macid, "ext msg done");
1270 	}
1271 
1272 	return MDL_RET_SUCCESS;
1273 }
1274 
/* Rebuild ps->mr_info (ap_active / gc_active) from all active wifi roles
 * after a role notification; @role is only used for logging. */
static void _ps_mr_info_upt(struct cmd_ps *ps, struct rtw_wifi_role_t *role)
{
	struct phl_info_t *phl_info = ps->phl_info;
	struct rtw_wifi_role_t *wr = NULL;
	u8 role_idx = 0;

	PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): id %d, type %d, mstate %d\n", __func__,
				role->id, role->type, role->mstate);
	#ifdef RTW_WKARD_PHL_NTFY_MEDIA_STS
	PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): is_gc %d\n", __func__, role->is_gc);
	#endif

	/* start from a clean slate and re-derive both flags below */
	_os_mem_set(phl_to_drvpriv(phl_info), &ps->mr_info, 0, sizeof(struct _ps_mr_info));

	for (role_idx = 0; role_idx < MAX_WIFI_ROLE_NUMBER; role_idx++) {
		wr = &(phl_info->phl_com->wifi_roles[role_idx]);
		if (wr->active == false)
			continue;
		#ifdef RTW_WKARD_PHL_NTFY_MEDIA_STS
		/* a linked GC-type station marks gc_active */
		if (wr->type == PHL_RTYPE_STATION) {
			if(wr->is_gc == true && wr->mstate != MLME_NO_LINK)
				ps->mr_info.gc_active = true;
		}
		#endif
		/* NOTE(review): ap_active is overwritten per AP-type role, so the
		 * last active AP-type role in the array wins — confirm intended */
		if (wr->type == PHL_RTYPE_AP || wr->type == PHL_RTYPE_VAP ||
			wr->type == PHL_RTYPE_MESH ||wr->type == PHL_RTYPE_P2P_GO)
			ps->mr_info.ap_active = (wr->mstate == MLME_NO_LINK) ? false : true;
	}

	PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): gc_active %d, ap_active %d\n", __func__,
				ps->mr_info.gc_active, ps->mr_info.ap_active);
}
1307 
/* MRC event filter: currently no MRC event is bypassed by the PS module. */
static bool _is_ignored_mrc_evt(u16 evt_id)
{
	return false;
}
1312 
/* Handle messages from the MRC module: role notifications update the
 * multi-role info; all other MRC events are answered based on power level. */
static enum phl_mdl_ret_code
_mrc_mdl_msg_hdlr(struct cmd_ps *ps, struct phl_msg *msg)
{
	enum phl_mdl_ret_code ret = MDL_RET_CANNOT_IO;
	struct rtw_role_cmd *rcmd = NULL;
	struct rtw_wifi_role_t *role = NULL;

	if (_is_ignored_mrc_evt(MSG_EVT_ID_FIELD(msg->msg_id)))
		return MDL_RET_SUCCESS;

	switch (MSG_EVT_ID_FIELD(msg->msg_id)) {
	case MSG_EVT_ROLE_NTFY:
		if (!IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
			/* only trust the payload if it has the expected size */
			if (msg->inbuf && (msg->inlen == sizeof(struct rtw_role_cmd))) {
				rcmd  = (struct rtw_role_cmd *)msg->inbuf;
				role = rcmd->wrole;
				_ps_mr_info_upt(ps, role);
			}
		}
		ret = MDL_RET_SUCCESS;
		break;
	default:
		/* any other MRC event needs I/O: refuse while not fully powered */
		if (ps->cur_pwr_lvl != PS_PWR_LVL_PWRON) {
			PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): MDL_ID(%d)-EVT_ID(%d) get cannot I/O!\n", __func__,
		         MSG_MDL_ID_FIELD(msg->msg_id), MSG_EVT_ID_FIELD(msg->msg_id));
			ret = MDL_RET_CANNOT_IO;
		} else {
			ret = MDL_RET_SUCCESS;
		}
		break;
	}

	return ret;
}
1347 
/* SER event filter: everything except the M9 L2-reset event is bypassed. */
static bool _is_ignored_ser_evt(u16 evt_id)
{
	return evt_id != MSG_EVT_SER_M9_L2_RESET;
}
1355 
/* Handle messages from the SER module; only M9 L2 reset is acted upon
 * (always accepted), everything else was filtered out already. */
static enum phl_mdl_ret_code
_ser_mdl_msg_hdlr(struct cmd_ps *ps, struct phl_msg *msg)
{
	enum phl_mdl_ret_code ret = MDL_RET_CANNOT_IO;

	if (_is_ignored_ser_evt(MSG_EVT_ID_FIELD(msg->msg_id)))
		return MDL_RET_SUCCESS;

	switch (MSG_EVT_ID_FIELD(msg->msg_id)) {
	case MSG_EVT_SER_M9_L2_RESET:
		/* L2 reset must always proceed regardless of power level */
		ret = MDL_RET_SUCCESS;
		break;
	default:
		/* unreachable with the current filter, kept defensively */
		if (ps->cur_pwr_lvl != PS_PWR_LVL_PWRON) {
			PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): MDL_ID(%d)-EVT_ID(%d) get cannot I/O!\n",
			         __func__,
			         MSG_MDL_ID_FIELD(msg->msg_id),
			         MSG_EVT_ID_FIELD(msg->msg_id));
			ret = MDL_RET_CANNOT_IO;
		} else {
			ret = MDL_RET_SUCCESS;
		}
		break;
	}

	return ret;
}
1383 
/* General-module event filter: query-only / SW-watchdog events that never
 * require waking the device are bypassed by the PS module. */
static bool _is_ignored_general_evt(u16 evt_id)
{
	switch (evt_id) {
	case MSG_EVT_GET_USB_SW_ABILITY:
	case MSG_EVT_GET_USB_SPEED:
	case MSG_EVT_SW_WATCHDOG:
		return true;
	default:
		return false;
	}
}
1401 
#ifdef RTW_WKARD_LINUX_CMD_WKARD
/* Workaround path for Linux cmd work: leave ps without adding a power
 * request (the caller does not emit a matching completion event). */
static enum phl_mdl_ret_code
_linux_cmd_wkard_hdlr(struct cmd_ps *ps)
{
	if (ps->rej_pwr_req == true) {
		PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): reject pwr req.\n", __func__);
		return MDL_RET_CANNOT_IO;
	}

	if (_leave_ps(ps, true, "linux cmd wkard") != RTW_PHL_STATUS_SUCCESS) {
		_leave_fail_hdlr(ps);
		return MDL_RET_CANNOT_IO;
	}

	_leave_success_hdlr(ps);

	return MDL_RET_SUCCESS;
}
#endif /* RTW_WKARD_LINUX_CMD_WKARD */
1422 
/* Dispatch messages from the general module: external power-consuming
 * events get the pre/post power-request treatment, watchdog and rf/phy
 * state events get dedicated handlers, everything else is answered
 * based on the current power level. */
static enum phl_mdl_ret_code
_general_mdl_msg_hdlr(struct cmd_ps *ps, struct phl_msg *msg)
{
	enum phl_mdl_ret_code ret = MDL_RET_CANNOT_IO;

	if (_is_ignored_general_evt(MSG_EVT_ID_FIELD(msg->msg_id)))
		return MDL_RET_SUCCESS;

	switch (MSG_EVT_ID_FIELD(msg->msg_id)) {
	/* events that need the device awake while they run: take a power
	 * request in pre phase, release it (and retry ips) in post phase */
	case MSG_EVT_CHG_OP_CH_DEF_START:
	case MSG_EVT_CHG_OP_CH_DEF_END:
	case MSG_EVT_SWCH_START:
	case MSG_EVT_PCIE_TRX_MIT:
	case MSG_EVT_FORCE_USB_SW:
	case MSG_EVT_GET_USB_SPEED:
	case MSG_EVT_GET_USB_SW_ABILITY:
	case MSG_EVT_CFG_AMPDU:
	case MSG_EVT_DFS_PAUSE_TX:
	case MSG_EVT_ROLE_RECOVER:
	case MSG_EVT_ROLE_SUSPEND:
	case MSG_EVT_HAL_SET_L2_LEAVE:
	case MSG_EVT_NOTIFY_HAL:
	case MSG_EVT_ISSUE_BCN:
	case MSG_EVT_STOP_BCN:
	case MSG_EVT_SEC_KEY:
	case MSG_EVT_ROLE_START:
	case MSG_EVT_ROLE_CHANGE:
	case MSG_EVT_ROLE_STOP:
	case MSG_EVT_STA_INFO_CTRL:
	case MSG_EVT_STA_MEDIA_STATUS_UPT:
	case MSG_EVT_CFG_CHINFO:
	case MSG_EVT_STA_CHG_STAINFO:
#ifdef RTW_WKARD_LINUX_CMD_WKARD
	case MSG_EVT_LINUX_CMD_WRK_TRI_PS:
#endif /* RTW_WKARD_LINUX_CMD_WKARD */
	case MSG_EVT_DBG_RX_DUMP:
		PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): MDL_ID(%d)-EVT_ID(%d) in %s phase.\n", __func__,
			MSG_MDL_ID_FIELD(msg->msg_id), MSG_EVT_ID_FIELD(msg->msg_id),
			(IS_MSG_IN_PRE_PHASE(msg->msg_id) ? "pre-protocol" : "post-protocol"));
		if (IS_MSG_IN_PRE_PHASE(msg->msg_id))
			ret = _ext_msg_pre_hdlr(ps, MSG_EVT_ID_FIELD(msg->msg_id));
		else
			ret = _ext_msg_post_hdlr(ps, MSG_EVT_ID_FIELD(msg->msg_id));
		break;
#ifdef RTW_WKARD_LINUX_CMD_WKARD
	case MSG_EVT_LINUX_CMD_WRK:
		if (IS_MSG_IN_PRE_PHASE(msg->msg_id))
			ret = _linux_cmd_wkard_hdlr(ps);
		else
			ret = MDL_RET_SUCCESS;
		break;
#endif /* RTW_WKARD_LINUX_CMD_WKARD */
	case MSG_EVT_HW_WATCHDOG:
		if (IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
			ret = _ps_watchdog_pre_hdlr(ps);
			/* refresh beacon RSSI only when I/O is permitted */
			if (ret == MDL_RET_SUCCESS) {
				ps->rssi_bcn_min = phl_get_min_rssi_bcn(ps->phl_info);
				PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): update rssi_bcn_min to %d\n", __func__, ps->rssi_bcn_min);
			}
		} else {
			ret = _ps_watchdog_post_hdlr(ps);
		}
		break;
	case MSG_EVT_RF_ON:
		/* rf on re-allows power requests (post phase only) */
		if (!IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
			PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], MSG_EVT_RF_ON\n");
			ps->rej_pwr_req = false;
		}
		ret = MDL_RET_SUCCESS;
		break;
	case MSG_EVT_RF_OFF:
		/* rf off: wake first so the transition is clean, then lock out
		 * further power requests and drop into ips */
		if (!IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
			PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], MSG_EVT_RF_OFF\n");
			if (_leave_ps(ps, true, "msg rf off") == RTW_PHL_STATUS_SUCCESS) {
				_leave_success_hdlr(ps);
			} else {
				_leave_fail_hdlr(ps);
				ret = MDL_RET_CANNOT_IO;
				break;
			}
			ps->rej_pwr_req = true;
			_enter_ps(ps, PS_MODE_IPS, PS_MACID_NONE, "msg rf off");
		}
		ret = MDL_RET_SUCCESS;
		break;
	case MSG_EVT_PHY_ON:
		ret = _phy_on_msg_hdlr(ps, msg);
		break;
	case MSG_EVT_PHY_IDLE:
		ret = _phy_idle_msg_hdlr(ps, msg);
		break;
	default:
		/* unknown events: allow only when fully powered */
		if (ps->cur_pwr_lvl != PS_PWR_LVL_PWRON) {
			PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): MDL_ID(%d)-EVT_ID(%d) get cannot I/O!\n",
			         __func__,
			         MSG_MDL_ID_FIELD(msg->msg_id),
			         MSG_EVT_ID_FIELD(msg->msg_id));
			ret = MDL_RET_CANNOT_IO;
		} else {
			ret = MDL_RET_SUCCESS;
		}
		break;
	}

	return ret;
}
1529 
/* Datapath event filter: currently no tx/rx event is bypassed. */
static bool _is_ignored_datapath_evt(u16 evt_id)
{
	return false;
}
1534 
1535 static enum phl_mdl_ret_code
_datapath_mdl_msg_hdlr(struct cmd_ps * ps,struct phl_msg * msg)1536 _datapath_mdl_msg_hdlr(struct cmd_ps *ps, struct phl_msg *msg)
1537 {
1538 	enum phl_mdl_ret_code ret = MDL_RET_CANNOT_IO;
1539 
1540 	if (_is_ignored_datapath_evt(MSG_EVT_ID_FIELD(msg->msg_id)))
1541 		return MDL_RET_SUCCESS;
1542 
1543 	switch (MSG_EVT_ID_FIELD(msg->msg_id)) {
1544 	case MSG_EVT_TRX_PWR_REQ:
1545 		ret = _tx_pwr_req_msg_hdlr(ps, msg);
1546 		break;
1547 	default:
1548 		if (ps->cur_pwr_lvl != PS_PWR_LVL_PWRON) {
1549 			PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): MDL_ID(%d)-EVT_ID(%d) get cannot I/O!\n",
1550 			         __func__,
1551 			         MSG_MDL_ID_FIELD(msg->msg_id),
1552 			         MSG_EVT_ID_FIELD(msg->msg_id));
1553 			ret = MDL_RET_CANNOT_IO;
1554 		} else {
1555 			ret = MDL_RET_SUCCESS;
1556 		}
1557 		break;
1558 	}
1559 
1560 	return ret;
1561 }
1562 
1563 /**
1564  * bypass msg of specific module
1565  * @msg: see phl_msg
1566  */
_is_ignored_mdl(struct phl_msg * msg)1567 static bool _is_ignored_mdl(struct phl_msg *msg)
1568 {
1569 	if (MSG_MDL_ID_FIELD(msg->msg_id) == PHL_MDL_BTC)
1570 		return true;
1571 	if (MSG_MDL_ID_FIELD(msg->msg_id) == PHL_MDL_LED)
1572 		return true;
1573 
1574 	return false;
1575 }
1576 
/* Route an externally-issued message to the per-module handler; modules
 * without a dedicated handler are answered based on the power level. */
static enum phl_mdl_ret_code
_ps_mdl_hdl_external_evt(void *dispr, struct cmd_ps *ps, struct phl_msg *msg)
{
	enum phl_mdl_ret_code ret = MDL_RET_CANNOT_IO;

	if (_is_ignored_mdl(msg)) {
		/* PHL_INFO("[PS_CMD], %s(): ignore MDL_ID(%d)-EVT_ID(%d).\n", __func__,
		         MSG_MDL_ID_FIELD(msg->msg_id), MSG_EVT_ID_FIELD(msg->msg_id)); */
		return MDL_RET_SUCCESS;
	}

	switch (MSG_MDL_ID_FIELD(msg->msg_id)) {
	case PHL_MDL_GENERAL:
		ret = _general_mdl_msg_hdlr(ps, msg);
		break;
	case PHL_MDL_MRC:
		ret = _mrc_mdl_msg_hdlr(ps, msg);
		break;
	case PHL_MDL_TX:
	case PHL_MDL_RX:
		ret = _datapath_mdl_msg_hdlr(ps, msg);
		break;
	case PHL_MDL_SER:
		ret = _ser_mdl_msg_hdlr(ps, msg);
		break;
	/* handle other mdl here */
	default:
		if (ps->cur_pwr_lvl != PS_PWR_LVL_PWRON) {
			PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): MDL_ID(%d)-EVT_ID(%d) get cannot I/O!\n", __func__,
		         MSG_MDL_ID_FIELD(msg->msg_id), MSG_EVT_ID_FIELD(msg->msg_id));
			ret = MDL_RET_CANNOT_IO;
		} else {
			ret = MDL_RET_SUCCESS;
		}
		break;
	}

	return ret;
}
1616 
/* Handle MSG_EVT_PS_CAP_CHG: update the runtime stop-reason bitmap in the
 * pre phase (leaving ps if any stop reason is now set) and, in the post
 * phase, try to re-enter ips once all stop reasons are cleared. */
static enum phl_mdl_ret_code _ps_cap_chg_msg_hdlr(struct cmd_ps *ps, struct phl_msg *msg)
{
	enum phl_ps_rt_rson  rt_rson = PS_RT_RSON_NONE;
	bool ps_allow = false;
	struct rt_ps *rt_ps_info= NULL;
	u16 macid = PS_MACID_NONE;

	if (IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
		/* NOTE(review): msg->inbuf size is not validated here, unlike
		 * _mrc_mdl_msg_hdlr — confirm callers always send struct rt_ps */
		rt_ps_info = (struct rt_ps *)msg->inbuf;
		ps_allow = rt_ps_info->ps_allow;
		rt_rson = rt_ps_info->rt_rson;
		PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): update -> ps_allow(%d), reason(%d).\n", __func__, ps_allow, rt_rson);

		/* toggle the reason bit; if nothing changed there is no work */
		if (ps_allow) {
			if (TEST_STATUS_FLAG(ps->rt_stop_rson, rt_rson) == true)
				CLEAR_STATUS_FLAG(ps->rt_stop_rson, rt_rson);
			else
				return MDL_RET_SUCCESS;
		} else {
			if (TEST_STATUS_FLAG(ps->rt_stop_rson, rt_rson) == false)
				SET_STATUS_FLAG(ps->rt_stop_rson, rt_rson);
			else
				return MDL_RET_SUCCESS;
		}

		if (ps->rej_pwr_req == true) {
			PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): reject pwr req.\n", __func__);
			return MDL_RET_CANNOT_IO;
		}

		/* any remaining stop reason forces us out of power save */
		if (ps->rt_stop_rson != PS_RT_RSON_NONE) {
			if (_leave_ps(ps, true, "cap chg") == RTW_PHL_STATUS_SUCCESS) {
				_leave_success_hdlr(ps);
				return MDL_RET_SUCCESS;
			} else {
				_leave_fail_hdlr(ps);
				return MDL_RET_CANNOT_IO;
			}
		}
	} else {
		/* post phase: with no stop reasons left, ips may be entered */
		if (ps->rt_stop_rson == PS_RT_RSON_NONE) {
			PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): try enter ips.\n", __func__);
			if (_chk_ips_enter(ps, &macid)) {
				_enter_ps(ps, PS_MODE_IPS, macid, "cap chg");
			}
		}
	}

	return MDL_RET_SUCCESS;
}
1667 
_tx_pkt_ntfy_msg_hdlr(struct cmd_ps * ps,struct phl_msg * msg)1668 static enum phl_mdl_ret_code _tx_pkt_ntfy_msg_hdlr(struct cmd_ps *ps, struct phl_msg *msg)
1669 {
1670 	if (!IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
1671 		PHL_DBG("[PS_CMD], %s(): rpwm with tx req.\n", __func__);
1672 		/* rpwm with tx req */
1673 		rtw_hal_ps_notify_wake(ps->phl_info->hal);
1674 	}
1675 
1676 	return MDL_RET_SUCCESS;
1677 }
1678 
_ps_period_chk_hdlr(struct cmd_ps * ps,struct phl_msg * msg)1679 static enum phl_mdl_ret_code _ps_period_chk_hdlr(struct cmd_ps *ps, struct phl_msg *msg)
1680 {
1681 	u16 macid = PS_MACID_NONE;
1682 
1683 	if (IS_MSG_IN_PRE_PHASE(msg->msg_id))
1684 		return MDL_RET_SUCCESS;
1685 
1686 	ps->wdg_leave_ps = false;
1687 
1688 	if (ps->ps_state != PS_STATE_ENTERED) {
1689 
1690 		PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): \n", __func__);
1691 		if (_chk_lps_enter(ps, &macid))
1692 			_enter_ps(ps, PS_MODE_LPS, macid, "period chk");
1693 	}
1694 
1695 	_os_set_timer(phl_to_drvpriv(ps->phl_info), &ps->ps_timer, CMD_PS_TIMER_PERIOD);
1696 
1697 	return MDL_RET_SUCCESS;
1698 }
1699 
/* Debug-only lps control: forces lps entry (and then rejects further power
 * requests) or forces lps leave (re-enabling power requests). */
static enum phl_mdl_ret_code _ps_dbg_lps_msg_hdlr(struct cmd_ps *ps, struct phl_msg *msg)
{
	u16 macid = PS_MACID_NONE;
	u16 evt_id = MSG_EVT_ID_FIELD(msg->msg_id);

	if (evt_id == MSG_EVT_PS_DBG_LPS_ENTER) {
		if (!IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
			/* only enter if some role actually qualifies for lps */
			if (!_chk_wrole_with_ps_mode(ps, PS_MODE_LPS, &macid))
				return MDL_RET_SUCCESS;
			_enter_ps(ps, PS_MODE_LPS, macid, "dbg lps enter");
			/* pin the device in lps until the debug leave event */
			ps->rej_pwr_req = true;
		}
	} else { /* MSG_EVT_PS_DBG_LPS_LEAVE */
		if (IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
			_leave_ps(ps, true, "dbg lps leave");
			ps->rej_pwr_req = false;
		}
	}

	return MDL_RET_SUCCESS;
}
1721 
/* Debug-only ips control: intentionally a no-op stub. */
static enum phl_mdl_ret_code _ps_dbg_ips_msg_hdlr(struct cmd_ps *ps, struct phl_msg *msg)
{
	/* fw do not support ips currently */
	return MDL_RET_SUCCESS;
}
1727 
/* Dispatch events the PS module sent to itself; unknown internal events
 * are refused with CANNOT_IO. */
static enum phl_mdl_ret_code
_ps_mdl_hdl_internal_evt(void *dispr, struct cmd_ps *ps, struct phl_msg *msg)
{
	enum phl_mdl_ret_code ret = MDL_RET_CANNOT_IO;

	switch (MSG_EVT_ID_FIELD(msg->msg_id)) {
	case MSG_EVT_PHY_ON:
		ret = _phy_on_msg_hdlr(ps, msg);
		break;
	case MSG_EVT_PHY_IDLE:
		ret = _phy_idle_msg_hdlr(ps, msg);
		break;
	case MSG_EVT_PS_CAP_CHG:
		ret = _ps_cap_chg_msg_hdlr(ps, msg);
		break;
	case MSG_EVT_PS_PERIOD_CHK:
		ret = _ps_period_chk_hdlr(ps, msg);
		break;
	case MSG_EVT_PS_DBG_LPS_ENTER:
	case MSG_EVT_PS_DBG_LPS_LEAVE:
		ret = _ps_dbg_lps_msg_hdlr(ps, msg);
		break;
	case MSG_EVT_PS_DBG_IPS_ENTER:
	case MSG_EVT_PS_DBG_IPS_LEAVE:
		ret = _ps_dbg_ips_msg_hdlr(ps, msg);
		break;
	case MSG_EVT_TX_PKT_NTFY:
		ret = _tx_pkt_ntfy_msg_hdlr(ps, msg);
		break;
	default:
		ret = MDL_RET_CANNOT_IO;
		break;
	}

	return ret;
}
1764 
1765 static enum phl_mdl_ret_code
_ps_mdl_msg_hdlr(void * dispr,void * priv,struct phl_msg * msg)1766 _ps_mdl_msg_hdlr(void *dispr, void *priv, struct phl_msg *msg)
1767 {
1768 	enum phl_mdl_ret_code ret = MDL_RET_IGNORE;
1769 	struct cmd_ps *ps = (struct cmd_ps *)priv;
1770 
1771 	if (IS_MSG_FAIL(msg->msg_id)) {
1772 		PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): MDL_ID(%d)-EVT_ID(%d) fail.\n", __func__,
1773 		         MSG_MDL_ID_FIELD(msg->msg_id), MSG_EVT_ID_FIELD(msg->msg_id));
1774 		return MDL_RET_SUCCESS;
1775 	}
1776 
1777 	switch (MSG_MDL_ID_FIELD(msg->msg_id)) {
1778 		case PHL_MDL_POWER_MGNT:
1779 			ret = _ps_mdl_hdl_internal_evt(dispr, ps, msg);
1780 			break;
1781 		default:
1782 			ret = _ps_mdl_hdl_external_evt(dispr, ps, msg);
1783 			break;
1784 	}
1785 
1786 	return ret;
1787 }
1788 
1789 static enum phl_mdl_ret_code
_ps_mdl_set_info(void * dispr,void * priv,struct phl_module_op_info * info)1790 _ps_mdl_set_info(void *dispr, void *priv, struct phl_module_op_info *info)
1791 {
1792 	struct cmd_ps *ps = (struct cmd_ps *)priv;
1793 
1794 	switch (info->op_code) {
1795 	case PS_MDL_OP_CANCEL_PWR_REQ:
1796 		PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): cancel pwr req, evt_id %d\n", __func__, *(u16 *)info->inbuf);
1797 		if (ps->rej_pwr_req == false)
1798 			_cancel_pwr_req(ps, *(u16 *)info->inbuf);
1799 		break;
1800 	case PS_MDL_OP_BTC_PWR_REQ:
1801 		PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): btc req pwr %d\n", __func__, *(bool *)info->inbuf);
1802 		ps->btc_req_pwr = *(bool *)info->inbuf;
1803 		break;
1804 	}
1805 
1806 	return MDL_RET_SUCCESS;
1807 }
1808 
/* query_info entry of the PS background module
 * PS_MDL_OP_CUR_PWR_LVL: copy current power level into info->inbuf (u8)
 * PS_MDL_OP_BASIC_INFO: fill a phl_cmd_ps_basic_info snapshot, including
 * copies of the last enter/leave reason strings */
static enum phl_mdl_ret_code
_ps_mdl_query_info(void *dispr, void *priv, struct phl_module_op_info *info)
{
	struct cmd_ps *ps = (struct cmd_ps *)priv;
	u8 pwr_lvl = PS_PWR_LVL_PWROFF;
	struct phl_cmd_ps_basic_info *basic_info = NULL;

	/* PHL_INFO("[PS_CMD], %s(): opcode %d.\n", __func__, info->op_code); */

	switch (info->op_code) {
	case PS_MDL_OP_CUR_PWR_LVL:
		pwr_lvl = ps->cur_pwr_lvl;
		_os_mem_cpy(phl_to_drvpriv(ps->phl_info), (void *)info->inbuf,
					&pwr_lvl, sizeof(pwr_lvl));
		break;
	case PS_MDL_OP_BASIC_INFO:
		/* NOTE(review): inbuf is assumed to point at a caller-owned
		 * struct phl_cmd_ps_basic_info of sufficient size */
		basic_info = (struct phl_cmd_ps_basic_info *)info->inbuf;
		basic_info->ps_mode = ps->ps_mode;
		basic_info->cur_pwr_lvl = ps->cur_pwr_lvl;
		basic_info->rej_pwr_req = ps->rej_pwr_req;
		basic_info->btc_req_pwr = ps->btc_req_pwr;
		basic_info->rt_stop_rson = ps->rt_stop_rson;
		basic_info->ap_active = ps->mr_info.ap_active;
		basic_info->gc_active = ps->mr_info.gc_active;
		basic_info->sta = ps->sta;
		/* enter/leave ps reason */
		_os_mem_set(phl_to_drvpriv(ps->phl_info), basic_info->enter_rson, 0, MAX_CMD_PS_RSON_LENGTH);
		_os_mem_cpy(phl_to_drvpriv(ps->phl_info), basic_info->enter_rson, ps->enter_rson, MAX_CMD_PS_RSON_LENGTH);
		_os_mem_set(phl_to_drvpriv(ps->phl_info), basic_info->leave_rson, 0, MAX_CMD_PS_RSON_LENGTH);
		_os_mem_cpy(phl_to_drvpriv(ps->phl_info), basic_info->leave_rson, ps->leave_rson, MAX_CMD_PS_RSON_LENGTH);
		break;
	}

	return MDL_RET_SUCCESS;
}
1844 
phl_register_ps_module(struct phl_info_t * phl_info)1845 enum rtw_phl_status phl_register_ps_module(struct phl_info_t *phl_info)
1846 {
1847 	enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
1848 	struct phl_cmd_dispatch_engine *disp_eng = &(phl_info->disp_eng);
1849 	struct phl_bk_module_ops bk_ops = {0};
1850 	u8 i = 0;
1851 
1852 	PHL_INFO("[PS_CMD], %s(): \n", __func__);
1853 
1854 	bk_ops.init = _ps_mdl_init;
1855 	bk_ops.deinit = _ps_mdl_deinit;
1856 	bk_ops.start = _ps_mdl_start;
1857 	bk_ops.stop = _ps_mdl_stop;
1858 	bk_ops.msg_hdlr = _ps_mdl_msg_hdlr;
1859 	bk_ops.set_info = _ps_mdl_set_info;
1860 	bk_ops.query_info = _ps_mdl_query_info;
1861 
1862 	for (i = 0; i < disp_eng->phy_num; i++) {
1863 		phl_status = phl_disp_eng_register_module(phl_info, i,
1864 						 PHL_MDL_POWER_MGNT, &bk_ops);
1865 		if (phl_status != RTW_PHL_STATUS_SUCCESS) {
1866 			PHL_ERR("register cmd PS module of phy%d failed.\n", i + 1);
1867 			break;
1868 		}
1869 	}
1870 
1871 	return phl_status;
1872 }
1873 
phl_ps_get_cur_pwr_lvl(struct phl_info_t * phl_info)1874 u8 phl_ps_get_cur_pwr_lvl(struct phl_info_t *phl_info)
1875 {
1876 	struct phl_module_op_info op_info = {0};
1877 	u8 pwr_lvl = PS_PWR_LVL_MAX;
1878 
1879 	op_info.op_code = PS_MDL_OP_CUR_PWR_LVL;
1880 	op_info.inbuf = (u8 *)&pwr_lvl;
1881 	op_info.inlen = sizeof(pwr_lvl);
1882 
1883 	phl_disp_eng_query_bk_module_info(phl_info, HW_BAND_0,
1884 			PHL_MDL_POWER_MGNT, &op_info);
1885 
1886 	PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): pwr lvl(%s)\n", __func__, phl_ps_pwr_lvl_to_str(pwr_lvl));
1887 
1888 	return pwr_lvl;
1889 }
1890 
phl_ps_is_datapath_allowed(struct phl_info_t * phl_info)1891 bool phl_ps_is_datapath_allowed(struct phl_info_t *phl_info)
1892 {
1893 	struct phl_module_op_info op_info = {0};
1894 	bool io_allowed = false;
1895 	u8 pwr_lvl = PS_PWR_LVL_MAX;
1896 	struct rtw_ps_cap_t *ps_cap = _get_ps_cap(phl_info);
1897 
1898 	if (!ps_cap->lps_pause_tx)
1899 		return true;
1900 
1901 	op_info.op_code = PS_MDL_OP_CUR_PWR_LVL;
1902 	op_info.inbuf = (u8 *)&pwr_lvl;
1903 	op_info.inlen = sizeof(pwr_lvl);
1904 
1905 	phl_disp_eng_query_bk_module_info(phl_info, HW_BAND_0,
1906 			PHL_MDL_POWER_MGNT, &op_info);
1907 
1908 	if (pwr_lvl == PS_PWR_LVL_PWRON)
1909 		io_allowed = true;
1910 
1911 	return io_allowed;
1912 }
1913 
_ps_tx_pkt_ntfy_done(void * priv,struct phl_msg * msg)1914 static void _ps_tx_pkt_ntfy_done(void *priv, struct phl_msg *msg)
1915 {
1916 	struct phl_info_t *phl_info = (struct phl_info_t *)priv;
1917 
1918 	PHL_DBG("[PS_CMD], %s(): reset ntfy\n", __func__);
1919 
1920 	_os_atomic_set(phl_to_drvpriv(phl_info),
1921 				   &phl_info->ps_info.tx_ntfy,
1922 				   0);
1923 }
1924 
phl_ps_tx_pkt_ntfy(struct phl_info_t * phl_info)1925 void phl_ps_tx_pkt_ntfy(struct phl_info_t *phl_info)
1926 {
1927 	struct phl_msg msg = {0};
1928 	struct phl_msg_attribute attr = {0};
1929 
1930 	if (phl_ps_get_cur_pwr_lvl(phl_info) == PS_PWR_LVL_PWRON)
1931 		return;
1932 
1933 	if (_os_atomic_read(phl_to_drvpriv(phl_info), &phl_info->ps_info.tx_ntfy)) {
1934 		PHL_DBG("[PS_CMD], %s(): already ntfy\n", __func__);
1935 		return;
1936 	}
1937 
1938 	SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_POWER_MGNT);
1939 	SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_TX_PKT_NTFY);
1940 	msg.band_idx = HW_BAND_0;
1941 	attr.completion.completion = _ps_tx_pkt_ntfy_done;
1942 	attr.completion.priv = phl_info;
1943 
1944 	_os_atomic_set(phl_to_drvpriv(phl_info),
1945 				   &phl_info->ps_info.tx_ntfy,
1946 				   1);
1947 
1948 	if (phl_disp_eng_send_msg(phl_info, &msg, &attr, NULL) !=
1949 				RTW_PHL_STATUS_SUCCESS) {
1950 		PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS_CMD], %s(): fail to send tx pkt notify.\n", __func__);
1951 		_os_atomic_set(phl_to_drvpriv(phl_info),
1952 				   	   &phl_info->ps_info.tx_ntfy,
1953 				   	   0);
1954 	}
1955 
1956 	return;
1957 }
1958 
1959 
_ps_set_rt_cap_done(void * priv,struct phl_msg * msg)1960 static void _ps_set_rt_cap_done(void *priv, struct phl_msg *msg)
1961 {
1962 	struct phl_info_t *phl_info = (struct phl_info_t *)priv;
1963 
1964 	if (msg->inbuf && msg->inlen) {
1965 		_os_mem_free(phl_to_drvpriv(phl_info),
1966 			msg->inbuf, msg->inlen);
1967 	}
1968 
1969 }
1970 
/**
 * rtw_phl_ps_set_rt_cap - change the runtime power-save capability
 * @phl: PHL context (struct phl_info_t *)
 * @band_idx: HW band the change applies to
 * @ps_allow: whether power save is allowed
 * @rt_rson: runtime reason code for the change
 *
 * Allocates a struct rt_ps payload and sends MSG_EVT_PS_CAP_CHG to the
 * PS module. On successful send the payload is freed by the completion
 * callback (_ps_set_rt_cap_done); on send failure it is freed here.
 */
void rtw_phl_ps_set_rt_cap(void *phl, u8 band_idx, bool ps_allow, enum phl_ps_rt_rson rt_rson)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	struct phl_msg msg = {0};
	struct phl_msg_attribute attr = {0};
	struct rt_ps *ps_rt_info = NULL;

	ps_rt_info = (struct rt_ps *)_os_mem_alloc(phl_to_drvpriv(phl_info), sizeof(struct rt_ps));
	if (ps_rt_info == NULL) {
		PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS_CMD], %s(): fail to alloc ps_rt_info memory.\n", __func__);
		return;
	}

	ps_rt_info->rt_rson = rt_rson;
	ps_rt_info->ps_allow = ps_allow;

	SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_POWER_MGNT);
	SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_PS_CAP_CHG);
	msg.band_idx = band_idx;
	msg.inbuf = (u8 *)ps_rt_info;
	msg.inlen = sizeof(struct rt_ps);
	attr.completion.completion = _ps_set_rt_cap_done;
	attr.completion.priv = phl_info;

	if (phl_disp_eng_send_msg(phl_info, &msg, &attr, NULL) !=
				RTW_PHL_STATUS_SUCCESS) {
		PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS_CMD], %s(): fail to notify ps cap change.\n", __func__);
		goto cmd_fail;
	}

	return;

cmd_fail:
	/* BUGFIX: free the full struct size; the original passed
	 * sizeof(ps_rt_info) (size of a pointer), under-reporting the
	 * allocation size to the OS free wrapper */
	_os_mem_free(phl_to_drvpriv(phl_info), ps_rt_info, sizeof(struct rt_ps));
}
2006 
/**
 * phl_ps_dbg_set_ps - debug hook to force PS enter/leave
 * @phl_info: PHL context
 * @ps_mode: PS_MODE_LPS (supported) or PS_MODE_IPS (not supported yet)
 * @enter: true to enter PS, false to leave
 *
 * Sends the matching MSG_EVT_PS_DBG_LPS_* event to the PS module on
 * HW band 0. Unsupported modes are rejected without sending anything.
 */
void phl_ps_dbg_set_ps(struct phl_info_t *phl_info, u8 ps_mode, bool enter)
{
	struct phl_msg msg = {0};
	struct phl_msg_attribute attr = {0};
	u16 evt_id = 0;

	if (ps_mode == PS_MODE_LPS) {
		evt_id = (enter ? MSG_EVT_PS_DBG_LPS_ENTER : MSG_EVT_PS_DBG_LPS_LEAVE);
	} else if (ps_mode == PS_MODE_IPS) {
		PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): dbg ips is not support now.\n", __func__);
		/* evt_id = (enter ? MSG_EVT_PS_DBG_IPS_ENTER : MSG_EVT_PS_DBG_IPS_LEAVE); */
		/* BUGFIX: must bail out here; the original fell through and
		 * sent a message with evt_id still 0 */
		return;
	} else {
		return;
	}

	PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): debug set %s %s\n", __func__,
			phl_ps_ps_mode_to_str(ps_mode), (enter ? "enter" : "leave"));

	SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_POWER_MGNT);
	SET_MSG_EVT_ID_FIELD(msg.msg_id, evt_id);
	msg.band_idx = HW_BAND_0;

	if (phl_disp_eng_send_msg(phl_info, &msg, &attr, NULL) !=
				RTW_PHL_STATUS_SUCCESS) {
		PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS_CMD], %s(): fail to set dbg ps.\n", __func__);
	}

	return;
}
2036 
2037 enum rtw_phl_status
rtw_phl_ps_set_rf_state(void * phl,u8 band_idx,enum rtw_rf_state rf_state)2038 rtw_phl_ps_set_rf_state(void *phl, u8 band_idx, enum rtw_rf_state rf_state)
2039 {
2040 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
2041 	enum phl_msg_evt_id evt_id = (rf_state == RTW_RF_ON) ? MSG_EVT_RF_ON : MSG_EVT_RF_OFF;
2042 
2043 	phl_cmd_enqueue(phl_info, band_idx, evt_id, NULL, 0, NULL, PHL_CMD_WAIT, 0);
2044 
2045 	return RTW_PHL_STATUS_SUCCESS;
2046 }
2047 
/**
 * phl_ps_hal_pwr_req - forward a HAL-layer power request to the PS module
 * @phl_com: common PHL context carrying the PHL private pointer
 * @src: requester id; only HAL_BTC_PWR_REQ is accepted
 * @pwr_req: true to request power, false to release the request
 *
 * Translates the BTC power request into a PS_MDL_OP_BTC_PWR_REQ
 * set-info call on HW band 0.
 *
 * Returns the set-info status, or RTW_PHL_STATUS_FAILURE for
 * unsupported sources.
 */
enum rtw_phl_status phl_ps_hal_pwr_req(struct rtw_phl_com_t *phl_com, u8 src, bool pwr_req)
{
	enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
	struct phl_info_t *phl_info = (struct phl_info_t *)phl_com->phl_priv;
	struct phl_module_op_info op_info = {0};

	if (src != HAL_BTC_PWR_REQ)
		return RTW_PHL_STATUS_FAILURE;

	op_info.op_code = PS_MDL_OP_BTC_PWR_REQ;
	op_info.inbuf = (u8 *)&pwr_req;
	/* set inlen for consistency with every other op_info user;
	 * the original left it 0 */
	op_info.inlen = sizeof(pwr_req);
	status = phl_disp_eng_set_bk_module_info(phl_info, HW_BAND_0,
				PHL_MDL_POWER_MGNT, &op_info);

	return status;
}
2064 #endif
2065