1 /******************************************************************************
2 *
3 * Copyright(c) 2021 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 *****************************************************************************/
15 #define _PHL_CMD_PS_C_
16 #include "phl_headers.h"
17 #ifdef CONFIG_POWER_SAVE
18 /* structure of a power request */
19 struct pwr_req {
20 _os_list list;
21 u16 evt_id;
22 };
23
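/*
 * runtime power-save on/off request; carried in the inbuf of a
 * MSG_EVT_PS_CAP_CHG message and consumed by _ps_cap_chg_msg_hdlr()
 */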
24 struct rt_ps {
25 enum phl_ps_rt_rson rt_rson;
26 bool ps_allow;
27 };
28
29 enum {
30 PS_STATE_NONE = 0,
31 PS_STATE_LEAVED,
32 /**
33 * lps: protocol only
34 * ips: won't in this state
35 */
36 PS_STATE_PROTO,
37 /**
38 * lps: protocol + power
39 * ips: power
40 */
41 PS_STATE_ENTERED
42 };
43
44 static const char *_ps_state_to_str(u8 ps_state)
45 {
46 switch (ps_state) {
47 case PS_STATE_ENTERED:
48 return "PS_ENTERED";
49 case PS_STATE_PROTO:
50 return "PS_PROTOCOL";
51 case PS_STATE_LEAVED:
52 return "PS_LEAVED";
53 default:
54 return "NONE";
55 }
56 }
57
58 struct _ps_mr_info {
59 bool ap_active;
60 bool gc_active;
61 };
62
63 #define CMD_PS_TIMER_PERIOD 100
64 #define MAX_PWE_REQ_NUM 16
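/*
 * private context of the power-save command module; allocated per
 * dispatcher in _ps_mdl_init() and handed back to every callback as priv
 */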
65 struct cmd_ps {
66 struct phl_info_t *phl_info;
67 void *dispr;
68 _os_timer ps_timer;
69
70 struct phl_queue req_busy_q;
71 struct phl_queue req_idle_q;
72 struct pwr_req req_pool[MAX_PWE_REQ_NUM];
73
74 bool rej_pwr_req; /* reject all pwr request */
75 bool btc_req_pwr; /* btc request pwr or not */
76 bool stop_datapath;
77
78 /* current state */
79 u8 cur_pwr_lvl;
80 u8 ps_state;
81 u8 ps_mode;
82 char enter_rson[MAX_CMD_PS_RSON_LENGTH];
83 char leave_rson[MAX_CMD_PS_RSON_LENGTH];
84
85 struct rtw_phl_stainfo_t *sta; /* sta entering/leaving ps */
86 u16 macid;
87
88 /* lps */
89 u32 null_token;
90 bool wdg_leave_ps;
91
92 /* refs. enum "phl_ps_rt_rson" */
93 enum phl_ps_rt_rson rt_stop_rson;
94
95 struct _ps_mr_info mr_info;
96
97 /* rssi */
98 u8 rssi_bcn_min;
99 };
100
101 /**
102 * determine whether to leave lps or not
103 * return true if the rssi variation reaches the threshold
104 * @ps: see cmd_ps
105 */
106 static bool _chk_rssi_diff_reach_thld(struct cmd_ps *ps)
107 {
108 struct phl_info_t *phl_info = ps->phl_info;
109 struct rtw_ps_cap_t *ps_cap = _get_ps_cap(ps->phl_info);
110 u8 cur_rssi_bcn_min = 0;
111 u8 *rssi_bcn_min = &ps->rssi_bcn_min;
112 bool leave_ps = false;
113
114 cur_rssi_bcn_min = phl_get_min_rssi_bcn(phl_info);
115
116 do {
117 if (*rssi_bcn_min == 0 || *rssi_bcn_min == 0xFF) {
118 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): (criteria = %d, cur = %d) criteria invalid, set criteria to cur\n",
119 __func__, *rssi_bcn_min, cur_rssi_bcn_min);
120 *rssi_bcn_min = cur_rssi_bcn_min; /* update with current_rssi */
121 break;
122 }
123
124 if (DIFF(*rssi_bcn_min, cur_rssi_bcn_min) < ps_cap->lps_rssi_diff_threshold) {
125 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): (criteria = %d, cur = %d) RSSI diff < %d, do nothing\n",
126 __func__, *rssi_bcn_min, cur_rssi_bcn_min, ps_cap->lps_rssi_diff_threshold);
127 break;
128 }
129
130 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): leave ps and update criteria from %d to %d\n", __func__, *rssi_bcn_min, cur_rssi_bcn_min);
131
132 leave_ps = true;
133 *rssi_bcn_min = cur_rssi_bcn_min;
134 } while (0);
135
136 return leave_ps;
137 }
138
139 /**
140 * determine whether to leave lps or not
141 * return true if the beacon offset has changed
142 * @ps: see cmd_ps
143 */
144 bool _chk_bcn_offset_changed(struct cmd_ps *ps)
145 {
146 struct phl_info_t *phl_info = ps->phl_info;
147 u8 ridx = MAX_WIFI_ROLE_NUMBER;
148 struct rtw_wifi_role_t *wrole = NULL;
149 struct rtw_bcn_offset *b_ofst_i = NULL;
150 bool leave_ps = false;
151
152 for (ridx = 0; ridx < MAX_WIFI_ROLE_NUMBER; ridx++) {
153 wrole = rtw_phl_get_wrole_by_ridx(phl_info->phl_com, ridx);
154 if (wrole == NULL)
155 continue;
156
157 if (rtw_phl_role_is_client_category(wrole) && wrole->mstate == MLME_LINKED) {
158 b_ofst_i = phl_get_sta_bcn_offset_info(phl_info, wrole);
159
160 if (b_ofst_i->conf_lvl >= CONF_LVL_MID &&
161 b_ofst_i->offset != b_ofst_i->cr_tbtt_shift) {
162 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): update ridx(%d) bcn offset from %d to %d TU\n",
163 __func__, ridx, b_ofst_i->cr_tbtt_shift, b_ofst_i->offset);
164 leave_ps = true;
165 break;
166 }
167 }
168 }
169 return leave_ps;
170 }
171
172 /**
173 * check whether to leave/enter lps
174 * return true if ps needs to change to the target state
175 * @ps: see cmd_ps
176 * @macid: macid of the corresponding sta
177 * @cur_state: current lps state
178 * @target_state: the target ps state
179 */
180 static bool
181 _lps_state_judge_changed(struct cmd_ps *ps, u16 macid, u8 cur_state, u8 target_state)
182 {
183 struct phl_info_t *phl_info = ps->phl_info;
184 struct rtw_ps_cap_t *ps_cap = _get_ps_cap(ps->phl_info);
185 struct rtw_stats *phl_stats = &phl_info->phl_com->phl_stats;
186 struct rtw_phl_stainfo_t *sta = NULL;
187 bool change_state = false;
188 u8 rssi = 0;
189
190 sta = rtw_phl_get_stainfo_by_macid(phl_info, macid);
191 if (sta == NULL) {
192 PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): cannot get sta of macid %d.\n", __func__, macid);
193 return false;
194 }
195
196 rssi = rtw_hal_get_sta_rssi(sta);
197
198 if (target_state == PS_STATE_ENTERED) {
199 if (cur_state == PS_STATE_LEAVED || cur_state == PS_STATE_PROTO) {
200 if (rssi > ps_cap->lps_rssi_enter_threshold &&
201 phl_stats->tx_traffic.lvl == RTW_TFC_IDLE &&
202 phl_stats->rx_traffic.lvl == RTW_TFC_IDLE) {
203 change_state = true;
204 }
205 }
206 } else {
207 if (cur_state == PS_STATE_ENTERED || cur_state == PS_STATE_PROTO) {
208 if (_chk_rssi_diff_reach_thld(ps) ||
209 _chk_bcn_offset_changed(ps) ||
210 rssi < ps_cap->lps_rssi_leave_threshold ||
211 phl_stats->tx_traffic.lvl != RTW_TFC_IDLE ||
212 phl_stats->rx_traffic.lvl != RTW_TFC_IDLE) {
213 change_state = true;
214 }
215 }
216 }
217
218 if (change_state) {
219 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): state %s -> %s, Tx(%s), Rx(%s), RSSI(%d)\n",
220 __func__, _ps_state_to_str(cur_state), _ps_state_to_str(target_state),
221 phl_tfc_lvl_to_str(phl_stats->tx_traffic.lvl),
222 phl_tfc_lvl_to_str(phl_stats->rx_traffic.lvl), rssi);
223 }
224
225 return change_state;
226 }
227
228 static void _set_ps_rson(struct cmd_ps *ps, u8 enter, char *rson)
229 {
230 if (enter) {
231 _os_mem_set(phl_to_drvpriv(ps->phl_info), ps->enter_rson, 0, MAX_CMD_PS_RSON_LENGTH);
232 _os_mem_cpy(phl_to_drvpriv(ps->phl_info), ps->enter_rson, rson, MAX_CMD_PS_RSON_LENGTH);
233 } else {
234 _os_mem_set(phl_to_drvpriv(ps->phl_info), ps->leave_rson, 0, MAX_CMD_PS_RSON_LENGTH);
235 _os_mem_cpy(phl_to_drvpriv(ps->phl_info), ps->leave_rson, rson, MAX_CMD_PS_RSON_LENGTH);
236 }
237 }
238
239 /**
240 * Leave power saving
241 * return RTW_PHL_STATUS_SUCCESS if leave ps ok
242 * @ps: see cmd_ps
243 * @leave_proto: whether to leave protocol
244 * @rson: the reason of leaving power saving
245 */
246 enum rtw_phl_status _leave_ps(struct cmd_ps *ps, bool leave_proto, char *rson)
247 {
248 struct phl_info_t *phl_info = ps->phl_info;
249 enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
250 struct ps_cfg cfg = {0};
251
252 if (ps->ps_state == PS_STATE_LEAVED) {
253 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): not in power saving.\n", __func__);
254 return RTW_PHL_STATUS_SUCCESS;
255 }
256
257 cfg.cur_pwr_lvl = ps->cur_pwr_lvl;
258
259 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): leave %s (macid %d).\n",
260 __func__, phl_ps_ps_mode_to_str(ps->ps_mode), ps->macid);
261
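/*
 * For LPS, leave_proto selects how far to back out: false restores the
 * power level only and keeps the protocol part (PS_STATE_PROTO), while
 * true also tears down the LPS protocol configuration.
 */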
262 if (ps->ps_mode == PS_MODE_LPS) {
263 if (!leave_proto) {
264 if (ps->ps_state == PS_STATE_PROTO) {
265 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): in lps protocol only.\n", __func__);
266 return RTW_PHL_STATUS_SUCCESS;
267 }
268 cfg.pwr_cfg = true;
269 cfg.proto_cfg = false;
270 } else {
271 if (ps->ps_state == PS_STATE_PROTO) {
272 cfg.pwr_cfg = false;
273 cfg.proto_cfg = true;
274 } else {
275 cfg.pwr_cfg = true;
276 cfg.proto_cfg = true;
277 }
278 }
279 cfg.macid = ps->macid;
280 cfg.token = &ps->null_token;
281 cfg.pwr_lvl = PS_PWR_LVL_PWRON;
282 cfg.ps_mode = ps->ps_mode;
283 } else if (ps->ps_mode == PS_MODE_IPS) {
284 cfg.macid = ps->macid;
285 cfg.pwr_lvl = PS_PWR_LVL_PWRON;
286 cfg.ps_mode = ps->ps_mode;
287 } else {
288 PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS_CMD], %s(): unknown ps mode!\n", __func__);
289 }
290
291 _set_ps_rson(ps, false, rson);
292
293 status = phl_ps_leave_ps(phl_info, &cfg);
294
295 if (status == RTW_PHL_STATUS_SUCCESS) {
296 ps->cur_pwr_lvl = cfg.pwr_lvl;
297 if (ps->ps_mode == PS_MODE_LPS) {
298 ps->ps_mode = (leave_proto != true) ? PS_MODE_LPS : PS_MODE_NONE;
299 ps->ps_state = (leave_proto != true) ? PS_STATE_PROTO : PS_STATE_LEAVED;
300 ps->macid = (leave_proto != true) ? ps->macid : PS_MACID_NONE;
301 } else {
302 ps->ps_mode = PS_MODE_NONE;
303 ps->ps_state = PS_STATE_LEAVED;
304 ps->macid = PS_MACID_NONE;
305 }
306 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): macid %d leave ps success, reason(%s).\n", __func__, cfg.macid, rson);
307 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): ps mode(%s), pwr lvl(%s), ps state(%s)\n",
308 __func__, phl_ps_ps_mode_to_str(ps->ps_mode),
309 phl_ps_pwr_lvl_to_str(ps->cur_pwr_lvl), _ps_state_to_str(ps->ps_state));
310 } else {
311 PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS_CMD], %s(): leave ps fail! reason(%s).\n", __func__, rson);
312 }
313
314 return status;
315 }
316
317 /**
318 * Enter power saving
319 * return RTW_PHL_STATUS_SUCCESS if enter ps ok
320 * @ps: see cmd_ps
321 * @ps_mode: target ps mode to enter
322 * @macid: target macid to enter lps
323 * @rson: the reason of entering power saving
324 */
325 enum rtw_phl_status _enter_ps(struct cmd_ps *ps, u8 ps_mode, u16 macid, char *rson)
326 {
327 enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
328 struct ps_cfg cfg = {0};
329 struct rtw_ps_cap_t *ps_cap = _get_ps_cap(ps->phl_info);
330
331 if (ps->ps_state == PS_STATE_ENTERED) {
332 PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): already in power saving.\n", __func__);
333 return RTW_PHL_STATUS_SUCCESS;
334 }
335
336 cfg.cur_pwr_lvl = ps->cur_pwr_lvl;
337
338 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): enter %s (macid %d).\n",
339 __func__, phl_ps_ps_mode_to_str(ps_mode), macid);
340
341 if (ps_mode == PS_MODE_LPS) {
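/* if already in protocol-only LPS, only the power part still needs to be configured */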
342 cfg.proto_cfg = (ps->ps_state == PS_STATE_PROTO) ? false : true;
343 cfg.pwr_cfg = true;
344 cfg.macid = macid;
345 cfg.token = &ps->null_token;
346 cfg.pwr_lvl = phl_ps_judge_pwr_lvl(ps_cap->lps_cap, ps_mode, true);
347 cfg.ps_mode = ps_mode;
348 cfg.awake_interval = ps_cap->lps_awake_interval;
349 cfg.listen_bcn_mode = ps_cap->lps_listen_bcn_mode;
350 cfg.smart_ps_mode = ps_cap->lps_smart_ps_mode;
351 } else if (ps_mode == PS_MODE_IPS) {
352 cfg.macid = macid;
353 if (macid == PS_MACID_NONE)
354 cfg.pwr_lvl = PS_PWR_LVL_PWROFF;
355 else
356 cfg.pwr_lvl = phl_ps_judge_pwr_lvl(ps_cap->ips_cap, ps_mode, true);
357 cfg.ps_mode = ps_mode;
358 } else {
359 PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS_CMD], %s(): unknown ps mode!\n", __func__);
360 }
361
362 _set_ps_rson(ps, true, rson);
363
364 status = phl_ps_enter_ps(ps->phl_info, &cfg);
365
366 if (status == RTW_PHL_STATUS_SUCCESS) {
367 ps->cur_pwr_lvl = cfg.pwr_lvl;
368 ps->ps_mode = cfg.ps_mode;
369 ps->macid = cfg.macid;
370 ps->ps_state = PS_STATE_ENTERED;
371 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): macid %d enter ps success, reason(%s).\n", __func__, ps->macid, rson);
372 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): ps mode(%s), pwr lvl(%s), ps state(%s)\n",
373 __func__, phl_ps_ps_mode_to_str(ps->ps_mode),
374 phl_ps_pwr_lvl_to_str(ps->cur_pwr_lvl), _ps_state_to_str(ps->ps_state));
375 } else {
376 PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS_CMD], %s(): enter ps fail! reason(%s).\n", __func__, rson);
377 }
378
379 return status;
380 }
381
382 static bool _pop_idle_req(struct cmd_ps *ps, struct pwr_req **req)
383 {
384 void *d = phl_to_drvpriv(ps->phl_info);
385 _os_list *new_req = NULL;
386 bool ret = false;
387
388 (*req) = NULL;
389 if (pq_pop(d, &(ps->req_idle_q), &new_req, _first, _bh)) {
390 (*req) = (struct pwr_req *)new_req;
391 ret = true;
392 } else {
393 ret = false;
394 }
395
396 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): remaining idle req cnt %d.\n", __func__, ps->req_idle_q.cnt);
397
398 return ret;
399 }
400
401 static void _push_idle_req(struct cmd_ps *ps, struct pwr_req *req)
402 {
403 void *d = phl_to_drvpriv(ps->phl_info);
404
405 pq_push(d, &(ps->req_idle_q), &(req->list), _tail, _bh);
406
407 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): remaining idle req cnt %d.\n", __func__, ps->req_idle_q.cnt);
408 }
409
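/*
 * Note: idle entries are recycled in FIFO order (pop head, push tail),
 * while busy requests are pushed and popped at the tail, i.e. matched in
 * LIFO order when they are cancelled.
 */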
410 static bool _pop_busy_req(struct cmd_ps *ps, struct pwr_req **req)
411 {
412 void *d = phl_to_drvpriv(ps->phl_info);
413 _os_list *new_req = NULL;
414 bool ret = false;
415
416 (*req) = NULL;
417 if (pq_pop(d, &(ps->req_busy_q), &new_req, _tail, _bh)) {
418 (*req) = (struct pwr_req *)new_req;
419 ret = true;
420 } else {
421 ret = false;
422 }
423
424 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): current busy req cnt %d.\n", __func__, ps->req_busy_q.cnt);
425
426 return ret;
427 }
428
429 static void _push_busy_req(struct cmd_ps *ps, struct pwr_req *req)
430 {
431 void *d = phl_to_drvpriv(ps->phl_info);
432
433 pq_push(d, &(ps->req_busy_q), &(req->list), _tail, _bh);
434
435 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): current busy req cnt %d.\n", __func__, ps->req_busy_q.cnt);
436 }
437
438 static void _cancel_pwr_req(struct cmd_ps *ps, u16 evt_id)
439 {
440 struct pwr_req *req = NULL;
441
442 if (!_pop_busy_req(ps, &req)) {
443 PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): busy queue is empty.\n", __func__);
444 return;
445 }
446
447 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): evt_id %d\n", __func__, req->evt_id);
448
449 if (req->evt_id != evt_id && MSG_EVT_PHY_IDLE != evt_id)
450 PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): evt_id mismatch.\n", __func__);
451
452 req->evt_id = MSG_EVT_NONE;
453
454 _push_idle_req(ps, req);
455 }
456
457 static enum rtw_phl_status _add_pwr_req(struct cmd_ps *ps, u16 evt_id)
458 {
459 enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
460 struct pwr_req *req = NULL;
461
462 if (!_pop_idle_req(ps, &req)) {
463 PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS_CMD], %s(): idle queue is empty.\n", __func__);
464 return RTW_PHL_STATUS_RESOURCE;
465 }
466
467 req->evt_id = evt_id;
468
469 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): evt_id %d\n", __func__, evt_id);
470
471 _push_busy_req(ps, req);
472
473 return status;
474 }
475
476 static void _init_pwr_req_q(struct cmd_ps *ps)
477 {
478 void *d = phl_to_drvpriv(ps->phl_info);
479 u8 i = 0;
480
481 pq_init(d, &ps->req_busy_q);
482 pq_init(d, &ps->req_idle_q);
483
484 _os_mem_set(d, ps->req_pool, 0,
485 sizeof(struct pwr_req) * MAX_PWE_REQ_NUM);
486
487 for (i = 0; i < MAX_PWE_REQ_NUM; i++) {
488 pq_push(d, &(ps->req_idle_q),
489 &(ps->req_pool[i].list), _tail, _bh);
490 }
491 }
492
493 static void _reset_pwr_req_q(struct cmd_ps *ps)
494 {
495 void *d = phl_to_drvpriv(ps->phl_info);
496 u8 i = 0;
497
498 pq_reset(d, &ps->req_busy_q, _bh);
499 pq_reset(d, &ps->req_idle_q, _bh);
500
501 _os_mem_set(d, ps->req_pool, 0,
502 sizeof(struct pwr_req) * MAX_PWE_REQ_NUM);
503
504 for (i = 0; i < MAX_PWE_REQ_NUM; i++) {
505 pq_push(d, &(ps->req_idle_q),
506 &(ps->req_pool[i].list), _tail, _bh);
507 }
508 }
509
510 static void _deinit_pwr_req_q(struct cmd_ps *ps)
511 {
512 void *d = phl_to_drvpriv(ps->phl_info);
513
514 pq_deinit(d, &ps->req_busy_q);
515 pq_deinit(d, &ps->req_idle_q);
516 }
517
518 static void _init_ps_dflt_sw_cap(struct cmd_ps *ps)
519 {
520 struct rtw_ps_cap_t *sw_cap = _get_ps_sw_cap(ps->phl_info);
521
522 sw_cap->init_rf_state = RTW_RF_ON;
523 sw_cap->init_rt_stop_rson = PS_RT_RSON_NONE;
524 sw_cap->leave_fail_act = PS_LEAVE_FAIL_ACT_NONE;
525
526 sw_cap->ips_en = PS_OP_MODE_DISABLED;
527 sw_cap->ips_cap = 0;
528 sw_cap->ips_wow_en = PS_OP_MODE_DISABLED;
529 sw_cap->ips_wow_cap = 0;
530
531 sw_cap->lps_en = PS_OP_MODE_DISABLED;
532 sw_cap->lps_cap = 0;
533 sw_cap->lps_awake_interval = 0;
534 sw_cap->lps_listen_bcn_mode = RTW_LPS_RLBM_MIN;
535 sw_cap->lps_smart_ps_mode = RTW_LPS_TRX_PWR0;
536 sw_cap->lps_rssi_enter_threshold = 40;
537 sw_cap->lps_rssi_leave_threshold = 35;
538 sw_cap->lps_rssi_diff_threshold = 5;
539
540 sw_cap->lps_wow_en = PS_OP_MODE_DISABLED;
541 sw_cap->lps_wow_cap = 0;
542 sw_cap->lps_wow_awake_interval = 0;
543 sw_cap->lps_wow_listen_bcn_mode = RTW_LPS_RLBM_MAX;
544 sw_cap->lps_wow_smart_ps_mode = RTW_LPS_TRX_PWR0;
545 }
546
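/*
 * periodic timer callback: it only posts a MSG_EVT_PS_PERIOD_CHK message to
 * the dispatch engine; the actual check and timer re-arm are done in
 * _ps_period_chk_hdlr()
 */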
547 static void _cmd_ps_timer_cb(void *context)
548 {
549 enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
550 struct cmd_ps *ps = (struct cmd_ps *)context;
551 struct phl_msg msg = {0};
552 struct phl_msg_attribute attr = {0};
553
554 PHL_DBG("[PS_CMD], %s(): \n", __func__);
555
556 SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_POWER_MGNT);
557 SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_PS_PERIOD_CHK);
558 msg.band_idx = HW_BAND_0;
559
560 pstatus = phl_disp_eng_send_msg(ps->phl_info, &msg, &attr, NULL);
561 if (pstatus != RTW_PHL_STATUS_SUCCESS) {
562 PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS_CMD], %s(): fail to send MSG_EVT_PS_PERIOD_CHK.\n", __func__);
563 }
564 }
565
566 static void _ps_common_info_init(struct phl_info_t *phl_info)
567 {
568 struct phl_ps_info *ps_info = &phl_info->ps_info;
569
570 if (!ps_info->init) {
571 PHL_INFO("[PS_CMD], %s(): \n", __func__);
572
573 _os_atomic_set(phl_to_drvpriv(phl_info),
574 &phl_info->ps_info.tx_ntfy,
575 0);
576 ps_info->init = true;
577 }
578 }
579
580 static void _ps_common_info_deinit(struct phl_info_t *phl_info)
581 {
582 struct phl_ps_info *ps_info = &phl_info->ps_info;
583
584 if (ps_info->init) {
585 PHL_INFO("[PS_CMD], %s(): \n", __func__);
586
587 _os_atomic_set(phl_to_drvpriv(phl_info),
588 &phl_info->ps_info.tx_ntfy,
589 0);
590 ps_info->init = false;
591 }
592 }
593
594 static enum phl_mdl_ret_code _ps_mdl_init(void *phl, void *dispr, void **priv)
595 {
596 struct phl_info_t *phl_info = (struct phl_info_t *)phl;
597 struct cmd_ps *ps = NULL;
598
599 PHL_INFO("[PS_CMD], %s(): \n", __func__);
600
601 if (priv == NULL)
602 return MDL_RET_FAIL;
603
604 (*priv) = NULL;
605 ps = (struct cmd_ps *)_os_mem_alloc(phl_to_drvpriv(phl_info),
606 sizeof(struct cmd_ps));
607 if (ps == NULL) {
608 PHL_ERR("[PS_CMD], %s(): allocate cmd ps fail.\n", __func__);
609 return MDL_RET_FAIL;
610 }
611
612 _os_mem_set(phl_to_drvpriv(phl_info), ps, 0, sizeof(struct cmd_ps));
613
614 ps->phl_info = phl_info;
615 ps->dispr = dispr;
616 (*priv) = (void *)ps;
617 ps->cur_pwr_lvl = PS_PWR_LVL_PWRON;
618 ps->ps_state = PS_STATE_LEAVED;
619 ps->ps_mode = PS_MODE_NONE;
620 ps->rej_pwr_req = false;
621 ps->rt_stop_rson = PS_RT_RSON_NONE;
622
623 _os_init_timer(phl_to_drvpriv(phl_info),
624 &ps->ps_timer,
625 _cmd_ps_timer_cb,
626 ps,
627 "phl_cmd_ps_timer");
628
629 _init_pwr_req_q(ps);
630
631 _init_ps_dflt_sw_cap(ps);
632
633 _ps_common_info_init(phl_info);
634
635 return MDL_RET_SUCCESS;
636 }
637
638 static void _ps_mdl_deinit(void *dispr, void *priv)
639 {
640 struct cmd_ps *ps = (struct cmd_ps *)priv;
641
642 PHL_INFO("[PS_CMD], %s(): \n", __func__);
643
644 _os_release_timer(phl_to_drvpriv(ps->phl_info), &ps->ps_timer);
645
646 _deinit_pwr_req_q(ps);
647
648 _ps_common_info_deinit(ps->phl_info);
649
650 /* ps is already dereferenced above, so a NULL check at this point was redundant */
651 _os_mem_free(phl_to_drvpriv(ps->phl_info), ps, sizeof(struct cmd_ps));
652 }
653
654 static void _dump_ps_cap(struct cmd_ps *ps)
655 {
656 struct rtw_ps_cap_t *ps_cap = NULL;
657
658 ps_cap = _get_ps_cap(ps->phl_info);
659
660 PHL_INFO("[PS_CMD], %s(): \n", __func__);
661 PHL_INFO("[PS_CMD], init_rf_state: %d\n", ps_cap->init_rf_state);
662 PHL_INFO("[PS_CMD], init_rt_stop_rson: 0x%x\n", ps_cap->init_rt_stop_rson);
663 PHL_INFO("[PS_CMD], leave_fail_act: 0x%x\n", ps_cap->leave_fail_act);
664 PHL_INFO("[PS_CMD], ips_en: %d\n", ps_cap->ips_en);
665 PHL_INFO("[PS_CMD], ips_cap: %d\n", ps_cap->ips_cap);
666 PHL_INFO("[PS_CMD], ips_wow_en: %d\n", ps_cap->ips_wow_en);
667 PHL_INFO("[PS_CMD], ips_wow_cap: %d\n", ps_cap->ips_wow_cap);
668 PHL_INFO("[PS_CMD], lps_en: %d\n", ps_cap->lps_en);
669 PHL_INFO("[PS_CMD], lps_cap: %d\n", ps_cap->lps_cap);
670 PHL_INFO("[PS_CMD], lps_awake_interval: %d\n", ps_cap->lps_awake_interval);
671 PHL_INFO("[PS_CMD], lps_listen_bcn_mode: %d\n", ps_cap->lps_listen_bcn_mode);
672 PHL_INFO("[PS_CMD], lps_smart_ps_mode: %d\n", ps_cap->lps_smart_ps_mode);
673 PHL_INFO("[PS_CMD], lps_rssi_enter_threshold: %d\n", ps_cap->lps_rssi_enter_threshold);
674 PHL_INFO("[PS_CMD], lps_rssi_leave_threshold: %d\n", ps_cap->lps_rssi_leave_threshold);
675 PHL_INFO("[PS_CMD], lps_rssi_diff_threshold: %d\n", ps_cap->lps_rssi_diff_threshold);
676 PHL_INFO("[PS_CMD], lps_wow_en: %d\n", ps_cap->lps_wow_en);
677 PHL_INFO("[PS_CMD], lps_wow_cap: %d\n", ps_cap->lps_wow_cap);
678 PHL_INFO("[PS_CMD], lps_wow_awake_interval: %d\n", ps_cap->lps_wow_awake_interval);
679 PHL_INFO("[PS_CMD], lps_wow_listen_bcn_mode: %d\n", ps_cap->lps_wow_listen_bcn_mode);
680 PHL_INFO("[PS_CMD], lps_wow_smart_ps_mode: %d\n", ps_cap->lps_wow_smart_ps_mode);
681 PHL_INFO("[PS_CMD], lps_pause_tx: %d\n", ps_cap->lps_pause_tx);
682 }
683
684 static void _leave_success_hdlr(struct cmd_ps *ps)
685 {
686 struct phl_data_ctl_t ctl = {0};
687
688 if (ps->stop_datapath) {
689 /* resume tx datapath */
690 ctl.id = PHL_MDL_POWER_MGNT;
691 ctl.cmd = PHL_DATA_CTL_SW_TX_RESUME;
692 phl_data_ctrler(ps->phl_info, &ctl, NULL);
693 ps->stop_datapath = false;
694 }
695 }
696
697 static void _leave_fail_hdlr(struct cmd_ps *ps)
698 {
699 struct rtw_ps_cap_t *ps_cap = _get_ps_cap(ps->phl_info);
700
701 PHL_WARN("[PS_CMD], %s(): action 0x%x\n", __func__, ps_cap->leave_fail_act);
702
703 /* reject all power operation */
704 if (ps_cap->leave_fail_act & PS_LEAVE_FAIL_ACT_REJ_PWR)
705 ps->rej_pwr_req = true;
706
707 /* L2 should be the last one */
708 if (ps_cap->leave_fail_act & PS_LEAVE_FAIL_ACT_L2)
709 phl_ser_send_msg(ps->phl_info, RTW_PHL_SER_L2_RESET);
710 }
711
712 static enum phl_mdl_ret_code _ps_mdl_start(void *dispr, void *priv)
713 {
714 enum phl_mdl_ret_code ret = MDL_RET_SUCCESS;
715 struct cmd_ps *ps = (struct cmd_ps *)priv;
716 struct rtw_ps_cap_t *ps_cap = _get_ps_cap(ps->phl_info);
717 u8 idx = 0;
718
719 ps->rt_stop_rson = ps_cap->init_rt_stop_rson;
720
721 _reset_pwr_req_q(ps);
722
723 phl_dispr_get_idx(dispr, &idx);
724
725 if (idx == 0) {
726 _dump_ps_cap(ps);
727 PHL_INFO("[PS_CMD], %s(): init rf state %d, reject pwr req %d\n",
728 __func__, ps_cap->init_rf_state, ps->rej_pwr_req);
729 if (ps_cap->init_rf_state == RTW_RF_OFF)
730 ps->rej_pwr_req = true;
731 if (ps->rej_pwr_req == true)
732 _enter_ps(ps, PS_MODE_IPS, PS_MACID_NONE, "mdl start with rf off");
733 _os_set_timer(phl_to_drvpriv(ps->phl_info), &ps->ps_timer, CMD_PS_TIMER_PERIOD);
734 }
735 return ret;
736 }
737
738 static enum phl_mdl_ret_code _ps_mdl_stop(void *dispr, void *priv)
739 {
740 enum phl_mdl_ret_code ret = MDL_RET_SUCCESS;
741 struct cmd_ps *ps = (struct cmd_ps *)priv;
742
743 if (ps->ps_state == PS_STATE_ENTERED) {
744 PHL_WARN("[PS_CMD], %s(): module stop in power saving!\n", __func__);
745 _leave_ps(ps, true, "mdl stop");
746 }
747
748 _os_cancel_timer(phl_to_drvpriv(ps->phl_info), &ps->ps_timer);
749
750 PHL_INFO("[PS_CMD], %s(): \n", __func__);
751
752 return ret;
753 }
754
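/* return true when every active wifi role is in MLME_NO_LINK state */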
755 static bool _chk_role_all_no_link(struct cmd_ps *ps)
756 {
757 struct phl_info_t *phl_info = ps->phl_info;
758 u8 role_idx = 0;
759 bool ret = true;
760
761 for (role_idx = 0; role_idx < MAX_WIFI_ROLE_NUMBER; role_idx++) {
762 if (phl_info->phl_com->wifi_roles[role_idx].active == false)
763 continue;
764 if (phl_info->phl_com->wifi_roles[role_idx].mstate != MLME_NO_LINK) {
765 ret = false;
766 break;
767 }
768 }
769
770 return ret;
771 }
772
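/*
 * pick an active station-type role whose link state matches the requested
 * mode: MLME_LINKED for LPS, MLME_NO_LINK for IPS; returns NULL if none fits
 */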
773 static struct rtw_wifi_role_t *_get_role_of_ps_permitted(struct cmd_ps *ps, u8 target_mode)
774 {
775 struct phl_info_t *phl_info = ps->phl_info;
776 struct rtw_phl_com_t *phl_com = phl_info->phl_com;
777 u8 role_idx = 0;
778
779 for (role_idx = 0; role_idx < MAX_WIFI_ROLE_NUMBER; role_idx++) {
780 if (phl_com->wifi_roles[role_idx].active == false)
781 continue;
782 if (phl_com->wifi_roles[role_idx].type == PHL_RTYPE_STATION) {
783 if (target_mode == PS_MODE_LPS &&
784 phl_com->wifi_roles[role_idx].mstate == MLME_LINKED)
785 return &(phl_com->wifi_roles[role_idx]);
786 else if (target_mode == PS_MODE_IPS &&
787 phl_com->wifi_roles[role_idx].mstate == MLME_NO_LINK)
788 return &(phl_com->wifi_roles[role_idx]);
789 }
790 }
791
792 return NULL;
793 }
794
795 /**
796 * go through all wifi roles and check whether the
797 * desired ps mode is allowed given the existing wroles
798 * @ps: see cmd_ps
799 * @target_mode: the desired ps mode (lps or ips)
800 * @macid: target macid to enter lps
801 */
802 static bool _chk_wrole_with_ps_mode(struct cmd_ps *ps,
803 u8 target_mode, u16 *macid)
804 {
805 struct rtw_wifi_role_t *role = NULL;
806 bool ret = false;
807 struct rtw_phl_stainfo_t *sta = NULL;
808
809 if (ps->mr_info.ap_active || ps->mr_info.gc_active) {
810 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): ap %d or gc %d is active.\n", __func__,
811 ps->mr_info.ap_active, ps->mr_info.gc_active);
812 return false;
813 }
814
815 if (target_mode == PS_MODE_IPS) {
816 if (_chk_role_all_no_link(ps)) {
817 role = _get_role_of_ps_permitted(ps, PS_MODE_IPS);
818 if (role == NULL) {
819 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): there is no suitable role to enter ips.\n", __func__);
820 return false;
821 }
822 sta = rtw_phl_get_stainfo_self(ps->phl_info, role);
823 if (sta == NULL) {
824 PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): cannot get sta info.\n", __func__);
825 return false;
826 }
827 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): role id to enter ips (%d).\n", __func__, role->id);
828 ps->sta = sta;
829 *macid = sta->macid;
830 ret = true;
831 }
832 } else if (target_mode == PS_MODE_LPS) {
833 role = _get_role_of_ps_permitted(ps, PS_MODE_LPS);
834 if (role == NULL) {
835 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): there is no suitable role to enter lps.\n", __func__);
836 return false;
837 }
838 sta = rtw_phl_get_stainfo_self(ps->phl_info, role);
839 if (sta == NULL) {
840 PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): cannot get sta info.\n", __func__);
841 return false;
842 }
843 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): role id to enter lps (%d).\n", __func__, role->id);
844 ps->sta = sta;
845 *macid = sta->macid;
846 ret = true;
847 }
848
849 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): decide enter ps(%s), target mode(%s).\n",
850 __func__, (ret) ? "Yes" : "No", phl_ps_ps_mode_to_str(target_mode));
851
852 return ret;
853 }
854
855 static enum rtw_phl_status _stop_datapath(struct cmd_ps *ps)
856 {
857 struct phl_data_ctl_t ctl = {0};
858
859 /* stop tx datapath */
860 ctl.id = PHL_MDL_POWER_MGNT;
861 ctl.cmd = PHL_DATA_CTL_SW_TX_PAUSE;
862
863 if (phl_data_ctrler(ps->phl_info, &ctl, NULL) == RTW_PHL_STATUS_SUCCESS) {
864 ps->stop_datapath = true;
865 return RTW_PHL_STATUS_SUCCESS;
866 }
867
868 return RTW_PHL_STATUS_FAILURE;
869 }
870
871 static bool _is_datapath_active(struct cmd_ps *ps)
872 {
873 struct phl_info_t *phl_info = ps->phl_info;
874
875 return (_os_atomic_read(phl_to_drvpriv(phl_info), &phl_info->phl_sw_tx_sts)
876 != PHL_TX_STATUS_SW_PAUSE) ? true : false;
877 }
878
879 /**
880 * check the current power saving capability
881 * return whether ps can be entered or not
882 * @ps: see cmd_ps
883 * @mode: the target ps mode to be checked
884 */
885 static bool _chk_ps_cap(struct cmd_ps *ps, u8 mode)
886 {
887 struct rtw_ps_cap_t *ps_cap = _get_ps_cap(ps->phl_info);
888
889 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): mode(%s), lps_en(%d), ips_en(%d), runtime stop reason(0x%x).\n",
890 __func__, phl_ps_ps_mode_to_str(mode), ps_cap->lps_en, ps_cap->ips_en, ps->rt_stop_rson);
891
892 switch (mode) {
893 case PS_MODE_LPS:
894 if (ps_cap->lps_en == PS_OP_MODE_DISABLED) {
895 return false;
896 } else if (ps_cap->lps_en == PS_OP_MODE_FORCE_ENABLED) {
897 /* force enable */
898 return true;
899 } else if (ps_cap->lps_en == PS_OP_MODE_AUTO) {
900 if (ps->rt_stop_rson == PS_RT_RSON_NONE)
901 return true;
902 }
903 break;
904 case PS_MODE_IPS:
905 if (ps_cap->ips_en == PS_OP_MODE_DISABLED) {
906 return false;
907 } else if (ps_cap->ips_en == PS_OP_MODE_FORCE_ENABLED) {
908 /* force enable */
909 return true;
910 } else if (ps_cap->ips_en == PS_OP_MODE_AUTO) {
911 if (ps->rt_stop_rson == PS_RT_RSON_NONE)
912 return true;
913 }
914 break;
915 default:
916 PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): unknown ps mode.\n", __func__);
917 return false;
918 }
919
920 return false;
921 }
922
923 /**
924 * check the condition of ips
925 * return whether to enter ips or not
926 * @ps: see cmd_ps
927 */
928 static bool _chk_ips_enter(struct cmd_ps *ps, u16 *macid)
929 {
930 if (TEST_STATUS_FLAG(ps->phl_info->phl_com->dev_state, RTW_DEV_RESUMING)) {
931 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): resume in progress.\n", __func__);
932 return false;
933 }
934
935 if (ps->rej_pwr_req == true) {
936 PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): reject pwr req.\n", __func__);
937 return false;
938 }
939
940 if (!_chk_ps_cap(ps, PS_MODE_IPS)) {
941 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): ips is not allowed.\n", __func__);
942 return false;
943 }
944
945 if (!phl_disp_eng_is_fg_empty(ps->phl_info, HW_BAND_0)) {
946 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): fg exist.\n", __func__);
947 return false;
948 }
949
950 if (ps->req_busy_q.cnt) {
951 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): req q is not empty.\n", __func__);
952 return false;
953 }
954
955 if (ps->btc_req_pwr) {
956 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): btc req pwr.\n", __func__);
957 return false;
958 }
959
960 if (!_chk_wrole_with_ps_mode(ps, PS_MODE_IPS, macid)) {
961 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): no need to enter ips.\n", __func__);
962 return false;
963 }
964
965 return true;
966 }
967
968 /**
969 * check the condition of lps
970 * return whether to enter lps or not
971 * @ps: see cmd_ps
972 */
973 static bool _chk_lps_enter(struct cmd_ps *ps, u16 *macid)
974 {
975 struct rtw_ps_cap_t *ps_cap = _get_ps_cap(ps->phl_info);
976
977 /* check enter lps or not */
978
979 if (TEST_STATUS_FLAG(ps->phl_info->phl_com->dev_state, RTW_DEV_RESUMING)) {
980 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): resume in progress.\n", __func__);
981 return false;
982 }
983
984 if (ps->rej_pwr_req == true) {
985 PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): reject pwr req.\n", __func__);
986 return false;
987 }
988
989 /* check capability */
990 if (!_chk_ps_cap(ps, PS_MODE_LPS)) {
991 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): lps is not allowed.\n", __func__);
992 return false;
993 }
994
995 /* check fg module */
996 if (!phl_disp_eng_is_fg_empty(ps->phl_info, HW_BAND_0)) {
997 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): fg exist.\n", __func__);
998 return false;
999 }
1000
1001 /* ref cnt */
1002 if (ps->req_busy_q.cnt) {
1003 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): req q is not empty.\n", __func__);
1004 return false;
1005 }
1006
1007 /* btc */
1008 if (ps->btc_req_pwr) {
1009 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): btc req pwr.\n", __func__);
1010 return false;
1011 }
1012
1013 if (ps->wdg_leave_ps == true) {
1014 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): just left ps in watchdog pre-phase.\n", __func__);
1015 return false;
1016 }
1017
1018 /* check wifi role */
1019 if (!_chk_wrole_with_ps_mode(ps, PS_MODE_LPS, macid)) {
1020 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): no need to enter lps.\n", __func__);
1021 return false;
1022 }
1023
1024 /* lps */
1025 if (_lps_state_judge_changed(ps, *macid, ps->ps_state, PS_STATE_ENTERED)) {
1026 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): lps state changed.\n", __func__);
1027 /* check data path stop or not */
1028 if (ps_cap->lps_pause_tx) {
1029 if (!_is_datapath_active(ps)) {
1030 return true;
1031 } else {
1032 if (_stop_datapath(ps) == RTW_PHL_STATUS_SUCCESS)
1033 return true;
1034 }
1035 } else {
1036 return true;
1037 }
1038 }
1039
1040 return false;
1041 }
1042
1043 static void _ps_watchdog_info_dump(struct cmd_ps *ps)
1044 {
1045 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "========== CMD PS Info ========== \n");
1046 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "ps mode(%s), pwr lvl(%s), ps state(%s)\n",
1047 phl_ps_ps_mode_to_str(ps->ps_mode),
1048 phl_ps_pwr_lvl_to_str(ps->cur_pwr_lvl),
1049 _ps_state_to_str(ps->ps_state));
1050 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "req idle cnt(%d), req busy cnt(%d)\n",
1051 ps->req_idle_q.cnt, ps->req_busy_q.cnt);
1052 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "ap active(%s), gc active(%s)\n",
1053 (ps->mr_info.ap_active ? "yes" : "no"), (ps->mr_info.gc_active ? "yes" : "no"));
1054 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "reject all pwr req(%s), btc req pwr(%s), runtime stop reason(0x%x)\n",
1055 (ps->rej_pwr_req ? "yes" : "no"), (ps->btc_req_pwr ? "yes" : "no"), ps->rt_stop_rson);
1056 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "========== CMD PS Info ========== \n");
1057 }
1058
1059 /**
1060 * pre-phase handler of watchdog
1061 * will check whether to leave lps or not
1062 * @ps: see cmd_ps
1063 */
1064 static enum phl_mdl_ret_code
1065 _ps_watchdog_pre_hdlr(struct cmd_ps *ps)
1066 {
1067 /* check leave lps or not */
1068
1069 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): pwr lvl(%s).\n", __func__, phl_ps_pwr_lvl_to_str(ps->cur_pwr_lvl));
1070
1071 ps->wdg_leave_ps = false;
1072
1073 if (ps->rej_pwr_req == true) {
1074 PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): reject pwr req.\n", __func__);
1075 return MDL_RET_CANNOT_IO;
1076 }
1077
1078 if (ps->ps_mode == PS_MODE_IPS) {
1079 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): power off.\n", __func__);
1080 return MDL_RET_CANNOT_IO;
1081 }
1082
1083 if (ps->ps_state == PS_STATE_LEAVED) {
1084 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): not in power saving.\n", __func__);
1085 return MDL_RET_SUCCESS;
1086 } else {
1087 if (_lps_state_judge_changed(ps, ps->macid, ps->ps_state, PS_STATE_LEAVED)) {
1088 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): lps state changed.\n", __func__);
1089 if (_leave_ps(ps, true, "watchdog") == RTW_PHL_STATUS_SUCCESS) {
1090 ps->wdg_leave_ps = true;
1091 _leave_success_hdlr(ps);
1092 return MDL_RET_SUCCESS;
1093 } else {
1094 PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS_CMD], %s(): going to L2 reset!\n", __func__);
1095 _leave_fail_hdlr(ps);
1096 return MDL_RET_CANNOT_IO;
1097 }
1098 }
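/* still in ps: protocol-only lps keeps the power level on, so I/O is allowed */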
1099 if (ps->ps_state == PS_STATE_PROTO)
1100 return MDL_RET_SUCCESS;
1101 else
1102 return MDL_RET_CANNOT_IO;
1103 }
1104 }
1105
1106 /**
1107 * post-phase handler of watchdog
1108 * will check whether to enter lps or not
1109 * @ps: see cmd_ps
1110 */
1111 static enum phl_mdl_ret_code
1112 _ps_watchdog_post_hdlr(struct cmd_ps *ps)
1113 {
1114 u16 macid = PS_MACID_NONE;
1115
1116 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): pwr lvl(%s).\n", __func__, phl_ps_pwr_lvl_to_str(ps->cur_pwr_lvl));
1117
1118 if (_chk_lps_enter(ps, &macid))
1119 _enter_ps(ps, PS_MODE_LPS, macid, "watchdog");
1120 else if (_chk_ips_enter(ps, &macid))
1121 _enter_ps(ps, PS_MODE_IPS, macid, "watchdog");
1122
1123 _ps_watchdog_info_dump(ps);
1124
1125 return MDL_RET_SUCCESS;
1126 }
1127
1128 static enum phl_mdl_ret_code _phy_on_msg_hdlr(struct cmd_ps *ps, struct phl_msg *msg)
1129 {
1130 enum phl_mdl_ret_code ret = MDL_RET_CANNOT_IO;
1131
1132 if (ps->rej_pwr_req == true) {
1133 PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): reject pwr req.\n", __func__);
1134 return MDL_RET_CANNOT_IO;
1135 }
1136
1137 if (IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
1138 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], MSG_EVT_PHY_ON\n");
1139 if (_leave_ps(ps, true, "phy on") == RTW_PHL_STATUS_SUCCESS) {
1140 _add_pwr_req(ps, MSG_EVT_ID_FIELD(msg->msg_id));
1141 _leave_success_hdlr(ps);
1142 ret = MDL_RET_SUCCESS;
1143 } else {
1144 _leave_fail_hdlr(ps);
1145 }
1146 } else {
1147 ret = MDL_RET_SUCCESS;
1148 }
1149
1150 return ret;
1151 }
1152
1153 static enum phl_mdl_ret_code _tx_pwr_req_msg_hdlr(struct cmd_ps *ps, struct phl_msg *msg)
1154 {
1155 enum phl_mdl_ret_code ret = MDL_RET_CANNOT_IO;
1156
1157 if (ps->rej_pwr_req == true) {
1158 PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): reject pwr req.\n", __func__);
1159 return MDL_RET_CANNOT_IO;
1160 }
1161
1162 if (IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
1163 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], MSG_EVT_TRX_PWR_REQ\n");
1164 if (_leave_ps(ps, false, "tx req") == RTW_PHL_STATUS_SUCCESS) {
1165 _leave_success_hdlr(ps);
1166 ret = MDL_RET_SUCCESS;
1167 } else {
1168 _leave_fail_hdlr(ps);
1169 }
1170 } else {
1171 ret = MDL_RET_SUCCESS;
1172 }
1173
1174 return ret;
1175 }
1176
1177 static enum phl_mdl_ret_code _phy_idle_msg_hdlr(struct cmd_ps *ps, struct phl_msg *msg)
1178 {
1179 enum phl_mdl_ret_code ret = MDL_RET_CANNOT_IO;
1180 u16 macid = PS_MACID_NONE;
1181
1182 if (!IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
1183 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], MSG_EVT_PHY_IDLE\n");
1184 _cancel_pwr_req(ps, MSG_EVT_ID_FIELD(msg->msg_id));
1185 /* check enter ips or not */
1186 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): try enter ips.\n", __func__);
1187 if (_chk_ips_enter(ps, &macid)) {
1188 _enter_ps(ps, PS_MODE_IPS, macid, "phy idle");
1189 }
1190 ret = MDL_RET_SUCCESS;
1191 } else {
1192 ret = MDL_RET_SUCCESS;
1193 }
1194
1195 return ret;
1196 }
1197
1198 /**
1199 * pre-phase handler of msg
1200 * leave ps and return corresponding status
1201 * @ps: see cmd_ps
1202 * @evt_id: evt id of msg
1203 */
1204 static enum phl_mdl_ret_code
1205 _ext_msg_pre_hdlr(struct cmd_ps *ps, u16 evt_id)
1206 {
1207 if (ps->rej_pwr_req == true) {
1208 PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): reject pwr req.\n", __func__);
1209 return MDL_RET_CANNOT_IO;
1210 }
1211
1212 /* power request */
1213 if (_leave_ps(ps, true, "ext msg req") == RTW_PHL_STATUS_SUCCESS) {
1214 _add_pwr_req(ps, evt_id);
1215 _leave_success_hdlr(ps);
1216 return MDL_RET_SUCCESS;
1217 } else {
1218 _leave_fail_hdlr(ps);
1219 }
1220
1221 return MDL_RET_CANNOT_IO;
1222 }
1223
1224 /**
1225 * post-phase handler of msg
1226 * cancel power req and chk enter ips or not
1227 * @ps: see cmd_ps
1228 * @evt_id: evt id of msg
1229 */
1230 static enum phl_mdl_ret_code
1231 _ext_msg_post_hdlr(struct cmd_ps *ps, u16 evt_id)
1232 {
1233 u16 macid = PS_MACID_NONE;
1234
1235 /* cancel power request */
1236 _cancel_pwr_req(ps, evt_id);
1237
1238 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): try enter ips.\n", __func__);
1239 if (_chk_ips_enter(ps, &macid)) {
1240 _enter_ps(ps, PS_MODE_IPS, macid, "ext msg done");
1241 }
1242
1243 return MDL_RET_SUCCESS;
1244 }
1245
1246 static void _ps_mr_info_upt(struct cmd_ps *ps, struct rtw_wifi_role_t *role)
1247 {
1248 struct phl_info_t *phl_info = ps->phl_info;
1249 struct rtw_wifi_role_t *wr = NULL;
1250 u8 role_idx = 0;
1251
1252 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): id %d, type %d, mstate %d\n", __func__,
1253 role->id, role->type, role->mstate);
1254 #ifdef RTW_WKARD_PHL_NTFY_MEDIA_STS
1255 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): is_gc %d\n", __func__, role->is_gc);
1256 #endif
1257
1258 _os_mem_set(phl_to_drvpriv(phl_info), &ps->mr_info, 0, sizeof(struct _ps_mr_info));
1259
1260 for (role_idx = 0; role_idx < MAX_WIFI_ROLE_NUMBER; role_idx++) {
1261 wr = &(phl_info->phl_com->wifi_roles[role_idx]);
1262 if (wr->active == false)
1263 continue;
1264 #ifdef RTW_WKARD_PHL_NTFY_MEDIA_STS
1265 if (wr->type == PHL_RTYPE_STATION) {
1266 if (wr->is_gc == true && wr->mstate != MLME_NO_LINK)
1267 ps->mr_info.gc_active = true;
1268 }
1269 #endif
1270 if (wr->type == PHL_RTYPE_AP || wr->type == PHL_RTYPE_VAP ||
1271 wr->type == PHL_RTYPE_MESH || wr->type == PHL_RTYPE_P2P_GO)
1272 ps->mr_info.ap_active = (wr->mstate == MLME_NO_LINK) ? false : true;
1273 }
1274
1275 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): gc_active %d, ap_active %d\n", __func__,
1276 ps->mr_info.gc_active, ps->mr_info.ap_active);
1277 }
1278
1279 static bool _is_ignored_mrc_evt(u16 evt_id)
1280 {
1281 return false;
1282 }
1283
1284 static enum phl_mdl_ret_code
1285 _mrc_mdl_msg_hdlr(struct cmd_ps *ps, struct phl_msg *msg)
1286 {
1287 enum phl_mdl_ret_code ret = MDL_RET_CANNOT_IO;
1288 struct rtw_role_cmd *rcmd = NULL;
1289 struct rtw_wifi_role_t *role = NULL;
1290
1291 if (_is_ignored_mrc_evt(MSG_EVT_ID_FIELD(msg->msg_id)))
1292 return MDL_RET_SUCCESS;
1293
1294 switch (MSG_EVT_ID_FIELD(msg->msg_id)) {
1295 case MSG_EVT_ROLE_NTFY:
1296 if (!IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
1297 if (msg->inbuf && (msg->inlen == sizeof(struct rtw_role_cmd))) {
1298 rcmd = (struct rtw_role_cmd *)msg->inbuf;
1299 role = rcmd->wrole;
1300 _ps_mr_info_upt(ps, role);
1301 }
1302 }
1303 ret = MDL_RET_SUCCESS;
1304 break;
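/*
 * any other MRC event needs full power for I/O; report MDL_RET_CANNOT_IO
 * while the current power level is lowered
 */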
1305 default:
1306 if (ps->cur_pwr_lvl != PS_PWR_LVL_PWRON) {
1307 PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): MDL_ID(%d)-EVT_ID(%d) get cannot I/O!\n", __func__,
1308 MSG_MDL_ID_FIELD(msg->msg_id), MSG_EVT_ID_FIELD(msg->msg_id));
1309 ret = MDL_RET_CANNOT_IO;
1310 } else {
1311 ret = MDL_RET_SUCCESS;
1312 }
1313 break;
1314 }
1315
1316 return ret;
1317 }
1318
1319 static bool _is_ignored_ser_evt(u16 evt_id)
1320 {
1321 if (MSG_EVT_SER_M9_L2_RESET != evt_id)
1322 return true;
1323
1324 return false;
1325 }
1326
1327 static enum phl_mdl_ret_code
1328 _ser_mdl_msg_hdlr(struct cmd_ps *ps, struct phl_msg *msg)
1329 {
1330 enum phl_mdl_ret_code ret = MDL_RET_CANNOT_IO;
1331
1332 if (_is_ignored_ser_evt(MSG_EVT_ID_FIELD(msg->msg_id)))
1333 return MDL_RET_SUCCESS;
1334
1335 switch (MSG_EVT_ID_FIELD(msg->msg_id)) {
1336 case MSG_EVT_SER_M9_L2_RESET:
1337 ret = MDL_RET_SUCCESS;
1338 break;
1339 default:
1340 if (ps->cur_pwr_lvl != PS_PWR_LVL_PWRON) {
1341 PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): MDL_ID(%d)-EVT_ID(%d) get cannot I/O!\n",
1342 __func__,
1343 MSG_MDL_ID_FIELD(msg->msg_id),
1344 MSG_EVT_ID_FIELD(msg->msg_id));
1345 ret = MDL_RET_CANNOT_IO;
1346 } else {
1347 ret = MDL_RET_SUCCESS;
1348 }
1349 break;
1350 }
1351
1352 return ret;
1353 }
1354
1355 static bool _is_ignored_general_evt(u16 evt_id)
1356 {
1357 bool ret = false;
1358
1359 switch (evt_id) {
1360 case MSG_EVT_GET_USB_SW_ABILITY:
1361 case MSG_EVT_GET_USB_SPEED:
1362 case MSG_EVT_SW_WATCHDOG:
1363 ret = true;
1364 break;
1365 default:
1366 ret = false;
1367 break;
1368 }
1369
1370 return ret;
1371 }
1372
1373 #ifdef RTW_WKARD_LINUX_CMD_WKARD
1374 static enum phl_mdl_ret_code
1375 _linux_cmd_wkard_hdlr(struct cmd_ps *ps)
1376 {
1377 if (ps->rej_pwr_req == true) {
1378 PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): reject pwr req.\n", __func__);
1379 return MDL_RET_CANNOT_IO;
1380 }
1381
1382 /* power request */
1383 if (_leave_ps(ps, true, "linux cmd wkard") == RTW_PHL_STATUS_SUCCESS) {
1384 _leave_success_hdlr(ps);
1385 return MDL_RET_SUCCESS;
1386 } else {
1387 _leave_fail_hdlr(ps);
1388 }
1389
1390 return MDL_RET_CANNOT_IO;
1391 }
1392 #endif /* RTW_WKARD_LINUX_CMD_WKARD */
1393
1394 static enum phl_mdl_ret_code
1395 _general_mdl_msg_hdlr(struct cmd_ps *ps, struct phl_msg *msg)
1396 {
1397 enum phl_mdl_ret_code ret = MDL_RET_CANNOT_IO;
1398
1399 if (_is_ignored_general_evt(MSG_EVT_ID_FIELD(msg->msg_id)))
1400 return MDL_RET_SUCCESS;
1401
1402 switch (MSG_EVT_ID_FIELD(msg->msg_id)) {
1403 case MSG_EVT_CHG_OP_CH_DEF_START:
1404 case MSG_EVT_CHG_OP_CH_DEF_END:
1405 case MSG_EVT_SWCH_START:
1406 case MSG_EVT_PCIE_TRX_MIT:
1407 case MSG_EVT_FORCE_USB_SW:
1408 case MSG_EVT_GET_USB_SPEED:
1409 case MSG_EVT_GET_USB_SW_ABILITY:
1410 case MSG_EVT_CFG_AMPDU:
1411 case MSG_EVT_DFS_PAUSE_TX:
1412 case MSG_EVT_ROLE_RECOVER:
1413 case MSG_EVT_ROLE_SUSPEND:
1414 case MSG_EVT_HAL_SET_L2_LEAVE:
1415 case MSG_EVT_NOTIFY_HAL:
1416 case MSG_EVT_ISSUE_BCN:
1417 case MSG_EVT_STOP_BCN:
1418 case MSG_EVT_SEC_KEY:
1419 case MSG_EVT_ROLE_START:
1420 case MSG_EVT_ROLE_CHANGE:
1421 case MSG_EVT_ROLE_STOP:
1422 case MSG_EVT_STA_INFO_CTRL:
1423 case MSG_EVT_STA_MEDIA_STATUS_UPT:
1424 case MSG_EVT_CFG_CHINFO:
1425 case MSG_EVT_STA_CHG_STAINFO:
1426 #ifdef RTW_WKARD_LINUX_CMD_WKARD
1427 case MSG_EVT_LINUX_CMD_WRK_TRI_PS:
1428 #endif /* RTW_WKARD_LINUX_CMD_WKARD */
1429 case MSG_EVT_DBG_RX_DUMP:
1430 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): MDL_ID(%d)-EVT_ID(%d) in %s phase.\n", __func__,
1431 MSG_MDL_ID_FIELD(msg->msg_id), MSG_EVT_ID_FIELD(msg->msg_id),
1432 (IS_MSG_IN_PRE_PHASE(msg->msg_id) ? "pre-protocol" : "post-protocol"));
1433 if (IS_MSG_IN_PRE_PHASE(msg->msg_id))
1434 ret = _ext_msg_pre_hdlr(ps, MSG_EVT_ID_FIELD(msg->msg_id));
1435 else
1436 ret = _ext_msg_post_hdlr(ps, MSG_EVT_ID_FIELD(msg->msg_id));
1437 break;
1438 #ifdef RTW_WKARD_LINUX_CMD_WKARD
1439 case MSG_EVT_LINUX_CMD_WRK:
1440 if (IS_MSG_IN_PRE_PHASE(msg->msg_id))
1441 ret = _linux_cmd_wkard_hdlr(ps);
1442 else
1443 ret = MDL_RET_SUCCESS;
1444 break;
1445 #endif /* RTW_WKARD_LINUX_CMD_WKARD */
1446 case MSG_EVT_HW_WATCHDOG:
1447 if (IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
1448 ret = _ps_watchdog_pre_hdlr(ps);
1449 if (ret == MDL_RET_SUCCESS) {
1450 ps->rssi_bcn_min = phl_get_min_rssi_bcn(ps->phl_info);
1451 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): update rssi_bcn_min to %d\n", __func__, ps->rssi_bcn_min);
1452 }
1453 } else {
1454 ret = _ps_watchdog_post_hdlr(ps);
1455 }
1456 break;
1457 case MSG_EVT_RF_ON:
1458 if (!IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
1459 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], MSG_EVT_RF_ON\n");
1460 ps->rej_pwr_req = false;
1461 }
1462 ret = MDL_RET_SUCCESS;
1463 break;
1464 case MSG_EVT_RF_OFF:
1465 if (!IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
1466 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], MSG_EVT_RF_OFF\n");
1467 ps->rej_pwr_req = true;
1468 _enter_ps(ps, PS_MODE_IPS, PS_MACID_NONE, "msg rf off");
1469 }
1470 ret = MDL_RET_SUCCESS;
1471 break;
1472 case MSG_EVT_PHY_ON:
1473 ret = _phy_on_msg_hdlr(ps, msg);
1474 break;
1475 case MSG_EVT_PHY_IDLE:
1476 ret = _phy_idle_msg_hdlr(ps, msg);
1477 break;
1478 default:
1479 if (ps->cur_pwr_lvl != PS_PWR_LVL_PWRON) {
1480 PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): MDL_ID(%d)-EVT_ID(%d) get cannot I/O!\n",
1481 __func__,
1482 MSG_MDL_ID_FIELD(msg->msg_id),
1483 MSG_EVT_ID_FIELD(msg->msg_id));
1484 ret = MDL_RET_CANNOT_IO;
1485 } else {
1486 ret = MDL_RET_SUCCESS;
1487 }
1488 break;
1489 }
1490
1491 return ret;
1492 }
1493
1494 static bool _is_ignored_datapath_evt(u16 evt_id)
1495 {
1496 return false;
1497 }
1498
1499 static enum phl_mdl_ret_code
1500 _datapath_mdl_msg_hdlr(struct cmd_ps *ps, struct phl_msg *msg)
1501 {
1502 enum phl_mdl_ret_code ret = MDL_RET_CANNOT_IO;
1503
1504 if (_is_ignored_datapath_evt(MSG_EVT_ID_FIELD(msg->msg_id)))
1505 return MDL_RET_SUCCESS;
1506
1507 switch (MSG_EVT_ID_FIELD(msg->msg_id)) {
1508 case MSG_EVT_TRX_PWR_REQ:
1509 ret = _tx_pwr_req_msg_hdlr(ps, msg);
1510 break;
1511 default:
1512 if (ps->cur_pwr_lvl != PS_PWR_LVL_PWRON) {
1513 PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): MDL_ID(%d)-EVT_ID(%d) get cannot I/O!\n",
1514 __func__,
1515 MSG_MDL_ID_FIELD(msg->msg_id),
1516 MSG_EVT_ID_FIELD(msg->msg_id));
1517 ret = MDL_RET_CANNOT_IO;
1518 } else {
1519 ret = MDL_RET_SUCCESS;
1520 }
1521 break;
1522 }
1523
1524 return ret;
1525 }
1526
1527 /**
1528 * bypass msg of specific module
1529 * @msg: see phl_msg
1530 */
1531 static bool _is_ignored_mdl(struct phl_msg *msg)
1532 {
1533 if (MSG_MDL_ID_FIELD(msg->msg_id) == PHL_MDL_BTC)
1534 return true;
1535 if (MSG_MDL_ID_FIELD(msg->msg_id) == PHL_MDL_LED)
1536 return true;
1537
1538 return false;
1539 }
1540
1541 static enum phl_mdl_ret_code
1542 _ps_mdl_hdl_external_evt(void *dispr, struct cmd_ps *ps, struct phl_msg *msg)
1543 {
1544 enum phl_mdl_ret_code ret = MDL_RET_CANNOT_IO;
1545
1546 if (_is_ignored_mdl(msg)) {
1547 /* PHL_INFO("[PS_CMD], %s(): ignore MDL_ID(%d)-EVT_ID(%d).\n", __func__,
1548 MSG_MDL_ID_FIELD(msg->msg_id), MSG_EVT_ID_FIELD(msg->msg_id)); */
1549 return MDL_RET_SUCCESS;
1550 }
1551
1552 switch (MSG_MDL_ID_FIELD(msg->msg_id)) {
1553 case PHL_MDL_GENERAL:
1554 ret = _general_mdl_msg_hdlr(ps, msg);
1555 break;
1556 case PHL_MDL_MRC:
1557 ret = _mrc_mdl_msg_hdlr(ps, msg);
1558 break;
1559 case PHL_MDL_TX:
1560 case PHL_MDL_RX:
1561 ret = _datapath_mdl_msg_hdlr(ps, msg);
1562 break;
1563 case PHL_MDL_SER:
1564 ret = _ser_mdl_msg_hdlr(ps, msg);
1565 break;
1566 /* handle other mdls here */
1567 default:
1568 if (ps->cur_pwr_lvl != PS_PWR_LVL_PWRON) {
1569 PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): MDL_ID(%d)-EVT_ID(%d) get cannot I/O!\n", __func__,
1570 MSG_MDL_ID_FIELD(msg->msg_id), MSG_EVT_ID_FIELD(msg->msg_id));
1571 ret = MDL_RET_CANNOT_IO;
1572 } else {
1573 ret = MDL_RET_SUCCESS;
1574 }
1575 break;
1576 }
1577
1578 return ret;
1579 }
1580
1581 static enum phl_mdl_ret_code _ps_cap_chg_msg_hdlr(struct cmd_ps *ps, struct phl_msg *msg)
1582 {
1583 enum phl_ps_rt_rson rt_rson = PS_RT_RSON_NONE;
1584 bool ps_allow = false;
1585 struct rt_ps *rt_ps_info = NULL;
1586 u16 macid = PS_MACID_NONE;
1587
1588 if (IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
1589 rt_ps_info = (struct rt_ps *)msg->inbuf;
1590 ps_allow = rt_ps_info->ps_allow;
1591 rt_rson = rt_ps_info->rt_rson;
1592 PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): update -> ps_allow(%d), reason(%d).\n", __func__, ps_allow, rt_rson);
1593
1594 if (ps_allow) {
1595 if (TEST_STATUS_FLAG(ps->rt_stop_rson, rt_rson) == true)
1596 CLEAR_STATUS_FLAG(ps->rt_stop_rson, rt_rson);
1597 else
1598 return MDL_RET_SUCCESS;
1599 } else {
1600 if (TEST_STATUS_FLAG(ps->rt_stop_rson, rt_rson) == false)
1601 SET_STATUS_FLAG(ps->rt_stop_rson, rt_rson);
1602 else
1603 return MDL_RET_SUCCESS;
1604 }
1605
1606 if (ps->rej_pwr_req == true) {
1607 PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): reject pwr req.\n", __func__);
1608 return MDL_RET_CANNOT_IO;
1609 }
1610
1611 if (ps->rt_stop_rson != PS_RT_RSON_NONE) {
1612 if (_leave_ps(ps, true, "cap chg") == RTW_PHL_STATUS_SUCCESS) {
1613 _leave_success_hdlr(ps);
1614 return MDL_RET_SUCCESS;
1615 } else {
1616 _leave_fail_hdlr(ps);
1617 return MDL_RET_CANNOT_IO;
1618 }
1619 }
1620 } else {
1621 if (ps->rt_stop_rson == PS_RT_RSON_NONE) {
1622 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): try enter ips.\n", __func__);
1623 if (_chk_ips_enter(ps, &macid)) {
1624 _enter_ps(ps, PS_MODE_IPS, macid, "cap chg");
1625 }
1626 }
1627 }
1628
1629 return MDL_RET_SUCCESS;
1630 }
1631
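/*
 * a tx packet is pending: notify the HAL so the device wakes up
 * (rpwm with tx req) to service it
 */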
1632 static enum phl_mdl_ret_code _tx_pkt_ntfy_msg_hdlr(struct cmd_ps *ps, struct phl_msg *msg)
1633 {
1634 if (!IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
1635 PHL_DBG("[PS_CMD], %s(): rpwm with tx req.\n", __func__);
1636 /* rpwm with tx req */
1637 rtw_hal_ps_notify_wake(ps->phl_info->hal);
1638 }
1639
1640 return MDL_RET_SUCCESS;
1641 }
1642
1643 static enum phl_mdl_ret_code _ps_period_chk_hdlr(struct cmd_ps *ps, struct phl_msg *msg)
1644 {
1645 u16 macid = PS_MACID_NONE;
1646
1647 if (IS_MSG_IN_PRE_PHASE(msg->msg_id))
1648 return MDL_RET_SUCCESS;
1649
1650 ps->wdg_leave_ps = false;
1651
1652 if (ps->ps_state != PS_STATE_ENTERED) {
1653
1654 PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): \n", __func__);
1655 if (_chk_lps_enter(ps, &macid))
1656 _enter_ps(ps, PS_MODE_LPS, macid, "period chk");
1657 }
1658
1659 _os_set_timer(phl_to_drvpriv(ps->phl_info), &ps->ps_timer, CMD_PS_TIMER_PERIOD);
1660
1661 return MDL_RET_SUCCESS;
1662 }
1663
1664 static enum phl_mdl_ret_code _ps_dbg_lps_msg_hdlr(struct cmd_ps *ps, struct phl_msg *msg)
1665 {
1666 u16 macid = PS_MACID_NONE;
1667 u16 evt_id = MSG_EVT_ID_FIELD(msg->msg_id);
1668
1669 if (evt_id == MSG_EVT_PS_DBG_LPS_ENTER) {
1670 if (!IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
1671 if (!_chk_wrole_with_ps_mode(ps, PS_MODE_LPS, &macid))
1672 return MDL_RET_SUCCESS;
1673 _enter_ps(ps, PS_MODE_LPS, macid, "dbg lps enter");
1674 ps->rej_pwr_req = true;
1675 }
1676 } else { /* MSG_EVT_PS_DBG_LPS_LEAVE */
1677 if (IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
1678 _leave_ps(ps, true, "dbg lps leave");
1679 ps->rej_pwr_req = false;
1680 }
1681 }
1682
1683 return MDL_RET_SUCCESS;
1684 }
1685
1686 static enum phl_mdl_ret_code _ps_dbg_ips_msg_hdlr(struct cmd_ps *ps, struct phl_msg *msg)
1687 {
1688 /* fw does not support ips currently */
1689 return MDL_RET_SUCCESS;
1690 }
1691
1692 static enum phl_mdl_ret_code
1693 _ps_mdl_hdl_internal_evt(void *dispr, struct cmd_ps *ps, struct phl_msg *msg)
1694 {
1695 enum phl_mdl_ret_code ret = MDL_RET_CANNOT_IO;
1696
1697 switch (MSG_EVT_ID_FIELD(msg->msg_id)) {
1698 case MSG_EVT_PHY_ON:
1699 ret = _phy_on_msg_hdlr(ps, msg);
1700 break;
1701 case MSG_EVT_PHY_IDLE:
1702 ret = _phy_idle_msg_hdlr(ps, msg);
1703 break;
1704 case MSG_EVT_PS_CAP_CHG:
1705 ret = _ps_cap_chg_msg_hdlr(ps, msg);
1706 break;
1707 case MSG_EVT_PS_PERIOD_CHK:
1708 ret = _ps_period_chk_hdlr(ps, msg);
1709 break;
1710 case MSG_EVT_PS_DBG_LPS_ENTER:
1711 case MSG_EVT_PS_DBG_LPS_LEAVE:
1712 ret = _ps_dbg_lps_msg_hdlr(ps, msg);
1713 break;
1714 case MSG_EVT_PS_DBG_IPS_ENTER:
1715 case MSG_EVT_PS_DBG_IPS_LEAVE:
1716 ret = _ps_dbg_ips_msg_hdlr(ps, msg);
1717 break;
1718 case MSG_EVT_TX_PKT_NTFY:
1719 ret = _tx_pkt_ntfy_msg_hdlr(ps, msg);
1720 break;
1721 default:
1722 ret = MDL_RET_CANNOT_IO;
1723 break;
1724 }
1725
1726 return ret;
1727 }
1728
static enum phl_mdl_ret_code
_ps_mdl_msg_hdlr(void *dispr, void *priv, struct phl_msg *msg)
{
	enum phl_mdl_ret_code ret = MDL_RET_IGNORE;
	struct cmd_ps *ps = (struct cmd_ps *)priv;

	if (IS_MSG_FAIL(msg->msg_id)) {
		PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): MDL_ID(%d)-EVT_ID(%d) fail.\n", __func__,
			  MSG_MDL_ID_FIELD(msg->msg_id), MSG_EVT_ID_FIELD(msg->msg_id));
		return MDL_RET_SUCCESS;
	}

	switch (MSG_MDL_ID_FIELD(msg->msg_id)) {
	case PHL_MDL_POWER_MGNT:
		ret = _ps_mdl_hdl_internal_evt(dispr, ps, msg);
		break;
	default:
		ret = _ps_mdl_hdl_external_evt(dispr, ps, msg);
		break;
	}

	return ret;
}

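/**
 * set_info callback of the ps background module:
 * PS_MDL_OP_CANCEL_PWR_REQ - cancel a pending pwr request by evt_id
 * PS_MDL_OP_BTC_PWR_REQ    - record whether btc requests pwr
 * @dispr: the dispatcher delivering the op
 * @priv: module private data, i.e. struct cmd_ps
 * @info: see phl_module_op_info
 */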
static enum phl_mdl_ret_code
_ps_mdl_set_info(void *dispr, void *priv, struct phl_module_op_info *info)
{
	struct cmd_ps *ps = (struct cmd_ps *)priv;

	switch (info->op_code) {
	case PS_MDL_OP_CANCEL_PWR_REQ:
		PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): cancel pwr req, evt_id %d\n", __func__, *(u16 *)info->inbuf);
		if (ps->rej_pwr_req == false)
			_cancel_pwr_req(ps, *(u16 *)info->inbuf);
		break;
	case PS_MDL_OP_BTC_PWR_REQ:
		PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): btc req pwr %d\n", __func__, *(bool *)info->inbuf);
		ps->btc_req_pwr = *(bool *)info->inbuf;
		break;
	}

	return MDL_RET_SUCCESS;
}

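/**
 * query_info callback of the ps background module:
 * PS_MDL_OP_CUR_PWR_LVL - copy the current pwr level to info->inbuf
 * PS_MDL_OP_BASIC_INFO  - fill a struct phl_cmd_ps_basic_info
 * @dispr: the dispatcher delivering the op
 * @priv: module private data, i.e. struct cmd_ps
 * @info: see phl_module_op_info
 */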
static enum phl_mdl_ret_code
_ps_mdl_query_info(void *dispr, void *priv, struct phl_module_op_info *info)
{
	struct cmd_ps *ps = (struct cmd_ps *)priv;
	u8 pwr_lvl = PS_PWR_LVL_PWROFF;
	struct phl_cmd_ps_basic_info *basic_info = NULL;

	/* PHL_INFO("[PS_CMD], %s(): opcode %d.\n", __func__, info->op_code); */

	switch (info->op_code) {
	case PS_MDL_OP_CUR_PWR_LVL:
		pwr_lvl = ps->cur_pwr_lvl;
		_os_mem_cpy(phl_to_drvpriv(ps->phl_info), (void *)info->inbuf,
			    &pwr_lvl, sizeof(pwr_lvl));
		break;
	case PS_MDL_OP_BASIC_INFO:
		basic_info = (struct phl_cmd_ps_basic_info *)info->inbuf;
		basic_info->ps_mode = ps->ps_mode;
		basic_info->cur_pwr_lvl = ps->cur_pwr_lvl;
		basic_info->rej_pwr_req = ps->rej_pwr_req;
		basic_info->btc_req_pwr = ps->btc_req_pwr;
		basic_info->rt_stop_rson = ps->rt_stop_rson;
		basic_info->ap_active = ps->mr_info.ap_active;
		basic_info->gc_active = ps->mr_info.gc_active;
		basic_info->sta = ps->sta;
		/* enter/leave ps reason */
		_os_mem_set(phl_to_drvpriv(ps->phl_info), basic_info->enter_rson, 0, MAX_CMD_PS_RSON_LENGTH);
		_os_mem_cpy(phl_to_drvpriv(ps->phl_info), basic_info->enter_rson, ps->enter_rson, MAX_CMD_PS_RSON_LENGTH);
		_os_mem_set(phl_to_drvpriv(ps->phl_info), basic_info->leave_rson, 0, MAX_CMD_PS_RSON_LENGTH);
		_os_mem_cpy(phl_to_drvpriv(ps->phl_info), basic_info->leave_rson, ps->leave_rson, MAX_CMD_PS_RSON_LENGTH);
		break;
	}

	return MDL_RET_SUCCESS;
}

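/**
 * register the ps module (PHL_MDL_POWER_MGNT) to the dispatch engine
 * of every phy
 * @phl_info: see phl_info_t
 * returns RTW_PHL_STATUS_SUCCESS only if all registrations succeed
 */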
enum rtw_phl_status phl_register_ps_module(struct phl_info_t *phl_info)
{
	enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
	struct phl_cmd_dispatch_engine *disp_eng = &(phl_info->disp_eng);
	struct phl_bk_module_ops bk_ops = {0};
	u8 i = 0;

	PHL_INFO("[PS_CMD], %s(): \n", __func__);

	bk_ops.init = _ps_mdl_init;
	bk_ops.deinit = _ps_mdl_deinit;
	bk_ops.start = _ps_mdl_start;
	bk_ops.stop = _ps_mdl_stop;
	bk_ops.msg_hdlr = _ps_mdl_msg_hdlr;
	bk_ops.set_info = _ps_mdl_set_info;
	bk_ops.query_info = _ps_mdl_query_info;

	for (i = 0; i < disp_eng->phy_num; i++) {
		phl_status = phl_disp_eng_register_module(phl_info, i,
							  PHL_MDL_POWER_MGNT, &bk_ops);
		if (phl_status != RTW_PHL_STATUS_SUCCESS) {
			PHL_ERR("register cmd PS module of phy%d failed.\n", i + 1);
			break;
		}
	}

	return phl_status;
}

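/**
 * query the current pwr level from the ps module on HW_BAND_0
 * @phl_info: see phl_info_t
 * returns one of the PS_PWR_LVL_XXX values
 */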
u8 phl_ps_get_cur_pwr_lvl(struct phl_info_t *phl_info)
{
	struct phl_module_op_info op_info = {0};
	u8 pwr_lvl = PS_PWR_LVL_MAX;

	op_info.op_code = PS_MDL_OP_CUR_PWR_LVL;
	op_info.inbuf = (u8 *)&pwr_lvl;
	op_info.inlen = sizeof(pwr_lvl);

	phl_disp_eng_query_bk_module_info(phl_info, HW_BAND_0,
					  PHL_MDL_POWER_MGNT, &op_info);

	PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_, "[PS_CMD], %s(): pwr lvl(%s)\n", __func__, phl_ps_pwr_lvl_to_str(pwr_lvl));

	return pwr_lvl;
}

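/**
 * check whether the datapath is allowed to run:
 * always true if lps_pause_tx is disabled, otherwise only when the
 * current pwr level is PS_PWR_LVL_PWRON
 * @phl_info: see phl_info_t
 */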
bool phl_ps_is_datapath_allowed(struct phl_info_t *phl_info)
{
	struct phl_module_op_info op_info = {0};
	bool io_allowed = false;
	u8 pwr_lvl = PS_PWR_LVL_MAX;
	struct rtw_ps_cap_t *ps_cap = _get_ps_cap(phl_info);

	if (!ps_cap->lps_pause_tx)
		return true;

	op_info.op_code = PS_MDL_OP_CUR_PWR_LVL;
	op_info.inbuf = (u8 *)&pwr_lvl;
	op_info.inlen = sizeof(pwr_lvl);

	phl_disp_eng_query_bk_module_info(phl_info, HW_BAND_0,
					  PHL_MDL_POWER_MGNT, &op_info);

	if (pwr_lvl == PS_PWR_LVL_PWRON)
		io_allowed = true;

	return io_allowed;
}

static void _ps_tx_pkt_ntfy_done(void *priv, struct phl_msg *msg)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)priv;

	PHL_DBG("[PS_CMD], %s(): reset ntfy\n", __func__);

	_os_atomic_set(phl_to_drvpriv(phl_info),
		       &phl_info->ps_info.tx_ntfy,
		       0);
}

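/**
 * send MSG_EVT_TX_PKT_NTFY to wake the device for pending tx:
 * skipped when the pwr level is already PS_PWR_LVL_PWRON or a notify
 * is still in flight (ps_info.tx_ntfy); the flag is cleared again in
 * _ps_tx_pkt_ntfy_done or on send failure
 * @phl_info: see phl_info_t
 */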
void phl_ps_tx_pkt_ntfy(struct phl_info_t *phl_info)
{
	struct phl_msg msg = {0};
	struct phl_msg_attribute attr = {0};

	if (phl_ps_get_cur_pwr_lvl(phl_info) == PS_PWR_LVL_PWRON)
		return;

	if (_os_atomic_read(phl_to_drvpriv(phl_info), &phl_info->ps_info.tx_ntfy)) {
		PHL_DBG("[PS_CMD], %s(): already ntfy\n", __func__);
		return;
	}

	SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_POWER_MGNT);
	SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_TX_PKT_NTFY);
	msg.band_idx = HW_BAND_0;
	attr.completion.completion = _ps_tx_pkt_ntfy_done;
	attr.completion.priv = phl_info;

	_os_atomic_set(phl_to_drvpriv(phl_info),
		       &phl_info->ps_info.tx_ntfy,
		       1);

	if (phl_disp_eng_send_msg(phl_info, &msg, &attr, NULL) !=
	    RTW_PHL_STATUS_SUCCESS) {
		PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS_CMD], %s(): fail to send tx pkt notify.\n", __func__);
		_os_atomic_set(phl_to_drvpriv(phl_info),
			       &phl_info->ps_info.tx_ntfy,
			       0);
	}

	return;
}

static void _ps_set_rt_cap_done(void *priv, struct phl_msg *msg)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)priv;

	if (msg->inbuf && msg->inlen) {
		_os_mem_free(phl_to_drvpriv(phl_info),
			     msg->inbuf, msg->inlen);
	}
}

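/**
 * runtime-enable or runtime-disable power save for one band:
 * allocates a struct rt_ps carrying the reason and the allow flag and
 * sends it to the ps module as MSG_EVT_PS_CAP_CHG; the buffer is freed
 * in _ps_set_rt_cap_done, or here when the msg cannot be sent
 * @phl: see phl_info_t
 * @band_idx: index of the band (e.g. HW_BAND_0)
 * @ps_allow: allow or disallow power save
 * @rt_rson: see enum phl_ps_rt_rson
 *
 * Illustrative call sequence only (sketch; "PS_RT_RSON_DEBUG" below is
 * a placeholder, not an enum value defined in this file):
 *
 *	rtw_phl_ps_set_rt_cap(phl, HW_BAND_0, false, PS_RT_RSON_DEBUG);
 *	... run the latency-sensitive operation with ps disallowed ...
 *	rtw_phl_ps_set_rt_cap(phl, HW_BAND_0, true, PS_RT_RSON_DEBUG);
 */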
void rtw_phl_ps_set_rt_cap(void *phl, u8 band_idx, bool ps_allow, enum phl_ps_rt_rson rt_rson)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	struct phl_msg msg = {0};
	struct phl_msg_attribute attr = {0};
	struct rt_ps *ps_rt_info = NULL;

	ps_rt_info = (struct rt_ps *)_os_mem_alloc(phl_to_drvpriv(phl_info), sizeof(struct rt_ps));
	if (ps_rt_info == NULL) {
		PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS_CMD], %s(): fail to alloc ps_rt_info memory.\n", __func__);
		return;
	}

	ps_rt_info->rt_rson = rt_rson;
	ps_rt_info->ps_allow = ps_allow;

	SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_POWER_MGNT);
	SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_PS_CAP_CHG);
	msg.band_idx = band_idx;
	msg.inbuf = (u8 *)ps_rt_info;
	msg.inlen = sizeof(struct rt_ps);
	attr.completion.completion = _ps_set_rt_cap_done;
	attr.completion.priv = phl_info;

	if (phl_disp_eng_send_msg(phl_info, &msg, &attr, NULL) !=
	    RTW_PHL_STATUS_SUCCESS) {
		PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS_CMD], %s(): fail to send ps cap change msg.\n", __func__);
		goto cmd_fail;
	}

	return;

cmd_fail:
	_os_mem_free(phl_to_drvpriv(phl_info), ps_rt_info, sizeof(struct rt_ps));
}

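/**
 * debug helper to force ps enter/leave by sending the corresponding
 * MSG_EVT_PS_DBG_XXX event; only lps is handled for now, ips is
 * rejected with a warning
 * @phl_info: see phl_info_t
 * @ps_mode: PS_MODE_LPS/PS_MODE_IPS
 * @enter: true to enter ps, false to leave
 */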
void phl_ps_dbg_set_ps(struct phl_info_t *phl_info, u8 ps_mode, bool enter)
{
	struct phl_msg msg = {0};
	struct phl_msg_attribute attr = {0};
	u16 evt_id = 0;

	if (ps_mode == PS_MODE_LPS) {
		evt_id = (enter ? MSG_EVT_PS_DBG_LPS_ENTER : MSG_EVT_PS_DBG_LPS_LEAVE);
	} else if (ps_mode == PS_MODE_IPS) {
		PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS_CMD], %s(): dbg ips is not supported now.\n", __func__);
		/* evt_id = (enter ? MSG_EVT_PS_DBG_IPS_ENTER : MSG_EVT_PS_DBG_IPS_LEAVE); */
		return;
	} else {
		return;
	}

	PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS_CMD], %s(): debug set %s %s\n", __func__,
		  phl_ps_ps_mode_to_str(ps_mode), (enter ? "enter" : "leave"));

	SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_POWER_MGNT);
	SET_MSG_EVT_ID_FIELD(msg.msg_id, evt_id);
	msg.band_idx = HW_BAND_0;

	if (phl_disp_eng_send_msg(phl_info, &msg, &attr, NULL) !=
	    RTW_PHL_STATUS_SUCCESS) {
		PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS_CMD], %s(): fail to set dbg ps.\n", __func__);
	}

	return;
}

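/**
 * request rf on/off for one band by enqueueing MSG_EVT_RF_ON/OFF as a
 * PHL_CMD_WAIT command
 * @phl: see phl_info_t
 * @band_idx: index of the band (e.g. HW_BAND_0)
 * @rf_state: see enum rtw_rf_state
 */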
enum rtw_phl_status
rtw_phl_ps_set_rf_state(void *phl, u8 band_idx, enum rtw_rf_state rf_state)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	enum phl_msg_evt_id evt_id = (rf_state == RTW_RF_ON) ? MSG_EVT_RF_ON : MSG_EVT_RF_OFF;

	phl_cmd_enqueue(phl_info, band_idx, evt_id, NULL, 0, NULL, PHL_CMD_WAIT, 0);

	return RTW_PHL_STATUS_SUCCESS;
}

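/**
 * hal layer requests/releases pwr on behalf of btc:
 * only HAL_BTC_PWR_REQ is accepted; the request is forwarded to the ps
 * module as PS_MDL_OP_BTC_PWR_REQ on HW_BAND_0
 * @phl_com: see rtw_phl_com_t
 * @src: requester id, must be HAL_BTC_PWR_REQ
 * @pwr_req: true to hold pwr, false to release it
 */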
enum rtw_phl_status phl_ps_hal_pwr_req(struct rtw_phl_com_t *phl_com, u8 src, bool pwr_req)
{
	enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
	struct phl_info_t *phl_info = (struct phl_info_t *)phl_com->phl_priv;
	struct phl_module_op_info op_info = {0};

	if (src != HAL_BTC_PWR_REQ)
		return RTW_PHL_STATUS_FAILURE;

	op_info.op_code = PS_MDL_OP_BTC_PWR_REQ;
	op_info.inbuf = (u8 *)&pwr_req;
	status = phl_disp_eng_set_bk_module_info(phl_info, HW_BAND_0,
						 PHL_MDL_POWER_MGNT, &op_info);

	return status;
}
#endif /* CONFIG_POWER_SAVE */
