1 /******************************************************************************
2 *
3 * Copyright(c) 2019 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 *****************************************************************************/
15 #define _PHL_CMD_DISPR_C_
16 #include "phl_headers.h"
17 #ifdef CONFIG_CMD_DISP
18
19 #ifdef CONFIG_PHL_MSG_NUM
20 #define MAX_PHL_MSG_NUM CONFIG_PHL_MSG_NUM
21 #else
22 #define MAX_PHL_MSG_NUM (24)
23 #endif
24
25 #define MAX_CMD_REQ_NUM (8)
26 #define MODL_MASK_LEN (PHL_BK_MDL_END/8)
27
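/* Registration handles handed back to callers: BIT31 marks the handle as
 * valid and the low byte carries the index into the corresponding pool.
 */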
28 #define GEN_VALID_HDL(_idx) ((u32)(BIT31 | (u32)(_idx)))
29 #define IS_HDL_VALID(_hdl) ((_hdl) & BIT31)
30 #define GET_IDX_FROM_HDL(_hdl) ((u8)((_hdl) & 0xFF))
31
32 #define GET_CUR_PENDING_EVT( _obj, _mdl_id) \
33 ((u16)((_obj)->mdl_info[(_mdl_id)].pending_evt_id))
34 #define SET_CUR_PENDING_EVT( _obj, _mdl_id, _evt_id) \
35 ((_obj)->mdl_info[(_mdl_id)].pending_evt_id = (_evt_id))
36
37 #define IS_EXCL_MDL(_obj, _mdl) ((_obj)->exclusive_mdl == (_mdl))
38 #define SET_EXCL_MDL(_obj, _mdl) ((_obj)->exclusive_mdl = (_mdl))
39 #define CLEAR_EXCL_MDL(_obj) ((_obj)->exclusive_mdl = PHL_MDL_ID_MAX)
40 #define STOP_DISPATCH_MSG(_ret) \
41 ((_ret) != MDL_RET_SUCCESS && (_ret) != MDL_RET_IGNORE)
42
43 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
44 #define SET_MDL_HANDLE( _obj, _mdl_id, _handle) \
45 ((_obj)->mdl_info[(_mdl_id)].handle = (void*)(_handle))
46 #define GET_MDL_HANDLE( _obj, _mdl_id) \
47 ((_obj)->mdl_info[(_mdl_id)].handle)
48 #endif
49
50 enum phl_msg_status {
51 MSG_STATUS_ENQ = BIT0,
52 MSG_STATUS_RUN = BIT1,
53 MSG_STATUS_NOTIFY_COMPLETE = BIT2,
54 MSG_STATUS_CANCEL = BIT3,
55 MSG_STATUS_PRE_PHASE = BIT4,
56 MSG_STATUS_FAIL = BIT5,
57 MSG_STATUS_OWNER_BK_MDL = BIT6,
58 MSG_STATUS_OWNER_REQ = BIT7,
59 MSG_STATUS_CLR_SNDR_MSG_IF_PENDING = BIT8,
60 MSG_STATUS_PENDING = BIT9,
61 MSG_STATUS_FOR_ABORT = BIT10,
62 MSG_STATUS_PENDING_DURING_CANNOT_IO = BIT11,
63 };
64
65 enum cmd_req_status {
66 REQ_STATUS_ENQ = BIT0,
67 REQ_STATUS_RUN = BIT1,
68 REQ_STATUS_CANCEL = BIT2,
69 REQ_STATUS_LAST_PERMIT = BIT3,
70 REQ_STATUS_PREPARE = BIT4,
71 };
72
73 enum phl_mdl_status {
74 MDL_INIT = BIT0,
75 MDL_STARTED = BIT1,
76 };
77
78 enum dispatcher_status {
79 DISPR_INIT = BIT0,
80 DISPR_STARTED = BIT1,
81 DISPR_SHALL_STOP = BIT2,
82 DISPR_MSGQ_INIT = BIT3,
83 DISPR_REQ_INIT = BIT4,
84 DISPR_NOTIFY_IDLE = BIT5,
85 DISPR_CLR_PEND_MSG = BIT6,
86 DISPR_CTRL_PRESENT = BIT7,
87 DISPR_WAIT_ABORT_MSG_DONE = BIT8,
88 DISPR_CANNOT_IO = BIT9,
89 };
90
91 enum token_op_type {
92 TOKEN_OP_ADD_CMD_REQ = 1,
93 TOKEN_OP_FREE_CMD_REQ = 2,
94 TOKEN_OP_CANCEL_CMD_REQ = 3,
95 TOKEN_OP_RENEW_CMD_REQ = 4,
96 };
97
/**
 * phl_bk_module - instance of a phl background module
 * @status: contains mgnt status flags, refer to enum phl_mdl_status
 * @id: refer to enum phl_module_id
 * @priv: private context
 * @ops: interface for interacting with the phl_module
 */
105 struct phl_bk_module {
106 _os_list list;
107 u8 status;
108 u8 id;
109 void *priv;
110 struct phl_bk_module_ops ops;
111 };
112 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
113 struct dispr_msg_attr {
114 struct msg_self_def_seq self_def_seq;
115 };
116 #endif
/**
 * phl_dispr_msg_ex - phl msg extension
 * @status: contains mgnt status flags, refer to enum phl_msg_status
 * @idx: idx in the original msg_ex pool
 * @msg: msg content from an external module
 * @premap: notify map in the pre phase, refer to enum phl_module_id
 * @postmap: notify map in the post phase, refer to enum phl_module_id
 * @completion: msg completion routine
 * @priv: private context for the completion routine
 * @module: module handle of the msg source, only used when the msg fails
 */
128 struct phl_dispr_msg_ex {
129 _os_list list;
130 u16 status;
131 u8 idx;
132 struct phl_msg msg;
133 u8 premap[MODL_MASK_LEN];
134 u8 postmap[MODL_MASK_LEN];
135 struct msg_completion_routine completion;
struct phl_bk_module *module; /* handle of the module specified in msg_id */
137 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
138 struct dispr_msg_attr *attr;
139 #endif
140 };
141
142 /**
143 * phl_token_op_info - for internal mgnt purpose,
144 * @info: mgnt data
145 */
146 struct phl_token_op_info {
147 _os_list list;
148 u8 used;
149 enum token_op_type type;
150 u8 data;
151 };
152 /**
153 * phl_cmd_token_req_ex - cmd token request extension,
154 * @status: contain mgnt status flags, refer to enum cmd_req_status
155 * @idx: idx in original req_ex pool
156 * @req: req content from external module.
157 */
158 struct phl_cmd_token_req_ex {
159 _os_list list;
160 u8 idx;
161 u8 status;
162 struct phl_cmd_token_req req;
163 struct phl_token_op_info add_req_info;
164 struct phl_token_op_info free_req_info;
165 };
166
167 struct mdl_mgnt_info {
168 u16 pending_evt_id;
169 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
170 void* handle;
171 #endif
172 };
173
/**
 * cmd_dispatcher
 * @idx: idx in the dispatch engine, corresponding to band idx
 * @status: contains mgnt status flags, refer to enum dispatcher_status
 * @phl_info: for general reference usage
 * @module_q: module queues that link modules by priority
 * @msg_ex_pool: msg extension pool
 * @bk_thread: background thread
 * @token_req_ex_pool: req extension pool
 * @token_cnt: current token count,
 *	a cmd req can be executed only when the dispatcher's token count is 0
 * @bitmap: consists of the background modules loaded in the current dispatcher,
 *	refer to enum phl_module_id
 * @basemap: BK modules that must be notified when handling a msg
 * @controller: instance of the dispr controller module
 * @renew_req_info: used to trigger the next token req registration
 * @exclusive_mdl: in certain conditions, such as a dev IO status change,
 *	the dispr only allows the designated module to send msgs and cancels the rest
 */
193 struct cmd_dispatcher {
194 u8 idx;
195 u16 status;
196 struct phl_info_t *phl_info;
197 struct phl_queue module_q[PHL_MDL_PRI_MAX];
198 struct phl_dispr_msg_ex msg_ex_pool[MAX_PHL_MSG_NUM];
199 _os_sema msg_q_sema; /* wake up background thread in SOLO_THREAD_MODE*/
200 struct phl_queue msg_wait_q;
201 struct phl_queue msg_idle_q;
202 struct phl_queue msg_pend_q;
203 _os_thread bk_thread;
204 struct phl_cmd_token_req_ex token_req_ex_pool[MAX_CMD_REQ_NUM];
205 struct phl_queue token_req_wait_q;
206 struct phl_queue token_req_idle_q;
207 struct phl_queue token_op_q;
208 _os_lock token_op_q_lock;
209 _os_atomic token_cnt; // atomic
210 struct phl_cmd_token_req_ex *cur_cmd_req;
211 u8 bitmap[MODL_MASK_LEN];
212 u8 basemap[MODL_MASK_LEN];
213 struct mdl_mgnt_info mdl_info[PHL_MDL_ID_MAX];
214 struct phl_bk_module controller;
215 struct phl_token_op_info renew_req_info;
216 u8 exclusive_mdl;
217 };
218
219 enum rtw_phl_status dispr_process_token_req(struct cmd_dispatcher *obj);
220 void send_bk_msg_phy_on(struct cmd_dispatcher *obj);
221 void send_bk_msg_phy_idle(struct cmd_dispatcher *obj);
222 enum rtw_phl_status send_dev_io_status_change(struct cmd_dispatcher *obj, u8 allow_io);
223 void _notify_dispr_controller(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex);
224 static u8 dispr_enqueue_token_op_info(struct cmd_dispatcher *obj, struct phl_token_op_info *op_info,
225 enum token_op_type type, u8 data);
226
227 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
228 static void free_dispr_attr(void *d, struct dispr_msg_attr **dispr_attr);
229 static enum phl_mdl_ret_code run_self_def_seq(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex,
230 enum phl_bk_module_priority priority, u8 pre_prot_phase);
231 #endif
232
inline static
enum phl_bk_module_priority _get_mdl_priority(enum phl_module_id id)
235 {
236 if (id < PHL_BK_MDL_ROLE_START)
237 return PHL_MDL_PRI_MAX;
238 else if (id <= PHL_BK_MDL_ROLE_END)
239 return PHL_MDL_PRI_ROLE;
240 else if ( id <= PHL_BK_MDL_MDRY_END)
241 return PHL_MDL_PRI_MANDATORY;
242 else if (id <= PHL_BK_MDL_OPT_END)
243 return PHL_MDL_PRI_OPTIONAL;
244 else
245 return PHL_MDL_PRI_MAX;
246 }
247
inline static u8 _is_bitmap_empty(void *d, u8 *bitmap)
249 {
250 u8 empty[MODL_MASK_LEN] = {0};
251
252 return (!_os_mem_cmp(d, bitmap, empty, MODL_MASK_LEN))?(true):(false);
253 }
254
inline static void _print_bitmap(u8 *bitmap)
256 {
257 u8 k = 0;
258
259 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "print bitmap: \n");
260
261 for (k = 0; k < MODL_MASK_LEN; k++) {
262 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_DEBUG_,"[%d]:0x%x\n", k, bitmap[k]);
263 }
264 }
265
static void notify_bk_thread(struct cmd_dispatcher *obj)
267 {
268 void *d = phl_to_drvpriv(obj->phl_info);
269
270 if (disp_eng_is_solo_thread_mode(obj->phl_info))
271 _os_sema_up(d, &(obj->msg_q_sema));
272 else
273 disp_eng_notify_share_thread(obj->phl_info, (void*)obj);
274 }
275
static void on_abort_msg_complete(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex)
277 {
	/* struct phl_token_op_info is used to handle token reqs synchronously in the
	 * background thread; use add_req_info here to notify the background thread to
	 * run dispr_process_token_req again before handling the next msg.
	 */
281 CLEAR_STATUS_FLAG(obj->status, DISPR_WAIT_ABORT_MSG_DONE);
282 dispr_enqueue_token_op_info(obj, &obj->renew_req_info, TOKEN_OP_RENEW_CMD_REQ, 0xff);
283 }
284
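/* Take a free msg extension from the idle pool and reset its bookkeeping
 * (status, owner, completion routine, bitmaps and msg content) before reuse.
 */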
static u8 pop_front_idle_msg(struct cmd_dispatcher *obj,
                             struct phl_dispr_msg_ex **msg)
287 {
288 void *d = phl_to_drvpriv(obj->phl_info);
289 _os_list *new_msg = NULL;
290
291 (*msg) = NULL;
292 if (pq_pop(d, &(obj->msg_idle_q), &new_msg, _first, _bh)) {
293 (*msg) = (struct phl_dispr_msg_ex *)new_msg;
294 (*msg)->status = 0;
295 (*msg)->module = NULL;
296 (*msg)->completion.priv = NULL;
297 (*msg)->completion.completion = NULL;
298 _os_mem_set(d, (*msg)->premap, 0, MODL_MASK_LEN);
299 _os_mem_set(d, (*msg)->postmap, 0, MODL_MASK_LEN);
300 _os_mem_set(d, &((*msg)->msg), 0, sizeof(struct phl_msg));
301 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
302 (*msg)->attr = NULL;
303 #endif
304 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s: remain cnt(%d)\n", __FUNCTION__, obj->msg_idle_q.cnt);
305 return true;
306 } else {
307 return false;
308 }
309 }
310
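/* Recycle a msg extension back to the idle pool: run its completion routine
 * if requested, clear the matching pending event record and, for abort msgs,
 * trigger the token renew flow via on_abort_msg_complete().
 */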
static void push_back_idle_msg(struct cmd_dispatcher *obj,
                               struct phl_dispr_msg_ex *ex)
313 {
314 void *d = phl_to_drvpriv(obj->phl_info);
315
316 if (TEST_STATUS_FLAG(ex->status, MSG_STATUS_NOTIFY_COMPLETE) &&
317 ex->completion.completion) {
318 if (TEST_STATUS_FLAG(ex->status, MSG_STATUS_CANCEL))
319 SET_MSG_INDC_FIELD(ex->msg.msg_id, MSG_INDC_CANCEL);
320 ex->completion.completion(ex->completion.priv, &(ex->msg));
321 CLEAR_STATUS_FLAG(ex->status, MSG_STATUS_NOTIFY_COMPLETE);
322 }
323 if (TEST_STATUS_FLAG(ex->status, MSG_STATUS_FOR_ABORT))
324 on_abort_msg_complete(obj, ex);
325 ex->status = 0;
326 if(GET_CUR_PENDING_EVT(obj, MSG_MDL_ID_FIELD(ex->msg.msg_id)) == MSG_EVT_ID_FIELD(ex->msg.msg_id))
327 SET_CUR_PENDING_EVT(obj, MSG_MDL_ID_FIELD(ex->msg.msg_id), MSG_EVT_MAX);
328 ex->msg.msg_id = 0;
329 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
330 free_dispr_attr(d, &(ex->attr));
331 #endif
332 pq_push(d, &(obj->msg_idle_q), &(ex->list), _tail, _bh);
333 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s: remain cnt(%d)\n", __FUNCTION__, obj->msg_idle_q.cnt);
334 }
335
static u8 pop_front_wait_msg(struct cmd_dispatcher *obj,
                             struct phl_dispr_msg_ex **msg)
338 {
339 void *d = phl_to_drvpriv(obj->phl_info);
340 _os_list *new_msg = NULL;
341
342 (*msg) = NULL;
343 if (pq_pop(d, &(obj->msg_wait_q), &new_msg, _first, _bh)) {
344 (*msg) = (struct phl_dispr_msg_ex *)new_msg;
345 SET_STATUS_FLAG((*msg)->status, MSG_STATUS_RUN);
346 CLEAR_STATUS_FLAG((*msg)->status, MSG_STATUS_ENQ);
347 CLEAR_STATUS_FLAG((*msg)->status, MSG_STATUS_PENDING);
348 return true;
349 } else {
350 return false;
351 }
352 }
353
static void push_back_wait_msg(struct cmd_dispatcher *obj,
                               struct phl_dispr_msg_ex *ex)
356 {
357 void *d = phl_to_drvpriv(obj->phl_info);
358
359 SET_STATUS_FLAG(ex->status, MSG_STATUS_ENQ);
360 CLEAR_STATUS_FLAG(ex->status, MSG_STATUS_RUN);
361 pq_push(d, &(obj->msg_wait_q), &(ex->list), _tail, _bh);
362 notify_bk_thread(obj);
363 }
364
u8 is_higher_priority(void *d, void *priv, _os_list *input, _os_list *obj)
366 {
367 struct phl_dispr_msg_ex *ex_input = (struct phl_dispr_msg_ex *)input;
368 struct phl_dispr_msg_ex *ex_obj = (struct phl_dispr_msg_ex *)obj;
369
370 if (IS_DISPR_CTRL(MSG_MDL_ID_FIELD(ex_input->msg.msg_id)) &&
371 !IS_DISPR_CTRL(MSG_MDL_ID_FIELD(ex_obj->msg.msg_id)))
372 return true;
373 return false;
374 }
375
static void insert_msg_by_priority(struct cmd_dispatcher *obj,
                                   struct phl_dispr_msg_ex *ex)
378 {
379 void *d = phl_to_drvpriv(obj->phl_info);
380
381 SET_STATUS_FLAG(ex->status, MSG_STATUS_ENQ);
382 CLEAR_STATUS_FLAG(ex->status, MSG_STATUS_RUN);
383 pq_insert(d, &(obj->msg_wait_q), _bh, NULL, &(ex->list), is_higher_priority);
384 notify_bk_thread(obj);
385 }
386
static u8 pop_front_pending_msg(struct cmd_dispatcher *obj,
                                struct phl_dispr_msg_ex **msg)
389 {
390 void *d = phl_to_drvpriv(obj->phl_info);
391 _os_list *new_msg = NULL;
392
393 (*msg) = NULL;
394 if (pq_pop(d, &(obj->msg_pend_q), &new_msg, _first, _bh)) {
395 (*msg) = (struct phl_dispr_msg_ex *)new_msg;
396 return true;
397 } else {
398 return false;
399 }
400 }
401
static void push_back_pending_msg(struct cmd_dispatcher *obj,
                                  struct phl_dispr_msg_ex *ex)
404 {
405 void *d = phl_to_drvpriv(obj->phl_info);
406
407 SET_STATUS_FLAG(ex->status, MSG_STATUS_ENQ);
408 CLEAR_STATUS_FLAG(ex->status, MSG_STATUS_RUN);
409
410 if(TEST_STATUS_FLAG(ex->status, MSG_STATUS_CLR_SNDR_MSG_IF_PENDING))
411 SET_CUR_PENDING_EVT(obj, MSG_MDL_ID_FIELD(ex->msg.msg_id), MSG_EVT_ID_FIELD(ex->msg.msg_id));
412 pq_push(d, &(obj->msg_pend_q), &(ex->list), _tail, _bh);
413 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s: remain cnt(%d)\n", __FUNCTION__, obj->msg_pend_q.cnt);
414 }
415
static void clear_pending_msg(struct cmd_dispatcher *obj)
417 {
418 struct phl_dispr_msg_ex *ex = NULL;
419
420 if(!TEST_STATUS_FLAG(obj->status, DISPR_CLR_PEND_MSG))
421 return;
422 CLEAR_STATUS_FLAG(obj->status, DISPR_CLR_PEND_MSG);
423 while (pop_front_pending_msg(obj, &ex)) {
424 if (IS_DISPR_CTRL(MSG_EVT_ID_FIELD(ex->msg.msg_id)))
425 insert_msg_by_priority(obj, ex);
426 else
427 push_back_wait_msg(obj, ex);
428 }
429 }
430
static void clear_waiting_msg(struct cmd_dispatcher *obj)
432 {
433 struct phl_dispr_msg_ex *ex = NULL;
434
435 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s: remain cnt(%d)\n", __FUNCTION__, obj->msg_idle_q.cnt);
436 while(obj->msg_idle_q.cnt != MAX_PHL_MSG_NUM) {
437 while (pop_front_pending_msg(obj, &ex))
438 push_back_wait_msg(obj, ex);
439 while (pop_front_wait_msg(obj, &ex))
440 push_back_idle_msg(obj, ex);
441 }
442 }
443
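/* While the dispatcher is in DISPR_CANNOT_IO state, only msgs from the
 * exclusive module or the IO/PHY status events listed below are treated as
 * special and still dispatched.
 */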
static bool is_special_msg(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex)
445 {
446 u8 mdl_id = MSG_MDL_ID_FIELD(ex->msg.msg_id);
447 u16 evt = MSG_EVT_ID_FIELD(ex->msg.msg_id);
448
449 if (TEST_STATUS_FLAG(obj->status, DISPR_CANNOT_IO)) {
450 if ( IS_EXCL_MDL(obj, mdl_id) ||
451 evt == MSG_EVT_DEV_CANNOT_IO ||
452 evt == MSG_EVT_DEV_RESUME_IO ||
453 evt == MSG_EVT_PHY_ON ||
454 evt == MSG_EVT_PHY_IDLE)
455 return true;
456 }
457 return false;
458 }
459
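/* Decide whether a msg popped from the wait queue should be dropped (or kept
 * pending) rather than dispatched, based on dispatcher state and msg status.
 */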
static bool is_msg_canceled(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex)
461 {
462 u16 pending_evt = GET_CUR_PENDING_EVT(obj, MSG_MDL_ID_FIELD(ex->msg.msg_id));
463
464 if (!TEST_STATUS_FLAG(obj->status, DISPR_STARTED) ||
465 TEST_STATUS_FLAG(ex->status, MSG_STATUS_CANCEL))
466 return true;
467
468 if (pending_evt != MSG_EVT_MAX && pending_evt != MSG_EVT_ID_FIELD(ex->msg.msg_id)) {
469 SET_STATUS_FLAG(ex->status, MSG_STATUS_CANCEL);
470 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "msg canceled, cur pending evt(%d)\n", pending_evt);
471 return true;
472 }
473
474 if (TEST_STATUS_FLAG(obj->status, DISPR_SHALL_STOP)) {
475 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "msg canceled due to SHALL STOP status\n");
476
477 SET_MSG_INDC_FIELD(ex->msg.msg_id, MSG_INDC_CANNOT_IO);
478 SET_STATUS_FLAG(ex->status, MSG_STATUS_CANCEL);
479 return true;
480 }
481
482 if (TEST_STATUS_FLAG(obj->status, DISPR_CANNOT_IO)) {
483 if( is_special_msg(obj, ex)) {
484 SET_MSG_INDC_FIELD(ex->msg.msg_id, MSG_INDC_CANNOT_IO);
485 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "special msg found, still sent with CANNOT IO flag set\n");
486 }
487 else if (!TEST_STATUS_FLAG(ex->status, MSG_STATUS_PENDING_DURING_CANNOT_IO)) {
488 SET_STATUS_FLAG(ex->status, MSG_STATUS_CANCEL);
489 SET_MSG_INDC_FIELD(ex->msg.msg_id, MSG_INDC_CANNOT_IO);
490 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "msg canceled due to CANNOT IO status\n");
491 return true;
492 } else {
493 SET_STATUS_FLAG(ex->status, MSG_STATUS_PENDING);
494 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "msg pending due to CANNOT IO status\n");
495 }
496 }
497
498 return false;
499 }
500
void init_dispr_msg_pool(struct cmd_dispatcher *obj)
502 {
503 u8 i = 0;
504 void *d = phl_to_drvpriv(obj->phl_info);
505
506 if (TEST_STATUS_FLAG(obj->status, DISPR_MSGQ_INIT))
507 return;
508 pq_init(d, &(obj->msg_idle_q));
509 pq_init(d, &(obj->msg_wait_q));
510 pq_init(d, &(obj->msg_pend_q));
511 _os_mem_set(d, obj->msg_ex_pool, 0,
512 sizeof(struct phl_dispr_msg_ex) * MAX_PHL_MSG_NUM);
513 for (i = 0; i < MAX_PHL_MSG_NUM; i++) {
514 obj->msg_ex_pool[i].idx = i;
515 push_back_idle_msg(obj, &(obj->msg_ex_pool[i]));
516 }
517
518 SET_STATUS_FLAG(obj->status, DISPR_MSGQ_INIT);
519 }
520
void deinit_dispr_msg_pool(struct cmd_dispatcher *obj)
522 {
523 void *d = phl_to_drvpriv(obj->phl_info);
524
525 if (!TEST_STATUS_FLAG(obj->status, DISPR_MSGQ_INIT))
526 return;
527 CLEAR_STATUS_FLAG(obj->status, DISPR_MSGQ_INIT);
528
529 pq_deinit(d, &(obj->msg_idle_q));
530 pq_deinit(d, &(obj->msg_wait_q));
531 pq_deinit(d, &(obj->msg_pend_q));
532 }
533
void cancel_msg(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex)
535 {
536 void *d = phl_to_drvpriv(obj->phl_info);
537
	/* zero the bitmaps to ensure the msg will not be forwarded to
	 * any module after cancellation.
	 */
541 _reset_bitmap(d, ex->premap, MODL_MASK_LEN);
542 _reset_bitmap(d, ex->postmap, MODL_MASK_LEN);
543
544 SET_STATUS_FLAG(ex->status, MSG_STATUS_CANCEL);
545 }
546
void cancel_running_msg(struct cmd_dispatcher *obj)
548 {
549 u8 i = 0;
550
551 for (i = 0; i < MAX_PHL_MSG_NUM;i++) {
552 if(TEST_STATUS_FLAG(obj->msg_ex_pool[i].status, MSG_STATUS_RUN))
553 cancel_msg(obj, &(obj->msg_ex_pool[i]));
554 }
555 }

void set_msg_bitmap(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex, u8 mdl_id)
557 {
558 void *d = phl_to_drvpriv(obj->phl_info);
559
	/* ensure mandatory & wifi role modules receive all msgs */
561 _os_mem_cpy(d, ex->premap, obj->bitmap, MODL_MASK_LEN);
562 _os_mem_cpy(d, ex->postmap, obj->bitmap, MODL_MASK_LEN);
563 if(_chk_bitmap_bit(obj->bitmap, mdl_id)) {
564 _add_bitmap_bit(ex->premap, &mdl_id, 1);
565 _add_bitmap_bit(ex->postmap, &mdl_id, 1);
566 }
567 //_print_bitmap(ex->premap);
568 }
569
void set_msg_custom_bitmap(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex,
                           enum phl_msg_opt opt, u8 *id_arr, u32 len, u8 mdl_id)
572 {
573 void *d = phl_to_drvpriv(obj->phl_info);
574
575 if (opt & MSG_OPT_SKIP_NOTIFY_OPT_MDL) {
576 _os_mem_cpy(d, ex->premap, obj->basemap, MODL_MASK_LEN);
577 _os_mem_cpy(d, ex->postmap, obj->basemap, MODL_MASK_LEN);
578 }
579 if (opt & MSG_OPT_BLIST_PRESENT) {
580 _clr_bitmap_bit(ex->premap, id_arr, len);
581 _clr_bitmap_bit(ex->postmap, id_arr, len);
582 } else {
583 _add_bitmap_bit(ex->premap, id_arr, len);
584 _add_bitmap_bit(ex->postmap, id_arr, len);
585 }
586 if(_chk_bitmap_bit(obj->bitmap, mdl_id)) {
587 _add_bitmap_bit(ex->premap, &mdl_id, 1);
588 _add_bitmap_bit(ex->postmap, &mdl_id, 1);
589 }
590 }
591
u8 *get_msg_bitmap(struct phl_dispr_msg_ex *ex)
593 {
594 if (TEST_STATUS_FLAG(ex->status, MSG_STATUS_PRE_PHASE)) {
595 SET_MSG_INDC_FIELD(ex->msg.msg_id, MSG_INDC_PRE_PHASE);
596 return ex->premap;
597 } else {
598 CLEAR_MSG_INDC_FIELD(ex->msg.msg_id, MSG_INDC_PRE_PHASE);
599 return ex->postmap;
600 }
601 }
602
603
void init_dispr_mdl_mgnt_info(struct cmd_dispatcher *obj)
605 {
606 u8 i = 0;
607
608 for (i = 0; i < PHL_MDL_ID_MAX; i++)
609 SET_CUR_PENDING_EVT(obj, i, MSG_EVT_MAX);
610
611 }
612
static u8 pop_front_idle_req(struct cmd_dispatcher *obj,
                             struct phl_cmd_token_req_ex **req)
615 {
616 void *d = phl_to_drvpriv(obj->phl_info);
617 _os_list *new_req = NULL;
618
619 (*req) = NULL;
620 if (pq_pop(d, &(obj->token_req_idle_q), &new_req, _first, _bh)) {
621 (*req) = (struct phl_cmd_token_req_ex*)new_req;
622 (*req)->status = 0;
623 _os_mem_set(d, &((*req)->req), 0,
624 sizeof(struct phl_cmd_token_req));
625 _os_mem_set(d, &((*req)->add_req_info), 0,
626 sizeof(struct phl_token_op_info));
627 _os_mem_set(d, &((*req)->free_req_info), 0,
628 sizeof(struct phl_token_op_info));
629 return true;
630 } else {
631 return false;
632 }
633 }
634
static void push_back_idle_req(struct cmd_dispatcher *obj,
                               struct phl_cmd_token_req_ex *req)
637 {
638 void *d = phl_to_drvpriv(obj->phl_info);
639
640 req->status = 0;
641 SET_CUR_PENDING_EVT(obj, req->req.module_id, MSG_EVT_MAX);
642 pq_push(d, &(obj->token_req_idle_q), &(req->list), _tail, _bh);
643 }
644
static u8 pop_front_wait_req(struct cmd_dispatcher *obj,
                             struct phl_cmd_token_req_ex **req)
647 {
648 void *d = phl_to_drvpriv(obj->phl_info);
649 _os_list *new_req = NULL;
650
651 (*req) = NULL;
652 if (pq_pop(d, &(obj->token_req_wait_q), &new_req, _first, _bh)) {
653 (*req) = (struct phl_cmd_token_req_ex*)new_req;
654 SET_STATUS_FLAG((*req)->status, REQ_STATUS_PREPARE);
655 CLEAR_STATUS_FLAG((*req)->status, REQ_STATUS_ENQ);
656 return true;
657 } else {
658 return false;
659 }
660 }
661
static void push_back_wait_req(struct cmd_dispatcher *obj,
                               struct phl_cmd_token_req_ex *req)
664 {
665 void *d = phl_to_drvpriv(obj->phl_info);
666
667 pq_push(d, &(obj->token_req_wait_q), &(req->list), _tail, _bh);
668 SET_STATUS_FLAG(req->status, REQ_STATUS_ENQ);
669 }
670
static void clear_wating_req(struct cmd_dispatcher *obj)
672 {
673 struct phl_cmd_token_req_ex *ex = NULL;
674
675 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
676 "%s: remain cnt(%d)\n", __FUNCTION__, obj->token_req_idle_q.cnt);
677 while(obj->token_req_idle_q.cnt != MAX_CMD_REQ_NUM) {
678 while (pop_front_wait_req(obj, &ex)) {
679 ex->req.abort(obj, ex->req.priv);
680 push_back_idle_req(obj, ex);
681 }
682 }
683 }
684
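/* Release the current cmd token req: cancel its in-flight msgs, optionally
 * notify the requester via its abort callback, return the req extension to
 * the idle pool and decrement the token count.
 */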
void deregister_cur_cmd_req(struct cmd_dispatcher *obj, u8 notify)
686 {
687 struct phl_cmd_token_req *req = NULL;
688 void *d = phl_to_drvpriv(obj->phl_info);
689 u8 i = 0;
690 struct phl_dispr_msg_ex *ex = NULL;
691
692 if (obj->cur_cmd_req) {
693 req = &(obj->cur_cmd_req->req);
694 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
695 "%s, id(%d), status(%d)\n",
696 __FUNCTION__, req->module_id, obj->cur_cmd_req->status);
697 CLEAR_STATUS_FLAG(obj->cur_cmd_req->status, REQ_STATUS_RUN);
698 for (i = 0; i < MAX_PHL_MSG_NUM; i++) {
699 ex = &(obj->msg_ex_pool[i]);
700 if (req->module_id != MSG_MDL_ID_FIELD(ex->msg.msg_id))
701 continue;
702 CLEAR_STATUS_FLAG(ex->status, MSG_STATUS_OWNER_REQ);
703 cancel_msg(obj, ex);
704 if(TEST_STATUS_FLAG(ex->status, MSG_STATUS_PENDING)) {
705 dispr_clr_pending_msg((void*)obj);
			/* Inserting pending msgs from this specific sender back into the
			 * wait Q before the abort notify guarantees that the msg sent in
			 * the abort notify is exactly the last msg from this sender.
			 */
709 clear_pending_msg(obj);
710 }
711 }
712 if (notify == true) {
713 SET_STATUS_FLAG(obj->cur_cmd_req->status, REQ_STATUS_LAST_PERMIT);
714 req->abort(obj, req->priv);
715 CLEAR_STATUS_FLAG(obj->cur_cmd_req->status, REQ_STATUS_LAST_PERMIT);
716 }
717 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
718 SET_MDL_HANDLE(obj, obj->cur_cmd_req->req.module_id, NULL);
719 #endif
720 push_back_idle_req(obj, obj->cur_cmd_req);
721 _os_atomic_set(d, &(obj->token_cnt),
722 _os_atomic_read(d, &(obj->token_cnt))-1);
723 }
724 obj->cur_cmd_req = NULL;
725 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s\n", __FUNCTION__);
726 }
727
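/* Grant the token to a waiting cmd req: bump the token count and invoke the
 * requester's acquired() callback; on MDL_RET_FAIL the req is deregistered
 * again without an abort notification.
 */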
u8 register_cur_cmd_req(struct cmd_dispatcher *obj,
                        struct phl_cmd_token_req_ex *req)
730 {
731 void *d = phl_to_drvpriv(obj->phl_info);
732 enum phl_mdl_ret_code ret = MDL_RET_SUCCESS;
733
734 SET_STATUS_FLAG(req->status, REQ_STATUS_RUN);
735 CLEAR_STATUS_FLAG(req->status, REQ_STATUS_PREPARE);
736 obj->cur_cmd_req = req;
737 _os_atomic_set(d, &(obj->token_cnt),
738 _os_atomic_read(d, &(obj->token_cnt))+1);
739 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
740 "%s, id(%d)\n", __FUNCTION__, obj->cur_cmd_req->req.module_id);
741 ret = obj->cur_cmd_req->req.acquired((void*)obj, obj->cur_cmd_req->req.priv);
742 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s, ret(%d)\n", __FUNCTION__, ret);
743
744 if (ret == MDL_RET_FAIL) {
745 deregister_cur_cmd_req(obj, false);
746 return false;
747 }
748 else {
749 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
750 SET_MDL_HANDLE(obj, req->req.module_id, req);
751 #endif
752 return true;
753 }
754 }
755
void cancel_all_cmd_req(struct cmd_dispatcher *obj)
757 {
758 u8 i = 0;
759 struct phl_cmd_token_req_ex* req_ex = NULL;
760
761 for (i = 0; i < MAX_CMD_REQ_NUM;i++) {
762 req_ex = &(obj->token_req_ex_pool[i]);
763 if (req_ex->status)
764 SET_STATUS_FLAG(req_ex->status, REQ_STATUS_CANCEL);
765 }
766 }
767
void init_cmd_req_pool(struct cmd_dispatcher *obj)
769 {
770 u8 i = 0;
771 void *d = phl_to_drvpriv(obj->phl_info);
772
773 if (TEST_STATUS_FLAG(obj->status, DISPR_REQ_INIT))
774 return;
775 pq_init(d, &(obj->token_req_wait_q));
776 pq_init(d, &(obj->token_req_idle_q));
777 pq_init(d, &(obj->token_op_q));
778 _os_mem_set(d, obj->token_req_ex_pool, 0,
779 sizeof(struct phl_cmd_token_req_ex) * MAX_CMD_REQ_NUM);
780 for (i = 0; i < MAX_CMD_REQ_NUM;i++) {
781 obj->token_req_ex_pool[i].idx = i;
782 pq_push(d, &(obj->token_req_idle_q),
783 &(obj->token_req_ex_pool[i].list), _tail, _bh);
784 }
785 SET_STATUS_FLAG(obj->status, DISPR_REQ_INIT);
786 }
787
void deinit_cmd_req_pool(struct cmd_dispatcher *obj)
789 {
790 void *d = phl_to_drvpriv(obj->phl_info);
791
792 CLEAR_STATUS_FLAG(obj->status, DISPR_REQ_INIT);
793
794 pq_deinit(d, &(obj->token_req_wait_q));
795 pq_deinit(d, &(obj->token_req_idle_q));
796 pq_deinit(d, &(obj->token_op_q));
797 }
798
u8 chk_module_ops(struct phl_bk_module_ops *ops)
800 {
801 if (ops == NULL ||
802 ops->init == NULL ||
803 ops->deinit == NULL ||
804 ops->msg_hdlr == NULL ||
805 ops->set_info == NULL ||
806 ops->query_info == NULL ||
807 ops->start == NULL ||
808 ops->stop == NULL)
809 return false;
810 return true;
811 }
812
u8 chk_cmd_req_ops(struct phl_cmd_token_req *req)
814 {
815 if (req == NULL ||
816 req->module_id < PHL_FG_MDL_START ||
817 req->abort == NULL ||
818 req->acquired == NULL ||
819 req->msg_hdlr == NULL ||
820 req->set_info == NULL ||
821 req->query_info == NULL)
822 return false;
823 return true;
824 }

static u8 pop_front_token_op_info(struct cmd_dispatcher *obj,
                                  struct phl_token_op_info **op_info)
827 {
828 void *d = phl_to_drvpriv(obj->phl_info);
829 _os_list *new_info = NULL;
830
831 (*op_info) = NULL;
832 if (pq_pop(d, &(obj->token_op_q), &new_info, _first, _bh)) {
833 (*op_info) = (struct phl_token_op_info *)new_info;
834 return true;
835 } else {
836 return false;
837 }
838 }
839
static u8 push_back_token_op_info(struct cmd_dispatcher *obj,
                                  struct phl_token_op_info *op_info,
                                  enum token_op_type type,
                                  u8 data)
844 {
845 void *d = phl_to_drvpriv(obj->phl_info);
846 _os_spinlockfg sp_flags;
847
848 _os_spinlock(d, &obj->token_op_q_lock, _bh, &sp_flags);
849 if (op_info->used == true) {
850 _os_spinunlock(d, &obj->token_op_q_lock, _bh, &sp_flags);
851 return false;
852 }
853 op_info->used = true;
854 op_info->type = type;
855 op_info->data = data;
856 _os_spinunlock(d, &obj->token_op_q_lock, _bh, &sp_flags);
857 pq_push(d, &(obj->token_op_q), &(op_info->list), _tail, _bh);
858 notify_bk_thread(obj);
859 return true;
860 }
861
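/* Process one queued token operation (add/renew/free/cancel) in background
 * thread context, so token req handling stays serialized in a single thread.
 */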
void _handle_token_op_info(struct cmd_dispatcher *obj, struct phl_token_op_info *op_info)
863 {
864 struct phl_cmd_token_req_ex *req_ex = NULL;
865 void *d = phl_to_drvpriv(obj->phl_info);
866
867 switch (op_info->type) {
868 case TOKEN_OP_RENEW_CMD_REQ:
869 /* fall through*/
870 case TOKEN_OP_ADD_CMD_REQ:
871 dispr_process_token_req(obj);
872 break;
873 case TOKEN_OP_FREE_CMD_REQ:
874 if (op_info->data >= MAX_CMD_REQ_NUM)
875 return;
876 req_ex = &(obj->token_req_ex_pool[op_info->data]);
877 if (!TEST_STATUS_FLAG(req_ex->status, REQ_STATUS_RUN))
878 break;
879 deregister_cur_cmd_req(obj, false);
880 dispr_process_token_req(obj);
881 break;
882 case TOKEN_OP_CANCEL_CMD_REQ:
883 if (op_info->data >= MAX_CMD_REQ_NUM)
884 return;
885 req_ex = &(obj->token_req_ex_pool[op_info->data]);
886 SET_STATUS_FLAG(req_ex->status, REQ_STATUS_CANCEL);
887 if (TEST_STATUS_FLAG(req_ex->status, REQ_STATUS_ENQ)) {
888 pq_del_node(d, &(obj->token_req_wait_q), &(req_ex->list), _bh);
			/*
			 * Call the command's abort handler; the abort handler
			 * should decide whether the token has been acquired or not.
			 */
893 req_ex->req.abort(obj, req_ex->req.priv);
894 push_back_idle_req(obj, req_ex);
895 } else if (TEST_STATUS_FLAG(req_ex->status, REQ_STATUS_RUN)){
896 deregister_cur_cmd_req(obj, true);
897 dispr_process_token_req(obj);
898 }
899 break;
900 default:
901 break;
902 }
903 }
904
void token_op_hanler(struct cmd_dispatcher *obj)
906 {
907 struct phl_token_op_info *info = NULL;
908
909 while (pop_front_token_op_info(obj, &info)) {
910 _handle_token_op_info(obj, info);
911 info->used = false;
912 }
913 }

static u8
dispr_enqueue_token_op_info(struct cmd_dispatcher *obj,
                            struct phl_token_op_info *op_info,
                            enum token_op_type type,
                            u8 data)
919 {
920 return push_back_token_op_info(obj, op_info, type, data);
921 }
922
u8 bk_module_init(struct cmd_dispatcher *obj, struct phl_bk_module *module)
924 {
925 if (TEST_STATUS_FLAG(module->status, MDL_INIT)) {
926 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_,
927 "%s module_id:%d already init\n",
928 __FUNCTION__, module->id);
929 return false;
930 }
931
932 if (module->ops.init((void*)obj->phl_info, (void*)obj,
933 &(module->priv)) == MDL_RET_SUCCESS) {
934 SET_STATUS_FLAG(module->status, MDL_INIT);
935 return true;
936 } else {
937 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_,
938 "%s fail module_id: %d \n", __FUNCTION__, module->id);
939 return false;
940 }
941 }
942
void bk_module_deinit(struct cmd_dispatcher *obj, struct phl_bk_module *module)
944 {
945 if (TEST_STATUS_FLAG(module->status, MDL_INIT))
946 module->ops.deinit((void*)obj, module->priv);
947 CLEAR_STATUS_FLAG(module->status, MDL_INIT);
948 }
949
u8 bk_module_start(struct cmd_dispatcher *obj, struct phl_bk_module *module)
951 {
952 if (!TEST_STATUS_FLAG(module->status, MDL_INIT) ||
953 TEST_STATUS_FLAG(module->status, MDL_STARTED)) {
954 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_,
955 "%s module_id:%d already start\n", __FUNCTION__,
956 module->id);
957 return false;
958 }
959
960 if (module->ops.start((void*)obj, module->priv) == MDL_RET_SUCCESS) {
961 SET_STATUS_FLAG(module->status, MDL_STARTED);
962 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
963 SET_MDL_HANDLE(obj, module->id, module);
964 #endif
965 return true;
966 } else {
967 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_,
968 "%s fail module_id: %d \n", __FUNCTION__, module->id);
969 return false;
970 }
971 }
972
u8 bk_module_stop(struct cmd_dispatcher *obj, struct phl_bk_module *module)
974 {
975 if (!TEST_STATUS_FLAG(module->status, MDL_STARTED))
976 return false;
977 CLEAR_STATUS_FLAG(module->status, MDL_STARTED);
978 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
979 SET_MDL_HANDLE(obj, module->id, NULL);
980 #endif
981 if (module->ops.stop((void*)obj, module->priv) == MDL_RET_SUCCESS) {
982 return true;
983 } else {
984 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_,
985 "%s fail module_id: %d \n", __FUNCTION__,
986 module->id);
987 return false;
988 }
989 }
990
void cur_req_hdl(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex)
992 {
993 struct phl_cmd_token_req_ex *cur_req = obj->cur_cmd_req;
994
995 if (cur_req == NULL)
996 return;
997 if (!TEST_STATUS_FLAG(cur_req->status, REQ_STATUS_RUN) ||
998 TEST_STATUS_FLAG(cur_req->status, REQ_STATUS_CANCEL))
999 return;
1000 if (TEST_STATUS_FLAG(ex->status, MSG_STATUS_FOR_ABORT))
1001 return;
1002 cur_req->req.msg_hdlr((void*)obj, cur_req->req.priv, &(ex->msg));
1003 }
1004
void notify_msg_fail(struct cmd_dispatcher *obj,
                     struct phl_dispr_msg_ex *ex,
                     enum phl_mdl_ret_code ret)
1008 {
1009 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s\n", __FUNCTION__);
1010
1011 SET_STATUS_FLAG(ex->status, MSG_STATUS_FAIL);
1012
1013 SET_MSG_INDC_FIELD(ex->msg.msg_id, MSG_INDC_FAIL);
1014 if (ret == MDL_RET_CANNOT_IO)
1015 SET_MSG_INDC_FIELD(ex->msg.msg_id, MSG_INDC_CANNOT_IO);
1016
1017 if (TEST_STATUS_FLAG(ex->status, MSG_STATUS_OWNER_BK_MDL) &&
1018 (IS_DISPR_CTRL(MSG_MDL_ID_FIELD(ex->msg.msg_id)) || _chk_bitmap_bit(obj->bitmap, ex->module->id))) {
1019 ex->module->ops.msg_hdlr(obj, ex->module->priv, &(ex->msg));
1020 }
1021
1022 if (TEST_STATUS_FLAG(ex->status, MSG_STATUS_OWNER_REQ)) {
1023 cur_req_hdl(obj, ex);
1024 }
1025 }
1026
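/* Forward a msg to one module's msg_hdlr. FAIL/CANNOT_IO marks the msg as
 * failed, PENDING keeps it queued, and success clears the module's bit from
 * the active (pre or post) bitmap.
 */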
enum phl_mdl_ret_code feed_mdl_msg(struct cmd_dispatcher *obj,
                                   struct phl_bk_module *mdl,
                                   struct phl_dispr_msg_ex *ex)
1030 {
1031 enum phl_mdl_ret_code ret = MDL_RET_FAIL;
1032 u8 *bitmap = NULL;
1033
1034 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_DEBUG_, "%s, id:%d \n", __FUNCTION__, mdl->id);
1035 ret = mdl->ops.msg_hdlr(obj, mdl->priv, &(ex->msg));
1036 if (ret == MDL_RET_FAIL || ret == MDL_RET_CANNOT_IO) {
1037 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "id:%d evt:0x%x fail\n",
1038 mdl->id, ex->msg.msg_id);
1039 ex->msg.rsvd[0] = mdl;
1040 notify_msg_fail(obj, ex, ret);
1041 } else if (ret == MDL_RET_PENDING) {
1042 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "id:%d evt:0x%x pending\n",
1043 mdl->id, ex->msg.msg_id);
1044 SET_STATUS_FLAG(ex->status, MSG_STATUS_PENDING);
1045 } else {
1046 if (MSG_INDC_FIELD(ex->msg.msg_id) & MSG_INDC_PRE_PHASE)
1047 bitmap = ex->premap;
1048 else
1049 bitmap = ex->postmap;
1050 _clr_bitmap_bit(bitmap, &(mdl->id), 1);
1051 }
1052 return ret;
1053 }
1054
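/* Pre-protocol phase: walk the module queues from PHL_MDL_PRI_MAX - 1 down to
 * PHL_MDL_PRI_ROLE, feeding the msg to every started module whose bit is set
 * in premap; dispatch stops early if a module fails or pends the msg.
 */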
void msg_pre_phase_hdl(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex)
1056 {
1057 s8 i = 0;
1058 void *d = phl_to_drvpriv(obj->phl_info);
1059 struct phl_bk_module *mdl = NULL;
1060 _os_list *node = NULL;
1061 struct phl_queue *q = NULL;
1062 enum phl_mdl_ret_code ret = MDL_RET_FAIL;
1063 u8 owner_id = (ex->module)?(ex->module->id):(PHL_MDL_ID_MAX);
1064 enum phl_bk_module_priority priority = PHL_MDL_PRI_MAX;
1065
1066 if (owner_id <= PHL_BK_MDL_END)
1067 priority = _get_mdl_priority(owner_id);
1068
1069 for (i = PHL_MDL_PRI_MAX - 1 ; i >= PHL_MDL_PRI_ROLE ; i--) {
1070 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
1071 ret = run_self_def_seq(obj, ex, i, true);
1072 if (STOP_DISPATCH_MSG(ret))
1073 return;
1074 #endif
1075 if (priority == i && _chk_bitmap_bit(ex->premap, owner_id)) {
1076 ret = feed_mdl_msg(obj, ex->module, ex);
1077 if (STOP_DISPATCH_MSG(ret))
1078 return;
1079 }
1080 q = &(obj->module_q[(u8)i]);
1081
1082 if (pq_get_front(d, q, &node, _bh) == false)
1083 continue;
1084
1085 do {
1086 mdl = (struct phl_bk_module*)node;
1087 if (!_chk_bitmap_bit(ex->premap, mdl->id) ||
1088 !TEST_STATUS_FLAG(mdl->status, MDL_STARTED))
1089 continue;
1090 ret = feed_mdl_msg(obj, mdl, ex);
1091 if (STOP_DISPATCH_MSG(ret))
1092 return;
1093 } while(pq_get_next(d, q, node, &node, _bh));
1094 }
1095 }
1096
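/* Post-protocol phase: walk the module queues in the opposite order of the
 * pre phase (PHL_MDL_PRI_ROLE up to PHL_MDL_PRI_MAX - 1, each queue from tail
 * to head), feeding the msg to every started module whose bit is set in postmap.
 */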
void msg_post_phase_hdl(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex)
1098 {
1099 s8 i = 0;
1100 void *d = phl_to_drvpriv(obj->phl_info);
1101 struct phl_bk_module *mdl = NULL;
1102 _os_list *node = NULL;
1103 struct phl_queue *q = NULL;
1104 enum phl_mdl_ret_code ret = MDL_RET_FAIL;
1105 u8 owner_id = (ex->module)?(ex->module->id):(PHL_MDL_ID_MAX);
1106 enum phl_bk_module_priority priority = PHL_MDL_PRI_MAX;
1107
1108 if (owner_id <= PHL_BK_MDL_END)
1109 priority = _get_mdl_priority(owner_id);
1110
1111 for (i = PHL_MDL_PRI_ROLE ; i < PHL_MDL_PRI_MAX ; i++) {
1112 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
1113 ret = run_self_def_seq(obj, ex, i, false);
1114 if (STOP_DISPATCH_MSG(ret))
1115 return;
1116 #endif
1117 if (priority == i && _chk_bitmap_bit(ex->postmap, owner_id)) {
1118 ret = feed_mdl_msg(obj, ex->module, ex);
1119 if (STOP_DISPATCH_MSG(ret))
1120 return;
1121 }
1122 q = &(obj->module_q[(u8)i]);
1123 if (pq_get_tail(d, q, &node, _bh) == false)
1124 continue;
1125 do {
1126 mdl = (struct phl_bk_module*)node;
1127 if (!_chk_bitmap_bit(ex->postmap, mdl->id)||
1128 !TEST_STATUS_FLAG(mdl->status, MDL_STARTED))
1129 continue;
1130 ret = feed_mdl_msg(obj, mdl, ex);
1131 if (STOP_DISPATCH_MSG(ret))
1132 return;
1133 } while(pq_get_prev(d, q, node, &node, _bh));
1134 }
1135 }
1136
u8 get_cur_cmd_req_id(struct cmd_dispatcher *obj, u32 *req_status)
1138 {
1139 struct phl_cmd_token_req_ex *cur_req = obj->cur_cmd_req;
1140
1141 if(req_status)
1142 *req_status = 0;
1143
1144 if (cur_req == NULL )
1145 return (u8)PHL_MDL_ID_MAX;
1146
1147 if(req_status)
1148 *req_status = cur_req->status;
1149
1150 if(!TEST_STATUS_FLAG(cur_req->status, REQ_STATUS_RUN) ||
1151 TEST_STATUS_FLAG(cur_req->status, REQ_STATUS_CANCEL))
1152 return (u8)PHL_MDL_ID_MAX;
1153 else
1154 return cur_req->req.module_id;
1155 }
1156
#define MSG_REDIRECT_CHK(_ex) \
	if (TEST_STATUS_FLAG((_ex)->status, MSG_STATUS_FAIL) || \
	    TEST_STATUS_FLAG((_ex)->status, MSG_STATUS_CANCEL)) \
		goto recycle; \
	if (TEST_STATUS_FLAG((_ex)->status, MSG_STATUS_PENDING)) \
		goto reschedule;
1163
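/* Dispatch one msg: notify the dispr controller, run the pre-protocol phase
 * until premap is empty, then the post-protocol phase until postmap is empty.
 * Failed or canceled msgs are recycled, pending msgs are moved to the pending
 * queue, and the current cmd req is notified once both phases complete.
 */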
void msg_dispatch(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex)
1165 {
1166 u8 *bitmap = get_msg_bitmap(ex);
1167 void *d = phl_to_drvpriv(obj->phl_info);
1168
1169 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_DEBUG_,
1170 "%s, msg_id:0x%x status: 0x%x\n", __FUNCTION__, ex->msg.msg_id, ex->status);
1171 MSG_REDIRECT_CHK(ex);
1172
1173 _notify_dispr_controller(obj, ex);
1174
1175 MSG_REDIRECT_CHK(ex);
1176
1177 if ((MSG_INDC_FIELD(ex->msg.msg_id) & MSG_INDC_PRE_PHASE) &&
1178 _is_bitmap_empty(d, bitmap) == false)
1179 msg_pre_phase_hdl(obj, ex);
1180
1181 MSG_REDIRECT_CHK(ex);
1182
1183 if (_is_bitmap_empty(d, bitmap)) {
1184 /* pre protocol phase done, switch to post protocol phase*/
1185 CLEAR_STATUS_FLAG(ex->status, MSG_STATUS_PRE_PHASE);
1186 bitmap = get_msg_bitmap(ex);
1187 } else {
1188 PHL_ERR("%s, invalid bitmap state, msg status:0x%x \n", __FUNCTION__, ex->status);
1189 SET_STATUS_FLAG(ex->status, MSG_STATUS_CANCEL);
1190 goto recycle;
1191 }
1192
1193 if (_is_bitmap_empty(d, bitmap) == false)
1194 msg_post_phase_hdl(obj, ex);
1195
1196 MSG_REDIRECT_CHK(ex);
1197
1198 if (_is_bitmap_empty(d, bitmap)) {
1199 /* post protocol phase done */
1200 cur_req_hdl(obj, ex);
1201 goto recycle;
1202 } else {
1203 PHL_ERR("%s, invalid bitmap state, msg status:0x%x \n", __FUNCTION__, ex->status);
1204 SET_STATUS_FLAG(ex->status, MSG_STATUS_CANCEL);
1205 goto recycle;
1206 }
1207 reschedule:
1208 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
1209 "%s, msg:0x%x reschedule \n", __FUNCTION__,
1210 ex->msg.msg_id);
1211 if(TEST_STATUS_FLAG(ex->status, MSG_STATUS_PENDING))
1212 push_back_pending_msg(obj, ex);
1213 else
1214 push_back_wait_msg(obj, ex);
1215 return;
1216 recycle:
1217 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_DEBUG_,
1218 "%s, msg:0x%x recycle \n", __FUNCTION__,
1219 ex->msg.msg_id);
1220 push_back_idle_msg(obj, ex);
1221 }
1222
void dispr_thread_loop_hdl(struct cmd_dispatcher *obj)
1224 {
1225 struct phl_dispr_msg_ex *ex = NULL;
1226
	/* Check pending msgs in advance.
	 * If the pending queue is not empty when the loop breaks,
	 * these msgs are cleared in deinit_dispr_msg_pool.
	 */
1231 clear_pending_msg(obj);
	/* Handle the token op Q in advance.
	 * If the req wait Q is not empty when the loop breaks,
	 * these reqs are cleared in deinit_cmd_req_pool.
	 */
1236 token_op_hanler(obj);
1237
1238 if (pop_front_wait_msg(obj, &ex)) {
1239 if (is_msg_canceled(obj, ex)) {
1240 push_back_idle_msg(obj, ex);
1241 return;
1242 }
		/* ensure all modules set in the msg bitmaps
		 * exist in the current dispatcher */
1245 _and_bitmaps(obj->bitmap, ex->premap, MODL_MASK_LEN);
1246 _and_bitmaps(obj->bitmap, ex->postmap, MODL_MASK_LEN);
1247 msg_dispatch(obj, ex);
1248 }
1249 }
1250
void dispr_thread_leave_hdl(struct cmd_dispatcher *obj)
1252 {
1253 deregister_cur_cmd_req(obj, true);
1254 /* clear remaining pending & waiting msg */
1255 clear_waiting_msg(obj);
1256 /* pop out all waiting cmd req and notify abort. */
1257 clear_wating_req(obj);
1258 }
1259
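/* Dedicated dispatcher thread used in solo thread mode: sleeps on msg_q_sema
 * and handles token ops plus one waiting msg per wakeup until asked to stop.
 */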
int background_thread_hdl(void *param)
1261 {
1262 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)param;
1263 void *d = phl_to_drvpriv(obj->phl_info);
1264
1265 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s enter\n", __FUNCTION__);
1266 while (!_os_thread_check_stop(d, &(obj->bk_thread))) {
1267
1268 _os_sema_down(d, &obj->msg_q_sema);
1269
1270 if(_os_thread_check_stop(d, &(obj->bk_thread)))
1271 break;
1272 dispr_thread_loop_hdl(obj);
1273 }
1274 dispr_thread_leave_hdl(obj);
1275 _os_thread_wait_stop(d, &(obj->bk_thread));
1276 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s down\n", __FUNCTION__);
1277 return 0;
1278 }
1279
u8 search_mdl(void *d, void *mdl, void *priv)
1281 {
1282 enum phl_module_id id = *(enum phl_module_id *)priv;
1283 struct phl_bk_module *module = NULL;
1284
1285 module = (struct phl_bk_module *)mdl;
1286 if (module->id == id) {
1287 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s :: id %d\n", __FUNCTION__, id);
1288 return true;
1289 }
1290 else
1291 return false;
1292 }
1293
u8 get_module_by_id(struct cmd_dispatcher *obj, enum phl_module_id id,
                    struct phl_bk_module **mdl)
1296 {
1297 void *d = phl_to_drvpriv(obj->phl_info);
1298 u8 i = 0;
1299 _os_list *node = NULL;
1300
1301 if (mdl == NULL)
1302 return false;
1303
1304 if (IS_DISPR_CTRL(id)) {
1305 if (!TEST_STATUS_FLAG(obj->status, DISPR_CTRL_PRESENT))
1306 return false;
1307 *mdl = &(obj->controller);
1308 return true;
1309 }
1310
1311 if (!_chk_bitmap_bit(obj->bitmap, id))
1312 return false;
1313
1314 for (i = 0; i < PHL_MDL_PRI_MAX; i++) {
1315
1316 if(pq_search_node(d, &(obj->module_q[i]), &node, _bh, false, &id, search_mdl)) {
1317 *mdl = (struct phl_bk_module*)node;
1318 return true;
1319 }
1320 }
1321 *mdl = NULL;
1322 return false;
1323 }
1324
enum rtw_phl_status phl_dispr_get_idx(void *dispr, u8 *idx)
1326 {
1327 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1328
1329 if (dispr == NULL)
1330 return RTW_PHL_STATUS_FAILURE;
1331 if (!TEST_STATUS_FLAG(obj->status, DISPR_INIT) || idx == NULL)
1332 return RTW_PHL_STATUS_FAILURE;
1333 *idx = obj->idx;
1334 return RTW_PHL_STATUS_SUCCESS;
1335 }
1336
/* Each dispr has a controller.
 * A dispr controller is designed for the phl instance to interact with dispr modules that belong to a specific hw band.
 * The phl instance can perform the following actions via the dedicated controller:
 * 1. allow (phl status/non-dispr phl modules) to monitor & drop msgs
 * 2. allow dispr modules that belong to the same dispr to sequentially communicate with the phl instance & call phl APIs,
 *    and also allow (phl status/non-dispr phl modules) to notify a dispr by hw band.
 * *Note*
 * 1. When the cmd dispatch engine is in solo thread mode (each dispr has its own dedicated thread),
 *    the phl instance might receive msgs from different disprs simultaneously; a semaphore
 *    (dispr_ctrl_sema) is currently used to prevent multi-thread race conditions.
 * 2. When the cmd dispatch engine is in share thread mode, msgs from different disprs are passed to the controller sequentially.
 *
 * PS:
 * phl instance: means phl_info_t, which includes phl mgnt status & non-dispr phl modules
 * dispr modules: all existing background & foreground modules
 * non-dispr phl module: data path (Tx/Rx), etc.
 * phl mgnt status: stop/surprise remove/cannot io
 */
static enum rtw_phl_status _register_dispr_controller(struct cmd_dispatcher *obj)
1356 {
1357 struct phl_bk_module *ctrl = &(obj->controller);
1358
1359 dispr_ctrl_hook_ops(obj, &(ctrl->ops));
1360 ctrl->id = PHL_MDL_PHY_MGNT;
1361
1362 if(bk_module_init(obj, &(obj->controller)) == true)
1363 return RTW_PHL_STATUS_SUCCESS;
1364 else {
1365 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_, "%s(): fail \n", __func__);
1366 return RTW_PHL_STATUS_FAILURE;
1367 }
1368 }
1369
static void _deregister_dispr_controller(struct cmd_dispatcher *obj)
1371 {
1372 bk_module_deinit(obj, &(obj->controller));
1373 }
1374
static enum rtw_phl_status _start_dispr_controller(struct cmd_dispatcher *obj)
1376 {
1377 if (bk_module_start(obj, &(obj->controller)) == true) {
1378 SET_STATUS_FLAG(obj->status, DISPR_CTRL_PRESENT);
1379 return RTW_PHL_STATUS_SUCCESS;
1380 }
1381 else {
1382 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_, "%s(): fail \n", __func__);
1383 return RTW_PHL_STATUS_FAILURE;
1384 }
1385 }
1386
static enum rtw_phl_status _stop_dispr_controller(struct cmd_dispatcher *obj)
1388 {
1389 CLEAR_STATUS_FLAG(obj->status, DISPR_CTRL_PRESENT);
1390 if (bk_module_stop(obj, &(obj->controller)) == true)
1391 return RTW_PHL_STATUS_SUCCESS;
1392 else {
1393 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_, "%s(): fail \n", __func__);
1394 return RTW_PHL_STATUS_FAILURE;
1395 }
1396 }
1397
void _notify_dispr_controller(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex)
1399 {
1400 if (!TEST_STATUS_FLAG(obj->status, DISPR_CTRL_PRESENT))
1401 return;
1402 #ifdef CONFIG_CMD_DISP_SOLO_MODE
1403 dispr_ctrl_sema_down(obj->phl_info);
1404 #endif
1405 feed_mdl_msg(obj, &(obj->controller), ex);
1406 #ifdef CONFIG_CMD_DISP_SOLO_MODE
1407 dispr_ctrl_sema_up(obj->phl_info);
1408 #endif
1409
1410 }
1411
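/* Called before the bk thread is stopped: clear the STARTED flag, stop the
 * dispr controller and cancel all cmd reqs and running msgs.
 */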
void dispr_thread_stop_prior_hdl(struct cmd_dispatcher *obj)
1413 {
1414 CLEAR_STATUS_FLAG(obj->status, DISPR_STARTED);
1415 _stop_dispr_controller(obj);
1416 cancel_all_cmd_req(obj);
1417 cancel_running_msg(obj);
1418 }
1419
void dispr_thread_stop_post_hdl(struct cmd_dispatcher *obj)
1421 {
1422 void *d = phl_to_drvpriv(obj->phl_info);
1423
	/* have to wait for the bk thread to end before deinit of the msg & req pools */
1425 deinit_dispr_msg_pool(obj);
1426 deinit_cmd_req_pool(obj);
1427 _os_atomic_set(d, &(obj->token_cnt), 0);
1428 _os_sema_free(d, &(obj->msg_q_sema));
1429 }
1430
enum rtw_phl_status dispr_init(struct phl_info_t *phl_info, void **dispr, u8 idx)
1432 {
1433 struct cmd_dispatcher *obj = NULL;
1434 void *d = phl_to_drvpriv(phl_info);
1435 u8 i = 0;
1436
1437 (*dispr) = NULL;
1438
1439 obj = (struct cmd_dispatcher *)_os_mem_alloc(d, sizeof(struct cmd_dispatcher));
1440 if (obj == NULL) {
1441 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_, "%s, alloc fail\n", __FUNCTION__);
1442 return RTW_PHL_STATUS_RESOURCE;
1443 }
1444
1445 obj->phl_info = phl_info;
1446 obj->idx = idx;
1447 _os_atomic_set(d, &(obj->token_cnt), 0);
1448 for (i = 0 ; i < PHL_MDL_PRI_MAX; i++)
1449 pq_init(d, &(obj->module_q[i]));
1450
1451 (*dispr) = (void*)obj;
1452 _os_spinlock_init(d, &(obj->token_op_q_lock));
1453 SET_STATUS_FLAG(obj->status, DISPR_INIT);
1454 SET_STATUS_FLAG(obj->status, DISPR_NOTIFY_IDLE);
1455 _register_dispr_controller(obj);
1456 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s, size dispr(%d), msg_ex(%d), req_ex(%d) \n",
1457 __FUNCTION__, (int)sizeof(struct cmd_dispatcher),
1458 (int)sizeof(struct phl_dispr_msg_ex),
1459 (int)sizeof(struct phl_cmd_token_req_ex));
1460 return RTW_PHL_STATUS_SUCCESS;
1461 }
1462
enum rtw_phl_status dispr_deinit(struct phl_info_t *phl, void *dispr)
1464 {
1465 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1466 void *d = phl_to_drvpriv(obj->phl_info);
1467 u8 i = 0;
1468
1469 if (!TEST_STATUS_FLAG(obj->status, DISPR_INIT))
1470 return RTW_PHL_STATUS_SUCCESS;
1471 dispr_stop(dispr);
1472 _deregister_dispr_controller(obj);
1473 for (i = 0 ; i < PHL_MDL_PRI_MAX; i++)
1474 pq_deinit(d, &(obj->module_q[i]));
1475 _os_spinlock_free(d, &(obj->token_op_q_lock));
1476 _os_mem_free(d, obj, sizeof(struct cmd_dispatcher));
1477 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s\n", __FUNCTION__);
1478 return RTW_PHL_STATUS_SUCCESS;
1479 }
1480
enum rtw_phl_status dispr_start(void *dispr)
1482 {
1483 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1484 void *d = phl_to_drvpriv(obj->phl_info);
1485
1486 if (TEST_STATUS_FLAG(obj->status, DISPR_STARTED))
1487 return RTW_PHL_STATUS_UNEXPECTED_ERROR;
1488
1489 init_dispr_msg_pool(obj);
1490 init_cmd_req_pool(obj);
1491 init_dispr_mdl_mgnt_info(obj);
1492 _os_mem_set(d, &(obj->renew_req_info), 0,
1493 sizeof(struct phl_token_op_info));
1494 _os_sema_init(d, &(obj->msg_q_sema), 0);
1495 CLEAR_EXCL_MDL(obj);
1496 if (disp_eng_is_solo_thread_mode(obj->phl_info)) {
1497 _os_thread_init(d, &(obj->bk_thread), background_thread_hdl, obj,
1498 "dispr_solo_thread");
1499 _os_thread_schedule(d, &(obj->bk_thread));
1500 }
1501 SET_STATUS_FLAG(obj->status, DISPR_STARTED);
1502 _start_dispr_controller(obj);
1503 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s\n", __FUNCTION__);
1504 return RTW_PHL_STATUS_SUCCESS;
1505 }
1506
bool is_dispr_started(void *dispr)
1508 {
1509 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1510
1511 if (TEST_STATUS_FLAG(obj->status, DISPR_STARTED))
1512 return true;
1513 return false;
1514 }
1515
enum rtw_phl_status dispr_stop(void *dispr)
1517 {
1518 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1519 void *d = phl_to_drvpriv(obj->phl_info);
1520
1521 if (!TEST_STATUS_FLAG(obj->status, DISPR_STARTED))
1522 return RTW_PHL_STATUS_UNEXPECTED_ERROR;
1523
1524 dispr_thread_stop_prior_hdl(obj);
1525 if (disp_eng_is_solo_thread_mode(obj->phl_info)) {
1526 _os_thread_stop(d, &(obj->bk_thread));
1527 _os_sema_up(d, &(obj->msg_q_sema));
1528 _os_thread_deinit(d, &(obj->bk_thread));
1529 }
1530 dispr_thread_stop_post_hdl(obj);
1531 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s\n", __FUNCTION__);
1532 return RTW_PHL_STATUS_SUCCESS;
1533 }
1534
enum rtw_phl_status dispr_register_module(void *dispr,
                                          enum phl_module_id id,
                                          struct phl_bk_module_ops *ops)
1538 {
1539 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1540 void *d = phl_to_drvpriv(obj->phl_info);
1541 struct phl_bk_module *module = NULL;
1542 u8 ret = true;
1543 enum phl_bk_module_priority priority = _get_mdl_priority(id);
1544
1545 FUNCIN();
1546
1547 if (!TEST_STATUS_FLAG(obj->status, DISPR_INIT) ||
1548 priority == PHL_MDL_PRI_MAX ||
1549 chk_module_ops(ops) == false ||
1550 _chk_bitmap_bit(obj->bitmap, id) == true) {
1551 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_, "%s, register fail\n", __FUNCTION__);
1552 return RTW_PHL_STATUS_FAILURE;
1553 }
1554
1555 module = (struct phl_bk_module *)_os_mem_alloc(d, sizeof(struct phl_bk_module));
1556 if (module == NULL) {
PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_, "%s, allocate fail\n", __FUNCTION__);
1558 return RTW_PHL_STATUS_FAILURE;
1559 }
1560
1561 module->id = id;
1562 _os_mem_cpy(d, &(module->ops), ops, sizeof(struct phl_bk_module_ops));
1563 pq_push(d, &(obj->module_q[priority]), &(module->list), _tail, _bh);
1564
1565 ret = bk_module_init(obj, module);
1566 if (ret == true && TEST_STATUS_FLAG(obj->status, DISPR_STARTED)) {
1567 ret = bk_module_start(obj, module);
1568 if (ret == true)
1569 _add_bitmap_bit(obj->bitmap, &(module->id), 1);
1570 if (ret == true && priority != PHL_MDL_PRI_OPTIONAL)
1571 _add_bitmap_bit(obj->basemap, &(module->id), 1);
1572 }
1573 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s id:%d, ret:%d\n",__FUNCTION__, id, ret);
1574 if (ret == true) {
1575 return RTW_PHL_STATUS_SUCCESS;
1576 } else {
1577 bk_module_deinit(obj, module);
1578 _os_mem_free(d, module, sizeof(struct phl_bk_module));
1579 return RTW_PHL_STATUS_FAILURE;
1580 }
1581 }
1582
1583 enum rtw_phl_status dispr_deregister_module(void *dispr,
1584 enum phl_module_id id)
1585 {
1586 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1587 void *d = phl_to_drvpriv(obj->phl_info);
1588 struct phl_bk_module *module = NULL;
1589 _os_list *mdl = NULL;
1590 enum rtw_phl_status phl_stat = RTW_PHL_STATUS_FAILURE;
1591 enum phl_bk_module_priority priority = _get_mdl_priority(id);
1592
1593 FUNCIN();
1594
1595 if (!TEST_STATUS_FLAG(obj->status, DISPR_INIT) ||
1596 priority == PHL_MDL_PRI_MAX)
1597 return phl_stat;
1598
1599 if(pq_search_node(d, &(obj->module_q[priority]), &mdl, _bh, true, &id, search_mdl)) {
1600 module = (struct phl_bk_module *)mdl;
1601 _clr_bitmap_bit(obj->bitmap, &(module->id), 1);
1602 _clr_bitmap_bit(obj->basemap, &(module->id), 1);
1603 bk_module_stop(obj, module);
1604 bk_module_deinit(obj, module);
1605 _os_mem_free(d, module, sizeof(struct phl_bk_module));
1606 phl_stat = RTW_PHL_STATUS_SUCCESS;
1607 }
1608
1609 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s, id: %d stat:%d\n", __FUNCTION__, id, phl_stat);
1610 return phl_stat;
1611 }
1612
1613 enum rtw_phl_status dispr_module_init(void *dispr)
1614 {
1615 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1616 void *d = phl_to_drvpriv(obj->phl_info);
1617 _os_list *mdl = NULL;
1618 u8 i = 0;
1619
1620 if (!TEST_STATUS_FLAG(obj->status, DISPR_INIT))
1621 return RTW_PHL_STATUS_FAILURE;
1622
1623 for (i = 0; i < PHL_MDL_PRI_MAX; i++) {
1624 if (pq_get_front(d, &(obj->module_q[i]), &mdl, _bh) == false)
1625 continue;
1626 do {
1627 bk_module_init(obj, (struct phl_bk_module *)mdl);
1628 } while(pq_get_next(d, &(obj->module_q[i]), mdl, &mdl, _bh));
1629 }
1630 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s\n", __FUNCTION__);
1631 return RTW_PHL_STATUS_SUCCESS;
1632 }
1633
1634 enum rtw_phl_status dispr_module_deinit(void *dispr)
1635 {
1636 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1637 void *d = phl_to_drvpriv(obj->phl_info);
1638 _os_list *mdl = NULL;
1639 u8 i = 0;
1640
1641 if (!TEST_STATUS_FLAG(obj->status, DISPR_INIT))
1642 return RTW_PHL_STATUS_FAILURE;
1643
1644 for (i = 0; i < PHL_MDL_PRI_MAX; i++) {
1645 while (pq_pop(d, &(obj->module_q[i]), &mdl, _first, _bh)) {
1646 bk_module_deinit(obj, (struct phl_bk_module *)mdl);
1647 _os_mem_free(d, mdl, sizeof(struct phl_bk_module));
1648 }
1649 }
1650 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s\n", __FUNCTION__);
1651 return RTW_PHL_STATUS_SUCCESS;
1652 }
1653
1654 enum rtw_phl_status dispr_module_start(void *dispr)
1655 {
1656 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1657 void *d = phl_to_drvpriv(obj->phl_info);
1658 _os_list *mdl = NULL;
1659 struct phl_bk_module *module = NULL;
1660 u8 i = 0;
1661 u8 ret = false;
1662
1663 if (!TEST_STATUS_FLAG(obj->status, DISPR_STARTED))
1664 return RTW_PHL_STATUS_UNEXPECTED_ERROR;
1665
1666 for (i = 0; i < PHL_MDL_PRI_MAX; i++) {
1667 if (pq_get_front(d, &(obj->module_q[i]), &mdl, _bh) == false)
1668 continue;
1669 do {
1670 module = (struct phl_bk_module*)mdl;
1671 ret = bk_module_start(obj, module);
1672 if (ret == true)
1673 _add_bitmap_bit(obj->bitmap, &(module->id), 1);
1674 if (ret == true && i != PHL_MDL_PRI_OPTIONAL)
1675 _add_bitmap_bit(obj->basemap, &(module->id), 1);
1676 } while(pq_get_next(d, &(obj->module_q[i]), mdl, &mdl, _bh));
1677 }
1678 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s\n", __FUNCTION__);
1679 /*_print_bitmap(obj->bitmap);*/
1680 return RTW_PHL_STATUS_SUCCESS;
1681 }
1682
1683 enum rtw_phl_status dispr_module_stop(void *dispr)
1684 {
1685 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1686 void *d = phl_to_drvpriv(obj->phl_info);
1687 _os_list *mdl = NULL;
1688 struct phl_bk_module *module = NULL;
1689 u8 i = 0;
1690
1691 if (!TEST_STATUS_FLAG(obj->status, DISPR_STARTED))
1692 return RTW_PHL_STATUS_UNEXPECTED_ERROR;
1693
1694 for (i = 0; i < PHL_MDL_PRI_MAX; i++) {
1695 if (pq_get_front(d, &(obj->module_q[i]), &mdl, _bh) == false)
1696 continue;
1697 do {
1698 module = (struct phl_bk_module *)mdl;
1699 _clr_bitmap_bit(obj->bitmap, &(module->id), 1);
1700 _clr_bitmap_bit(obj->basemap, &(module->id), 1);
1701 bk_module_stop(obj, module);
1702 } while(pq_get_next(d, &(obj->module_q[i]), mdl, &mdl, _bh));
1703 }
1704 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s\n", __FUNCTION__);
1705 /*_print_bitmap(obj->bitmap);*/
1706 return RTW_PHL_STATUS_SUCCESS;
1707 }
1708
1709 /**
1710  * dispr_get_cur_cmd_req -- a background module can call this function to
1711  * check whether the cmd dispatcher is idle and judge the risk of I/O conflicts.
1712  * @dispr: dispatcher handle, get from _disp_eng_get_dispr_by_idx
1713  * @handle: receives the current cmd request, NULL means cmd dispatcher is idle
1714  *
1715  * return RTW_PHL_STATUS_SUCCESS means cmd dispatcher is busy and the current
1716  * cmd request can be obtained from the handle parameter
1717  * return RTW_PHL_STATUS_FAILURE means cmd dispatcher is idle
1718 */
1719 enum rtw_phl_status
1720 dispr_get_cur_cmd_req(void *dispr, void **handle)
1721 {
1722 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1723 struct phl_cmd_token_req_ex *cur_req = NULL;
1724 enum rtw_phl_status phl_stat = RTW_PHL_STATUS_FAILURE;
1725
1726 if (!TEST_STATUS_FLAG(obj->status, DISPR_INIT|DISPR_STARTED) || handle == NULL) {
1727 phl_stat = RTW_PHL_STATUS_UNEXPECTED_ERROR;
1728 return phl_stat;
1729 }
1730
1731 (*handle) = NULL;
1732 cur_req = obj->cur_cmd_req;
1733
1734 if (cur_req == NULL ||
1735 !TEST_STATUS_FLAG(cur_req->status, REQ_STATUS_RUN) ||
1736 TEST_STATUS_FLAG(cur_req->status, REQ_STATUS_CANCEL))
1737 return phl_stat;
1738
1739 *handle = (void *)cur_req;
1740 phl_stat = RTW_PHL_STATUS_SUCCESS;
1741
1742 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_DEBUG_,
1743 "%s, req module id:%d phl_stat:%d\n", __FUNCTION__,
1744 cur_req->req.module_id, phl_stat);
1745 return phl_stat;
1746 }
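
/*
 * Usage sketch (illustrative only, not part of the original source): a
 * background module could poll dispr_get_cur_cmd_req() before issuing I/O
 * to judge whether a running foreground cmd request may conflict with it.
 * The helper name my_mdl_io_is_safe() is hypothetical.
 *
 *	static bool my_mdl_io_is_safe(void *dispr)
 *	{
 *		void *handle = NULL;
 *
 *		// busy: a cmd request is running, I/O may conflict
 *		if (dispr_get_cur_cmd_req(dispr, &handle) == RTW_PHL_STATUS_SUCCESS)
 *			return false;
 *
 *		// idle: no cmd request currently holds the token
 *		return true;
 *	}
 */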
1747
1748 enum rtw_phl_status
1749 dispr_set_cur_cmd_info(void *dispr,
1750 struct phl_module_op_info *op_info)
1751 {
1752 void *handle = NULL;
1753 struct phl_cmd_token_req_ex *cmd_req = NULL;
1754 struct phl_cmd_token_req *req = NULL;
1755
1756 if (RTW_PHL_STATUS_SUCCESS != dispr_get_cur_cmd_req(dispr, &handle))
1757 return RTW_PHL_STATUS_FAILURE;
1758
1759 cmd_req = (struct phl_cmd_token_req_ex *)handle;
1760 req = &(cmd_req->req);
1761
1762 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s, id:%d\n", __FUNCTION__, req->module_id);
1763 if (req->set_info(dispr, req->priv, op_info) == MDL_RET_SUCCESS)
1764 return RTW_PHL_STATUS_SUCCESS;
1765 else
1766 return RTW_PHL_STATUS_FAILURE;
1767 }
1768
1769 enum rtw_phl_status
1770 dispr_query_cur_cmd_info(void *dispr,
1771 struct phl_module_op_info *op_info)
1772 {
1773 void *handle = NULL;
1774 struct phl_cmd_token_req_ex *cmd_req = NULL;
1775 struct phl_cmd_token_req *req = NULL;
1776
1777 if (RTW_PHL_STATUS_SUCCESS != dispr_get_cur_cmd_req(dispr, &handle))
1778 return RTW_PHL_STATUS_FAILURE;
1779
1780 cmd_req = (struct phl_cmd_token_req_ex *)handle;
1781 req = &(cmd_req->req);
1782
1783 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_DEBUG_, "%s, id:%d\n", __FUNCTION__, req->module_id);
1784 if (req->query_info(dispr, req->priv, op_info) == MDL_RET_SUCCESS)
1785 return RTW_PHL_STATUS_SUCCESS;
1786 else
1787 return RTW_PHL_STATUS_FAILURE;
1788 }
1789
1790 enum rtw_phl_status
1791 dispr_get_bk_module_handle(void *dispr,
1792 enum phl_module_id id,
1793 void **handle)
1794 {
1795 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1796 void *d = phl_to_drvpriv(obj->phl_info);
1797 _os_list *mdl = NULL;
1798 enum rtw_phl_status phl_stat = RTW_PHL_STATUS_FAILURE;
1799 enum phl_bk_module_priority priority = _get_mdl_priority(id);
1800
1801 if (!TEST_STATUS_FLAG(obj->status, DISPR_INIT) ||
1802 handle == NULL ||
1803 priority == PHL_MDL_PRI_MAX ||
1804 !_chk_bitmap_bit(obj->bitmap, id))
1805 return phl_stat;
1806
1807 (*handle) = NULL;
1808 
1810 if(pq_search_node(d, &(obj->module_q[priority]), &mdl, _bh, false, &id, search_mdl)) {
1811 (*handle) = mdl;
1812 phl_stat = RTW_PHL_STATUS_SUCCESS;
1813 }
1814 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
1815 "%s, id:%d phl_stat:%d\n", __FUNCTION__, id, phl_stat);
1816 return phl_stat;
1817 }
1818
1819 enum rtw_phl_status
1820 dispr_set_bk_module_info(void *dispr,
1821 void *handle,
1822 struct phl_module_op_info *op_info)
1823 {
1824 struct phl_bk_module *module = (struct phl_bk_module *)handle;
1825 struct phl_bk_module_ops *ops = &(module->ops);
1826
1827 if (!TEST_STATUS_FLAG(module->status, MDL_INIT))
1828 return RTW_PHL_STATUS_FAILURE;
1829 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s, id:%d\n", __FUNCTION__, module->id);
1830 if (ops->set_info(dispr, module->priv, op_info) == MDL_RET_SUCCESS)
1831 return RTW_PHL_STATUS_SUCCESS;
1832 else
1833 return RTW_PHL_STATUS_FAILURE;
1834 }
1835
1836 enum rtw_phl_status
1837 dispr_query_bk_module_info(void *dispr,
1838 void *handle,
1839 struct phl_module_op_info *op_info)
1840 {
1841 struct phl_bk_module *module = (struct phl_bk_module *)handle;
1842 struct phl_bk_module_ops *ops = &(module->ops);
1843
1844 if (!TEST_STATUS_FLAG(module->status, MDL_INIT))
1845 return RTW_PHL_STATUS_FAILURE;
1846 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s, id:%d\n", __FUNCTION__, module->id);
1847 if (ops->query_info(dispr, module->priv, op_info) == MDL_RET_SUCCESS)
1848 return RTW_PHL_STATUS_SUCCESS;
1849 else
1850 return RTW_PHL_STATUS_FAILURE;
1851 }
1852
1853 enum rtw_phl_status
1854 dispr_set_src_info(void *dispr,
1855 struct phl_msg *msg,
1856 struct phl_module_op_info *op_info)
1857 {
1858 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1859 enum rtw_phl_status phl_stat = RTW_PHL_STATUS_FAILURE;
1860 u8 id = MSG_MDL_ID_FIELD(msg->msg_id);
1861 struct phl_cmd_token_req_ex *cur_req = obj->cur_cmd_req;
1862 enum phl_mdl_ret_code ret = MDL_RET_FAIL;
1863 struct phl_dispr_msg_ex *ex = (struct phl_dispr_msg_ex *)msg;
1864 u8 cur_req_id = get_cur_cmd_req_id(obj, NULL);
1865
1866 if (!TEST_STATUS_FLAG(obj->status, DISPR_INIT) ||
1867 (!_chk_bitmap_bit(obj->bitmap, id) &&
1868 cur_req_id != id))
1869 return phl_stat;
1870
1871 if (cur_req_id == id) {
1872 ret = cur_req->req.set_info(dispr, cur_req->req.priv, op_info);
1873 } else if (TEST_STATUS_FLAG(ex->status, MSG_STATUS_OWNER_BK_MDL)) {
1874 ret = ex->module->ops.set_info(dispr, ex->module->priv,
1875 op_info);
1876 }
1877 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
1878 "%s, id:%d phl_stat:%d\n", __FUNCTION__, id, phl_stat);
1879 if (ret == MDL_RET_FAIL)
1880 return RTW_PHL_STATUS_FAILURE;
1881 else
1882 return RTW_PHL_STATUS_SUCCESS;
1883 }
1884
1885 enum rtw_phl_status
1886 dispr_query_src_info(void *dispr,
1887 struct phl_msg *msg,
1888 struct phl_module_op_info *op_info)
1889 {
1890 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1891 enum rtw_phl_status phl_stat = RTW_PHL_STATUS_FAILURE;
1892 u8 id = MSG_MDL_ID_FIELD(msg->msg_id);
1893 struct phl_cmd_token_req_ex *cur_req = obj->cur_cmd_req;
1894 struct phl_dispr_msg_ex *ex = (struct phl_dispr_msg_ex *)msg;
1895 enum phl_mdl_ret_code ret = MDL_RET_FAIL;
1896 u8 cur_req_id = get_cur_cmd_req_id(obj, NULL);
1897
1898 if (!TEST_STATUS_FLAG(obj->status, DISPR_INIT) ||
1899 (!_chk_bitmap_bit(obj->bitmap, id) &&
1900 cur_req_id != id))
1901 return phl_stat;
1902
1903 if (cur_req_id == id) {
1904 ret = cur_req->req.query_info(dispr, cur_req->req.priv, op_info);
1905 } else if (TEST_STATUS_FLAG(ex->status, MSG_STATUS_OWNER_BK_MDL)) {
1906 ret = ex->module->ops.query_info(dispr, ex->module->priv,
1907 op_info);
1908 }
1909 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
1910 "%s, id:%d phl_stat:%d\n", __FUNCTION__, id, phl_stat);
1911 if (ret == MDL_RET_FAIL)
1912 return RTW_PHL_STATUS_FAILURE;
1913 else
1914 return RTW_PHL_STATUS_SUCCESS;
1915 }
1916
1917 enum rtw_phl_status
1918 dispr_send_msg(void *dispr,
1919 struct phl_msg *msg,
1920 struct phl_msg_attribute *attr,
1921 u32 *msg_hdl)
1922 {
1923 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1924 void *d = phl_to_drvpriv(obj->phl_info);
1925 struct phl_dispr_msg_ex *msg_ex = NULL;
1926 u8 module_id = MSG_MDL_ID_FIELD(msg->msg_id); /* msg src */
1927 u32 req_status = 0;
1928 u8 cur_req_id = get_cur_cmd_req_id(obj, &req_status);
1929 enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
1930
1931 if (!TEST_STATUS_FLAG(obj->status, DISPR_STARTED)) {
1932 sts = RTW_PHL_STATUS_UNEXPECTED_ERROR;
1933 goto err;
1934 }
1935
1936 if (TEST_STATUS_FLAG(obj->status, DISPR_SHALL_STOP)){
1937 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,"%s: dispr shall stop\n", __FUNCTION__);
1938 sts = RTW_PHL_STATUS_UNEXPECTED_ERROR;
1939 goto err;
1940 }
1941
1942 if(attr && attr->notify.id_arr == NULL && attr->notify.len) {
1943 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_, "%s attribute err\n",__FUNCTION__);
1944 sts = RTW_PHL_STATUS_INVALID_PARAM;
1945 goto err;
1946 }
1947
1948 if (!IS_DISPR_CTRL(module_id) &&
1949 !_chk_bitmap_bit(obj->bitmap, module_id) &&
1950 ((cur_req_id != PHL_MDL_ID_MAX && cur_req_id != module_id) ||
1951 (cur_req_id == PHL_MDL_ID_MAX && req_status == 0)||
1952 (cur_req_id == PHL_MDL_ID_MAX && !TEST_STATUS_FLAG(req_status,REQ_STATUS_LAST_PERMIT)))) {
1953 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_,
1954 "%s module not allowed to send\n", __FUNCTION__);
1955 sts = RTW_PHL_STATUS_INVALID_PARAM;
1956 goto err;
1957 }
1958
1959 if (!pop_front_idle_msg(obj, &msg_ex)) {
1960 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_, "%s idle msg empty\n", __FUNCTION__);
1961 sts = RTW_PHL_STATUS_RESOURCE;
1962 goto err;
1963 }
1964
1965 if (msg_hdl)
1966 *msg_hdl = 0;
1967
1968 _os_mem_cpy(d, &msg_ex->msg, msg, sizeof(struct phl_msg));
1969
1970 set_msg_bitmap(obj, msg_ex, module_id);
1971 if (attr) {
1972 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
1973 msg_ex->attr = (struct dispr_msg_attr *)attr->dispr_attr;
1974 attr->dispr_attr = NULL;
1975 #endif
1976 set_msg_custom_bitmap(obj, msg_ex, attr->opt,
1977 attr->notify.id_arr, attr->notify.len, module_id);
1978 if (attr->completion.completion) {
1979 SET_STATUS_FLAG(msg_ex->status, MSG_STATUS_NOTIFY_COMPLETE);
1980 msg_ex->completion.completion = attr->completion.completion;
1981 msg_ex->completion.priv = attr->completion.priv;
1982 }
1983 if (TEST_STATUS_FLAG(attr->opt, MSG_OPT_CLR_SNDR_MSG_IF_PENDING))
1984 SET_STATUS_FLAG(msg_ex->status, MSG_STATUS_CLR_SNDR_MSG_IF_PENDING);
1985
1986 if (TEST_STATUS_FLAG(attr->opt, MSG_OPT_PENDING_DURING_CANNOT_IO))
1987 SET_STATUS_FLAG(msg_ex->status, MSG_STATUS_PENDING_DURING_CANNOT_IO);
1988 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s, opt:0x%x\n",__FUNCTION__, attr->opt);
1989 }
1990
1991 if (get_module_by_id(obj, module_id, &(msg_ex->module)) == true) {
1992 SET_STATUS_FLAG(msg_ex->status, MSG_STATUS_OWNER_BK_MDL);
1993 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
1994 "%s module(%d) found\n", __FUNCTION__, module_id);
1995 } else if ((cur_req_id == module_id) ||
1996 (cur_req_id == PHL_MDL_ID_MAX && TEST_STATUS_FLAG(req_status,REQ_STATUS_LAST_PERMIT))) {
1997 SET_STATUS_FLAG(msg_ex->status, MSG_STATUS_OWNER_REQ);
1998 }
1999
2000 if(TEST_STATUS_FLAG(msg_ex->status, MSG_STATUS_OWNER_REQ) &&
2001 TEST_STATUS_FLAG(req_status,REQ_STATUS_LAST_PERMIT) &&
2002 (attr == NULL || !TEST_STATUS_FLAG(attr->opt, MSG_OPT_SEND_IN_ABORT))) {
2003 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_,
2004 "%s msg not allowed since cur req is going to unload\n", __FUNCTION__);
2005 SET_MSG_INDC_FIELD(msg_ex->msg.msg_id, MSG_INDC_FAIL);
2006 push_back_idle_msg(obj, msg_ex);
2007 sts = RTW_PHL_STATUS_FAILURE;
2008 goto exit;
2009 }
2010
2011 if (TEST_STATUS_FLAG(msg_ex->status, MSG_STATUS_OWNER_REQ) &&
2012 TEST_STATUS_FLAG(req_status,REQ_STATUS_LAST_PERMIT)) {
2013 SET_STATUS_FLAG(msg_ex->status, MSG_STATUS_FOR_ABORT);
2014 SET_STATUS_FLAG(obj->status, DISPR_WAIT_ABORT_MSG_DONE);
2015 }
2016
2017 SET_STATUS_FLAG(msg_ex->status, MSG_STATUS_PRE_PHASE);
2018
2019 if (IS_DISPR_CTRL(module_id))
2020 insert_msg_by_priority(obj, msg_ex);
2021 else
2022 push_back_wait_msg(obj, msg_ex);
2023
2024 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s, status:0x%x\n",__FUNCTION__, msg_ex->status);
2025 if(msg_hdl)
2026 *msg_hdl = GEN_VALID_HDL(msg_ex->idx);
2027 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s, msg_id:0x%x\n", __FUNCTION__, msg->msg_id);
2028 return RTW_PHL_STATUS_SUCCESS;
2029 err:
2030 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
2031 if(attr)
2032 free_dispr_attr(d,(struct dispr_msg_attr **) &(attr->dispr_attr));
2033 #endif
2034 exit:
2035 return sts;
2036 }
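
/*
 * Send-path sketch (illustrative, assumes the caller is an already
 * registered module): build a phl_msg plus phl_msg_attribute on the stack,
 * encode the source module and event into msg_id, then hand it to
 * dispr_send_msg(); the returned handle can later feed dispr_cancel_msg().
 * This mirrors the pattern of send_bk_msg_phy_on() further below; the
 * module/event choice here is only an example.
 *
 *	struct phl_msg msg = {0};
 *	struct phl_msg_attribute attr = {0};
 *	u32 hdl = 0;
 *
 *	SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_POWER_MGNT);
 *	SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_PHY_ON);
 *	if (dispr_send_msg(dispr, &msg, &attr, &hdl) != RTW_PHL_STATUS_SUCCESS)
 *		return;
 *	// ... later, if the msg must be withdrawn:
 *	dispr_cancel_msg(dispr, &hdl);
 */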
2037
2038 enum rtw_phl_status dispr_cancel_msg(void *dispr, u32 *msg_hdl)
2039 {
2040 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
2041 struct phl_dispr_msg_ex *msg_ex = NULL;
2042
2043 if (!TEST_STATUS_FLAG(obj->status, DISPR_STARTED) || msg_hdl == NULL)
2044 return RTW_PHL_STATUS_UNEXPECTED_ERROR;
2045
2046 if (!IS_HDL_VALID(*msg_hdl) ||
2047 GET_IDX_FROM_HDL(*msg_hdl) >= MAX_PHL_MSG_NUM) {
2048 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_, "%s, HDL invalid\n", __FUNCTION__);
2049 return RTW_PHL_STATUS_FAILURE;
2050 }
2051
2052 msg_ex = &(obj->msg_ex_pool[GET_IDX_FROM_HDL(*msg_hdl)]);
2053 *msg_hdl = 0;
2054 if (!TEST_STATUS_FLAG(msg_ex->status, MSG_STATUS_ENQ) &&
2055 !TEST_STATUS_FLAG(msg_ex->status, MSG_STATUS_RUN)) {
2056 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_, "%s, HDL status err\n", __FUNCTION__);
2057 return RTW_PHL_STATUS_FAILURE;
2058 }
2059
2060 cancel_msg(obj, msg_ex);
2061 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s\n", __FUNCTION__);
2062 return RTW_PHL_STATUS_SUCCESS;
2063 }
2064
2065 enum rtw_phl_status dispr_clr_pending_msg(void *dispr)
2066 {
2067 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
2068
2069 SET_STATUS_FLAG(obj->status, DISPR_CLR_PEND_MSG);
2070 notify_bk_thread(obj);
2071 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s\n", __FUNCTION__);
2072 return RTW_PHL_STATUS_SUCCESS;
2073 }
2074 enum rtw_phl_status
2075 dispr_add_token_req(void *dispr,
2076 struct phl_cmd_token_req *req,
2077 u32 *req_hdl)
2078 {
2079 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
2080 void *d = phl_to_drvpriv(obj->phl_info);
2081 struct phl_cmd_token_req_ex *req_ex = NULL;
2082 enum rtw_phl_status stat = RTW_PHL_STATUS_SUCCESS;
2083 _os_list *node = NULL;
2084
2085 if (!TEST_STATUS_FLAG(obj->status, DISPR_STARTED) ||
2086 req_hdl == NULL ||
2087 chk_cmd_req_ops(req) == false)
2088 return RTW_PHL_STATUS_UNEXPECTED_ERROR;
2089
2090 if (TEST_STATUS_FLAG(obj->status, DISPR_SHALL_STOP)){
2091 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,"%s: dispr shall stop\n", __FUNCTION__);
2092 return RTW_PHL_STATUS_UNEXPECTED_ERROR;
2093 }
2094
2095 if (!pop_front_idle_req(obj, &req_ex)) {
2096 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_, "%s idle req empty\n", __FUNCTION__);
2097 return RTW_PHL_STATUS_RESOURCE;
2098 }
2099 _os_mem_cpy(d, &(req_ex->req), req, sizeof(struct phl_cmd_token_req));
2100
2101 push_back_wait_req(obj, req_ex);
2102 *req_hdl = GEN_VALID_HDL(req_ex->idx);
2103 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
2104 "%s, id:%d, hdl:0x%x token_cnt:%d\n", __FUNCTION__,
2105 req->module_id,
2106 *req_hdl,
2107 _os_atomic_read(d, &(obj->token_cnt)));
2108
2109 if (pq_get_front(d, &(obj->token_op_q), &node, _bh) == false &&
2110 _os_atomic_read(d, &(obj->token_cnt)) == 0)
2111 stat = RTW_PHL_STATUS_SUCCESS;
2112 else
2113 stat = RTW_PHL_STATUS_PENDING;
2114 dispr_enqueue_token_op_info(obj, &req_ex->add_req_info, TOKEN_OP_ADD_CMD_REQ, req_ex->idx);
2115 return stat;
2116 }
2117
2118 enum rtw_phl_status dispr_cancel_token_req(void *dispr, u32 *req_hdl)
2119 {
2120 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
2121 struct phl_cmd_token_req_ex *req_ex = NULL;
2122
2123 if (!TEST_STATUS_FLAG(obj->status, DISPR_STARTED) || req_hdl == NULL)
2124 return RTW_PHL_STATUS_UNEXPECTED_ERROR;
2125
2126 if (!IS_HDL_VALID(*req_hdl) ||
2127 GET_IDX_FROM_HDL(*req_hdl) >= MAX_CMD_REQ_NUM) {
2128 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_,
2129 "%s, HDL(0x%x) invalid\n", __FUNCTION__, *req_hdl);
2130 return RTW_PHL_STATUS_FAILURE;
2131 }
2132 req_ex = &(obj->token_req_ex_pool[GET_IDX_FROM_HDL(*req_hdl)]);
2133 if (!TEST_STATUS_FLAG(req_ex->status, REQ_STATUS_ENQ) &&
2134 !TEST_STATUS_FLAG(req_ex->status, REQ_STATUS_RUN) &&
2135 !TEST_STATUS_FLAG(req_ex->status, REQ_STATUS_PREPARE)) {
2136 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_,
2137 "%s, HDL(0x%x) status err\n", __FUNCTION__, *req_hdl);
2138 return RTW_PHL_STATUS_FAILURE;
2139 }
2140
2141 if (dispr_enqueue_token_op_info(obj, &req_ex->free_req_info, TOKEN_OP_CANCEL_CMD_REQ, req_ex->idx))
2142 return RTW_PHL_STATUS_SUCCESS;
2143 else
2144 return RTW_PHL_STATUS_FAILURE;
2145 }
2146
2147 enum rtw_phl_status dispr_free_token(void *dispr, u32 *req_hdl)
2148 {
2149 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
2150 void *d = phl_to_drvpriv(obj->phl_info);
2151 struct phl_cmd_token_req_ex *req_ex = NULL;
2152
2153 if (!TEST_STATUS_FLAG(obj->status, DISPR_STARTED) || req_hdl == NULL)
2154 return RTW_PHL_STATUS_UNEXPECTED_ERROR;
2155
2156 if (obj->cur_cmd_req == NULL ||
2157 _os_atomic_read(d, &(obj->token_cnt)) == 0 ||
2158 !IS_HDL_VALID(*req_hdl) ||
2159 GET_IDX_FROM_HDL(*req_hdl) >= MAX_CMD_REQ_NUM) {
2160 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_,
2161 "%s, HDL(0x%x) invalid\n", __FUNCTION__, *req_hdl);
2162 return RTW_PHL_STATUS_FAILURE;
2163 }
2164 req_ex = &(obj->token_req_ex_pool[GET_IDX_FROM_HDL(*req_hdl)]);
2165 if (!TEST_STATUS_FLAG(req_ex->status, REQ_STATUS_RUN) &&
2166 !TEST_STATUS_FLAG(req_ex->status, REQ_STATUS_PREPARE)) {
2167 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_,
2168 "%s, HDL(0x%x) mismatch\n", __FUNCTION__, *req_hdl);
2169 return RTW_PHL_STATUS_FAILURE;
2170 }
2171 SET_STATUS_FLAG(req_ex->status, REQ_STATUS_CANCEL);
2172 if (dispr_enqueue_token_op_info(obj, &req_ex->free_req_info, TOKEN_OP_FREE_CMD_REQ, req_ex->idx))
2173 return RTW_PHL_STATUS_SUCCESS;
2174 else
2175 return RTW_PHL_STATUS_FAILURE;
2176 }
2177
2178 enum rtw_phl_status dispr_notify_dev_io_status(void *dispr, enum phl_module_id mdl_id, bool allow_io)
2179 {
2180 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
2181 enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
2182
2183 if (allow_io == false) {
2184 if (!TEST_STATUS_FLAG(obj->status, DISPR_CANNOT_IO)) {
2185 SET_STATUS_FLAG(obj->status, DISPR_CANNOT_IO);
2186 SET_EXCL_MDL(obj, mdl_id);
2187 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
2188 "%s, mdl_id(%d) notify cannot io\n", __FUNCTION__, mdl_id);
2189 status = send_dev_io_status_change(obj, allow_io);
2190 }
2191 }
2192 else {
2193 if (TEST_STATUS_FLAG(obj->status, DISPR_CANNOT_IO)) {
2194 CLEAR_STATUS_FLAG(obj->status, DISPR_CANNOT_IO);
2195 CLEAR_EXCL_MDL(obj);
2196 status = send_dev_io_status_change(obj, allow_io);
2197 dispr_clr_pending_msg(dispr);
2198 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
2199 "%s, mdl_id(%d) notify io resume\n", __FUNCTION__, mdl_id);
2200 }
2201 }
2202 return status;
2203 }
2204
2205 void dispr_notify_shall_stop(void *dispr)
2206 {
2207 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
2208
2209 if (!TEST_STATUS_FLAG(obj->status, DISPR_SHALL_STOP)) {
2210 SET_STATUS_FLAG(obj->status, DISPR_SHALL_STOP);
2211 dispr_clr_pending_msg(dispr);
2212 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
2213 "%s, notify shall stop\n", __FUNCTION__);
2214 }
2215 }
2216
2217 u8 dispr_is_fg_empty(void *dispr)
2218 {
2219 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
2220 bool is_empty = true;
2221 void *drv = phl_to_drvpriv(obj->phl_info);
2222 struct phl_queue *q = NULL;
2223 _os_list *node = NULL;
2224
2225 do {
2226 /* shall check the wait queue first and then the token op queue
2227  * to avoid getting an incorrect empty state for the fg cmd
2228 */
2229 q = &(obj->token_req_wait_q);
2230 _os_spinlock(drv, &(q->lock), _bh, NULL);
2231 if(!list_empty(&q->queue) && (q->cnt > 0)) {
2232 is_empty = false;
2233 _os_spinunlock(drv, &(q->lock), _bh, NULL);
2234 break;
2235 }
2236 _os_spinunlock(drv, &(q->lock), _bh, NULL);
2237
2238 if (pq_get_front(drv, &(obj->token_op_q), &node, _bh) == true ||
2239 _os_atomic_read(drv, &(obj->token_cnt)) > 0) {
2240 is_empty = false;
2241 break;
2242 }
2243 } while(false);
2244
2245 return is_empty;
2246 }
2247
2248 enum rtw_phl_status dispr_process_token_req(struct cmd_dispatcher *obj)
2249 {
2250 void *d = phl_to_drvpriv(obj->phl_info);
2251 struct phl_cmd_token_req_ex *ex = NULL;
2252
2253 do {
2254 if (!TEST_STATUS_FLAG(obj->status, DISPR_STARTED))
2255 return RTW_PHL_STATUS_UNEXPECTED_ERROR;
2256
2257 if (TEST_STATUS_FLAG(obj->status, DISPR_SHALL_STOP)) {
2258 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
2259 "%s: dispr shall stop\n", __FUNCTION__);
2260
2261 return RTW_PHL_STATUS_FAILURE;
2262 }
2263
2264 if (_os_atomic_read(d, &(obj->token_cnt)) > 0)
2265 return RTW_PHL_STATUS_FAILURE;
2266
2267 if (TEST_STATUS_FLAG(obj->status, DISPR_WAIT_ABORT_MSG_DONE)) {
2268 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
2269 "%s, wait for abort msg sent from prev req to finish before registering next req\n", __FUNCTION__);
2270 return RTW_PHL_STATUS_FAILURE;
2271 }
2272
2273 if (pop_front_wait_req(obj, &ex) == false) {
2274 if (!TEST_STATUS_FLAG(obj->status, DISPR_NOTIFY_IDLE)) {
2275 SET_STATUS_FLAG(obj->status, DISPR_NOTIFY_IDLE);
2276 send_bk_msg_phy_idle(obj);
2277 }
2278 return RTW_PHL_STATUS_SUCCESS;
2279 }
2280
2281 if (TEST_STATUS_FLAG(obj->status, DISPR_NOTIFY_IDLE)) {
2282 CLEAR_STATUS_FLAG(obj->status, DISPR_NOTIFY_IDLE);
2283 send_bk_msg_phy_on(obj);
2284 }
2285
2286 }while(!register_cur_cmd_req(obj, ex));
2287
2288 return RTW_PHL_STATUS_SUCCESS;
2289 }
2290
2291 void dispr_share_thread_loop_hdl(void *dispr)
2292 {
2293 dispr_thread_loop_hdl((struct cmd_dispatcher *)dispr);
2294 }
2295
2296 void dispr_share_thread_leave_hdl(void *dispr)
2297 {
2298 dispr_thread_leave_hdl((struct cmd_dispatcher *)dispr);
2299 }
2300
2301 void dispr_share_thread_stop_prior_hdl(void *dispr)
2302 {
2303 dispr_thread_stop_prior_hdl((struct cmd_dispatcher *)dispr);
2304 }
2305
2306 void dispr_share_thread_stop_post_hdl(void *dispr)
2307 {
2308 dispr_thread_stop_post_hdl((struct cmd_dispatcher *)dispr);
2309 }
2310
2311 u8 disp_query_mdl_id(struct phl_info_t *phl, void *bk_mdl)
2312 {
2313 struct phl_bk_module *mdl = NULL;
2314
2315 if (bk_mdl != NULL) {
2316 mdl = (struct phl_bk_module *)bk_mdl;
2317 return mdl->id;
2318 } else {
2319 return PHL_MDL_ID_MAX;
2320 }
2321 }
2322
2323 void send_bk_msg_phy_on(struct cmd_dispatcher *obj)
2324 {
2325 struct phl_msg msg = {0};
2326 struct phl_msg_attribute attr = {0};
2327
2328 SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_POWER_MGNT);
2329 SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_PHY_ON);
2330 dispr_send_msg((void*)obj, &msg, &attr, NULL);
2331 }
2332
2333 void send_bk_msg_phy_idle(struct cmd_dispatcher *obj)
2334 {
2335 struct phl_msg msg = {0};
2336 struct phl_msg_attribute attr = {0};
2337
2338 SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_POWER_MGNT);
2339 SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_PHY_IDLE);
2340 dispr_send_msg((void*)obj, &msg, &attr, NULL);
2341 }
2342
2343 enum rtw_phl_status send_dev_io_status_change(struct cmd_dispatcher *obj, u8 allow_io)
2344 {
2345 struct phl_msg msg = {0};
2346 struct phl_msg_attribute attr = {0};
2347 u16 event = (allow_io == true) ? (MSG_EVT_DEV_RESUME_IO) : (MSG_EVT_DEV_CANNOT_IO);
2348
2349 SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_PHY_MGNT);
2350 SET_MSG_EVT_ID_FIELD(msg.msg_id, event);
2351 return dispr_send_msg((void*)obj, &msg, &attr, NULL);
2352 }
2353
2354 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
2355 enum phl_mdl_ret_code loop_through_map(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex,
2356 enum phl_bk_module_priority priority, struct msg_notify_map *map, u8 pre_prot_phase)
2357 {
2358 u8 i = 0;
2359 struct phl_bk_module *mdl = NULL;
2360 enum phl_mdl_ret_code ret = MDL_RET_IGNORE;
2361 u8 *bitmap = (pre_prot_phase == true) ? (ex->premap) : (ex->postmap);
2362
2363 for (i = 0 ; i < map->len; i++) {
2364 if (map->id_arr[i] >= PHL_FG_MDL_START) {
2365 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_WARNING_,
2366 "%s, cmd req does not take precedence over bk module\n", __FUNCTION__);
2367 continue;
2368 }
2369 mdl = (struct phl_bk_module *)GET_MDL_HANDLE(obj, map->id_arr[i]);
2370 if (mdl == NULL || !_chk_bitmap_bit(bitmap, mdl->id))
2371 continue;
2372 /*only allow sequence rearrange for modules at the same priority*/
2373 if ( _get_mdl_priority(mdl->id) != priority)
2374 continue;
2375 ret = feed_mdl_msg(obj, mdl, ex);
2376 if (STOP_DISPATCH_MSG(ret))
2377 return ret;
2378 }
2379 return ret;
2380 }
2381
2382 static enum phl_mdl_ret_code run_self_def_seq(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex,
2383 enum phl_bk_module_priority priority, u8 pre_prot_phase)
2384 {
2385 struct msg_notify_map *map = NULL;
2386 enum phl_mdl_ret_code ret = MDL_RET_IGNORE;
2387 struct msg_dispatch_seq* seq = NULL;
2388
2389 if (ex->attr == NULL)
2390 return ret;
2391 /*MANDATORY modules cannot change dispatch order*/
2392 if (pre_prot_phase == true)
2393 seq = &(ex->attr->self_def_seq.pre_prot_phase);
2394 else
2395 seq = &(ex->attr->self_def_seq.post_prot_phase);
2396
2397 return loop_through_map(obj, ex, priority, &(seq->map[priority]), pre_prot_phase);
2398 }
2399 void reset_self_def_seq(void *d, struct msg_self_def_seq* self_def_seq)
2400 {
2401 u8 i = 0;
2402 u8 cnt = 0;
2403 struct msg_dispatch_seq *seq = (struct msg_dispatch_seq *)self_def_seq;
2404
2405 while (cnt++ < 2) {
2406 for (i = 0; i < PHL_MDL_PRI_MAX; i++) {
2407 if (seq->map[i].len)
2408 _os_kmem_free(d, seq->map[i].id_arr, seq->map[i].len);
2409 seq->map[i].id_arr = NULL;
2410 seq->map[i].len = 0;
2411 }
2412 seq++;
2413 }
2414
2415 }
2416
2417 u8 copy_self_def_seq(void *d, struct msg_self_def_seq* self_def_dest, struct msg_self_def_seq* self_def_src)
2418 {
2419 u8 i = 0;
2420 u8 cnt = 0;
2421 struct msg_dispatch_seq *dest = (struct msg_dispatch_seq *)self_def_dest;
2422 struct msg_dispatch_seq *src = (struct msg_dispatch_seq *)self_def_src;
2423
2424 while (cnt++ < 2) {
2425 for (i = 0; i < PHL_MDL_PRI_MAX; i++) {
2426 if (src->map[i].len) {
2427 dest->map[i].id_arr = _os_kmem_alloc(d, src->map[i].len);
2428 if (dest->map[i].id_arr == NULL)
2429 return false;
2430 dest->map[i].len = src->map[i].len;
2431 _os_mem_cpy(d, dest->map[i].id_arr, src->map[i].id_arr, dest->map[i].len);
2432 }
2433 }
2434 dest++;
2435 src++;
2436 }
2437 return true;
2438 }
2439
2440 inline static u8 alloc_dispr_attr(void *d, struct phl_msg_attribute *attr)
2441 {
2442 if (attr->dispr_attr == NULL)
2443 attr->dispr_attr = _os_kmem_alloc(d, sizeof(struct dispr_msg_attr));
2444 if (attr->dispr_attr != NULL)
2445 _os_mem_set(d, attr->dispr_attr, 0, sizeof(struct dispr_msg_attr));
2446 return (attr->dispr_attr == NULL) ? (false) : (true);
2447 }
2448
2449 enum rtw_phl_status dispr_set_dispatch_seq(void *dispr, struct phl_msg_attribute *attr,
2450 struct msg_self_def_seq* seq)
2451 {
2452 struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
2453 void *d = phl_to_drvpriv(obj->phl_info);
2454 struct dispr_msg_attr *dispr_attr = NULL;
2455
2456 if (attr == NULL || seq == NULL)
2457 return RTW_PHL_STATUS_INVALID_PARAM;
2458
2459 if (alloc_dispr_attr(d, attr) == false)
2460 goto err_attr_alloc;
2461
2462 dispr_attr = attr->dispr_attr;
2463 reset_self_def_seq(d, &(dispr_attr->self_def_seq));
2464
2465 if (copy_self_def_seq(d, &(dispr_attr->self_def_seq), seq) == false)
2466 goto err_seq_copy;
2467 return RTW_PHL_STATUS_SUCCESS;
2468 err_seq_copy:
2469 free_dispr_attr(d, &(attr->dispr_attr));
2470 err_attr_alloc:
2471 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_WARNING_,
2472 "%s, err\n", __FUNCTION__);
2473 return RTW_PHL_STATUS_RESOURCE;
2474 }
2475
2476 static void free_dispr_attr(void *d, struct dispr_msg_attr **dispr_attr)
2477 {
2478 struct dispr_msg_attr *attr = NULL;
2479
2480 if (dispr_attr == NULL || *dispr_attr == NULL)
2481 return;
2482 PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s\n", __FUNCTION__);
2483 attr = *dispr_attr;
2484 reset_self_def_seq(d, &(attr->self_def_seq));
2485 _os_kmem_free(d, attr, sizeof(struct dispr_msg_attr));
2486 *dispr_attr = NULL;
2487 }
2488 #endif
2489 #endif
2490