xref: /OK3568_Linux_fs/external/rkwifibt/drivers/rtl8852bs/phl/phl_cmd_dispatcher.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /******************************************************************************
2  *
3  * Copyright(c) 2019 Realtek Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of version 2 of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12  * more details.
13  *
14  *****************************************************************************/
15 #define _PHL_CMD_DISPR_C_
16 #include "phl_headers.h"
17 #ifdef CONFIG_CMD_DISP
18 
19 #ifdef CONFIG_PHL_MSG_NUM
20 #define MAX_PHL_MSG_NUM CONFIG_PHL_MSG_NUM
21 #else
22 #define MAX_PHL_MSG_NUM (24)
23 #endif
24 
25 #define MAX_CMD_REQ_NUM (8)
26 #define MODL_MASK_LEN (PHL_BK_MDL_END/8)
27 
28 #define GEN_VALID_HDL(_idx) ((u32)(BIT31 | (u32)(_idx)))
29 #define IS_HDL_VALID(_hdl) ((_hdl) & BIT31)
30 #define GET_IDX_FROM_HDL(_hdl) ((u8)((_hdl) & 0xFF))
31 
32 #define GET_CUR_PENDING_EVT( _obj, _mdl_id) \
33 	((u16)((_obj)->mdl_info[(_mdl_id)].pending_evt_id))
34 #define SET_CUR_PENDING_EVT( _obj, _mdl_id, _evt_id) \
35 	((_obj)->mdl_info[(_mdl_id)].pending_evt_id = (_evt_id))
36 
37 #define IS_EXCL_MDL(_obj, _mdl) ((_obj)->exclusive_mdl == (_mdl))
38 #define SET_EXCL_MDL(_obj, _mdl) ((_obj)->exclusive_mdl = (_mdl))
39 #define CLEAR_EXCL_MDL(_obj) ((_obj)->exclusive_mdl = PHL_MDL_ID_MAX)
40 #define STOP_DISPATCH_MSG(_ret) \
41 	((_ret) != MDL_RET_SUCCESS && (_ret) != MDL_RET_IGNORE)
42 
43 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
44 #define SET_MDL_HANDLE( _obj, _mdl_id, _handle) \
45 	((_obj)->mdl_info[(_mdl_id)].handle = (void*)(_handle))
46 #define GET_MDL_HANDLE( _obj, _mdl_id) \
47 	((_obj)->mdl_info[(_mdl_id)].handle)
48 #endif
49 
50 enum phl_msg_status {
51 	MSG_STATUS_ENQ = BIT0,
52 	MSG_STATUS_RUN = BIT1,
53 	MSG_STATUS_NOTIFY_COMPLETE = BIT2,
54 	MSG_STATUS_CANCEL = BIT3,
55 	MSG_STATUS_PRE_PHASE = BIT4,
56 	MSG_STATUS_FAIL = BIT5,
57 	MSG_STATUS_OWNER_BK_MDL = BIT6,
58 	MSG_STATUS_OWNER_REQ = BIT7,
59 	MSG_STATUS_CLR_SNDR_MSG_IF_PENDING = BIT8,
60 	MSG_STATUS_PENDING = BIT9,
61 	MSG_STATUS_FOR_ABORT = BIT10,
62 	MSG_STATUS_PENDING_DURING_CANNOT_IO = BIT11,
63 };
64 
65 enum cmd_req_status {
66 	REQ_STATUS_ENQ = BIT0,
67 	REQ_STATUS_RUN = BIT1,
68 	REQ_STATUS_CANCEL = BIT2,
69 	REQ_STATUS_LAST_PERMIT = BIT3,
70 	REQ_STATUS_PREPARE = BIT4,
71 };
72 
73 enum phl_mdl_status {
74 	MDL_INIT = BIT0,
75 	MDL_STARTED = BIT1,
76 };
77 
78 enum dispatcher_status {
79 	DISPR_INIT = BIT0,
80 	DISPR_STARTED = BIT1,
81 	DISPR_SHALL_STOP = BIT2,
82 	DISPR_MSGQ_INIT = BIT3,
83 	DISPR_REQ_INIT = BIT4,
84 	DISPR_NOTIFY_IDLE = BIT5,
85 	DISPR_CLR_PEND_MSG = BIT6,
86 	DISPR_CTRL_PRESENT = BIT7,
87 	DISPR_WAIT_ABORT_MSG_DONE = BIT8,
88 	DISPR_CANNOT_IO = BIT9,
89 };
90 
91 enum token_op_type {
92 	TOKEN_OP_ADD_CMD_REQ = 1,
93 	TOKEN_OP_FREE_CMD_REQ = 2,
94 	TOKEN_OP_CANCEL_CMD_REQ = 3,
95 	TOKEN_OP_RENEW_CMD_REQ = 4,
96 };
97 
98 /**
99  * phl_bk_module - instance of phl background module,
100  * @status: contain mgnt status flags, refer to enum phl_mdl_status
101  * @id: refer to enum phl_module_id
102  * @priv: private context
103  * @ops: interface for interacting with phl_module
104  */
105 struct phl_bk_module {
106 	_os_list list;
107 	u8 status;
108 	u8 id;
109 	void *priv;
110 	struct phl_bk_module_ops ops;
111 };
112 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
113 struct dispr_msg_attr {
114 	struct msg_self_def_seq self_def_seq;
115 };
116 #endif
117 /**
118  * phl_dispr_msg_ex - phl msg extension,
119  * @status: contain mgnt status flags, refer to enum phl_msg_status
120  * @idx: idx in original msg_ex pool
121  * @msg: msg content from external module
122  * @premap: notify map in pre-protocol phase, refer to enum phl_module_id
123  * @postmap: notify map in post-protocol phase, refer to enum phl_module_id
124  * @completion: msg completion routine.
125  * @priv: private context to completion routine.
126  * @module: module handle of msg source, only used when msg fails
127  */
128 struct phl_dispr_msg_ex {
129 	_os_list list;
130 	u16 status;
131 	u8 idx;
132 	struct phl_msg msg;
133 	u8 premap[MODL_MASK_LEN];
134 	u8 postmap[MODL_MASK_LEN];
135 	struct msg_completion_routine completion;
136 	struct phl_bk_module *module; /* module handle of the mdl id carried in msg_id */
137 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
138 	struct dispr_msg_attr *attr;
139 #endif
140 };
141 
142 /**
143  * phl_token_op_info - for internal mgnt purpose,
144  * @info: mgnt data
145  */
146 struct phl_token_op_info {
147 	_os_list list;
148 	u8 used;
149 	enum token_op_type type;
150 	u8 data;
151 };
152 /**
153  * phl_cmd_token_req_ex - cmd token request extension,
154  * @status: contain mgnt status flags, refer to enum cmd_req_status
155  * @idx: idx in original req_ex pool
156  * @req: req content from external module.
157  */
158 struct phl_cmd_token_req_ex {
159 	_os_list list;
160 	u8 idx;
161 	u8 status;
162 	struct phl_cmd_token_req req;
163 	struct phl_token_op_info add_req_info;
164 	struct phl_token_op_info free_req_info;
165 };
166 
167 struct mdl_mgnt_info {
168 	u16 pending_evt_id;
169 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
170 	void* handle;
171 #endif
172 };
173 
174 /**
175  * cmd_dispatcher,
176  * @idx: idx in dispatch engine, corresponding to band idx
177  * @status: contain mgnt status flags, refer to enum dispatcher_status
178  * @phl_info: for general reference usage.
179  * @module_q: module queue that link each modules based on priority
180  * @msg_ex_pool: msg extension pool
181  * @bk_thread: background thread
182  * @token_req_ex_pool: req extension pool
183  * @token_cnt: current token count,
184  * 	       cmd req can be executed when dispatcher's token count is 0
185  * @bitmap: consists of existing background modules loaded in current dispatcher,
186  *	    refer to enum phl_module_id
187  * @basemap: BK modules that must be notified when handling msg
188  * @controller: instance of dispr controller module
189  * @renew_req_info: used to trigger next token req registration
190  * @exclusive_mdl: under certain conditions, like a dev IO status change,
191  * 		   dispr only allows the designated module to send msgs and cancels the rest
192  */
193 struct cmd_dispatcher {
194 	u8 idx;
195 	u16 status;
196 	struct phl_info_t *phl_info;
197 	struct phl_queue module_q[PHL_MDL_PRI_MAX];
198 	struct phl_dispr_msg_ex msg_ex_pool[MAX_PHL_MSG_NUM];
199 	_os_sema msg_q_sema; /* wake up background thread in SOLO_THREAD_MODE*/
200 	struct phl_queue msg_wait_q;
201 	struct phl_queue msg_idle_q;
202 	struct phl_queue msg_pend_q;
203 	_os_thread bk_thread;
204 	struct phl_cmd_token_req_ex token_req_ex_pool[MAX_CMD_REQ_NUM];
205 	struct phl_queue token_req_wait_q;
206 	struct phl_queue token_req_idle_q;
207 	struct phl_queue token_op_q;
208 	_os_lock token_op_q_lock;
209 	_os_atomic token_cnt; // atomic
210 	struct phl_cmd_token_req_ex *cur_cmd_req;
211 	u8 bitmap[MODL_MASK_LEN];
212 	u8 basemap[MODL_MASK_LEN];
213 	struct mdl_mgnt_info mdl_info[PHL_MDL_ID_MAX];
214 	struct phl_bk_module controller;
215 	struct phl_token_op_info renew_req_info;
216 	u8 exclusive_mdl;
217 };
218 
219 enum rtw_phl_status dispr_process_token_req(struct cmd_dispatcher *obj);
220 void send_bk_msg_phy_on(struct cmd_dispatcher *obj);
221 void send_bk_msg_phy_idle(struct cmd_dispatcher *obj);
222 enum rtw_phl_status send_dev_io_status_change(struct cmd_dispatcher *obj, u8 allow_io);
223 void _notify_dispr_controller(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex);
224 static u8 dispr_enqueue_token_op_info(struct cmd_dispatcher *obj, struct phl_token_op_info *op_info,
225 			    	enum token_op_type type, u8 data);
226 
227 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
228 static void free_dispr_attr(void *d, struct dispr_msg_attr **dispr_attr);
229 static enum phl_mdl_ret_code run_self_def_seq(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex,
230 			enum phl_bk_module_priority priority, u8 pre_prot_phase);
231 #endif
232 
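/* Map a module id to its dispatch priority bucket: role ids map to
 * PHL_MDL_PRI_ROLE, mandatory ids to PHL_MDL_PRI_MANDATORY, optional ids to
 * PHL_MDL_PRI_OPTIONAL; ids outside the background module range return
 * PHL_MDL_PRI_MAX (invalid).
 */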
233 inline static
234 enum phl_bk_module_priority _get_mdl_priority(enum phl_module_id id)
235 {
236 	if (id < PHL_BK_MDL_ROLE_START)
237 		return PHL_MDL_PRI_MAX;
238 	else if (id <= PHL_BK_MDL_ROLE_END)
239 		return PHL_MDL_PRI_ROLE;
240 	else if ( id <= PHL_BK_MDL_MDRY_END)
241 		return PHL_MDL_PRI_MANDATORY;
242 	else if (id <= PHL_BK_MDL_OPT_END)
243 		return PHL_MDL_PRI_OPTIONAL;
244 	else
245 		return PHL_MDL_PRI_MAX;
246 }
247 
248 inline static u8 _is_bitmap_empty(void *d, u8 *bitmap)
249 {
250 	u8 empty[MODL_MASK_LEN] = {0};
251 
252 	return (!_os_mem_cmp(d, bitmap, empty, MODL_MASK_LEN))?(true):(false);
253 }
254 
255 inline static void _print_bitmap(u8 *bitmap)
256 {
257 	u8 k = 0;
258 
259 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "print bitmap: \n");
260 
261 	for (k = 0; k < MODL_MASK_LEN; k++) {
262 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_DEBUG_,"[%d]:0x%x\n", k, bitmap[k]);
263 	}
264 }
265 
266 static void notify_bk_thread(struct cmd_dispatcher *obj)
267 {
268 	void *d = phl_to_drvpriv(obj->phl_info);
269 
270 	if (disp_eng_is_solo_thread_mode(obj->phl_info))
271 		_os_sema_up(d, &(obj->msg_q_sema));
272 	else
273 		disp_eng_notify_share_thread(obj->phl_info, (void*)obj);
274 }
275 
276 static void on_abort_msg_complete(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex)
277 {
278 	/* since struct phl_token_op_info is used to synchronously handle token reqs in the background thread,
279 	 * use renew_req_info here to notify the background thread to run dispr_process_token_req again before handling the next msg
280 	 */
281 	CLEAR_STATUS_FLAG(obj->status, DISPR_WAIT_ABORT_MSG_DONE);
282 	dispr_enqueue_token_op_info(obj, &obj->renew_req_info, TOKEN_OP_RENEW_CMD_REQ, 0xff);
283 }
284 
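/* Msg extensions move between three queues: idle (free), wait (scheduled for
 * dispatch) and pend (parked until a pending/cannot-IO condition clears).
 * The pop_front_xxx/push_back_xxx helpers below handle these transitions and
 * keep the MSG_STATUS_xxx flags in sync.
 */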
285 static u8 pop_front_idle_msg(struct cmd_dispatcher *obj,
286 			     struct phl_dispr_msg_ex **msg)
287 {
288 	void *d = phl_to_drvpriv(obj->phl_info);
289 	_os_list *new_msg = NULL;
290 
291 	(*msg) = NULL;
292 	if (pq_pop(d, &(obj->msg_idle_q), &new_msg, _first, _bh)) {
293 		(*msg) = (struct phl_dispr_msg_ex *)new_msg;
294 		(*msg)->status = 0;
295 		(*msg)->module = NULL;
296 		(*msg)->completion.priv = NULL;
297 		(*msg)->completion.completion = NULL;
298 		_os_mem_set(d, (*msg)->premap, 0, MODL_MASK_LEN);
299 		_os_mem_set(d, (*msg)->postmap, 0, MODL_MASK_LEN);
300 		_os_mem_set(d, &((*msg)->msg), 0, sizeof(struct phl_msg));
301 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
302 		(*msg)->attr = NULL;
303 #endif
304 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s: remain cnt(%d)\n", __FUNCTION__, obj->msg_idle_q.cnt);
305 		return true;
306 	} else {
307 		return false;
308 	}
309 }
310 
311 static void push_back_idle_msg(struct cmd_dispatcher *obj,
312 			       struct phl_dispr_msg_ex *ex)
313 {
314 	void *d = phl_to_drvpriv(obj->phl_info);
315 
316 	if (TEST_STATUS_FLAG(ex->status, MSG_STATUS_NOTIFY_COMPLETE) &&
317 	    ex->completion.completion) {
318 		if (TEST_STATUS_FLAG(ex->status, MSG_STATUS_CANCEL))
319 			SET_MSG_INDC_FIELD(ex->msg.msg_id, MSG_INDC_CANCEL);
320 		ex->completion.completion(ex->completion.priv, &(ex->msg));
321 		CLEAR_STATUS_FLAG(ex->status, MSG_STATUS_NOTIFY_COMPLETE);
322 	}
323 	if (TEST_STATUS_FLAG(ex->status, MSG_STATUS_FOR_ABORT))
324 		on_abort_msg_complete(obj, ex);
325 	ex->status = 0;
326 	if(GET_CUR_PENDING_EVT(obj, MSG_MDL_ID_FIELD(ex->msg.msg_id)) == MSG_EVT_ID_FIELD(ex->msg.msg_id))
327 		SET_CUR_PENDING_EVT(obj, MSG_MDL_ID_FIELD(ex->msg.msg_id), MSG_EVT_MAX);
328 	ex->msg.msg_id = 0;
329 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
330 	free_dispr_attr(d, &(ex->attr));
331 #endif
332 	pq_push(d, &(obj->msg_idle_q), &(ex->list), _tail, _bh);
333 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s: remain cnt(%d)\n", __FUNCTION__, obj->msg_idle_q.cnt);
334 }
335 
336 static u8 pop_front_wait_msg(struct cmd_dispatcher *obj,
337 			     struct phl_dispr_msg_ex **msg)
338 {
339 	void *d = phl_to_drvpriv(obj->phl_info);
340 	_os_list *new_msg = NULL;
341 
342 	(*msg) = NULL;
343 	if (pq_pop(d, &(obj->msg_wait_q), &new_msg, _first, _bh)) {
344 		(*msg) = (struct phl_dispr_msg_ex *)new_msg;
345 		SET_STATUS_FLAG((*msg)->status, MSG_STATUS_RUN);
346 		CLEAR_STATUS_FLAG((*msg)->status, MSG_STATUS_ENQ);
347 		CLEAR_STATUS_FLAG((*msg)->status, MSG_STATUS_PENDING);
348 		return true;
349 	} else {
350 		return false;
351 	}
352 }
353 
354 static void push_back_wait_msg(struct cmd_dispatcher *obj,
355 			       struct phl_dispr_msg_ex *ex)
356 {
357 	void *d = phl_to_drvpriv(obj->phl_info);
358 
359 	SET_STATUS_FLAG(ex->status, MSG_STATUS_ENQ);
360 	CLEAR_STATUS_FLAG(ex->status, MSG_STATUS_RUN);
361 	pq_push(d, &(obj->msg_wait_q), &(ex->list), _tail, _bh);
362 	notify_bk_thread(obj);
363 }
364 
365 u8 is_higher_priority(void *d, void *priv, _os_list *input, _os_list *obj)
366  {
367 	struct phl_dispr_msg_ex *ex_input = (struct phl_dispr_msg_ex *)input;
368 	struct phl_dispr_msg_ex *ex_obj = (struct phl_dispr_msg_ex *)obj;
369 
370 	if (IS_DISPR_CTRL(MSG_MDL_ID_FIELD(ex_input->msg.msg_id)) &&
371 	    !IS_DISPR_CTRL(MSG_MDL_ID_FIELD(ex_obj->msg.msg_id)))
372 		return true;
373 	return false;
374  }
375 
376 static void insert_msg_by_priority(struct cmd_dispatcher *obj,
377 			       struct phl_dispr_msg_ex *ex)
378 {
379 	void *d = phl_to_drvpriv(obj->phl_info);
380 
381 	SET_STATUS_FLAG(ex->status, MSG_STATUS_ENQ);
382 	CLEAR_STATUS_FLAG(ex->status, MSG_STATUS_RUN);
383 	pq_insert(d, &(obj->msg_wait_q), _bh, NULL, &(ex->list), is_higher_priority);
384 	notify_bk_thread(obj);
385 }
386 
387 static u8 pop_front_pending_msg(struct cmd_dispatcher *obj,
388 			     struct phl_dispr_msg_ex **msg)
389 {
390 	void *d = phl_to_drvpriv(obj->phl_info);
391 	_os_list *new_msg = NULL;
392 
393 	(*msg) = NULL;
394 	if (pq_pop(d, &(obj->msg_pend_q), &new_msg, _first, _bh)) {
395 		(*msg) = (struct phl_dispr_msg_ex *)new_msg;
396 		return true;
397 	} else {
398 		return false;
399 	}
400 }
401 
402 static void push_back_pending_msg(struct cmd_dispatcher *obj,
403 			       struct phl_dispr_msg_ex *ex)
404 {
405 	void *d = phl_to_drvpriv(obj->phl_info);
406 
407 	SET_STATUS_FLAG(ex->status, MSG_STATUS_ENQ);
408 	CLEAR_STATUS_FLAG(ex->status, MSG_STATUS_RUN);
409 
410 	if(TEST_STATUS_FLAG(ex->status, MSG_STATUS_CLR_SNDR_MSG_IF_PENDING))
411 		SET_CUR_PENDING_EVT(obj, MSG_MDL_ID_FIELD(ex->msg.msg_id), MSG_EVT_ID_FIELD(ex->msg.msg_id));
412 	pq_push(d, &(obj->msg_pend_q), &(ex->list), _tail, _bh);
413 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s: remain cnt(%d)\n", __FUNCTION__, obj->msg_pend_q.cnt);
414 }
415 
416 static void clear_pending_msg(struct cmd_dispatcher *obj)
417 {
418 	struct phl_dispr_msg_ex *ex = NULL;
419 
420 	if(!TEST_STATUS_FLAG(obj->status, DISPR_CLR_PEND_MSG))
421 		return;
422 	CLEAR_STATUS_FLAG(obj->status, DISPR_CLR_PEND_MSG);
423 	while (pop_front_pending_msg(obj, &ex)) {
424 		if (IS_DISPR_CTRL(MSG_EVT_ID_FIELD(ex->msg.msg_id)))
425 			insert_msg_by_priority(obj, ex);
426 		else
427 			push_back_wait_msg(obj, ex);
428 	}
429 }
430 
431 static void clear_waiting_msg(struct cmd_dispatcher *obj)
432 {
433 	struct phl_dispr_msg_ex *ex = NULL;
434 
435 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s: remain cnt(%d)\n", __FUNCTION__, obj->msg_idle_q.cnt);
436 	while(obj->msg_idle_q.cnt != MAX_PHL_MSG_NUM) {
437 		while (pop_front_pending_msg(obj, &ex))
438 			push_back_wait_msg(obj, ex);
439 		while (pop_front_wait_msg(obj, &ex))
440 			push_back_idle_msg(obj, ex);
441 	}
442 }
443 
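/* A "special" msg may still be dispatched while DISPR_CANNOT_IO is set:
 * either its sender is the exclusive module, or it carries one of the
 * DEV_CANNOT_IO/DEV_RESUME_IO/PHY_ON/PHY_IDLE events.
 */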
444 static bool is_special_msg(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex)
445 {
446 	u8 mdl_id = MSG_MDL_ID_FIELD(ex->msg.msg_id);
447 	u16 evt = MSG_EVT_ID_FIELD(ex->msg.msg_id);
448 
449 	if (TEST_STATUS_FLAG(obj->status, DISPR_CANNOT_IO)) {
450 		if ( IS_EXCL_MDL(obj, mdl_id) ||
451 		     evt == MSG_EVT_DEV_CANNOT_IO ||
452 		     evt == MSG_EVT_DEV_RESUME_IO ||
453 		     evt == MSG_EVT_PHY_ON ||
454 		     evt == MSG_EVT_PHY_IDLE)
455 			return true;
456 	}
457 	return false;
458 }
459 
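/* Decide whether a popped msg should be dropped instead of dispatched:
 * dispatcher not started, msg already canceled, a different event from the
 * same sender currently pending, SHALL_STOP set, or CANNOT_IO set without
 * the pending-during-cannot-IO option.
 */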
460 static bool is_msg_canceled(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex)
461 {
462 	u16 pending_evt = GET_CUR_PENDING_EVT(obj, MSG_MDL_ID_FIELD(ex->msg.msg_id));
463 
464 	if (!TEST_STATUS_FLAG(obj->status, DISPR_STARTED) ||
465 	    TEST_STATUS_FLAG(ex->status, MSG_STATUS_CANCEL))
466 		return true;
467 
468 	if (pending_evt != MSG_EVT_MAX && pending_evt != MSG_EVT_ID_FIELD(ex->msg.msg_id)) {
469 		SET_STATUS_FLAG(ex->status, MSG_STATUS_CANCEL);
470 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "msg canceled, cur pending evt(%d)\n", pending_evt);
471 		return true;
472 	}
473 
474 	if (TEST_STATUS_FLAG(obj->status, DISPR_SHALL_STOP)) {
475 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "msg canceled due to SHALL STOP status\n");
476 
477 		SET_MSG_INDC_FIELD(ex->msg.msg_id, MSG_INDC_CANNOT_IO);
478 		SET_STATUS_FLAG(ex->status, MSG_STATUS_CANCEL);
479 		return true;
480 	}
481 
482 	if (TEST_STATUS_FLAG(obj->status, DISPR_CANNOT_IO)) {
483 		if( is_special_msg(obj, ex)) {
484 			SET_MSG_INDC_FIELD(ex->msg.msg_id, MSG_INDC_CANNOT_IO);
485 			PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "special msg found, still sent with CANNOT IO flag set\n");
486 		}
487 		else if (!TEST_STATUS_FLAG(ex->status, MSG_STATUS_PENDING_DURING_CANNOT_IO)) {
488 			SET_STATUS_FLAG(ex->status, MSG_STATUS_CANCEL);
489 			SET_MSG_INDC_FIELD(ex->msg.msg_id, MSG_INDC_CANNOT_IO);
490 			PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "msg canceled due to CANNOT IO status\n");
491 			return true;
492 		} else {
493 			SET_STATUS_FLAG(ex->status, MSG_STATUS_PENDING);
494 			PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "msg pending due to CANNOT IO status\n");
495 		}
496 	}
497 
498 	return false;
499 }
500 
501 void init_dispr_msg_pool(struct cmd_dispatcher *obj)
502 {
503 	u8 i = 0;
504 	void *d = phl_to_drvpriv(obj->phl_info);
505 
506 	if (TEST_STATUS_FLAG(obj->status, DISPR_MSGQ_INIT))
507 		return;
508 	pq_init(d, &(obj->msg_idle_q));
509 	pq_init(d, &(obj->msg_wait_q));
510 	pq_init(d, &(obj->msg_pend_q));
511 	_os_mem_set(d, obj->msg_ex_pool, 0,
512 		    sizeof(struct phl_dispr_msg_ex) * MAX_PHL_MSG_NUM);
513 	for (i = 0; i < MAX_PHL_MSG_NUM; i++) {
514 		obj->msg_ex_pool[i].idx = i;
515 		push_back_idle_msg(obj, &(obj->msg_ex_pool[i]));
516 	}
517 
518 	SET_STATUS_FLAG(obj->status, DISPR_MSGQ_INIT);
519 }
520 
521 void deinit_dispr_msg_pool(struct cmd_dispatcher *obj)
522 {
523 	void *d = phl_to_drvpriv(obj->phl_info);
524 
525 	if (!TEST_STATUS_FLAG(obj->status, DISPR_MSGQ_INIT))
526 		return;
527 	CLEAR_STATUS_FLAG(obj->status, DISPR_MSGQ_INIT);
528 
529 	pq_deinit(d, &(obj->msg_idle_q));
530 	pq_deinit(d, &(obj->msg_wait_q));
531 	pq_deinit(d, &(obj->msg_pend_q));
532 }
533 
534 void cancel_msg(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex)
535 {
536 	void *d = phl_to_drvpriv(obj->phl_info);
537 
538 	/* zero the bitmaps to ensure the msg is not forwarded to
539 	 * any module after cancel.
540 	 */
541 	_reset_bitmap(d, ex->premap, MODL_MASK_LEN);
542 	_reset_bitmap(d, ex->postmap, MODL_MASK_LEN);
543 
544 	SET_STATUS_FLAG(ex->status, MSG_STATUS_CANCEL);
545 }
546 
547 void cancel_running_msg(struct cmd_dispatcher *obj)
548 {
549 	u8 i = 0;
550 
551 	for (i = 0; i < MAX_PHL_MSG_NUM;i++) {
552 		if(TEST_STATUS_FLAG(obj->msg_ex_pool[i].status, MSG_STATUS_RUN))
553 			cancel_msg(obj, &(obj->msg_ex_pool[i]));
554 	}
555 }
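/* Build the default pre/post notify bitmaps for a msg: every module loaded
 * in this dispatcher (obj->bitmap) is notified, and the sender's own bit is
 * kept set when the sender is a registered background module.
 */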
556 void set_msg_bitmap(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex, u8 mdl_id)
557 {
558 	void *d = phl_to_drvpriv(obj->phl_info);
559 
560 	/* ensure mandatory & wifi role modules recv all msgs */
561 	_os_mem_cpy(d, ex->premap, obj->bitmap, MODL_MASK_LEN);
562 	_os_mem_cpy(d, ex->postmap, obj->bitmap, MODL_MASK_LEN);
563 	if(_chk_bitmap_bit(obj->bitmap, mdl_id)) {
564 		_add_bitmap_bit(ex->premap, &mdl_id, 1);
565 		_add_bitmap_bit(ex->postmap, &mdl_id, 1);
566 	}
567 //_print_bitmap(ex->premap);
568 }
569 
570 void set_msg_custom_bitmap(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex,
571 		    enum phl_msg_opt opt, u8 *id_arr, u32 len, u8 mdl_id)
572 {
573 	void *d = phl_to_drvpriv(obj->phl_info);
574 
575 	if (opt & MSG_OPT_SKIP_NOTIFY_OPT_MDL) {
576 		_os_mem_cpy(d, ex->premap, obj->basemap, MODL_MASK_LEN);
577 		_os_mem_cpy(d, ex->postmap, obj->basemap, MODL_MASK_LEN);
578 	}
579 	if (opt & MSG_OPT_BLIST_PRESENT) {
580 		_clr_bitmap_bit(ex->premap, id_arr, len);
581 		_clr_bitmap_bit(ex->postmap, id_arr, len);
582 	} else {
583 		_add_bitmap_bit(ex->premap, id_arr, len);
584 		_add_bitmap_bit(ex->postmap, id_arr, len);
585 	}
586 	if(_chk_bitmap_bit(obj->bitmap, mdl_id)) {
587 		_add_bitmap_bit(ex->premap, &mdl_id, 1);
588 		_add_bitmap_bit(ex->postmap, &mdl_id, 1);
589 	}
590 }
591 
592 u8 *get_msg_bitmap(struct phl_dispr_msg_ex *ex)
593 {
594 	if (TEST_STATUS_FLAG(ex->status, MSG_STATUS_PRE_PHASE)) {
595 		SET_MSG_INDC_FIELD(ex->msg.msg_id, MSG_INDC_PRE_PHASE);
596 		return ex->premap;
597 	} else {
598 		CLEAR_MSG_INDC_FIELD(ex->msg.msg_id, MSG_INDC_PRE_PHASE);
599 		return ex->postmap;
600 	}
601 }
602 
603 
604 void init_dispr_mdl_mgnt_info(struct cmd_dispatcher *obj)
605 {
606 	u8 i = 0;
607 
608 	for (i = 0; i < PHL_MDL_ID_MAX; i++)
609 		SET_CUR_PENDING_EVT(obj, i, MSG_EVT_MAX);
610 
611 }
612 
613 static u8 pop_front_idle_req(struct cmd_dispatcher *obj,
614 			     struct phl_cmd_token_req_ex **req)
615 {
616 	void *d = phl_to_drvpriv(obj->phl_info);
617 	_os_list *new_req = NULL;
618 
619 	(*req) = NULL;
620 	if (pq_pop(d, &(obj->token_req_idle_q), &new_req, _first, _bh)) {
621 		(*req) = (struct phl_cmd_token_req_ex*)new_req;
622 		(*req)->status = 0;
623 		_os_mem_set(d, &((*req)->req), 0,
624 			    sizeof(struct phl_cmd_token_req));
625 		_os_mem_set(d, &((*req)->add_req_info), 0,
626 			    sizeof(struct phl_token_op_info));
627 		_os_mem_set(d, &((*req)->free_req_info), 0,
628 			    sizeof(struct phl_token_op_info));
629 		return true;
630 	} else {
631 		return false;
632 	}
633 }
634 
635 static void push_back_idle_req(struct cmd_dispatcher *obj,
636 			       struct phl_cmd_token_req_ex *req)
637 {
638 	void *d = phl_to_drvpriv(obj->phl_info);
639 
640 	req->status = 0;
641 	SET_CUR_PENDING_EVT(obj, req->req.module_id, MSG_EVT_MAX);
642 	pq_push(d, &(obj->token_req_idle_q), &(req->list), _tail, _bh);
643 }
644 
645 static u8 pop_front_wait_req(struct cmd_dispatcher *obj,
646 			     struct phl_cmd_token_req_ex **req)
647 {
648 	void *d = phl_to_drvpriv(obj->phl_info);
649 	_os_list *new_req = NULL;
650 
651 	(*req) = NULL;
652 	if (pq_pop(d, &(obj->token_req_wait_q), &new_req, _first, _bh)) {
653 		(*req) = (struct phl_cmd_token_req_ex*)new_req;
654 		SET_STATUS_FLAG((*req)->status, REQ_STATUS_PREPARE);
655 		CLEAR_STATUS_FLAG((*req)->status, REQ_STATUS_ENQ);
656 		return true;
657 	} else {
658 		return false;
659 	}
660 }
661 
662 static void push_back_wait_req(struct cmd_dispatcher *obj,
663 			       struct phl_cmd_token_req_ex *req)
664 {
665 	void *d = phl_to_drvpriv(obj->phl_info);
666 
667 	pq_push(d, &(obj->token_req_wait_q), &(req->list), _tail, _bh);
668 	SET_STATUS_FLAG(req->status, REQ_STATUS_ENQ);
669 }
670 
671 static void clear_wating_req(struct cmd_dispatcher *obj)
672 {
673 	 struct phl_cmd_token_req_ex *ex = NULL;
674 
675 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
676 		"%s: remain cnt(%d)\n", __FUNCTION__, obj->token_req_idle_q.cnt);
677 	while(obj->token_req_idle_q.cnt != MAX_CMD_REQ_NUM) {
678 		while (pop_front_wait_req(obj, &ex)) {
679 			ex->req.abort(obj, ex->req.priv);
680 			push_back_idle_req(obj, ex);
681 		}
682 	}
683 }
684 
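/* Release the current cmd token: clear REQ_STATUS_RUN, cancel in-flight msgs
 * issued by the token owner, optionally notify the owner via its abort
 * callback, return the req to the idle pool and decrease token_cnt.
 */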
685 void deregister_cur_cmd_req(struct cmd_dispatcher *obj, u8 notify)
686 {
687 	struct phl_cmd_token_req *req = NULL;
688 	void *d = phl_to_drvpriv(obj->phl_info);
689 	u8 i = 0;
690 	struct phl_dispr_msg_ex *ex = NULL;
691 
692 	if (obj->cur_cmd_req) {
693 		req = &(obj->cur_cmd_req->req);
694 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
695 				"%s, id(%d), status(%d)\n",
696 				__FUNCTION__, req->module_id, obj->cur_cmd_req->status);
697 		CLEAR_STATUS_FLAG(obj->cur_cmd_req->status, REQ_STATUS_RUN);
698 		for (i = 0; i < MAX_PHL_MSG_NUM; i++) {
699 			ex = &(obj->msg_ex_pool[i]);
700 			if (req->module_id != MSG_MDL_ID_FIELD(ex->msg.msg_id))
701 				continue;
702 			CLEAR_STATUS_FLAG(ex->status, MSG_STATUS_OWNER_REQ);
703 			cancel_msg(obj, ex);
704 			if(TEST_STATUS_FLAG(ex->status, MSG_STATUS_PENDING)) {
705 				dispr_clr_pending_msg((void*)obj);
706 			/* inserting pending msgs from this specific sender back to the wait Q before the abort notify
707 			 * guarantees that the msg sent in the abort notify is exactly the last msg from this sender
708 			 */
709 				clear_pending_msg(obj);
710 			}
711 		}
712 		if (notify == true) {
713 			SET_STATUS_FLAG(obj->cur_cmd_req->status, REQ_STATUS_LAST_PERMIT);
714 			req->abort(obj, req->priv);
715 			CLEAR_STATUS_FLAG(obj->cur_cmd_req->status, REQ_STATUS_LAST_PERMIT);
716 		}
717 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
718 		SET_MDL_HANDLE(obj, obj->cur_cmd_req->req.module_id, NULL);
719 #endif
720 		push_back_idle_req(obj, obj->cur_cmd_req);
721 		_os_atomic_set(d, &(obj->token_cnt),
722 			       _os_atomic_read(d, &(obj->token_cnt))-1);
723 	}
724 	obj->cur_cmd_req = NULL;
725 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s\n", __FUNCTION__);
726 }
727 
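/* Grant the cmd token to a waiting req: mark it running, bump token_cnt and
 * invoke the req's acquired() callback; on MDL_RET_FAIL the token is released
 * again immediately.
 */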
728 u8 register_cur_cmd_req(struct cmd_dispatcher *obj,
729 			  struct phl_cmd_token_req_ex *req)
730 {
731 	void *d = phl_to_drvpriv(obj->phl_info);
732 	enum phl_mdl_ret_code ret = MDL_RET_SUCCESS;
733 
734 	SET_STATUS_FLAG(req->status, REQ_STATUS_RUN);
735 	CLEAR_STATUS_FLAG(req->status, REQ_STATUS_PREPARE);
736 	obj->cur_cmd_req = req;
737 	_os_atomic_set(d, &(obj->token_cnt),
738 		       _os_atomic_read(d, &(obj->token_cnt))+1);
739 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
740 			"%s, id(%d)\n", __FUNCTION__, obj->cur_cmd_req->req.module_id);
741 	ret = obj->cur_cmd_req->req.acquired((void*)obj, obj->cur_cmd_req->req.priv);
742 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s, ret(%d)\n", __FUNCTION__, ret);
743 
744 	if (ret == MDL_RET_FAIL) {
745 		deregister_cur_cmd_req(obj, false);
746 		return false;
747 	}
748 	else {
749 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
750 		SET_MDL_HANDLE(obj, req->req.module_id, req);
751 #endif
752 		return true;
753 	}
754 }
755 
756 void cancel_all_cmd_req(struct cmd_dispatcher *obj)
757 {
758 	u8 i = 0;
759 	struct phl_cmd_token_req_ex* req_ex = NULL;
760 
761 	for (i = 0; i < MAX_CMD_REQ_NUM;i++) {
762 		req_ex = &(obj->token_req_ex_pool[i]);
763 		if (req_ex->status)
764 			SET_STATUS_FLAG(req_ex->status, REQ_STATUS_CANCEL);
765 	}
766 }
767 
768 void init_cmd_req_pool(struct cmd_dispatcher *obj)
769 {
770 	u8 i = 0;
771 	void *d = phl_to_drvpriv(obj->phl_info);
772 
773 	if (TEST_STATUS_FLAG(obj->status, DISPR_REQ_INIT))
774 		return;
775 	pq_init(d, &(obj->token_req_wait_q));
776 	pq_init(d, &(obj->token_req_idle_q));
777 	pq_init(d, &(obj->token_op_q));
778 	_os_mem_set(d, obj->token_req_ex_pool, 0,
779 		    sizeof(struct phl_cmd_token_req_ex) * MAX_CMD_REQ_NUM);
780 	for (i = 0; i < MAX_CMD_REQ_NUM;i++) {
781 		obj->token_req_ex_pool[i].idx = i;
782 		pq_push(d, &(obj->token_req_idle_q),
783 			&(obj->token_req_ex_pool[i].list), _tail, _bh);
784 	}
785 	SET_STATUS_FLAG(obj->status, DISPR_REQ_INIT);
786 }
787 
788 void deinit_cmd_req_pool(struct cmd_dispatcher *obj)
789 {
790 	void *d = phl_to_drvpriv(obj->phl_info);
791 
792 	CLEAR_STATUS_FLAG(obj->status, DISPR_REQ_INIT);
793 
794 	pq_deinit(d, &(obj->token_req_wait_q));
795 	pq_deinit(d, &(obj->token_req_idle_q));
796 	pq_deinit(d, &(obj->token_op_q));
797 }
798 
799 u8 chk_module_ops(struct phl_bk_module_ops *ops)
800 {
801 	if (ops == NULL ||
802 	    ops->init == NULL ||
803 	    ops->deinit == NULL ||
804 	    ops->msg_hdlr == NULL ||
805 	    ops->set_info == NULL ||
806 	    ops->query_info == NULL ||
807 	    ops->start == NULL ||
808 	    ops->stop == NULL)
809 		return false;
810 	return true;
811 }
812 
813 u8 chk_cmd_req_ops(struct phl_cmd_token_req *req)
814 {
815 	if (req == NULL ||
816 	    req->module_id < PHL_FG_MDL_START ||
817 	    req->abort == NULL ||
818 	    req->acquired == NULL ||
819 	    req->msg_hdlr == NULL ||
820 	    req->set_info == NULL ||
821 	    req->query_info == NULL)
822 		return false;
823 	return true;
824 }
825 static u8 pop_front_token_op_info(struct cmd_dispatcher *obj,
826 				  struct phl_token_op_info **op_info)
827 {
828 	void *d = phl_to_drvpriv(obj->phl_info);
829 	_os_list *new_info = NULL;
830 
831 	(*op_info) = NULL;
832 	if (pq_pop(d, &(obj->token_op_q), &new_info, _first, _bh)) {
833 		(*op_info) = (struct phl_token_op_info *)new_info;
834 		return true;
835 	} else {
836 		return false;
837 	}
838 }
839 
840 static u8 push_back_token_op_info(struct cmd_dispatcher *obj,
841 				  struct phl_token_op_info *op_info,
842 				  enum token_op_type type,
843 			    	  u8 data)
844 {
845 	void *d = phl_to_drvpriv(obj->phl_info);
846 	_os_spinlockfg sp_flags;
847 
848 	_os_spinlock(d, &obj->token_op_q_lock, _bh, &sp_flags);
849 	if (op_info->used == true) {
850 		_os_spinunlock(d, &obj->token_op_q_lock, _bh, &sp_flags);
851 		return false;
852 	}
853 	op_info->used = true;
854 	op_info->type = type;
855 	op_info->data = data;
856 	_os_spinunlock(d, &obj->token_op_q_lock, _bh, &sp_flags);
857 	pq_push(d, &(obj->token_op_q), &(op_info->list), _tail, _bh);
858 	notify_bk_thread(obj);
859 	return true;
860 }
861 
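/* Token op infos are queued from arbitrary contexts and consumed here in the
 * background thread: ADD/RENEW re-evaluate the wait queue via
 * dispr_process_token_req, FREE releases the running req, CANCEL aborts a req
 * whether it is still waiting or already running.
 */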
862 void _handle_token_op_info(struct cmd_dispatcher *obj, struct phl_token_op_info *op_info)
863 {
864 	struct phl_cmd_token_req_ex *req_ex = NULL;
865 	void *d = phl_to_drvpriv(obj->phl_info);
866 
867 	switch (op_info->type) {
868 		case TOKEN_OP_RENEW_CMD_REQ:
869 			/* fall through*/
870 		case TOKEN_OP_ADD_CMD_REQ:
871 			dispr_process_token_req(obj);
872 			break;
873 		case TOKEN_OP_FREE_CMD_REQ:
874 			if (op_info->data >= MAX_CMD_REQ_NUM)
875 				return;
876 			req_ex = &(obj->token_req_ex_pool[op_info->data]);
877 			if (!TEST_STATUS_FLAG(req_ex->status, REQ_STATUS_RUN))
878 				break;
879 			deregister_cur_cmd_req(obj, false);
880 			dispr_process_token_req(obj);
881 			break;
882 		case TOKEN_OP_CANCEL_CMD_REQ:
883 			if (op_info->data >= MAX_CMD_REQ_NUM)
884 				return;
885 			req_ex = &(obj->token_req_ex_pool[op_info->data]);
886 			if (TEST_STATUS_FLAG(req_ex->status, REQ_STATUS_ENQ)) {
887 				pq_del_node(d, &(obj->token_req_wait_q), &(req_ex->list), _bh);
888 				/*
889 				 * Call command abort handle, abort handle
890 				 * should decide it has been acquired or not.
891 				 */
892 				req_ex->req.abort(obj, req_ex->req.priv);
893 				push_back_idle_req(obj, req_ex);
894 			} else if (TEST_STATUS_FLAG(req_ex->status, REQ_STATUS_RUN)){
895 				deregister_cur_cmd_req(obj, true);
896 				dispr_process_token_req(obj);
897 			}
898 			break;
899 		default:
900 			break;
901 	}
902 }
903 
904 void token_op_hanler(struct cmd_dispatcher *obj)
905 {
906 	struct phl_token_op_info *info = NULL;
907 
908 	while (pop_front_token_op_info(obj, &info)) {
909 		_handle_token_op_info(obj, info);
910 		info->used = false;
911 	}
912 }
913 static u8
914 dispr_enqueue_token_op_info(struct cmd_dispatcher *obj,
915 			    struct phl_token_op_info *op_info,
916 			    enum token_op_type type,
917 			    u8 data)
918 {
919 	return push_back_token_op_info(obj, op_info, type, data);
920 }
921 
922 u8 bk_module_init(struct cmd_dispatcher *obj, struct phl_bk_module *module)
923 {
924 	if (TEST_STATUS_FLAG(module->status, MDL_INIT)) {
925 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_,
926 			"%s module_id:%d already init\n",
927 			__FUNCTION__, module->id);
928 		return false;
929 	}
930 
931 	if (module->ops.init((void*)obj->phl_info, (void*)obj,
932 			     &(module->priv)) == MDL_RET_SUCCESS) {
933 		SET_STATUS_FLAG(module->status, MDL_INIT);
934 		return true;
935 	} else {
936 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_,
937 			"%s fail module_id: %d \n", __FUNCTION__, module->id);
938 		return false;
939 	}
940 }
941 
942 void bk_module_deinit(struct cmd_dispatcher *obj, struct phl_bk_module *module)
943 {
944 	if (TEST_STATUS_FLAG(module->status, MDL_INIT))
945 		module->ops.deinit((void*)obj, module->priv);
946 	CLEAR_STATUS_FLAG(module->status, MDL_INIT);
947 }
948 
949 u8 bk_module_start(struct cmd_dispatcher *obj, struct phl_bk_module *module)
950 {
951 	if (!TEST_STATUS_FLAG(module->status, MDL_INIT) ||
952 	    TEST_STATUS_FLAG(module->status, MDL_STARTED)) {
953 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_,
954 			"%s module_id:%d already start\n", __FUNCTION__,
955 			module->id);
956 		return false;
957 	}
958 
959 	if (module->ops.start((void*)obj, module->priv) == MDL_RET_SUCCESS) {
960 		SET_STATUS_FLAG(module->status, MDL_STARTED);
961 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
962 		SET_MDL_HANDLE(obj, module->id, module);
963 #endif
964 		return true;
965 	} else {
966 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_,
967 			"%s fail module_id: %d \n", __FUNCTION__, module->id);
968 		return false;
969 	}
970 }
971 
972 u8 bk_module_stop(struct cmd_dispatcher *obj, struct phl_bk_module *module)
973 {
974 	if (!TEST_STATUS_FLAG(module->status, MDL_STARTED))
975 		return false;
976 	CLEAR_STATUS_FLAG(module->status, MDL_STARTED);
977 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
978 	SET_MDL_HANDLE(obj, module->id, NULL);
979 #endif
980 	if (module->ops.stop((void*)obj, module->priv) == MDL_RET_SUCCESS) {
981 		return true;
982 	} else {
983 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_,
984 			"%s fail module_id: %d \n", __FUNCTION__,
985 			module->id);
986 		return false;
987 	}
988 }
989 
990 void cur_req_hdl(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex)
991 {
992 	struct phl_cmd_token_req_ex *cur_req = obj->cur_cmd_req;
993 
994 	if (cur_req == NULL)
995 		return;
996 	if (!TEST_STATUS_FLAG(cur_req->status, REQ_STATUS_RUN) ||
997 	    TEST_STATUS_FLAG(cur_req->status, REQ_STATUS_CANCEL))
998 		return;
999 	if (TEST_STATUS_FLAG(ex->status, MSG_STATUS_FOR_ABORT))
1000 		return;
1001 	cur_req->req.msg_hdlr((void*)obj, cur_req->req.priv, &(ex->msg));
1002 }
1003 
1004 void notify_msg_fail(struct cmd_dispatcher *obj,
1005                      struct phl_dispr_msg_ex *ex,
1006                      enum phl_mdl_ret_code ret)
1007 {
1008 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s\n", __FUNCTION__);
1009 
1010 	SET_STATUS_FLAG(ex->status, MSG_STATUS_FAIL);
1011 
1012 	SET_MSG_INDC_FIELD(ex->msg.msg_id, MSG_INDC_FAIL);
1013 	if (ret == MDL_RET_CANNOT_IO)
1014 		SET_MSG_INDC_FIELD(ex->msg.msg_id, MSG_INDC_CANNOT_IO);
1015 
1016 	if (TEST_STATUS_FLAG(ex->status, MSG_STATUS_OWNER_BK_MDL) &&
1017 	   (IS_DISPR_CTRL(MSG_MDL_ID_FIELD(ex->msg.msg_id)) || _chk_bitmap_bit(obj->bitmap, ex->module->id))) {
1018 		ex->module->ops.msg_hdlr(obj, ex->module->priv, &(ex->msg));
1019 	}
1020 
1021 	if (TEST_STATUS_FLAG(ex->status, MSG_STATUS_OWNER_REQ)) {
1022 		cur_req_hdl(obj, ex);
1023 	}
1024 }
1025 
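/* Hand one msg to one module's msg_hdlr and translate the return code:
 * FAIL/CANNOT_IO marks the msg failed, PENDING parks it, and any other result
 * clears the module's bit from the active (pre or post) bitmap.
 */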
1026 enum phl_mdl_ret_code feed_mdl_msg(struct cmd_dispatcher *obj,
1027 				   struct phl_bk_module *mdl,
1028 				   struct phl_dispr_msg_ex *ex)
1029 {
1030 	enum phl_mdl_ret_code ret = MDL_RET_FAIL;
1031 	u8 *bitmap = NULL;
1032 
1033 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_DEBUG_, "%s, id:%d \n", __FUNCTION__, mdl->id);
1034 	ret = mdl->ops.msg_hdlr(obj, mdl->priv, &(ex->msg));
1035 	if (ret == MDL_RET_FAIL || ret == MDL_RET_CANNOT_IO) {
1036 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "id:%d evt:0x%x fail\n",
1037 			 mdl->id, ex->msg.msg_id);
1038 		ex->msg.rsvd[0] = mdl;
1039 		notify_msg_fail(obj, ex, ret);
1040 	} else if (ret == MDL_RET_PENDING) {
1041 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "id:%d evt:0x%x pending\n",
1042 			 mdl->id, ex->msg.msg_id);
1043 		SET_STATUS_FLAG(ex->status, MSG_STATUS_PENDING);
1044 	} else {
1045 		if (MSG_INDC_FIELD(ex->msg.msg_id) & MSG_INDC_PRE_PHASE)
1046 			bitmap = ex->premap;
1047 		else
1048 			bitmap = ex->postmap;
1049 		_clr_bitmap_bit(bitmap, &(mdl->id), 1);
1050 	}
1051 	return ret;
1052 }
1053 
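/* Pre-protocol phase: walk the priority buckets from PHL_MDL_PRI_MAX - 1 down
 * to PHL_MDL_PRI_ROLE and feed the msg to the sender and to every started
 * module whose bit is set in premap; dispatching stops early when a module
 * fails or pends the msg. msg_post_phase_hdl below walks the same buckets in
 * the reverse order using postmap.
 */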
1054 void msg_pre_phase_hdl(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex)
1055 {
1056 	s8 i = 0;
1057 	void *d = phl_to_drvpriv(obj->phl_info);
1058 	struct phl_bk_module *mdl = NULL;
1059 	_os_list *node = NULL;
1060 	struct phl_queue *q = NULL;
1061 	enum phl_mdl_ret_code ret = MDL_RET_FAIL;
1062 	u8 owner_id = (ex->module)?(ex->module->id):(PHL_MDL_ID_MAX);
1063 	enum phl_bk_module_priority priority = PHL_MDL_PRI_MAX;
1064 
1065 	if (owner_id <= PHL_BK_MDL_END)
1066 		priority = _get_mdl_priority(owner_id);
1067 
1068 	for (i = PHL_MDL_PRI_MAX - 1 ; i >= PHL_MDL_PRI_ROLE ; i--) {
1069 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
1070 		ret = run_self_def_seq(obj, ex, i, true);
1071 		if (STOP_DISPATCH_MSG(ret))
1072 			return;
1073 #endif
1074 		if (priority == i && _chk_bitmap_bit(ex->premap, owner_id)) {
1075 			ret = feed_mdl_msg(obj, ex->module, ex);
1076 			if (STOP_DISPATCH_MSG(ret))
1077 				return;
1078 		}
1079 		q = &(obj->module_q[(u8)i]);
1080 
1081 		if (pq_get_front(d, q, &node, _bh) == false)
1082 			continue;
1083 
1084 		do {
1085 			mdl = (struct phl_bk_module*)node;
1086 			if (!_chk_bitmap_bit(ex->premap, mdl->id) ||
1087 			    !TEST_STATUS_FLAG(mdl->status, MDL_STARTED))
1088 				continue;
1089 			ret = feed_mdl_msg(obj, mdl, ex);
1090 			if (STOP_DISPATCH_MSG(ret))
1091 				return;
1092 		} while(pq_get_next(d, q, node, &node, _bh));
1093 	}
1094 }
1095 
1096 void msg_post_phase_hdl(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex)
1097 {
1098 	s8 i = 0;
1099 	void *d = phl_to_drvpriv(obj->phl_info);
1100 	struct phl_bk_module *mdl = NULL;
1101 	_os_list *node = NULL;
1102 	struct phl_queue *q = NULL;
1103 	enum phl_mdl_ret_code ret = MDL_RET_FAIL;
1104 	u8 owner_id = (ex->module)?(ex->module->id):(PHL_MDL_ID_MAX);
1105 	enum phl_bk_module_priority priority = PHL_MDL_PRI_MAX;
1106 
1107 	if (owner_id <= PHL_BK_MDL_END)
1108 		priority = _get_mdl_priority(owner_id);
1109 
1110 	for (i = PHL_MDL_PRI_ROLE ; i < PHL_MDL_PRI_MAX ; i++) {
1111 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
1112 		ret = run_self_def_seq(obj, ex, i, false);
1113 		if (STOP_DISPATCH_MSG(ret))
1114 			return;
1115 #endif
1116 		if (priority == i && _chk_bitmap_bit(ex->postmap, owner_id)) {
1117 			ret = feed_mdl_msg(obj, ex->module, ex);
1118 			if (STOP_DISPATCH_MSG(ret))
1119 				return;
1120 		}
1121 		q = &(obj->module_q[(u8)i]);
1122 		if (pq_get_tail(d, q, &node, _bh) == false)
1123 			continue;
1124 		do {
1125 			mdl = (struct phl_bk_module*)node;
1126 			if (!_chk_bitmap_bit(ex->postmap, mdl->id)||
1127 			    !TEST_STATUS_FLAG(mdl->status, MDL_STARTED))
1128 				continue;
1129 			ret = feed_mdl_msg(obj, mdl, ex);
1130 			if (STOP_DISPATCH_MSG(ret))
1131 				return;
1132 		} while(pq_get_prev(d, q, node, &node, _bh));
1133 	}
1134 }
1135 
1136 u8 get_cur_cmd_req_id(struct cmd_dispatcher *obj, u32 *req_status)
1137 {
1138 	struct phl_cmd_token_req_ex *cur_req = obj->cur_cmd_req;
1139 
1140 	if(req_status)
1141 		*req_status = 0;
1142 
1143 	if (cur_req == NULL )
1144 		return (u8)PHL_MDL_ID_MAX;
1145 
1146 	if(req_status)
1147 		*req_status = cur_req->status;
1148 
1149 	if(!TEST_STATUS_FLAG(cur_req->status, REQ_STATUS_RUN) ||
1150 		TEST_STATUS_FLAG(cur_req->status, REQ_STATUS_CANCEL))
1151 		return (u8)PHL_MDL_ID_MAX;
1152 	else
1153 		return cur_req->req.module_id;
1154 }
1155 
1156 #define MSG_REDIRECT_CHK(_ex) \
1157 	if (TEST_STATUS_FLAG(ex->status, MSG_STATUS_FAIL)|| \
1158 	    TEST_STATUS_FLAG(ex->status, MSG_STATUS_CANCEL)) \
1159 		goto recycle;\
1160 	if (TEST_STATUS_FLAG(ex->status, MSG_STATUS_PENDING)) \
1161 		goto reschedule;
1162 
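/* Core dispatch of one msg: notify the dispr controller first, then run the
 * pre-phase and post-phase module walks; depending on the resulting status
 * flags the msg is recycled to the idle queue, re-queued to the wait queue or
 * parked on the pending queue (see MSG_REDIRECT_CHK above).
 */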
1163 void msg_dispatch(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex)
1164 {
1165 	u8 *bitmap = get_msg_bitmap(ex);
1166 	void *d = phl_to_drvpriv(obj->phl_info);
1167 
1168 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_DEBUG_,
1169 		"%s, msg_id:0x%x status: 0x%x\n", __FUNCTION__, ex->msg.msg_id, ex->status);
1170 	MSG_REDIRECT_CHK(ex);
1171 
1172 	_notify_dispr_controller(obj, ex);
1173 
1174 	MSG_REDIRECT_CHK(ex);
1175 
1176 	if ((MSG_INDC_FIELD(ex->msg.msg_id) & MSG_INDC_PRE_PHASE) &&
1177 	    _is_bitmap_empty(d, bitmap) == false)
1178 		msg_pre_phase_hdl(obj, ex);
1179 
1180 	MSG_REDIRECT_CHK(ex);
1181 
1182 	if (_is_bitmap_empty(d, bitmap)) {
1183 		/* pre protocol phase done, switch to post protocol phase*/
1184 		CLEAR_STATUS_FLAG(ex->status, MSG_STATUS_PRE_PHASE);
1185 		bitmap = get_msg_bitmap(ex);
1186 	} else {
1187 		PHL_ERR("%s, invalid bitmap state, msg status:0x%x \n", __FUNCTION__, ex->status);
1188 		SET_STATUS_FLAG(ex->status, MSG_STATUS_CANCEL);
1189 		goto recycle;
1190 	}
1191 
1192 	if (_is_bitmap_empty(d, bitmap) == false)
1193 		msg_post_phase_hdl(obj, ex);
1194 
1195 	MSG_REDIRECT_CHK(ex);
1196 
1197 	if (_is_bitmap_empty(d, bitmap)) {
1198 		/* post protocol phase done */
1199 		cur_req_hdl(obj, ex);
1200 		goto recycle;
1201 	} else {
1202 		PHL_ERR("%s, invalid bitmap state, msg status:0x%x \n", __FUNCTION__, ex->status);
1203 		SET_STATUS_FLAG(ex->status, MSG_STATUS_CANCEL);
1204 		goto recycle;
1205 	}
1206 reschedule:
1207 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
1208 			"%s, msg:0x%x reschedule \n", __FUNCTION__,
1209 		 	ex->msg.msg_id);
1210 	if(TEST_STATUS_FLAG(ex->status, MSG_STATUS_PENDING))
1211 		push_back_pending_msg(obj, ex);
1212 	else
1213 		push_back_wait_msg(obj, ex);
1214 	return;
1215 recycle:
1216 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_DEBUG_,
1217 		"%s, msg:0x%x recycle \n", __FUNCTION__,
1218 		 ex->msg.msg_id);
1219 	push_back_idle_msg(obj, ex);
1220 }
1221 
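/* One iteration of the dispatcher loop: re-queue pending msgs when the
 * clear-pending flag is set, service queued token ops, then pop and dispatch
 * one waiting msg (its bitmaps are first masked against the modules actually
 * registered in this dispatcher).
 */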
1222 void dispr_thread_loop_hdl(struct cmd_dispatcher *obj)
1223 {
1224 	struct phl_dispr_msg_ex *ex = NULL;
1225 
1226 	/* check pending msgs in advance.
1227 	* if the pending msg Q is not empty before the while loop breaks,
1228 	* these msgs would be cleared in deinit_dispr_msg_pool.
1229 	*/
1230 	clear_pending_msg(obj);
1231 	/* handle the token op Q in advance.
1232 	* if the req wait Q is not empty before the while loop breaks,
1233 	* these reqs would be cleared in deinit_cmd_req_pool.
1234 	*/
1235 	token_op_hanler(obj);
1236 
1237 	if (pop_front_wait_msg(obj, &ex)) {
1238 		if (is_msg_canceled(obj, ex)) {
1239 			push_back_idle_msg(obj, ex);
1240 			return;
1241 		}
1242 		/* ensure all modules set in the msg bitmap
1243 		 * exist in the cur dispatcher */
1244 		_and_bitmaps(obj->bitmap, ex->premap, MODL_MASK_LEN);
1245 		_and_bitmaps(obj->bitmap, ex->postmap, MODL_MASK_LEN);
1246 		msg_dispatch(obj, ex);
1247 	}
1248 }
1249 
1250 void dispr_thread_leave_hdl(struct cmd_dispatcher *obj)
1251 {
1252 	deregister_cur_cmd_req(obj, true);
1253 	/* clear remaining pending & waiting msg */
1254 	clear_waiting_msg(obj);
1255 	/* pop out all waiting cmd req and notify abort. */
1256 	clear_wating_req(obj);
1257 }
1258 
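/* Dedicated dispatcher thread used in solo thread mode: sleeps on msg_q_sema,
 * runs dispr_thread_loop_hdl on each wake-up until asked to stop, then drains
 * the remaining msgs and reqs in dispr_thread_leave_hdl.
 */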
1259 int background_thread_hdl(void *param)
1260 {
1261 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)param;
1262 	void *d = phl_to_drvpriv(obj->phl_info);
1263 
1264 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s enter\n", __FUNCTION__);
1265 	while (!_os_thread_check_stop(d, &(obj->bk_thread))) {
1266 
1267 		_os_sema_down(d, &obj->msg_q_sema);
1268 
1269 		if(_os_thread_check_stop(d, &(obj->bk_thread)))
1270 			break;
1271 		dispr_thread_loop_hdl(obj);
1272 	}
1273 	dispr_thread_leave_hdl(obj);
1274 	_os_thread_wait_stop(d, &(obj->bk_thread));
1275 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s down\n", __FUNCTION__);
1276 	return 0;
1277 }
1278 
1279 u8 search_mdl(void *d, void *mdl, void *priv)
1280 {
1281 	enum phl_module_id id = *(enum phl_module_id *)priv;
1282 	struct phl_bk_module *module = NULL;
1283 
1284 	module = (struct phl_bk_module *)mdl;
1285 	if (module->id == id) {
1286 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s :: id %d\n", __FUNCTION__, id);
1287 		return true;
1288 	}
1289 	else
1290 		return false;
1291 }
1292 
1293 u8 get_module_by_id(struct cmd_dispatcher *obj, enum phl_module_id id,
1294 		    struct phl_bk_module **mdl)
1295 {
1296 	void *d = phl_to_drvpriv(obj->phl_info);
1297 	u8 i = 0;
1298 	_os_list *node = NULL;
1299 
1300 	if (mdl == NULL)
1301 		return false;
1302 
1303 	if (IS_DISPR_CTRL(id)) {
1304 		if (!TEST_STATUS_FLAG(obj->status, DISPR_CTRL_PRESENT))
1305 			return false;
1306 		*mdl = &(obj->controller);
1307 		return true;
1308 	}
1309 
1310 	if (!_chk_bitmap_bit(obj->bitmap, id))
1311 		return false;
1312 
1313 	for (i = 0; i < PHL_MDL_PRI_MAX; i++) {
1314 
1315 		if(pq_search_node(d, &(obj->module_q[i]), &node, _bh, false, &id, search_mdl)) {
1316 			*mdl = (struct phl_bk_module*)node;
1317 			return true;
1318 		}
1319 	}
1320 	*mdl = NULL;
1321 	return false;
1322 }
1323 
1324 enum rtw_phl_status phl_dispr_get_idx(void *dispr, u8 *idx)
1325 {
1326 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1327 
1328 	if (dispr == NULL)
1329 		return RTW_PHL_STATUS_FAILURE;
1330 	if (!TEST_STATUS_FLAG(obj->status, DISPR_INIT) || idx == NULL)
1331 		return RTW_PHL_STATUS_FAILURE;
1332 	*idx = obj->idx;
1333 	return RTW_PHL_STATUS_SUCCESS;
1334 }
1335 
1336 /* Each dispr has a controller.
1337  * A dispr controller is designed for the phl instance to interact with dispr modules that belong to a specific hw band.
1338  * The phl instance can perform the following actions via its dedicated controller:
1339  * 1. allow (phl status/non-dispr phl modules) to monitor & drop msgs
1340  * 2. allow dispr modules that belong to the same dispr to sequentially communicate with the phl instance & call phl APIs,
1341  *    and also allow (phl status/non-dispr phl modules) to notify a dispr by hw band.
1342  * *Note*
1343  * 1. when the cmd dispatch engine is in solo thread mode (each dispr has its own dedicated thread),
1344  *    the phl instance might receive msgs from different disprs simultaneously and
1345  *    currently uses a semaphore (dispr_ctrl_sema) to prevent the multi-thread condition.
1346  * 2. when the cmd dispatch engine is in share thread mode, msgs from different disprs pass to the controller sequentially.
1347  *
1348  * PS:
1349  * phl instance: means phl_info_t, which includes phl mgnt status & non-dispr phl modules
1350  * dispr modules: all existing background & foreground modules.
1351  * non-dispr phl module: data path (TX/RX), etc.
1352  * phl mgnt status: stop/surprise remove/cannot io
1353 */
1354 static enum rtw_phl_status _register_dispr_controller(struct cmd_dispatcher *obj)
1355 {
1356 	struct phl_bk_module *ctrl = &(obj->controller);
1357 
1358 	dispr_ctrl_hook_ops(obj, &(ctrl->ops));
1359 	ctrl->id = PHL_MDL_PHY_MGNT;
1360 
1361 	if(bk_module_init(obj, &(obj->controller)) == true)
1362 		return RTW_PHL_STATUS_SUCCESS;
1363 	else {
1364 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_, "%s(): fail \n", __func__);
1365 		return RTW_PHL_STATUS_FAILURE;
1366 	}
1367 }
1368 
1369 static void _deregister_dispr_controller(struct cmd_dispatcher *obj)
1370 {
1371 	bk_module_deinit(obj, &(obj->controller));
1372 }
1373 
1374 static enum rtw_phl_status _start_dispr_controller(struct cmd_dispatcher *obj)
1375 {
1376 	if (bk_module_start(obj, &(obj->controller)) == true) {
1377 		SET_STATUS_FLAG(obj->status, DISPR_CTRL_PRESENT);
1378 		return RTW_PHL_STATUS_SUCCESS;
1379 	}
1380 	else {
1381 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_, "%s(): fail \n", __func__);
1382 		return RTW_PHL_STATUS_FAILURE;
1383 	}
1384 }
1385 
1386 static enum rtw_phl_status _stop_dispr_controller(struct cmd_dispatcher *obj)
1387 {
1388 	CLEAR_STATUS_FLAG(obj->status, DISPR_CTRL_PRESENT);
1389 	if (bk_module_stop(obj, &(obj->controller)) == true)
1390 		return RTW_PHL_STATUS_SUCCESS;
1391 	else {
1392 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_, "%s(): fail \n", __func__);
1393 		return RTW_PHL_STATUS_FAILURE;
1394 	}
1395 }
1396 
1397 void _notify_dispr_controller(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex)
1398 {
1399 	if (!TEST_STATUS_FLAG(obj->status, DISPR_CTRL_PRESENT))
1400 		return;
1401 #ifdef CONFIG_CMD_DISP_SOLO_MODE
1402 	dispr_ctrl_sema_down(obj->phl_info);
1403 #endif
1404 	feed_mdl_msg(obj, &(obj->controller), ex);
1405 #ifdef CONFIG_CMD_DISP_SOLO_MODE
1406 	dispr_ctrl_sema_up(obj->phl_info);
1407 #endif
1408 
1409 }
1410 
1411 void dispr_thread_stop_prior_hdl(struct cmd_dispatcher *obj)
1412 {
1413 	CLEAR_STATUS_FLAG(obj->status, DISPR_STARTED);
1414 	_stop_dispr_controller(obj);
1415 	cancel_all_cmd_req(obj);
1416 	cancel_running_msg(obj);
1417 }
1418 
1419 void dispr_thread_stop_post_hdl(struct cmd_dispatcher *obj)
1420 {
1421 	void *d = phl_to_drvpriv(obj->phl_info);
1422 
1423 	/* have to wait for bk thread ends before deinit msg & req*/
1424 	deinit_dispr_msg_pool(obj);
1425 	deinit_cmd_req_pool(obj);
1426 	_os_atomic_set(d, &(obj->token_cnt), 0);
1427 	_os_sema_free(d, &(obj->msg_q_sema));
1428 }
1429 
1430 enum rtw_phl_status dispr_init(struct phl_info_t *phl_info, void **dispr, u8 idx)
1431 {
1432 	struct cmd_dispatcher *obj = NULL;
1433 	void *d = phl_to_drvpriv(phl_info);
1434 	u8 i = 0;
1435 
1436 	(*dispr) = NULL;
1437 
1438 	obj = (struct cmd_dispatcher *)_os_mem_alloc(d, sizeof(struct cmd_dispatcher));
1439 	if (obj == NULL) {
1440 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_, "%s, alloc fail\n", __FUNCTION__);
1441 		return RTW_PHL_STATUS_RESOURCE;
1442 	}
1443 
1444 	obj->phl_info = phl_info;
1445 	obj->idx = idx;
1446 	_os_atomic_set(d, &(obj->token_cnt), 0);
1447 	for (i = 0 ; i < PHL_MDL_PRI_MAX; i++)
1448 		pq_init(d, &(obj->module_q[i]));
1449 
1450 	(*dispr) = (void*)obj;
1451 	_os_spinlock_init(d, &(obj->token_op_q_lock));
1452 	SET_STATUS_FLAG(obj->status, DISPR_INIT);
1453 	SET_STATUS_FLAG(obj->status, DISPR_NOTIFY_IDLE);
1454 	_register_dispr_controller(obj);
1455 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s, size dispr(%d), msg_ex(%d), req_ex(%d) \n",
1456 		 __FUNCTION__, (int)sizeof(struct cmd_dispatcher),
1457 		 (int)sizeof(struct phl_dispr_msg_ex),
1458 		 (int)sizeof(struct phl_cmd_token_req_ex));
1459 	return RTW_PHL_STATUS_SUCCESS;
1460 }
1461 
1462 enum rtw_phl_status dispr_deinit(struct phl_info_t *phl, void *dispr)
1463 {
1464 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1465 	void *d = phl_to_drvpriv(obj->phl_info);
1466 	u8 i = 0;
1467 
1468 	if (!TEST_STATUS_FLAG(obj->status, DISPR_INIT))
1469 		return RTW_PHL_STATUS_SUCCESS;
1470 	dispr_stop(dispr);
1471 	_deregister_dispr_controller(obj);
1472 	for (i = 0 ; i < PHL_MDL_PRI_MAX; i++)
1473 		pq_deinit(d, &(obj->module_q[i]));
1474 	_os_spinlock_free(d, &(obj->token_op_q_lock));
1475 	_os_mem_free(d, obj, sizeof(struct cmd_dispatcher));
1476 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s\n", __FUNCTION__);
1477 	return RTW_PHL_STATUS_SUCCESS;
1478 }
1479 
1480 enum rtw_phl_status dispr_start(void *dispr)
1481 {
1482 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1483 	void *d = phl_to_drvpriv(obj->phl_info);
1484 
1485 	if (TEST_STATUS_FLAG(obj->status, DISPR_STARTED))
1486 		return RTW_PHL_STATUS_UNEXPECTED_ERROR;
1487 
1488 	init_dispr_msg_pool(obj);
1489 	init_cmd_req_pool(obj);
1490 	init_dispr_mdl_mgnt_info(obj);
1491 	_os_mem_set(d, &(obj->renew_req_info), 0,
1492 			    sizeof(struct phl_token_op_info));
1493 	_os_sema_init(d, &(obj->msg_q_sema), 0);
1494 	CLEAR_EXCL_MDL(obj);
1495 	if (disp_eng_is_solo_thread_mode(obj->phl_info)) {
1496 		_os_thread_init(d, &(obj->bk_thread), background_thread_hdl, obj,
1497 				"dispr_solo_thread");
1498 		_os_thread_schedule(d, &(obj->bk_thread));
1499 	}
1500 	SET_STATUS_FLAG(obj->status, DISPR_STARTED);
1501 	_start_dispr_controller(obj);
1502 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s\n", __FUNCTION__);
1503 	return RTW_PHL_STATUS_SUCCESS;
1504 }
1505 
1506 bool is_dispr_started(void *dispr)
1507 {
1508 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1509 
1510 	if (TEST_STATUS_FLAG(obj->status, DISPR_STARTED))
1511 		return true;
1512 	return false;
1513 }
1514 
1515 enum rtw_phl_status dispr_stop(void *dispr)
1516 {
1517 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1518 	void *d = phl_to_drvpriv(obj->phl_info);
1519 
1520 	if (!TEST_STATUS_FLAG(obj->status, DISPR_STARTED))
1521 		return RTW_PHL_STATUS_UNEXPECTED_ERROR;
1522 
1523 	dispr_thread_stop_prior_hdl(obj);
1524 	if (disp_eng_is_solo_thread_mode(obj->phl_info)) {
1525 		_os_thread_stop(d, &(obj->bk_thread));
1526 		_os_sema_up(d, &(obj->msg_q_sema));
1527 		_os_thread_deinit(d, &(obj->bk_thread));
1528 	}
1529 	dispr_thread_stop_post_hdl(obj);
1530 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s\n", __FUNCTION__);
1531 	return RTW_PHL_STATUS_SUCCESS;
1532 }
1533 
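/**
 * dispr_register_module - allocate a phl_bk_module for @id, copy @ops into it
 * and queue it at the priority reported by _get_mdl_priority(). The module is
 * initialized right away; if the dispatcher is already started it is also
 * started and added to the bitmap (and to the basemap unless its priority is
 * PHL_MDL_PRI_OPTIONAL). On any failure the module is deinitialized and freed.
 */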
1534 enum rtw_phl_status dispr_register_module(void *dispr,
1535 					  enum phl_module_id id,
1536 					  struct phl_bk_module_ops *ops)
1537 {
1538 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1539 	void *d = phl_to_drvpriv(obj->phl_info);
1540 	struct phl_bk_module *module = NULL;
1541 	u8 ret = true;
1542 	enum phl_bk_module_priority priority = _get_mdl_priority(id);
1543 
1544 	FUNCIN();
1545 
1546 	if (!TEST_STATUS_FLAG(obj->status, DISPR_INIT)  ||
1547 	    priority == PHL_MDL_PRI_MAX ||
1548 	    chk_module_ops(ops) == false ||
1549 	    _chk_bitmap_bit(obj->bitmap, id) == true) {
1550 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_, "%s, register fail\n", __FUNCTION__);
1551 		return RTW_PHL_STATUS_FAILURE;
1552 	}
1553 
1554 	module = (struct phl_bk_module *)_os_mem_alloc(d, sizeof(struct phl_bk_module));
1555 	if (module == NULL) {
1556 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_, "%s, allocate fail\n", __FUNCTION__);
1557 		return RTW_PHL_STATUS_FAILURE;
1558 	}
1559 
1560 	module->id = id;
1561 	_os_mem_cpy(d, &(module->ops), ops, sizeof(struct phl_bk_module_ops));
1562 	pq_push(d, &(obj->module_q[priority]), &(module->list), _tail, _bh);
1563 
1564 	ret = bk_module_init(obj, module);
1565 	if (ret == true && TEST_STATUS_FLAG(obj->status, DISPR_STARTED)) {
1566 		ret = bk_module_start(obj, module);
1567 		if (ret == true)
1568 			_add_bitmap_bit(obj->bitmap, &(module->id), 1);
1569 		if (ret == true && priority != PHL_MDL_PRI_OPTIONAL)
1570 			_add_bitmap_bit(obj->basemap, &(module->id), 1);
1571 	}
1572 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s id:%d, ret:%d\n",__FUNCTION__, id, ret);
1573 	if (ret == true) {
1574 		return RTW_PHL_STATUS_SUCCESS;
1575 	} else {
1576 		bk_module_deinit(obj, module);
1577 		_os_mem_free(d, module, sizeof(struct phl_bk_module));
1578 		return RTW_PHL_STATUS_FAILURE;
1579 	}
1580 }
1581 
1582 enum rtw_phl_status dispr_deregister_module(void *dispr,
1583 					    enum phl_module_id id)
1584 {
1585 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1586 	void *d = phl_to_drvpriv(obj->phl_info);
1587 	struct phl_bk_module *module = NULL;
1588 	_os_list *mdl = NULL;
1589 	enum rtw_phl_status phl_stat = RTW_PHL_STATUS_FAILURE;
1590 	enum phl_bk_module_priority priority = _get_mdl_priority(id);
1591 
1592 	FUNCIN();
1593 
1594 	if (!TEST_STATUS_FLAG(obj->status, DISPR_INIT) ||
1595 	    priority == PHL_MDL_PRI_MAX)
1596 		return phl_stat;
1597 
1598 	if(pq_search_node(d, &(obj->module_q[priority]), &mdl, _bh, true, &id, search_mdl)) {
1599 		module = (struct phl_bk_module *)mdl;
1600 		_clr_bitmap_bit(obj->bitmap, &(module->id), 1);
1601 		_clr_bitmap_bit(obj->basemap, &(module->id), 1);
1602 		bk_module_stop(obj, module);
1603 		bk_module_deinit(obj, module);
1604 		_os_mem_free(d, module, sizeof(struct phl_bk_module));
1605 		phl_stat = RTW_PHL_STATUS_SUCCESS;
1606 	}
1607 
1608 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s, id: %d stat:%d\n", __FUNCTION__, id, phl_stat);
1609 	return phl_stat;
1610 }
1611 
1612 enum rtw_phl_status dispr_module_init(void *dispr)
1613 {
1614 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1615 	void *d = phl_to_drvpriv(obj->phl_info);
1616 	_os_list *mdl = NULL;
1617 	u8 i = 0;
1618 
1619 	if (!TEST_STATUS_FLAG(obj->status, DISPR_INIT))
1620 		return RTW_PHL_STATUS_FAILURE;
1621 
1622 	for (i = 0; i < PHL_MDL_PRI_MAX; i++) {
1623 		if (pq_get_front(d, &(obj->module_q[i]), &mdl, _bh) == false)
1624 			continue;
1625 		do {
1626 			bk_module_init(obj, (struct phl_bk_module *)mdl);
1627 		} while(pq_get_next(d, &(obj->module_q[i]), mdl, &mdl, _bh));
1628 	}
1629 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s\n", __FUNCTION__);
1630 	return RTW_PHL_STATUS_SUCCESS;
1631 }
1632 
1633 enum rtw_phl_status dispr_module_deinit(void *dispr)
1634 {
1635 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1636 	void *d = phl_to_drvpriv(obj->phl_info);
1637 	_os_list *mdl = NULL;
1638 	u8 i = 0;
1639 
1640 	if (!TEST_STATUS_FLAG(obj->status, DISPR_INIT))
1641 		return RTW_PHL_STATUS_FAILURE;
1642 
1643 	for (i = 0; i < PHL_MDL_PRI_MAX; i++) {
1644 		while (pq_pop(d, &(obj->module_q[i]), &mdl, _first, _bh)) {
1645 			bk_module_deinit(obj, (struct phl_bk_module *)mdl);
1646 			_os_mem_free(d, mdl, sizeof(struct phl_bk_module));
1647 		}
1648 	}
1649 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s\n", __FUNCTION__);
1650 	return RTW_PHL_STATUS_SUCCESS;
1651 }
1652 
1653 enum rtw_phl_status dispr_module_start(void *dispr)
1654 {
1655 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1656 	void *d = phl_to_drvpriv(obj->phl_info);
1657 	_os_list *mdl = NULL;
1658 	struct phl_bk_module *module = NULL;
1659 	u8 i = 0;
1660 	u8 ret = false;
1661 
1662 	if (!TEST_STATUS_FLAG(obj->status, DISPR_STARTED))
1663 		return RTW_PHL_STATUS_UNEXPECTED_ERROR;
1664 
1665 	for (i = 0; i < PHL_MDL_PRI_MAX; i++) {
1666 		if (pq_get_front(d, &(obj->module_q[i]), &mdl, _bh) == false)
1667 			continue;
1668 		do {
1669 			module = (struct phl_bk_module*)mdl;
1670 			ret = bk_module_start(obj, module);
1671 			if (ret == true)
1672 				_add_bitmap_bit(obj->bitmap, &(module->id), 1);
1673 			if (ret == true && i != PHL_MDL_PRI_OPTIONAL)
1674 				_add_bitmap_bit(obj->basemap, &(module->id), 1);
1675 		} while(pq_get_next(d, &(obj->module_q[i]), mdl, &mdl, _bh));
1676 	}
1677 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s\n", __FUNCTION__);
1678 	/*_print_bitmap(obj->bitmap);*/
1679 	return RTW_PHL_STATUS_SUCCESS;
1680 }
1681 
1682 enum rtw_phl_status dispr_module_stop(void *dispr)
1683 {
1684 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1685 	void *d = phl_to_drvpriv(obj->phl_info);
1686 	_os_list *mdl = NULL;
1687 	struct phl_bk_module *module = NULL;
1688 	u8 i = 0;
1689 
1690 	if (!TEST_STATUS_FLAG(obj->status, DISPR_STARTED))
1691 		return RTW_PHL_STATUS_UNEXPECTED_ERROR;
1692 
1693 	for (i = 0; i < PHL_MDL_PRI_MAX; i++) {
1694 		if (pq_get_front(d, &(obj->module_q[i]), &mdl, _bh) == false)
1695 			continue;
1696 		do {
1697 			module = (struct phl_bk_module *)mdl;
1698 			_clr_bitmap_bit(obj->bitmap, &(module->id), 1);
1699 			_clr_bitmap_bit(obj->basemap, &(module->id), 1);
1700 			bk_module_stop(obj, module);
1701 		} while(pq_get_next(d, &(obj->module_q[i]), mdl, &mdl, _bh));
1702 	}
1703 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s\n", __FUNCTION__);
1704 	/*_print_bitmap(obj->bitmap);*/
1705 	return RTW_PHL_STATUS_SUCCESS;
1706 }
1707 
1708 /**
1709  * dispr_get_cur_cmd_req -- a background module can call this function to
1710  * check whether the cmd dispatcher is idle and judge the risk of I/O conflicts.
1711  * @dispr: dispatcher handle, obtained from _disp_eng_get_dispr_by_idx
1712  * @handle: receives the current cmd request; NULL means the cmd dispatcher is idle
1713  *
1714  * returns RTW_PHL_STATUS_SUCCESS when the cmd dispatcher is busy; the current
1715  * cmd request can then be read from the handle parameter
1716  * returns RTW_PHL_STATUS_FAILURE when the cmd dispatcher is idle
1717  */
1718 enum rtw_phl_status
1719 dispr_get_cur_cmd_req(void *dispr, void **handle)
1720 {
1721 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1722 	struct phl_cmd_token_req_ex *cur_req = NULL;
1723 	enum rtw_phl_status phl_stat = RTW_PHL_STATUS_FAILURE;
1724 
1725 	if (!TEST_STATUS_FLAG(obj->status, DISPR_INIT|DISPR_STARTED) || handle == NULL) {
1726 		phl_stat = RTW_PHL_STATUS_UNEXPECTED_ERROR;
1727 		return phl_stat;
1728 	}
1729 
1730 	(*handle) = NULL;
1731 	cur_req = obj->cur_cmd_req;
1732 
1733 	if (cur_req == NULL ||
1734 	    !TEST_STATUS_FLAG(cur_req->status, REQ_STATUS_RUN) ||
1735 	    TEST_STATUS_FLAG(cur_req->status, REQ_STATUS_CANCEL))
1736 		return phl_stat;
1737 
1738 	*handle = (void *)cur_req;
1739 	phl_stat = RTW_PHL_STATUS_SUCCESS;
1740 
1741 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_DEBUG_,
1742 		"%s, req module id:%d phl_stat:%d\n", __FUNCTION__,
1743 		 cur_req->req.module_id, phl_stat);
1744 	return phl_stat;
1745 }
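
/*
 * Usage sketch (illustrative only, not part of the driver): a background
 * module could poll dispr_get_cur_cmd_req() before issuing I/O to see
 * whether a foreground command request currently owns the dispatcher.
 * my_mdl_io_is_safe() below is a hypothetical helper, not an existing symbol.
 *
 *	static bool my_mdl_io_is_safe(void *dispr)
 *	{
 *		void *handle = NULL;
 *
 *		if (dispr_get_cur_cmd_req(dispr, &handle) ==
 *		    RTW_PHL_STATUS_SUCCESS) {
 *			// busy: handle points at the owning phl_cmd_token_req_ex
 *			return false;
 *		}
 *
 *		// RTW_PHL_STATUS_FAILURE here means the dispatcher is idle
 *		return true;
 *	}
 */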
1746 
1747 enum rtw_phl_status
1748 dispr_set_cur_cmd_info(void *dispr,
1749 			   struct phl_module_op_info *op_info)
1750 {
1751 	void *handle = NULL;
1752 	struct phl_cmd_token_req_ex *cmd_req = NULL;
1753 	struct phl_cmd_token_req *req = NULL;
1754 
1755 	if (RTW_PHL_STATUS_SUCCESS != dispr_get_cur_cmd_req(dispr, &handle))
1756 		return RTW_PHL_STATUS_FAILURE;
1757 
1758 	cmd_req = (struct phl_cmd_token_req_ex *)handle;
1759 	req = &(cmd_req->req);
1760 
1761 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s, id:%d\n", __FUNCTION__, req->module_id);
1762 	if (req->set_info(dispr, req->priv, op_info) == MDL_RET_SUCCESS)
1763 		return RTW_PHL_STATUS_SUCCESS;
1764 	else
1765 		return RTW_PHL_STATUS_FAILURE;
1766 }
1767 
1768 enum rtw_phl_status
1769 dispr_query_cur_cmd_info(void *dispr,
1770 			     struct phl_module_op_info *op_info)
1771 {
1772 	void *handle = NULL;
1773 	struct phl_cmd_token_req_ex *cmd_req = NULL;
1774 	struct phl_cmd_token_req *req = NULL;
1775 
1776 	if (RTW_PHL_STATUS_SUCCESS != dispr_get_cur_cmd_req(dispr, &handle))
1777 		return RTW_PHL_STATUS_FAILURE;
1778 
1779 	cmd_req = (struct phl_cmd_token_req_ex *)handle;
1780 	req = &(cmd_req->req);
1781 
1782 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_DEBUG_, "%s, id:%d\n", __FUNCTION__, req->module_id);
1783 	if (req->query_info(dispr, req->priv, op_info) == MDL_RET_SUCCESS)
1784 		return RTW_PHL_STATUS_SUCCESS;
1785 	else
1786 		return RTW_PHL_STATUS_FAILURE;
1787 }
1788 
1789 enum rtw_phl_status
1790 dispr_get_bk_module_handle(void *dispr,
1791 			       enum phl_module_id id,
1792 			       void **handle)
1793 {
1794 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1795 	void *d = phl_to_drvpriv(obj->phl_info);
1796 	_os_list *mdl = NULL;
1797 	enum rtw_phl_status phl_stat = RTW_PHL_STATUS_FAILURE;
1798 	enum phl_bk_module_priority priority = _get_mdl_priority(id);
1799 
1800 	if (!TEST_STATUS_FLAG(obj->status, DISPR_INIT) ||
1801 	    handle == NULL ||
1802 	    priority == PHL_MDL_PRI_MAX ||
1803 	    !_chk_bitmap_bit(obj->bitmap, id))
1804 		return phl_stat;
1805 
1806 	(*handle) = NULL;
1807 
1808 
1809 	if(pq_search_node(d, &(obj->module_q[priority]), &mdl, _bh, false, &id, search_mdl)) {
1810 		(*handle) = mdl;
1811 		phl_stat = RTW_PHL_STATUS_SUCCESS;
1812 	}
1813 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
1814 			"%s, id:%d phl_stat:%d\n", __FUNCTION__, id, phl_stat);
1815 	return phl_stat;
1816 }
1817 
1818 enum rtw_phl_status
1819 dispr_set_bk_module_info(void *dispr,
1820 			     void *handle,
1821 			     struct phl_module_op_info *op_info)
1822 {
1823 	struct phl_bk_module *module = (struct phl_bk_module *)handle;
1824 	struct phl_bk_module_ops *ops = &(module->ops);
1825 
1826 	if (!TEST_STATUS_FLAG(module->status, MDL_INIT))
1827 		return RTW_PHL_STATUS_FAILURE;
1828 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s, id:%d\n", __FUNCTION__, module->id);
1829 	if (ops->set_info(dispr, module->priv, op_info) == MDL_RET_SUCCESS)
1830 		return RTW_PHL_STATUS_SUCCESS;
1831 	else
1832 		return RTW_PHL_STATUS_FAILURE;
1833 }
1834 
1835 enum rtw_phl_status
1836 dispr_query_bk_module_info(void *dispr,
1837 			       void *handle,
1838 			       struct phl_module_op_info *op_info)
1839 {
1840 	struct phl_bk_module *module = (struct phl_bk_module *)handle;
1841 	struct phl_bk_module_ops *ops = &(module->ops);
1842 
1843 	if (!TEST_STATUS_FLAG(module->status, MDL_INIT))
1844 		return RTW_PHL_STATUS_FAILURE;
1845 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s, id:%d\n", __FUNCTION__, module->id);
1846 	if (ops->query_info(dispr, module->priv, op_info) == MDL_RET_SUCCESS)
1847 		return RTW_PHL_STATUS_SUCCESS;
1848 	else
1849 		return RTW_PHL_STATUS_FAILURE;
1850 }
1851 
1852 enum rtw_phl_status
1853 dispr_set_src_info(void *dispr,
1854 		       struct phl_msg *msg,
1855 		       struct phl_module_op_info *op_info)
1856 {
1857 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1858 	enum rtw_phl_status phl_stat = RTW_PHL_STATUS_FAILURE;
1859 	u8 id = MSG_MDL_ID_FIELD(msg->msg_id);
1860 	struct phl_cmd_token_req_ex *cur_req = obj->cur_cmd_req;
1861 	enum phl_mdl_ret_code ret = MDL_RET_FAIL;
1862 	struct phl_dispr_msg_ex *ex = (struct phl_dispr_msg_ex *)msg;
1863 	u8 cur_req_id = get_cur_cmd_req_id(obj, NULL);
1864 
1865 	if (!TEST_STATUS_FLAG(obj->status, DISPR_INIT) ||
1866 	    (!_chk_bitmap_bit(obj->bitmap, id) &&
1867 	    cur_req_id != id))
1868 		return phl_stat;
1869 
1870 	if (cur_req_id == id) {
1871 		ret = cur_req->req.set_info(dispr, cur_req->req.priv, op_info);
1872 	} else if (TEST_STATUS_FLAG(ex->status, MSG_STATUS_OWNER_BK_MDL)) {
1873 		ret = ex->module->ops.set_info(dispr, ex->module->priv,
1874 					       op_info);
1875 	}
1876 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
1877 			"%s, id:%d phl_stat:%d\n", __FUNCTION__, id, phl_stat);
1878 	if (ret == MDL_RET_FAIL)
1879 		return RTW_PHL_STATUS_FAILURE;
1880 	else
1881 		return RTW_PHL_STATUS_SUCCESS;
1882 }
1883 
1884 enum rtw_phl_status
1885 dispr_query_src_info(void *dispr,
1886 			 struct phl_msg *msg,
1887 			 struct phl_module_op_info *op_info)
1888 {
1889 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1890 	enum rtw_phl_status phl_stat = RTW_PHL_STATUS_FAILURE;
1891 	u8 id = MSG_MDL_ID_FIELD(msg->msg_id);
1892 	struct phl_cmd_token_req_ex *cur_req = obj->cur_cmd_req;
1893 	struct phl_dispr_msg_ex *ex = (struct phl_dispr_msg_ex *)msg;
1894 	enum phl_mdl_ret_code ret = MDL_RET_FAIL;
1895 	u8 cur_req_id = get_cur_cmd_req_id(obj, NULL);
1896 
1897 	if (!TEST_STATUS_FLAG(obj->status, DISPR_INIT) ||
1898 	    (!_chk_bitmap_bit(obj->bitmap, id) &&
1899 	    cur_req_id != id))
1900 		return phl_stat;
1901 
1902 	if (cur_req_id == id) {
1903 		ret = cur_req->req.query_info(dispr, cur_req->req.priv, op_info);
1904 	} else if (TEST_STATUS_FLAG(ex->status, MSG_STATUS_OWNER_BK_MDL)) {
1905 		ret = ex->module->ops.query_info(dispr, ex->module->priv,
1906 						 op_info);
1907 	}
1908 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
1909 		"%s, id:%d phl_stat:%d\n", __FUNCTION__, id, phl_stat);
1910 	if (ret == MDL_RET_FAIL)
1911 		return RTW_PHL_STATUS_FAILURE;
1912 	else
1913 		return RTW_PHL_STATUS_SUCCESS;
1914 }
1915 
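/**
 * dispr_send_msg - validate the sender and the optional @attr, take a free
 * msg_ex entry from the idle pool, copy @msg and the attribute settings into
 * it and queue it for the background thread (dispatcher-controller messages
 * go through insert_msg_by_priority(), everything else through the wait
 * queue). On success *msg_hdl, when given, receives a handle that can later
 * be passed to dispr_cancel_msg().
 */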
1916 enum rtw_phl_status
1917 dispr_send_msg(void *dispr,
1918                struct phl_msg *msg,
1919                struct phl_msg_attribute *attr,
1920                u32 *msg_hdl)
1921 {
1922 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
1923 	void *d = phl_to_drvpriv(obj->phl_info);
1924 	struct phl_dispr_msg_ex *msg_ex = NULL;
1925 	u8 module_id = MSG_MDL_ID_FIELD(msg->msg_id); /* msg src */
1926 	u32 req_status = 0;
1927 	u8 cur_req_id = get_cur_cmd_req_id(obj, &req_status);
1928 	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
1929 
1930 	if (!TEST_STATUS_FLAG(obj->status, DISPR_STARTED)) {
1931 		sts = RTW_PHL_STATUS_UNEXPECTED_ERROR;
1932 		goto err;
1933 	}
1934 
1935 	if (TEST_STATUS_FLAG(obj->status, DISPR_SHALL_STOP)){
1936 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,"%s: dispr shall stop\n", __FUNCTION__);
1937 		sts = RTW_PHL_STATUS_UNEXPECTED_ERROR;
1938 		goto err;
1939 	}
1940 
1941 	if(attr && attr->notify.id_arr == NULL && attr->notify.len) {
1942 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_, "%s attribute err\n",__FUNCTION__);
1943 		sts = RTW_PHL_STATUS_INVALID_PARAM;
1944 		goto err;
1945 	}
1946 
1947 	if (!IS_DISPR_CTRL(module_id) &&
1948 	    !_chk_bitmap_bit(obj->bitmap, module_id) &&
1949 	    ((cur_req_id != PHL_MDL_ID_MAX  && cur_req_id != module_id) ||
1950 	     (cur_req_id == PHL_MDL_ID_MAX && req_status == 0)||
1951 	     (cur_req_id == PHL_MDL_ID_MAX && !TEST_STATUS_FLAG(req_status,REQ_STATUS_LAST_PERMIT)))) {
1952 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_,
1953 			"%s module not allowed to send\n", __FUNCTION__);
1954 		sts = RTW_PHL_STATUS_INVALID_PARAM;
1955 		goto err;
1956 	}
1957 
1958 	if (!pop_front_idle_msg(obj, &msg_ex)) {
1959 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_, "%s idle msg empty\n", __FUNCTION__);
1960 		sts = RTW_PHL_STATUS_RESOURCE;
1961 		goto err;
1962 	}
1963 
1964 	if (msg_hdl)
1965 		*msg_hdl = 0;
1966 
1967 	_os_mem_cpy(d, &msg_ex->msg, msg, sizeof(struct phl_msg));
1968 
1969 	set_msg_bitmap(obj, msg_ex, module_id);
1970 	if (attr) {
1971 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
1972 		msg_ex->attr = (struct dispr_msg_attr *)attr->dispr_attr;
1973 		attr->dispr_attr = NULL;
1974 #endif
1975 		set_msg_custom_bitmap(obj, msg_ex, attr->opt,
1976 			       attr->notify.id_arr, attr->notify.len, module_id);
1977 		if (attr->completion.completion) {
1978 			SET_STATUS_FLAG(msg_ex->status, MSG_STATUS_NOTIFY_COMPLETE);
1979 			msg_ex->completion.completion = attr->completion.completion;
1980 			msg_ex->completion.priv = attr->completion.priv;
1981 		}
1982 		if (TEST_STATUS_FLAG(attr->opt, MSG_OPT_CLR_SNDR_MSG_IF_PENDING))
1983 			SET_STATUS_FLAG(msg_ex->status, MSG_STATUS_CLR_SNDR_MSG_IF_PENDING);
1984 
1985 		if (TEST_STATUS_FLAG(attr->opt, MSG_OPT_PENDING_DURING_CANNOT_IO))
1986 			SET_STATUS_FLAG(msg_ex->status, MSG_STATUS_PENDING_DURING_CANNOT_IO);
1987 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s, opt:0x%x\n",__FUNCTION__, attr->opt);
1988 	}
1989 
1990 	if (get_module_by_id(obj, module_id, &(msg_ex->module)) == true) {
1991 		SET_STATUS_FLAG(msg_ex->status, MSG_STATUS_OWNER_BK_MDL);
1992 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
1993 			"%s module(%d) found\n", __FUNCTION__, module_id);
1994 	} else if ((cur_req_id == module_id) ||
1995 		    (cur_req_id == PHL_MDL_ID_MAX && TEST_STATUS_FLAG(req_status,REQ_STATUS_LAST_PERMIT))) {
1996 		SET_STATUS_FLAG(msg_ex->status, MSG_STATUS_OWNER_REQ);
1997 	}
1998 
1999 	if(TEST_STATUS_FLAG(msg_ex->status, MSG_STATUS_OWNER_REQ) &&
2000 		TEST_STATUS_FLAG(req_status,REQ_STATUS_LAST_PERMIT) &&
2001 	   (attr == NULL || !TEST_STATUS_FLAG(attr->opt, MSG_OPT_SEND_IN_ABORT))) {
2002 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_,
2003 			"%s msg not allowed since cur req is going to unload\n", __FUNCTION__);
2004 		SET_MSG_INDC_FIELD(msg_ex->msg.msg_id, MSG_INDC_FAIL);
2005 		push_back_idle_msg(obj, msg_ex);
2006 		sts = RTW_PHL_STATUS_FAILURE;
2007 		goto exit;
2008 	}
2009 
2010 	if (TEST_STATUS_FLAG(msg_ex->status, MSG_STATUS_OWNER_REQ) &&
2011 		TEST_STATUS_FLAG(req_status,REQ_STATUS_LAST_PERMIT)) {
2012 		SET_STATUS_FLAG(msg_ex->status, MSG_STATUS_FOR_ABORT);
2013 		SET_STATUS_FLAG(obj->status, DISPR_WAIT_ABORT_MSG_DONE);
2014 	}
2015 
2016 	SET_STATUS_FLAG(msg_ex->status, MSG_STATUS_PRE_PHASE);
2017 
2018 	if (IS_DISPR_CTRL(module_id))
2019 		insert_msg_by_priority(obj, msg_ex);
2020 	else
2021 		push_back_wait_msg(obj, msg_ex);
2022 
2023 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s, status:0x%x\n",__FUNCTION__, msg_ex->status);
2024 	if(msg_hdl)
2025 		*msg_hdl = GEN_VALID_HDL(msg_ex->idx);
2026 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s, msg_id:0x%x\n", __FUNCTION__, msg->msg_id);
2027 	return RTW_PHL_STATUS_SUCCESS;
2028 err:
2029 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
2030 	if(attr)
2031 		free_dispr_attr(d,(struct dispr_msg_attr **) &(attr->dispr_attr));
2032 #endif
2033 exit:
2034 	return sts;
2035 }
2036 
2037 enum rtw_phl_status dispr_cancel_msg(void *dispr, u32 *msg_hdl)
2038 {
2039 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
2040 	struct phl_dispr_msg_ex *msg_ex = NULL;
2041 
2042 	if (!TEST_STATUS_FLAG(obj->status, DISPR_STARTED) || msg_hdl == NULL)
2043 		return RTW_PHL_STATUS_UNEXPECTED_ERROR;
2044 
2045 	if (!IS_HDL_VALID(*msg_hdl) ||
2046 	    GET_IDX_FROM_HDL(*msg_hdl) >= MAX_PHL_MSG_NUM) {
2047 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_, "%s, HDL invalid\n", __FUNCTION__);
2048 		return RTW_PHL_STATUS_FAILURE;
2049 	}
2050 
2051 	msg_ex = &(obj->msg_ex_pool[GET_IDX_FROM_HDL(*msg_hdl)]);
2052 	*msg_hdl = 0;
2053 	if (!TEST_STATUS_FLAG(msg_ex->status, MSG_STATUS_ENQ) &&
2054 	    !TEST_STATUS_FLAG(msg_ex->status, MSG_STATUS_RUN)) {
2055 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_, "%s, HDL status err\n", __FUNCTION__);
2056 		return RTW_PHL_STATUS_FAILURE;
2057 	}
2058 
2059 	cancel_msg(obj, msg_ex);
2060 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s\n", __FUNCTION__);
2061 	return RTW_PHL_STATUS_SUCCESS;
2062 }
2063 
2064 enum rtw_phl_status dispr_clr_pending_msg(void *dispr)
2065 {
2066 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
2067 
2068 	SET_STATUS_FLAG(obj->status, DISPR_CLR_PEND_MSG);
2069 	notify_bk_thread(obj);
2070 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s\n", __FUNCTION__);
2071 	return RTW_PHL_STATUS_SUCCESS;
2072 }
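
/**
 * dispr_add_token_req - copy @req into a free phl_cmd_token_req_ex from the
 * idle pool, put it on the wait queue and enqueue a TOKEN_OP_ADD_CMD_REQ
 * operation. Returns RTW_PHL_STATUS_SUCCESS when the token op queue is empty
 * and no token is currently held, otherwise RTW_PHL_STATUS_PENDING; *req_hdl
 * receives a valid handle in both cases.
 */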
2073 enum rtw_phl_status
2074 dispr_add_token_req(void *dispr,
2075 			struct phl_cmd_token_req *req,
2076 			u32 *req_hdl)
2077 {
2078 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
2079 	void *d = phl_to_drvpriv(obj->phl_info);
2080 	struct phl_cmd_token_req_ex *req_ex = NULL;
2081 	enum rtw_phl_status stat = RTW_PHL_STATUS_SUCCESS;
2082 	_os_list *node = NULL;
2083 
2084 	if (!TEST_STATUS_FLAG(obj->status, DISPR_STARTED) ||
2085 	    req_hdl == NULL ||
2086 	    chk_cmd_req_ops(req) == false)
2087 		return RTW_PHL_STATUS_UNEXPECTED_ERROR;
2088 
2089 	if (TEST_STATUS_FLAG(obj->status, DISPR_SHALL_STOP)){
2090 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,"%s: dispr shall stop\n", __FUNCTION__);
2091 		return RTW_PHL_STATUS_UNEXPECTED_ERROR;
2092 	}
2093 
2094 	if (!pop_front_idle_req(obj, &req_ex)) {
2095 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_, "%s idle req empty\n", __FUNCTION__);
2096 		return RTW_PHL_STATUS_RESOURCE;
2097 	}
2098 	_os_mem_cpy(d, &(req_ex->req), req, sizeof(struct phl_cmd_token_req));
2099 
2100 	push_back_wait_req(obj, req_ex);
2101 	*req_hdl = GEN_VALID_HDL(req_ex->idx);
2102 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
2103 		"%s, id:%d, hdl:0x%x token_cnt:%d\n", __FUNCTION__,
2104 		 req->module_id,
2105 		 *req_hdl,
2106 		 _os_atomic_read(d, &(obj->token_cnt)));
2107 
2108 	if (pq_get_front(d, &(obj->token_op_q), &node, _bh) == false &&
2109 	    _os_atomic_read(d, &(obj->token_cnt)) == 0)
2110 		stat = RTW_PHL_STATUS_SUCCESS;
2111 	else
2112 		stat = RTW_PHL_STATUS_PENDING;
2113 	dispr_enqueue_token_op_info(obj, &req_ex->add_req_info, TOKEN_OP_ADD_CMD_REQ, req_ex->idx);
2114 	return stat;
2115 }
2116 
2117 enum rtw_phl_status dispr_cancel_token_req(void *dispr, u32 *req_hdl)
2118 {
2119 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
2120 	struct phl_cmd_token_req_ex *req_ex = NULL;
2121 
2122 	if (!TEST_STATUS_FLAG(obj->status, DISPR_STARTED) || req_hdl == NULL)
2123 		return RTW_PHL_STATUS_UNEXPECTED_ERROR;
2124 
2125 	if (!IS_HDL_VALID(*req_hdl) ||
2126 	    GET_IDX_FROM_HDL(*req_hdl) >= MAX_CMD_REQ_NUM) {
2127 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_,
2128 			"%s, HDL(0x%x) invalid\n", __FUNCTION__, *req_hdl);
2129 		return RTW_PHL_STATUS_FAILURE;
2130 	}
2131 	req_ex = &(obj->token_req_ex_pool[GET_IDX_FROM_HDL(*req_hdl)]);
2132 	if (!TEST_STATUS_FLAG(req_ex->status, REQ_STATUS_ENQ) &&
2133 	    !TEST_STATUS_FLAG(req_ex->status, REQ_STATUS_RUN) &&
2134 	    !TEST_STATUS_FLAG(req_ex->status, REQ_STATUS_PREPARE)) {
2135 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_,
2136 			"%s, HDL(0x%x) status err\n", __FUNCTION__, *req_hdl);
2137 		return RTW_PHL_STATUS_FAILURE;
2138 	}
2139 	SET_STATUS_FLAG(req_ex->status, REQ_STATUS_CANCEL);
2140 	if (dispr_enqueue_token_op_info(obj, &req_ex->free_req_info, TOKEN_OP_CANCEL_CMD_REQ, req_ex->idx))
2141 		return RTW_PHL_STATUS_SUCCESS;
2142 	else
2143 		return RTW_PHL_STATUS_FAILURE;
2144 }
2145 
2146 enum rtw_phl_status dispr_free_token(void *dispr, u32 *req_hdl)
2147 {
2148 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
2149 	void *d = phl_to_drvpriv(obj->phl_info);
2150 	struct phl_cmd_token_req_ex *req_ex = NULL;
2151 
2152 	if (!TEST_STATUS_FLAG(obj->status, DISPR_STARTED) || req_hdl == NULL)
2153 		return RTW_PHL_STATUS_UNEXPECTED_ERROR;
2154 
2155 	if (obj->cur_cmd_req == NULL ||
2156 	    _os_atomic_read(d, &(obj->token_cnt)) == 0  ||
2157 	    !IS_HDL_VALID(*req_hdl) ||
2158 	    GET_IDX_FROM_HDL(*req_hdl) >= MAX_CMD_REQ_NUM) {
2159 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_,
2160 			"%s, HDL(0x%x) invalid\n", __FUNCTION__, *req_hdl);
2161 		return RTW_PHL_STATUS_FAILURE;
2162 	}
2163 	req_ex = &(obj->token_req_ex_pool[GET_IDX_FROM_HDL(*req_hdl)]);
2164 	if (!TEST_STATUS_FLAG(req_ex->status, REQ_STATUS_RUN) &&
2165 	    !TEST_STATUS_FLAG(req_ex->status, REQ_STATUS_PREPARE)) {
2166 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_ERR_,
2167 			"%s, HDL(0x%x) mismatch\n", __FUNCTION__, *req_hdl);
2168 		return RTW_PHL_STATUS_FAILURE;
2169 	}
2170 	SET_STATUS_FLAG(req_ex->status, REQ_STATUS_CANCEL);
2171 	if (dispr_enqueue_token_op_info(obj, &req_ex->free_req_info, TOKEN_OP_FREE_CMD_REQ, req_ex->idx))
2172 		return RTW_PHL_STATUS_SUCCESS;
2173 	else
2174 		return RTW_PHL_STATUS_FAILURE;
2175 }
2176 
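/**
 * dispr_notify_dev_io_status - track whether the device currently allows I/O.
 * The first "cannot I/O" notification sets DISPR_CANNOT_IO, records @mdl_id as
 * the exclusive module and sends MSG_EVT_DEV_CANNOT_IO; the matching resume
 * clears the flag, sends MSG_EVT_DEV_RESUME_IO and flushes pending messages
 * via dispr_clr_pending_msg().
 */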
2177 enum rtw_phl_status dispr_notify_dev_io_status(void *dispr, enum phl_module_id mdl_id, bool allow_io)
2178 {
2179 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
2180 	enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
2181 
2182 	if (allow_io == false) {
2183 		if (!TEST_STATUS_FLAG(obj->status, DISPR_CANNOT_IO)) {
2184 			SET_STATUS_FLAG(obj->status, DISPR_CANNOT_IO);
2185 			SET_EXCL_MDL(obj, mdl_id);
2186 			PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
2187 				"%s, mdl_id(%d) notify cannot io\n", __FUNCTION__, mdl_id);
2188 			status = send_dev_io_status_change(obj, allow_io);
2189 		}
2190 	}
2191 	else {
2192 		if (TEST_STATUS_FLAG(obj->status, DISPR_CANNOT_IO)) {
2193 			CLEAR_STATUS_FLAG(obj->status, DISPR_CANNOT_IO);
2194 			CLEAR_EXCL_MDL(obj);
2195 			status = send_dev_io_status_change(obj, allow_io);
2196 			dispr_clr_pending_msg(dispr);
2197 			PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
2198 				"%s, mdl_id(%d) notify io resume\n", __FUNCTION__, mdl_id);
2199 		}
2200 	}
2201 	return status;
2202 }
2203 
2204 void dispr_notify_shall_stop(void *dispr)
2205 {
2206 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
2207 
2208 	if (!TEST_STATUS_FLAG(obj->status, DISPR_SHALL_STOP)) {
2209 		SET_STATUS_FLAG(obj->status, DISPR_SHALL_STOP);
2210 		dispr_clr_pending_msg(dispr);
2211 		PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
2212 		          "%s, notify shall stop\n", __FUNCTION__);
2213 	}
2214 }
2215 
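/**
 * dispr_is_fg_empty - return true when no foreground command activity exists:
 * the token request wait queue is empty, the token op queue is empty and
 * token_cnt is zero. The wait queue is checked before the token op queue so
 * the foreground state is not sampled inconsistently.
 */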
2216 u8 dispr_is_fg_empty(void *dispr)
2217 {
2218 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
2219 	bool is_empty = true;
2220 	void *drv = phl_to_drvpriv(obj->phl_info);
2221 	struct phl_queue *q = NULL;
2222 	_os_list *node = NULL;
2223 
2224 	do {
2225 		/* shall check wait queue first then check token op queue
2226 		/* shall check the wait queue first, then the token op queue,
2227 		 * to avoid reading an incorrect empty state of the fg cmd
2228 		q = &(obj->token_req_wait_q);
2229 		_os_spinlock(drv, &(q->lock), _bh, NULL);
2230 		if(!list_empty(&q->queue) && (q->cnt > 0)) {
2231 			is_empty = false;
2232 			_os_spinunlock(drv, &(q->lock), _bh, NULL);
2233 			break;
2234 		}
2235 		_os_spinunlock(drv, &(q->lock), _bh, NULL);
2236 
2237 		if (pq_get_front(drv, &(obj->token_op_q), &node, _bh) == true ||
2238 		    _os_atomic_read(drv, &(obj->token_cnt)) > 0) {
2239 			is_empty = false;
2240 			break;
2241 		}
2242 	} while(false);
2243 
2244 	return is_empty;
2245 }
2246 
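/**
 * dispr_process_token_req - run from the background thread to hand the command
 * token to the next waiting request. Bails out while the dispatcher is
 * stopping, a token is already held, or an abort msg is still in flight; when
 * the wait queue drains it sends the phy-idle notification, otherwise it
 * leaves the idle state and keeps popping requests until one registers
 * successfully.
 */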
2247 enum rtw_phl_status dispr_process_token_req(struct cmd_dispatcher *obj)
2248 {
2249 	void *d = phl_to_drvpriv(obj->phl_info);
2250 	struct phl_cmd_token_req_ex *ex = NULL;
2251 
2252 	do {
2253 		if (!TEST_STATUS_FLAG(obj->status, DISPR_STARTED))
2254 			return RTW_PHL_STATUS_UNEXPECTED_ERROR;
2255 
2256 		if (TEST_STATUS_FLAG(obj->status, DISPR_SHALL_STOP)) {
2257 			PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
2258 			          "%s: dispr shall stop\n", __FUNCTION__);
2259 
2260 			return RTW_PHL_STATUS_FAILURE;
2261 		}
2262 
2263 		if (_os_atomic_read(d, &(obj->token_cnt)) > 0)
2264 			return RTW_PHL_STATUS_FAILURE;
2265 
2266 		if (TEST_STATUS_FLAG(obj->status, DISPR_WAIT_ABORT_MSG_DONE)) {
2267 			PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_,
2268 				"%s, wait for abort msg sent by prev req to finish before registering next req\n", __FUNCTION__);
2269 			return RTW_PHL_STATUS_FAILURE;
2270 		}
2271 
2272 		if (pop_front_wait_req(obj, &ex) == false) {
2273 			if (!TEST_STATUS_FLAG(obj->status, DISPR_NOTIFY_IDLE)) {
2274 				SET_STATUS_FLAG(obj->status, DISPR_NOTIFY_IDLE);
2275 				send_bk_msg_phy_idle(obj);
2276 			}
2277 			return RTW_PHL_STATUS_SUCCESS;
2278 		}
2279 
2280 		if (TEST_STATUS_FLAG(obj->status, DISPR_NOTIFY_IDLE)) {
2281 			CLEAR_STATUS_FLAG(obj->status, DISPR_NOTIFY_IDLE);
2282 			send_bk_msg_phy_on(obj);
2283 		}
2284 
2285 	}while(!register_cur_cmd_req(obj, ex));
2286 
2287 	return RTW_PHL_STATUS_SUCCESS;
2288 }
2289 
2290 void dispr_share_thread_loop_hdl(void *dispr)
2291 {
2292 	dispr_thread_loop_hdl( (struct cmd_dispatcher *)dispr);
2293 }
2294 
2295 void dispr_share_thread_leave_hdl(void *dispr)
2296 {
2297 	dispr_thread_leave_hdl((struct cmd_dispatcher *)dispr);
2298 }
2299 
2300 void dispr_share_thread_stop_prior_hdl(void *dispr)
2301 {
2302 	dispr_thread_stop_prior_hdl((struct cmd_dispatcher *)dispr);
2303 }
2304 
2305 void dispr_share_thread_stop_post_hdl(void *dispr)
2306 {
2307 	dispr_thread_stop_post_hdl((struct cmd_dispatcher *)dispr);
2308 }
2309 
2310 u8 disp_query_mdl_id(struct phl_info_t *phl, void *bk_mdl)
2311 {
2312 	struct phl_bk_module *mdl = NULL;
2313 
2314 	if (bk_mdl != NULL) {
2315 		mdl = (struct phl_bk_module *)bk_mdl;
2316 		return mdl->id;
2317 	} else {
2318 		return PHL_MDL_ID_MAX;
2319 	}
2320 }
2321 
2322 void send_bk_msg_phy_on(struct cmd_dispatcher *obj)
2323 {
2324 	struct phl_msg msg = {0};
2325 	struct phl_msg_attribute attr = {0};
2326 
2327 	SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_POWER_MGNT);
2328 	SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_PHY_ON);
2329 	dispr_send_msg((void*)obj, &msg, &attr, NULL);
2330 }
2331 
2332 void send_bk_msg_phy_idle(struct cmd_dispatcher *obj)
2333 {
2334 	struct phl_msg msg = {0};
2335 	struct phl_msg_attribute attr = {0};
2336 
2337 	SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_POWER_MGNT);
2338 	SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_PHY_IDLE);
2339 	dispr_send_msg((void*)obj, &msg, &attr, NULL);
2340 }
2341 
2342 enum rtw_phl_status send_dev_io_status_change(struct cmd_dispatcher *obj, u8 allow_io)
2343 {
2344 	struct phl_msg msg = {0};
2345 	struct phl_msg_attribute attr = {0};
2346 	u16 event = (allow_io == true) ? (MSG_EVT_DEV_RESUME_IO) : (MSG_EVT_DEV_CANNOT_IO);
2347 
2348 	SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_PHY_MGNT);
2349 	SET_MSG_EVT_ID_FIELD(msg.msg_id, event);
2350 	return dispr_send_msg((void*)obj, &msg, &attr, NULL);
2351 }
2352 
2353 #ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
2354 enum phl_mdl_ret_code loop_through_map(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex,
2355 					enum phl_bk_module_priority priority, struct msg_notify_map *map, u8 pre_prot_phase)
2356 {
2357 	u8 i = 0;
2358 	struct phl_bk_module *mdl = NULL;
2359 	enum phl_mdl_ret_code ret = MDL_RET_IGNORE;
2360 	u8 *bitmap = (pre_prot_phase == true) ? (ex->premap) : (ex->postmap);
2361 
2362 	for (i = 0 ; i < map->len; i++) {
2363 		if (map->id_arr[i] >= PHL_FG_MDL_START) {
2364 			PHL_TRACE(COMP_PHL_CMDDISP, _PHL_WARNING_,
2365 			"%s, cmd req does not take precedence over bk module\n", __FUNCTION__);
2366 			continue;
2367 		}
2368 		mdl = (struct phl_bk_module *)GET_MDL_HANDLE(obj, map->id_arr[i]);
2369 		if (mdl == NULL || !_chk_bitmap_bit(bitmap, mdl->id))
2370 			continue;
2371 		/*only allow sequence rearrange for modules at the same priority*/
2372 		if ( _get_mdl_priority(mdl->id) != priority)
2373 			continue;
2374 		ret = feed_mdl_msg(obj, mdl, ex);
2375 		if (STOP_DISPATCH_MSG(ret))
2376 			return ret;
2377 	}
2378 	return ret;
2379 }
2380 
2381 static enum phl_mdl_ret_code run_self_def_seq(struct cmd_dispatcher *obj, struct phl_dispr_msg_ex *ex,
2382 					enum phl_bk_module_priority priority, u8 pre_prot_phase)
2383 {
2384 	struct msg_notify_map *map = NULL;
2385 	enum phl_mdl_ret_code ret = MDL_RET_IGNORE;
2386 	struct msg_dispatch_seq* seq = NULL;
2387 
2388 	if (ex->attr == NULL)
2389 		return ret;
2390 	/*MANDATORY modules cannot change dispatch order*/
2391 	if (pre_prot_phase == true)
2392 		seq = &(ex->attr->self_def_seq.pre_prot_phase);
2393 	else
2394 		seq = &(ex->attr->self_def_seq.post_prot_phase);
2395 
2396 	return loop_through_map(obj, ex, priority, &(seq->map[priority]), pre_prot_phase);
2397 }
2398 void reset_self_def_seq(void *d, struct msg_self_def_seq* self_def_seq)
2399 {
2400 	u8 i = 0;
2401 	u8 cnt = 0;
2402 	struct msg_dispatch_seq *seq = (struct msg_dispatch_seq *)self_def_seq;
2403 
2404 	while (cnt++ < 2) {
2405 		for (i = 0; i < PHL_MDL_PRI_MAX; i++) {
2406 			if (seq->map[i].len)
2407 				_os_kmem_free(d, seq->map[i].id_arr, seq->map[i].len);
2408 			seq->map[i].id_arr = NULL;
2409 			seq->map[i].len = 0;
2410 		}
2411 		seq++;
2412 	}
2413 
2414 }
2415 
2416 u8 copy_self_def_seq(void *d, struct msg_self_def_seq* self_def_dest, struct msg_self_def_seq* self_def_src)
2417 {
2418 	u8 i = 0;
2419 	u8 cnt = 0;
2420 	struct msg_dispatch_seq *dest = (struct msg_dispatch_seq *)self_def_dest;
2421 	struct msg_dispatch_seq *src = (struct msg_dispatch_seq *)self_def_src;
2422 
2423 	while (cnt++ < 2) {
2424 		for (i = 0; i < PHL_MDL_PRI_MAX; i++) {
2425 			if (src->map[i].len) {
2426 				dest->map[i].id_arr = _os_kmem_alloc(d, src->map[i].len);
2427 				if (dest->map[i].id_arr == NULL)
2428 					return false;
2429 				dest->map[i].len = src->map[i].len;
2430 				_os_mem_cpy(d, dest->map[i].id_arr, src->map[i].id_arr, dest->map[i].len);
2431 			}
2432 		}
2433 		dest++;
2434 		src++;
2435 	}
2436 	return true;
2437 }
2438 
2439 inline static u8 alloc_dispr_attr(void *d, struct phl_msg_attribute *attr)
2440 {
2441 	if (attr->dispr_attr == NULL)
2442 		attr->dispr_attr = _os_kmem_alloc(d, sizeof(struct dispr_msg_attr));
2443 	if (attr->dispr_attr != NULL)
2444 		_os_mem_set(d, attr->dispr_attr, 0, sizeof(struct dispr_msg_attr));
2445 	return (attr->dispr_attr == NULL) ? (false) : (true);
2446 }
2447 
2448 enum rtw_phl_status dispr_set_dispatch_seq(void *dispr, struct phl_msg_attribute *attr,
2449 							struct msg_self_def_seq* seq)
2450 {
2451 	struct cmd_dispatcher *obj = (struct cmd_dispatcher *)dispr;
2452 	void *d = phl_to_drvpriv(obj->phl_info);
2453 	struct dispr_msg_attr *dispr_attr = NULL;
2454 
2455 	if (attr == NULL || seq == NULL)
2456 		return RTW_PHL_STATUS_INVALID_PARAM;
2457 
2458 	if (alloc_dispr_attr(d, attr) == false)
2459 		goto err_attr_alloc;
2460 
2461 	dispr_attr = attr->dispr_attr;
2462 	reset_self_def_seq(d, &(dispr_attr->self_def_seq));
2463 
2464 	if (copy_self_def_seq(d, &(dispr_attr->self_def_seq), seq) == false)
2465 		goto err_seq_copy;
2466 	return RTW_PHL_STATUS_SUCCESS;
2467 err_seq_copy:
2468 	free_dispr_attr(d, &(attr->dispr_attr));
2469 err_attr_alloc:
2470 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_WARNING_,
2471 			"%s, err\n", __FUNCTION__);
2472 	return RTW_PHL_STATUS_RESOURCE;
2473 }
2474 
2475 static void free_dispr_attr(void *d, struct dispr_msg_attr **dispr_attr)
2476 {
2477 	struct dispr_msg_attr *attr = NULL;
2478 
2479 	if (dispr_attr == NULL || *dispr_attr == NULL)
2480 		return;
2481 	PHL_TRACE(COMP_PHL_CMDDISP, _PHL_INFO_, "%s\n", __FUNCTION__);
2482 	attr = *dispr_attr;
2483 	reset_self_def_seq(d, &(attr->self_def_seq));
2484 	_os_kmem_free(d, attr, sizeof(struct dispr_msg_attr));
2485 	*dispr_attr = NULL;
2486 }
2487 #endif
2488 #endif
2489