/******************************************************************************
 *
 * Copyright(c) 2019 Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 *****************************************************************************/
#define _PHL_PHY_MGNT_C_
#include "phl_headers.h"
#ifdef CONFIG_CMD_DISP

enum rtw_phl_status phl_disp_eng_bk_module_deinit(struct phl_info_t *phl);
enum rtw_phl_status _disp_eng_get_dispr_by_idx(struct phl_info_t *phl,
                                               u8 band_idx,
                                               void **dispr);

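/* Entry function of the shared dispatcher thread (SHARE_THREAD_MODE).
 * Waits on msg_q_sema, then gives every dispatcher one loop iteration so
 * the thread bandwidth is shared evenly across bands.
 */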
int share_thread_hdl(void *param)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)param;
	void *d = phl_to_drvpriv(phl_info);
	struct phl_cmd_dispatch_engine *disp_eng = &(phl_info->disp_eng);
	u8 i = 0;

	PHL_INFO("%s enter\n", __FUNCTION__);
	while (!_os_thread_check_stop(d, &(disp_eng->share_thread))) {

		_os_sema_down(d, &disp_eng->msg_q_sema);

		/* A simple for-loop guarantees that all dispatchers split the
		 * bandwidth of the shared thread evenly; adopting a FIFO rule
		 * here would lead to a disproportionate distribution of
		 * thread bandwidth.
		 */
		for (i = 0 ; i < disp_eng->phy_num; i++) {
			if (_os_thread_check_stop(d, &(disp_eng->share_thread)))
				break;
			dispr_share_thread_loop_hdl(disp_eng->dispatcher[i]);
		}
	}
	for (i = 0 ; i < disp_eng->phy_num; i++)
		dispr_share_thread_leave_hdl(disp_eng->dispatcher[i]);
	_os_thread_wait_stop(d, &(disp_eng->share_thread));
	PHL_INFO("%s down\n", __FUNCTION__);
	return 0;
}

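/* Allocate the dispatcher array and initialize one dispatcher per PHY/band;
 * the thread mode (solo or shared) is selected at build time.
 */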
enum rtw_phl_status
phl_disp_eng_init(struct phl_info_t *phl, u8 phy_num)
{
	u8 i = 0;
	struct phl_cmd_dispatch_engine *disp_eng = &(phl->disp_eng);
	void *d = phl_to_drvpriv(phl);
	enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;

	if (disp_eng->dispatcher != NULL) {
		PHL_ERR("[PHY]: %s, not empty\n", __FUNCTION__);
		return RTW_PHL_STATUS_FAILURE;
	}

	disp_eng->phl_info = phl;
	disp_eng->phy_num = phy_num;
#ifdef CONFIG_CMD_DISP_SOLO_MODE
	disp_eng->thread_mode = SOLO_THREAD_MODE;
#else
	disp_eng->thread_mode = SHARE_THREAD_MODE;
#endif
	disp_eng->dispatcher = _os_mem_alloc(d, sizeof(void *) * phy_num);
#ifdef CONFIG_CMD_DISP_SOLO_MODE
	_os_sema_init(d, &(disp_eng->dispr_ctrl_sema), 1);
#endif
	if (disp_eng->dispatcher == NULL) {
		disp_eng->phy_num = 0;
		PHL_ERR("[PHY]: %s, alloc fail\n", __FUNCTION__);
		return RTW_PHL_STATUS_RESOURCE;
	}

	for (i = 0 ; i < phy_num; i++) {
		status = dispr_init(phl, &(disp_eng->dispatcher[i]), i);
		if (status != RTW_PHL_STATUS_SUCCESS)
			break;
	}

	if (status != RTW_PHL_STATUS_SUCCESS)
		phl_disp_eng_deinit(phl);

	return status;
}

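/* Deinitialize background modules and all dispatchers, then release the
 * dispatcher array.
 */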
enum rtw_phl_status
phl_disp_eng_deinit(struct phl_info_t *phl)
{
	u8 i = 0;
	struct phl_cmd_dispatch_engine *disp_eng = &(phl->disp_eng);
	void *d = phl_to_drvpriv(phl);

	if (disp_eng->dispatcher == NULL)
		return RTW_PHL_STATUS_FAILURE;

	phl_disp_eng_bk_module_deinit(phl);

	for (i = 0 ; i < disp_eng->phy_num; i++) {
		if (disp_eng->dispatcher[i] == NULL)
			continue;
		dispr_deinit(phl, disp_eng->dispatcher[i]);
		disp_eng->dispatcher[i] = NULL;
	}
#ifdef CONFIG_CMD_DISP_SOLO_MODE
	_os_sema_free(d, &(disp_eng->dispr_ctrl_sema));
#endif
	if (disp_eng->phy_num) {
		_os_mem_free(d, disp_eng->dispatcher,
				sizeof(void *) * (disp_eng->phy_num));
		disp_eng->dispatcher = NULL;
		disp_eng->phy_num = 0;
	}

	return RTW_PHL_STATUS_SUCCESS;
}

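/* Deinitialize the background modules registered on every dispatcher. */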
enum rtw_phl_status
phl_disp_eng_bk_module_deinit(struct phl_info_t *phl)
{
	u8 i = 0;
	struct phl_cmd_dispatch_engine *disp_eng = &(phl->disp_eng);

	for (i = 0 ; i < disp_eng->phy_num; i++) {
		if (disp_eng->dispatcher[i] == NULL)
			continue;
		dispr_module_deinit(disp_eng->dispatcher[i]);
	}

	return RTW_PHL_STATUS_SUCCESS;
}

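/* Start every dispatcher and its background modules; in shared thread mode,
 * also create and schedule the shared dispatcher thread.
 */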
enum rtw_phl_status
phl_disp_eng_start(struct phl_info_t *phl)
{
	u8 i = 0;
	struct phl_cmd_dispatch_engine *disp_eng = &(phl->disp_eng);
	void *d = phl_to_drvpriv(phl);

	_os_sema_init(d, &(disp_eng->msg_q_sema), 0);
	if (!disp_eng_is_solo_thread_mode(phl)) {
		_os_thread_init(d, &(disp_eng->share_thread), share_thread_hdl, phl,
				"disp_eng_share_thread");
		_os_thread_schedule(d, &(disp_eng->share_thread));
	}
	for (i = 0 ; i < disp_eng->phy_num; i++) {
		if (disp_eng->dispatcher[i] == NULL)
			continue;
		dispr_start(disp_eng->dispatcher[i]);
		dispr_module_start(disp_eng->dispatcher[i]);
	}

	return RTW_PHL_STATUS_SUCCESS;
}

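/* Stop background modules and dispatchers; in shared thread mode, wake the
 * shared thread so it can exit and deinitialize it before running the
 * per-dispatcher post-stop handlers.
 */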
enum rtw_phl_status
phl_disp_eng_stop(struct phl_info_t *phl)
{
	u8 i = 0;
	struct phl_cmd_dispatch_engine *disp_eng = &(phl->disp_eng);
	void *d = phl_to_drvpriv(phl);
	u8 solo_mode = (disp_eng_is_solo_thread_mode(phl)) ? (true) : (false);

	if (disp_eng->dispatcher == NULL) {
		PHL_ERR("[PHY]: %s, abnormal state\n", __FUNCTION__);
		return RTW_PHL_STATUS_SUCCESS;
	}

	for (i = 0 ; i < disp_eng->phy_num; i++) {
		if (disp_eng->dispatcher[i] == NULL)
			continue;
		dispr_module_stop(disp_eng->dispatcher[i]);
		if (solo_mode == true)
			dispr_stop(disp_eng->dispatcher[i]);
		else
			dispr_share_thread_stop_prior_hdl(disp_eng->dispatcher[i]);
	}

	if (solo_mode == false) {
		_os_thread_stop(d, &(disp_eng->share_thread));
		_os_sema_up(d, &(disp_eng->msg_q_sema));
		_os_thread_deinit(d, &(disp_eng->share_thread));

		for (i = 0 ; i < disp_eng->phy_num; i++)
			dispr_share_thread_stop_post_hdl(disp_eng->dispatcher[i]);
	}
	_os_sema_free(d, &(disp_eng->msg_q_sema));
	return RTW_PHL_STATUS_SUCCESS;
}

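/* Exported rtw_phl_* wrappers: forward each call to the corresponding
 * phl_disp_eng_* implementation.
 */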
enum rtw_phl_status
rtw_phl_register_module(void *phl,
                        u8 band_idx,
                        enum phl_module_id id,
                        struct phl_bk_module_ops *ops)
{
	return phl_disp_eng_register_module((struct phl_info_t *)phl,
	                                    band_idx, id, ops);
}

enum rtw_phl_status
rtw_phl_deregister_module(void *phl, u8 band_idx, enum phl_module_id id)
{
	return phl_disp_eng_deregister_module((struct phl_info_t *)phl,
	                                      band_idx,
	                                      id);
}

u8 rtw_phl_is_fg_empty(void *phl, u8 band_idx)
{
	return phl_disp_eng_is_fg_empty((struct phl_info_t *)phl, band_idx);
}

enum rtw_phl_status
rtw_phl_send_msg_to_dispr(void *phl,
                          struct phl_msg *msg,
                          struct phl_msg_attribute *attr,
                          u32 *msg_hdl)
{
	return phl_disp_eng_send_msg(phl, msg, attr, msg_hdl);
}

enum rtw_phl_status
rtw_phl_cancel_dispr_msg(void *phl, u8 band_idx, u32 *msg_hdl)
{
	return phl_disp_eng_cancel_msg(phl, band_idx, msg_hdl);
}

enum rtw_phl_status
rtw_phl_add_cmd_token_req(void *phl,
                          u8 band_idx,
                          struct phl_cmd_token_req *req,
                          u32 *req_hdl)
{
	return phl_disp_eng_add_token_req(phl, band_idx, req, req_hdl);
}

enum rtw_phl_status
rtw_phl_cancel_cmd_token(void *phl, u8 band_idx, u32 *req_hdl)
{
	return phl_disp_eng_cancel_token_req(phl, band_idx, req_hdl);
}

enum rtw_phl_status
rtw_phl_free_cmd_token(void *phl, u8 band_idx, u32 *req_hdl)
{
	return phl_disp_eng_free_token(phl, band_idx, req_hdl);
}

enum rtw_phl_status
rtw_phl_set_cur_cmd_info(void *phl,
                         u8 band_idx,
                         struct phl_module_op_info *op_info)
{
	return phl_disp_eng_set_cur_cmd_info(phl, band_idx, op_info);
}

enum rtw_phl_status
rtw_phl_query_cur_cmd_info(void *phl,
                           u8 band_idx,
                           struct phl_module_op_info *op_info)
{
	return phl_disp_eng_query_cur_cmd_info(phl, band_idx, op_info);
}

enum rtw_phl_status
rtw_phl_set_bk_module_info(void *phl,
                           u8 band_idx,
                           enum phl_module_id id,
                           struct phl_module_op_info *op_info)
{
	return phl_disp_eng_set_bk_module_info(phl, band_idx, id, op_info);
}

enum rtw_phl_status
rtw_phl_query_bk_module_info(void *phl,
                             u8 band_idx,
                             enum phl_module_id id,
                             struct phl_module_op_info *op_info)
{
	return phl_disp_eng_query_bk_module_info(phl, band_idx, id, op_info);
}

enum rtw_phl_status
rtw_phl_set_msg_disp_seq(void *phl,
                         struct phl_msg_attribute *attr,
                         struct msg_self_def_seq *seq)
{
	return phl_disp_eng_set_msg_disp_seq(phl, attr, seq);
}

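/* Resolve the dispatcher serving the given band index; fails on an
 * out-of-range index or a NULL output pointer.
 */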
enum rtw_phl_status
_disp_eng_get_dispr_by_idx(struct phl_info_t *phl, u8 band_idx, void **dispr)
{
	struct phl_cmd_dispatch_engine *disp_eng = &(phl->disp_eng);

	if (band_idx > (disp_eng->phy_num - 1) || (dispr == NULL)) {
		PHL_WARN("%s invalid input :%d\n", __func__, band_idx);
		return RTW_PHL_STATUS_INVALID_PARAM;
	}
	(*dispr) = disp_eng->dispatcher[band_idx];
	return RTW_PHL_STATUS_SUCCESS;
}

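/* Register/deregister a background module on the dispatcher of the
 * requested band.
 */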
enum rtw_phl_status
phl_disp_eng_register_module(struct phl_info_t *phl,
                             u8 band_idx,
                             enum phl_module_id id,
                             struct phl_bk_module_ops *ops)
{
	struct phl_cmd_dispatch_engine *disp_eng = &(phl->disp_eng);
	u8 idx = band_idx;

	if ((band_idx + 1) > disp_eng->phy_num) {
		PHL_WARN("%s invalid input :%d\n", __func__, band_idx);
		return RTW_PHL_STATUS_INVALID_PARAM;
	}

	return dispr_register_module(disp_eng->dispatcher[idx], id, ops);
}

enum rtw_phl_status
phl_disp_eng_deregister_module(struct phl_info_t *phl,
                               u8 band_idx,
                               enum phl_module_id id)
{
	struct phl_cmd_dispatch_engine *disp_eng = &(phl->disp_eng);
	u8 idx = band_idx;

	if ((band_idx + 1) > disp_eng->phy_num) {
		PHL_WARN("%s invalid input :%d\n", __func__, band_idx);
		return RTW_PHL_STATUS_INVALID_PARAM;
	}

	return dispr_deregister_module(disp_eng->dispatcher[idx], id);
}

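/* Wake the shared dispatcher thread (ups msg_q_sema) so queued work gets
 * serviced.
 */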
void disp_eng_notify_share_thread(struct phl_info_t *phl, void *dispr)
{
	void *d = phl_to_drvpriv(phl);
	struct phl_cmd_dispatch_engine *disp_eng = &(phl->disp_eng);

	_os_sema_up(d, &(disp_eng->msg_q_sema));
}

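/* A band is considered busy while its dispatcher holds a current command
 * token request.
 */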
u8 phl_disp_eng_is_dispr_busy(struct phl_info_t *phl, u8 band_idx)
{
	void *dispr = NULL;
	void *handle = NULL;

	if (_disp_eng_get_dispr_by_idx(phl, band_idx, &dispr) != RTW_PHL_STATUS_SUCCESS)
		return false;
	if (dispr_get_cur_cmd_req(dispr, &handle) == RTW_PHL_STATUS_SUCCESS)
		return true;
	else
		return false;
}

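/* The helpers below resolve the dispatcher for the given band (or for
 * msg->band_idx) and forward the request to it.
 */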
enum rtw_phl_status
phl_disp_eng_set_cur_cmd_info(struct phl_info_t *phl,
                              u8 band_idx,
                              struct phl_module_op_info *op_info)
{
	enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
	void *dispr = NULL;

	status = _disp_eng_get_dispr_by_idx(phl, band_idx, &dispr);
	if (RTW_PHL_STATUS_SUCCESS != status)
		return status;

	return dispr_set_cur_cmd_info(dispr, op_info);
}

enum rtw_phl_status
phl_disp_eng_query_cur_cmd_info(struct phl_info_t *phl,
                                u8 band_idx,
                                struct phl_module_op_info *op_info)
{
	enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
	void *dispr = NULL;

	status = _disp_eng_get_dispr_by_idx(phl, band_idx, &dispr);
	if (RTW_PHL_STATUS_SUCCESS != status)
		return status;

	return dispr_query_cur_cmd_info(dispr, op_info);
}

enum rtw_phl_status
phl_disp_eng_set_bk_module_info(struct phl_info_t *phl,
                                u8 band_idx,
                                enum phl_module_id id,
                                struct phl_module_op_info *op_info)
{
	enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
	void *dispr = NULL;
	void *handle = NULL;

	status = _disp_eng_get_dispr_by_idx(phl, band_idx, &dispr);
	if (RTW_PHL_STATUS_SUCCESS != status)
		return status;

	status = dispr_get_bk_module_handle(dispr, id, &handle);
	if (RTW_PHL_STATUS_SUCCESS != status)
		return status;

	return dispr_set_bk_module_info(dispr, handle, op_info);
}

enum rtw_phl_status
phl_disp_eng_query_bk_module_info(struct phl_info_t *phl,
                                  u8 band_idx,
                                  enum phl_module_id id,
                                  struct phl_module_op_info *op_info)
{
	enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
	void *dispr = NULL;
	void *handle = NULL;

	status = _disp_eng_get_dispr_by_idx(phl, band_idx, &dispr);
	if (RTW_PHL_STATUS_SUCCESS != status)
		return status;

	status = dispr_get_bk_module_handle(dispr, id, &handle);
	if (RTW_PHL_STATUS_SUCCESS != status)
		return status;

	return dispr_query_bk_module_info(dispr, handle, op_info);
}

enum rtw_phl_status
phl_disp_eng_set_src_info(struct phl_info_t *phl,
                          struct phl_msg *msg,
                          struct phl_module_op_info *op_info)
{
	enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
	void *dispr = NULL;

	status = _disp_eng_get_dispr_by_idx(phl, msg->band_idx, &dispr);
	if (RTW_PHL_STATUS_SUCCESS != status)
		return status;

	return dispr_set_src_info(dispr, msg, op_info);
}

enum rtw_phl_status
phl_disp_eng_query_src_info(struct phl_info_t *phl,
                            struct phl_msg *msg,
                            struct phl_module_op_info *op_info)
{
	enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
	void *dispr = NULL;

	status = _disp_eng_get_dispr_by_idx(phl, msg->band_idx, &dispr);
	if (RTW_PHL_STATUS_SUCCESS != status)
		return status;

	return dispr_query_src_info(dispr, msg, op_info);
}

enum rtw_phl_status
phl_disp_eng_send_msg(struct phl_info_t *phl,
                      struct phl_msg *msg,
                      struct phl_msg_attribute *attr,
                      u32 *msg_hdl)
{
	enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
	void *dispr = NULL;

	status = _disp_eng_get_dispr_by_idx(phl, msg->band_idx, &dispr);
	if (RTW_PHL_STATUS_SUCCESS != status)
		return status;

	return dispr_send_msg(dispr, msg, attr, msg_hdl);
}

enum rtw_phl_status
phl_disp_eng_cancel_msg(struct phl_info_t *phl, u8 band_idx, u32 *msg_hdl)
{
	enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
	void *dispr = NULL;

	status = _disp_eng_get_dispr_by_idx(phl, band_idx, &dispr);
	if (RTW_PHL_STATUS_SUCCESS != status)
		return status;

	return dispr_cancel_msg(dispr, msg_hdl);
}

enum rtw_phl_status
phl_disp_eng_clr_pending_msg(struct phl_info_t *phl, u8 band_idx)
{
	enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
	void *dispr = NULL;

	status = _disp_eng_get_dispr_by_idx(phl, band_idx, &dispr);
	if (RTW_PHL_STATUS_SUCCESS != status)
		return status;

	return dispr_clr_pending_msg(dispr);
}

enum rtw_phl_status
phl_disp_eng_add_token_req(struct phl_info_t *phl,
                           u8 band_idx,
                           struct phl_cmd_token_req *req,
                           u32 *req_hdl)
{
	enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
	void *dispr = NULL;

	status = _disp_eng_get_dispr_by_idx(phl, band_idx, &dispr);
	if (RTW_PHL_STATUS_SUCCESS != status)
		return status;

	return dispr_add_token_req(dispr, req, req_hdl);
}

enum rtw_phl_status
phl_disp_eng_cancel_token_req(struct phl_info_t *phl, u8 band_idx, u32 *req_hdl)
{
	enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
	void *dispr = NULL;

	status = _disp_eng_get_dispr_by_idx(phl, band_idx, &dispr);
	if (RTW_PHL_STATUS_SUCCESS != status)
		return status;

	return dispr_cancel_token_req(dispr, req_hdl);
}

enum rtw_phl_status
phl_disp_eng_free_token(struct phl_info_t *phl, u8 band_idx, u32 *req_hdl)
{
	enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
	void *dispr = NULL;

	status = _disp_eng_get_dispr_by_idx(phl, band_idx, &dispr);
	if (RTW_PHL_STATUS_SUCCESS != status)
		return status;

	return dispr_free_token(dispr, req_hdl);
}

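/* Notify background modules of the device I/O status; band_idx is currently
 * forced to HW_BAND_MAX, so the notification is always broadcast to every
 * band.
 */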
enum rtw_phl_status
phl_disp_eng_notify_dev_io_status(struct phl_info_t *phl,
                                  u8 band_idx,
                                  enum phl_module_id mdl_id,
                                  bool allow_io)
{
	struct phl_cmd_dispatch_engine *disp_eng = &(phl->disp_eng);
	u8 i = 0;

	band_idx = HW_BAND_MAX; /* force the notification to cover all bands */
	if (band_idx != HW_BAND_MAX)
		return dispr_notify_dev_io_status(disp_eng->dispatcher[band_idx], mdl_id, allow_io);

	for (i = 0; i < disp_eng->phy_num; i++)
		dispr_notify_dev_io_status(disp_eng->dispatcher[i], mdl_id, allow_io);

	return RTW_PHL_STATUS_SUCCESS;
}

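/* Propagate a shall-stop notification to every dispatcher that has been
 * started.
 */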
void phl_disp_eng_notify_shall_stop(struct phl_info_t *phl)
{
	struct phl_cmd_dispatch_engine *disp_eng = &(phl->disp_eng);
	u8 i = 0;

	for (i = 0; i < disp_eng->phy_num; i++) {
		if (is_dispr_started(disp_eng->dispatcher[i]))
			dispr_notify_shall_stop(disp_eng->dispatcher[i]);
	}
}

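/* Return true when the foreground queue is empty on the given band, or on
 * every band when band_idx is HW_BAND_MAX.
 */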
u8 phl_disp_eng_is_fg_empty(struct phl_info_t *phl, u8 band_idx)
{
	struct phl_cmd_dispatch_engine *disp_eng = &(phl->disp_eng);
	u8 i = 0;

	if (band_idx != HW_BAND_MAX)
		return dispr_is_fg_empty(disp_eng->dispatcher[band_idx]);

	for (i = 0; i < disp_eng->phy_num; i++)
		if (false == dispr_is_fg_empty(disp_eng->dispatcher[i]))
			return false;

	return true;
}

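/* Solo thread mode: helpers serializing dispatcher control paths with
 * dispr_ctrl_sema.
 */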
#ifdef CONFIG_CMD_DISP_SOLO_MODE
void dispr_ctrl_sema_down(struct phl_info_t *phl_info)
{
	_os_sema_down(phl_to_drvpriv(phl_info),
			&(phl_info->disp_eng.dispr_ctrl_sema));
}

void dispr_ctrl_sema_up(struct phl_info_t *phl_info)
{
	_os_sema_up(phl_to_drvpriv(phl_info),
			&(phl_info->disp_eng.dispr_ctrl_sema));
}
#endif

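/* Apply a self-defined message dispatch sequence; only supported when
 * CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ is enabled.
 */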
enum rtw_phl_status
phl_disp_eng_set_msg_disp_seq(struct phl_info_t *phl,
                              struct phl_msg_attribute *attr,
                              struct msg_self_def_seq *seq)
{
#ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
	enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
	struct phl_cmd_dispatch_engine *disp_eng = &(phl->disp_eng);
	void *dispr = NULL;

	status = _disp_eng_get_dispr_by_idx(phl, HW_BAND_0, &dispr);
	if (RTW_PHL_STATUS_SUCCESS != status)
		return status;

	return dispr_set_dispatch_seq(dispr, attr, seq);
#else
	return RTW_PHL_STATUS_FAILURE;
#endif
}

u8 phl_disp_query_mdl_id(struct phl_info_t *phl, void *bk_mdl)
{
	return disp_query_mdl_id(phl, bk_mdl);
}

#else
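/* CONFIG_CMD_DISP disabled: provide stub implementations so callers build
 * without the command dispatch engine.
 */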
enum rtw_phl_status rtw_phl_set_bk_module_info(void *phl, u8 band_idx,
		enum phl_module_id id, struct phl_module_op_info *op_info)
{
	return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status rtw_phl_query_bk_module_info(void *phl, u8 band_idx,
		enum phl_module_id id, struct phl_module_op_info *op_info)
{
	return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status phl_disp_eng_init(struct phl_info_t *phl, u8 phy_num)
{
	return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status phl_disp_eng_deinit(struct phl_info_t *phl)
{
	return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status phl_disp_eng_start(struct phl_info_t *phl)
{
	return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status phl_disp_eng_stop(struct phl_info_t *phl)
{
	return RTW_PHL_STATUS_SUCCESS;
}

enum rtw_phl_status phl_disp_eng_register_module(struct phl_info_t *phl,
						 u8 band_idx,
						 enum phl_module_id id,
						 struct phl_bk_module_ops *ops)
{
	return RTW_PHL_STATUS_FAILURE;
}

enum rtw_phl_status phl_disp_eng_deregister_module(struct phl_info_t *phl,
						   u8 band_idx,
						   enum phl_module_id id)
{
	return RTW_PHL_STATUS_FAILURE;
}
enum rtw_phl_status phl_dispr_get_idx(void *dispr, u8 *idx)
{
	return RTW_PHL_STATUS_FAILURE;
}

u8 phl_disp_eng_is_dispr_busy(struct phl_info_t *phl, u8 band_idx)
{
	return true;
}
enum rtw_phl_status phl_disp_eng_set_cur_cmd_info(struct phl_info_t *phl, u8 band_idx,
					       struct phl_module_op_info *op_info)
{
	return RTW_PHL_STATUS_FAILURE;
}
enum rtw_phl_status phl_disp_eng_query_cur_cmd_info(struct phl_info_t *phl, u8 band_idx,
						 struct phl_module_op_info *op_info)
{
	return RTW_PHL_STATUS_FAILURE;
}
enum rtw_phl_status phl_disp_eng_set_bk_module_info(struct phl_info_t *phl, u8 band_idx,
						enum phl_module_id id, struct phl_module_op_info *op_info)
{
	return RTW_PHL_STATUS_FAILURE;
}
enum rtw_phl_status phl_disp_eng_query_bk_module_info(struct phl_info_t *phl, u8 band_idx,
							enum phl_module_id id, struct phl_module_op_info *op_info)
{
	return RTW_PHL_STATUS_FAILURE;
}
enum rtw_phl_status phl_disp_eng_set_src_info(struct phl_info_t *phl, struct phl_msg *msg,
						struct phl_module_op_info *op_info)
{
	return RTW_PHL_STATUS_FAILURE;
}
enum rtw_phl_status phl_disp_eng_query_src_info(struct phl_info_t *phl, struct phl_msg *msg,
						struct phl_module_op_info *op_info)
{
	return RTW_PHL_STATUS_FAILURE;
}
enum rtw_phl_status phl_disp_eng_send_msg(struct phl_info_t *phl, struct phl_msg *msg,
						struct phl_msg_attribute *attr, u32 *msg_hdl)
{
	return RTW_PHL_STATUS_FAILURE;
}
enum rtw_phl_status phl_disp_eng_cancel_msg(struct phl_info_t *phl, u8 band_idx, u32 *msg_hdl)
{
	return RTW_PHL_STATUS_FAILURE;
}
enum rtw_phl_status phl_disp_eng_clr_pending_msg(struct phl_info_t *phl, u8 band_idx)
{
	return RTW_PHL_STATUS_FAILURE;
}

enum rtw_phl_status phl_disp_eng_add_token_req(struct phl_info_t *phl, u8 band_idx,
					    struct phl_cmd_token_req *req, u32 *req_hdl)
{
	return RTW_PHL_STATUS_FAILURE;
}
enum rtw_phl_status phl_disp_eng_cancel_token_req(struct phl_info_t *phl, u8 band_idx, u32 *req_hdl)
{
	return RTW_PHL_STATUS_FAILURE;
}
enum rtw_phl_status phl_disp_eng_free_token(struct phl_info_t *phl, u8 band_idx, u32 *req_hdl)
{
	return RTW_PHL_STATUS_FAILURE;
}
enum rtw_phl_status phl_disp_eng_notify_dev_io_status(struct phl_info_t *phl, u8 band_idx,
							enum phl_module_id mdl_id, bool allow_io)
{
	return RTW_PHL_STATUS_FAILURE;
}

void
phl_disp_eng_notify_shall_stop(struct phl_info_t *phl)
{
}

enum rtw_phl_status rtw_phl_set_msg_disp_seq(void *phl,
						struct phl_msg_attribute *attr,
						struct msg_self_def_seq *seq)
{
	return RTW_PHL_STATUS_FAILURE;
}

u8 phl_disp_query_mdl_id(struct phl_info_t *phl, void *bk_mdl)
{
	return PHL_MDL_ID_MAX;
}

#endif