xref: /OK3568_Linux_fs/external/rkwifibt/drivers/rtl8852bs/phl/phl_chan_info.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /******************************************************************************
2  *
3  * Copyright(c) 2019 Realtek Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of version 2 of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12  * more details.
13  *
14  *****************************************************************************/
15 #define _PHL_CHAN_INFO_C_
16 #include "phl_headers.h"
17 
18 #ifdef CONFIG_PHL_CHANNEL_INFO
19 
/* Parameter block carried through the command dispatcher for
 * MSG_EVT_CFG_CHINFO; heap-allocated by _phl_cmd_cfg_chinfo() and
 * released by _phl_cfg_chinfo_done().
 */
struct chinfo_param {
	struct rtw_phl_stainfo_t *sta;	/* target station for CSI capture */
	u8 enable;			/* non-zero: enable, 0: disable */
};
24 
25 enum rtw_phl_status
_phl_cfg_chinfo(void * phl,struct rtw_phl_stainfo_t * sta,u8 enable)26 _phl_cfg_chinfo(void *phl, struct rtw_phl_stainfo_t *sta, u8 enable)
27 {
28 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
29 
30 	if (RTW_HAL_STATUS_SUCCESS == rtw_hal_cfg_chinfo(phl_info->hal, sta, enable))
31 		return RTW_PHL_STATUS_SUCCESS;
32 	else
33 		return RTW_PHL_STATUS_FAILURE;
34 }
35 
36 #ifdef CONFIG_CMD_DISP
37 enum rtw_phl_status
phl_cmd_cfg_chinfo_hdl(struct phl_info_t * phl_info,u8 * param)38 phl_cmd_cfg_chinfo_hdl(struct phl_info_t *phl_info, u8 *param)
39 {
40 	struct chinfo_param *chinfo = (struct chinfo_param *)param;
41 
42 	return _phl_cfg_chinfo(phl_info, chinfo->sta,  chinfo->enable);
43 }
44 
/* Completion callback for the MSG_EVT_CFG_CHINFO command: releases the
 * heap-allocated struct chinfo_param handed to phl_cmd_enqueue().
 * @status is the command outcome; the buffer is freed regardless.
 */
static void _phl_cfg_chinfo_done(void *drv_priv, u8 *cmd, u32 cmd_len, enum rtw_phl_status status)
{
	if (cmd) {
		_os_kmem_free(drv_priv, cmd, cmd_len);
		/* note: the original "cmd = NULL" after free was dropped -
		 * assigning to a by-value parameter has no effect at the
		 * caller and only hid the pointer from this scope. */
		PHL_INFO("%s.....\n", __func__);
	}
}
53 
54 enum rtw_phl_status
_phl_cmd_cfg_chinfo(void * phl,struct rtw_phl_stainfo_t * sta,u8 enable,enum phl_cmd_type cmd_type,u32 cmd_timeout)55 _phl_cmd_cfg_chinfo(void *phl, struct rtw_phl_stainfo_t *sta, u8 enable,
56 		    enum phl_cmd_type cmd_type, u32 cmd_timeout)
57 {
58 	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
59 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
60 	struct chinfo_param *param = NULL;
61 	u32 param_len;
62 
63 	if (cmd_type == PHL_CMD_DIRECTLY) {
64 		return _phl_cfg_chinfo(phl, sta, enable);
65 	}
66 
67 	param_len = sizeof(struct chinfo_param);
68 	param = _os_kmem_alloc(phl_to_drvpriv(phl_info), param_len);
69 	if (param == NULL) {
70 		PHL_ERR("%s: alloc param failed!\n", __func__);
71 		goto _exit;
72 	}
73 
74 	param->enable = enable;
75 	param->sta =sta;
76 
77 	sts = phl_cmd_enqueue(phl,
78 			       sta->wrole->hw_band,
79 			       MSG_EVT_CFG_CHINFO,
80 			       (u8 *)param,
81 			       param_len,
82 			       _phl_cfg_chinfo_done,
83 			       cmd_type, cmd_timeout);
84 
85 	if (is_cmd_failure(sts)) {
86 		/* Send cmd success, but wait cmd fail*/
87 		sts = RTW_PHL_STATUS_FAILURE;
88 	} else if (sts != RTW_PHL_STATUS_SUCCESS) {
89 		/* Send cmd fail */
90 		_os_kmem_free(phl_to_drvpriv(phl_info), param, param_len);
91 		sts = RTW_PHL_STATUS_FAILURE;
92 	}
93 
94 _exit:
95 	return sts;
96 }
97 #endif
98 
/**
 * rtw_phl_cmd_cfg_chinfo - public entry for channel-info capture config
 * @phl: phl instance handle
 * @sta: target station
 * @enable: non-zero to enable, 0 to disable
 * @cmd_type/@cmd_timeout: dispatcher routing (only used with CONFIG_CMD_DISP)
 *
 * Fix: the local phl_info cast used to be declared unconditionally, leaving
 * an unused variable (compiler warning) when CONFIG_CMD_DISP is defined;
 * the cast now lives only in the branch that needs it.
 */
enum rtw_phl_status rtw_phl_cmd_cfg_chinfo(void *phl,
					   struct rtw_phl_stainfo_t *sta,
					   u8 enable,
					   enum phl_cmd_type cmd_type,
					   u32 cmd_timeout)
{
#ifdef CONFIG_CMD_DISP
	return _phl_cmd_cfg_chinfo(phl, sta, enable, cmd_type, cmd_timeout);
#else
	/* cmd_type/cmd_timeout are meaningless without the dispatcher */
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;

	return _phl_cfg_chinfo(phl_info, sta, enable);
#endif
}
113 
/**
 * rtw_phl_query_chan_info - copy out the newest captured channel info packet
 * @phl: phl instance handle
 * @buf_len: capacity of @chan_info_buffer in bytes
 * @chan_info_buffer: out - receives the raw channel info data
 * @length: out - number of bytes written to @chan_info_buffer
 * @csi_header: out - receives the packet's CSI header
 *
 * Dequeues the latest packet from the busy queue, copies its payload and
 * header to the caller, then returns the packet to the idle pool (also on
 * the insufficient-buffer error path).
 *
 * Fix: @length and @csi_header were dereferenced without a NULL check;
 * all three output pointers are now validated.
 *
 * Return: RTW_PHL_STATUS_SUCCESS on copy, RTW_PHL_STATUS_FAILURE otherwise.
 */
enum rtw_phl_status rtw_phl_query_chan_info(void *phl, u32 buf_len,
	u8* chan_info_buffer, u32 *length, struct csi_header_t *csi_header)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	void *drv_priv = phl_to_drvpriv(phl_info);
	struct rtw_phl_com_t *phl_com = phl_info->phl_com;
	struct chan_info_t *chan_info_pkt_latest = NULL;
	enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;

	/* every output parameter is written below - reject NULLs up front */
	if (chan_info_buffer == NULL || length == NULL || csi_header == NULL) {
		PHL_ERR("buffer is not ready.\n");
		return status;
	}

	/* Get the latest channel info from busy queue. */
	chan_info_pkt_latest = rtw_phl_query_busy_chaninfo_latest(drv_priv, phl_com);
	if (chan_info_pkt_latest != NULL) {
		if (buf_len < chan_info_pkt_latest->length) {
			PHL_ERR("%s: Buffer length not sufficient! \n", __func__);
			/* give the packet back so it is not leaked */
			rtw_phl_enqueue_idle_chaninfo(drv_priv, phl_com, chan_info_pkt_latest);
			return status;
		}
		/* copy raw data resources. */
		_os_mem_cpy(drv_priv, chan_info_buffer,
			chan_info_pkt_latest->chan_info_buffer, chan_info_pkt_latest->length);
		_os_mem_cpy(drv_priv, csi_header,
			&chan_info_pkt_latest->csi_header, sizeof(struct csi_header_t));
		*length = chan_info_pkt_latest->length;

		rtw_phl_enqueue_idle_chaninfo(drv_priv, phl_com, chan_info_pkt_latest);
		status = RTW_PHL_STATUS_SUCCESS;
	} else {
		PHL_INFO("%s: There is no channel info packet.\n", __func__);
	}
	return status;
}
150 
151 /*channel info packet pool init/deinit*/
_phl_chaninfo_deinit(struct phl_info_t * phl_info)152 static void _phl_chaninfo_deinit (struct phl_info_t *phl_info)
153 {
154 	struct rx_chan_info_pool *chan_info_pool = NULL;
155 	u8* chan_info_buffer = NULL;
156 	u32 buf_len, i = 0;
157 	FUNCIN();
158 
159 	chan_info_pool = (struct rx_chan_info_pool *)phl_info->phl_com->chan_info_pool;
160 	if (NULL != chan_info_pool) {
161 		_os_spinlock_free(phl_to_drvpriv(phl_info),
162 					&chan_info_pool->idle_lock);
163 		_os_spinlock_free(phl_to_drvpriv(phl_info),
164 					&chan_info_pool->busy_lock);
165 		for (i = 0; i < CHAN_INFO_PKT_TOTAL; i++) {
166 			chan_info_buffer = chan_info_pool->channl_info_pkt[i].chan_info_buffer;
167 			if (chan_info_buffer != NULL) {
168 				_os_mem_free(phl_to_drvpriv(phl_info), chan_info_buffer,
169 					CHAN_INFO_MAX_SIZE);
170 				chan_info_pool->channl_info_pkt[i].chan_info_buffer = NULL;
171 			}
172 		}
173 		buf_len = sizeof(*chan_info_pool);
174 		_os_mem_free(phl_to_drvpriv(phl_info), chan_info_pool, buf_len);
175 	}
176 
177 	FUNCOUT();
178 }
179 
_phl_chaninfo_init(struct phl_info_t * phl_info)180 static enum rtw_phl_status _phl_chaninfo_init(struct phl_info_t *phl_info)
181 {
182 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
183 	struct rx_chan_info_pool *chan_info_pool = NULL;
184 	struct chan_info_t *chan_info_pkt = NULL;
185 	u32 buf_len = 0, i = 0;
186 	FUNCIN_WSTS(pstatus);
187 
188 	buf_len = sizeof(*chan_info_pool);
189 	chan_info_pool = _os_mem_alloc(phl_to_drvpriv(phl_info), buf_len);
190 
191 	if (NULL != chan_info_pool) {
192 		_os_mem_set(phl_to_drvpriv(phl_info), chan_info_pool, 0, buf_len);
193 		INIT_LIST_HEAD(&chan_info_pool->idle);
194 		INIT_LIST_HEAD(&chan_info_pool->busy);
195 		_os_spinlock_init(phl_to_drvpriv(phl_info),
196 					&chan_info_pool->idle_lock);
197 		_os_spinlock_init(phl_to_drvpriv(phl_info),
198 					&chan_info_pool->busy_lock);
199 		chan_info_pool->idle_cnt = 0;
200 
201 		for (i = 0; i < CHAN_INFO_PKT_TOTAL; i++) {
202 			chan_info_pkt = &chan_info_pool->channl_info_pkt[i];
203 			chan_info_pkt->chan_info_buffer = _os_mem_alloc(phl_to_drvpriv(phl_info),
204 				CHAN_INFO_MAX_SIZE);
205 			if (NULL != chan_info_pkt->chan_info_buffer) {
206 				chan_info_pkt->length = 0;
207 				INIT_LIST_HEAD(&chan_info_pkt->list);
208 				list_add_tail(&chan_info_pkt->list, &chan_info_pool->idle);
209 				chan_info_pool->idle_cnt++;
210 			} else {
211 				pstatus = RTW_PHL_STATUS_RESOURCE;
212 				break;
213 			}
214 		}
215 		phl_info->phl_com->chan_info_pool = chan_info_pool;
216 	} else {
217 		pstatus = RTW_PHL_STATUS_RESOURCE;
218 	}
219 
220 	if (RTW_PHL_STATUS_SUCCESS != pstatus)
221 		_phl_chaninfo_deinit(phl_info);
222 	FUNCOUT_WSTS(pstatus);
223 
224 	return pstatus;
225 }
226 
phl_chaninfo_init(struct phl_info_t * phl_info)227 enum rtw_phl_status phl_chaninfo_init(struct phl_info_t *phl_info)
228 {
229 	enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
230 
231 	phl_status = _phl_chaninfo_init(phl_info);
232 	if (phl_status != RTW_PHL_STATUS_SUCCESS)
233 		PHL_ERR("channel info pool allocate fail\n");
234 
235 	return phl_status;
236 }
237 
/* Public deinit hook: release the channel info packet pool. */
void phl_chaninfo_deinit(struct phl_info_t *phl_info)
{
	_phl_chaninfo_deinit(phl_info);
}
242 
rtw_phl_get_chaninfo_idle_number(void * drv_priv,struct rtw_phl_com_t * phl_com)243 u32 rtw_phl_get_chaninfo_idle_number(void* drv_priv, struct rtw_phl_com_t *phl_com)
244 {
245 	u32 number;
246 	struct rx_chan_info_pool *chan_info_pool = NULL;
247 
248 	chan_info_pool = phl_com->chan_info_pool;
249 	number = chan_info_pool->idle_cnt;
250 	return number;
251 }
252 
rtw_phl_get_chaninfo_busy_number(void * drv_priv,struct rtw_phl_com_t * phl_com)253 u32 rtw_phl_get_chaninfo_busy_number(void* drv_priv, struct rtw_phl_com_t *phl_com)
254 {
255 	u32 number;
256 	struct rx_chan_info_pool *chan_info_pool = NULL;
257 
258 	chan_info_pool = phl_com->chan_info_pool;
259 	number = chan_info_pool->busy_cnt;
260 	return number;
261 }
262 
rtw_phl_query_idle_chaninfo(void * drv_priv,struct rtw_phl_com_t * phl_com)263 struct chan_info_t *rtw_phl_query_idle_chaninfo(void* drv_priv, struct rtw_phl_com_t *phl_com)
264 {
265 	struct rx_chan_info_pool *chan_info_pool = NULL;
266 	struct chan_info_t *chan_info_pkt = NULL;
267 
268 	chan_info_pool = phl_com->chan_info_pool;
269 
270 	_os_spinlock(drv_priv, &chan_info_pool->idle_lock, _bh, NULL);
271 	if (false == list_empty(&chan_info_pool->idle)) {
272 		chan_info_pkt = list_first_entry(&chan_info_pool->idle,
273 			struct chan_info_t, list);
274 		list_del(&chan_info_pkt->list);
275 		chan_info_pool->idle_cnt--;
276 	}
277 	_os_spinunlock(drv_priv, &chan_info_pool->idle_lock, _bh, NULL);
278 
279 	return chan_info_pkt;
280 }
281 
rtw_phl_query_busy_chaninfo(void * drv_priv,struct rtw_phl_com_t * phl_com)282 struct chan_info_t *rtw_phl_query_busy_chaninfo(void* drv_priv, struct rtw_phl_com_t *phl_com)
283 {
284 	struct rx_chan_info_pool *chan_info_pool = NULL;
285 	struct chan_info_t *chan_info_pkt = NULL;
286 
287 	chan_info_pool = phl_com->chan_info_pool;
288 
289 	_os_spinlock(drv_priv, &chan_info_pool->busy_lock, _bh, NULL);
290 	if (false == list_empty(&chan_info_pool->busy)) {
291 		chan_info_pkt = list_first_entry(&chan_info_pool->busy,
292 			struct chan_info_t, list);
293 		list_del(&chan_info_pkt->list);
294 		chan_info_pool->busy_cnt--;
295 	}
296 	_os_spinunlock(drv_priv, &chan_info_pool->busy_lock, _bh, NULL);
297 
298 	return chan_info_pkt;
299 }
300 
rtw_phl_query_busy_chaninfo_latest(void * drv_priv,struct rtw_phl_com_t * phl_com)301 struct chan_info_t *rtw_phl_query_busy_chaninfo_latest(void* drv_priv, struct rtw_phl_com_t *phl_com)
302 {
303 	struct rx_chan_info_pool *chan_info_pool = NULL;
304 	struct chan_info_t *chan_info_pkt = NULL;
305 
306 	chan_info_pool = phl_com->chan_info_pool;
307 
308 	_os_spinlock(drv_priv, &chan_info_pool->busy_lock, _bh, NULL);
309 	if (false == list_empty(&chan_info_pool->busy)) {
310 		chan_info_pkt = list_last_entry(&chan_info_pool->busy,
311 			struct chan_info_t, list);
312 		list_del(&chan_info_pkt->list);
313 		chan_info_pool->busy_cnt--;
314 	}
315 	_os_spinunlock(drv_priv, &chan_info_pool->busy_lock, _bh, NULL);
316 
317 	return chan_info_pkt;
318 }
319 
320 
rtw_phl_enqueue_idle_chaninfo(void * drv_priv,struct rtw_phl_com_t * phl_com,struct chan_info_t * chan_info_pkt)321 void rtw_phl_enqueue_idle_chaninfo(void* drv_priv, struct rtw_phl_com_t *phl_com,
322 				struct chan_info_t *chan_info_pkt)
323 {
324 	struct rx_chan_info_pool *chan_info_pool = NULL;
325 
326 	chan_info_pool = phl_com->chan_info_pool;
327 
328 	_os_spinlock(drv_priv, &chan_info_pool->idle_lock, _bh, NULL);
329 	_os_mem_set(drv_priv, &chan_info_pkt->csi_header, 0,
330 		sizeof( chan_info_pkt->csi_header));
331 	_os_mem_set(drv_priv, chan_info_pkt->chan_info_buffer, 0,
332 		CHAN_INFO_MAX_SIZE);
333 	chan_info_pkt->length = 0;
334 	INIT_LIST_HEAD(&chan_info_pkt->list);
335 	list_add_tail(&chan_info_pkt->list, &chan_info_pool->idle);
336 	chan_info_pool->idle_cnt++;
337 	_os_spinunlock(drv_priv, &chan_info_pool->idle_lock, _bh, NULL);
338 }
339 
rtw_phl_recycle_busy_chaninfo(void * drv_priv,struct rtw_phl_com_t * phl_com,struct chan_info_t * chan_info_pkt)340 struct chan_info_t * rtw_phl_recycle_busy_chaninfo(void* drv_priv,
341 	struct rtw_phl_com_t *phl_com, struct chan_info_t *chan_info_pkt)
342 {
343 	struct rx_chan_info_pool *chan_info_pool = NULL;
344 	struct chan_info_t *chan_info_pkt_recycle = NULL;
345 
346 	chan_info_pool = phl_com->chan_info_pool;
347 
348 	_os_spinlock(drv_priv, &chan_info_pool->busy_lock, _bh, NULL);
349 	/* enqueue the latest first. */
350 	INIT_LIST_HEAD(&chan_info_pkt->list);
351 	list_add_tail(&chan_info_pkt->list, &chan_info_pool->busy);
352 	chan_info_pool->busy_cnt++;
353 
354 	/* if the number is greater than max, dequeue the oldest one.*/
355 	if (chan_info_pool->busy_cnt > MAX_CHAN_INFO_PKT_KEEP) {
356 		chan_info_pkt_recycle = list_first_entry(&chan_info_pool->busy,
357 			struct chan_info_t, list);
358 		list_del(&chan_info_pkt_recycle->list);
359 		chan_info_pool->busy_cnt--;
360 	}
361 	_os_spinunlock(drv_priv, &chan_info_pool->busy_lock, _bh, NULL);
362 
363 	return chan_info_pkt_recycle;
364 }
365 
366 #endif /* CONFIG_PHL_CHANNEL_INFO */
367