/******************************************************************************
 *
 * Copyright(c) 2019 Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 *****************************************************************************/
#define _HAL_TRX_8852BE_C_
#include "../../hal_headers.h"
#include "../rtl8852b_hal.h"
#include "hal_trx_8852be.h"

/**
 * this function queries the total number of hw tx dma channels
 *
 * returns the number of hw tx dma channels
 */
static u8 hal_query_txch_num_8852be(void)
{
	u8 ch_num = 0;

	ch_num = TX_DMA_CHANNEL_ENTRY_8852BE;

	return ch_num;
}

/**
 * this function queries the total number of hw rx dma channels
 *
 * returns the number of hw rx dma channels
 */
static u8 hal_query_rxch_num_8852be(void)
{
	u8 ch_num = 0;

	ch_num = RX_DMA_CHANNEL_ENTRY_8852BE;

	return ch_num;
}
static u8 hal_qsel_to_tid_8852be(struct hal_info_t *hal, u8 qsel_id, u8 tid_indic)
{
	u8 tid = 0;

	switch (qsel_id) {
	case RTW_TXDESC_QSEL_BE_0:
	case RTW_TXDESC_QSEL_BE_1:
	case RTW_TXDESC_QSEL_BE_2:
	case RTW_TXDESC_QSEL_BE_3:
		tid = (1 == tid_indic) ? RTW_PHL_RING_CAT_TID3 : RTW_PHL_RING_CAT_TID0;
		break;
	case RTW_TXDESC_QSEL_BK_0:
	case RTW_TXDESC_QSEL_BK_1:
	case RTW_TXDESC_QSEL_BK_2:
	case RTW_TXDESC_QSEL_BK_3:
		tid = (1 == tid_indic) ? RTW_PHL_RING_CAT_TID2 : RTW_PHL_RING_CAT_TID1;
		break;
	case RTW_TXDESC_QSEL_VI_0:
	case RTW_TXDESC_QSEL_VI_1:
	case RTW_TXDESC_QSEL_VI_2:
	case RTW_TXDESC_QSEL_VI_3:
		tid = (1 == tid_indic) ? RTW_PHL_RING_CAT_TID5 : RTW_PHL_RING_CAT_TID4;
		break;
	case RTW_TXDESC_QSEL_VO_0:
	case RTW_TXDESC_QSEL_VO_1:
	case RTW_TXDESC_QSEL_VO_2:
	case RTW_TXDESC_QSEL_VO_3:
		tid = (1 == tid_indic) ? RTW_PHL_RING_CAT_TID7 : RTW_PHL_RING_CAT_TID6;
		break;
	case RTW_TXDESC_QSEL_MGT_0:
	case RTW_TXDESC_QSEL_MGT_1:
		tid = RTW_PHL_RING_CAT_MGNT;
		break;
	case RTW_TXDESC_QSEL_HIGH_0:
	case RTW_TXDESC_QSEL_HIGH_1:
		tid = RTW_PHL_RING_CAT_HIQ;
		break;
	default:
		PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "[WARNING]unknown qsel_id (%d)\n",
			  qsel_id);
		tid = 0;
		break;
	}

	return tid;
}
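
/*
 * Summary of the mapping implemented above (derived from the switch cases):
 *   BE qsel   -> TID0 (tid_indic == 0) or TID3 (tid_indic == 1)
 *   BK qsel   -> TID1 (tid_indic == 0) or TID2 (tid_indic == 1)
 *   VI qsel   -> TID4 (tid_indic == 0) or TID5 (tid_indic == 1)
 *   VO qsel   -> TID6 (tid_indic == 0) or TID7 (tid_indic == 1)
 *   MGT qsel  -> RTW_PHL_RING_CAT_MGNT, HIGH qsel -> RTW_PHL_RING_CAT_HIQ
 */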

/**
 * Get target DMA channel's BD ram hw register of rtl8852b
 * @dma_ch: input target dma channel with the hw definition of rtl8852BE
 * return the BD ram hw register
 */
static u32 _hal_get_bd_ram_reg_8852be(u8 dma_ch)
{
	u32 reg = 0;

	switch (dma_ch) {
	case ACH0_QUEUE_IDX_8852BE:
		reg = R_AX_ACH0_BDRAM_CTRL;
		break;
	case ACH1_QUEUE_IDX_8852BE:
		reg = R_AX_ACH1_BDRAM_CTRL;
		break;
	case ACH2_QUEUE_IDX_8852BE:
		reg = R_AX_ACH2_BDRAM_CTRL;
		break;
	case ACH3_QUEUE_IDX_8852BE:
		reg = R_AX_ACH3_BDRAM_CTRL;
		break;
	case ACH4_QUEUE_IDX_8852BE:
		reg = R_AX_ACH4_BDRAM_CTRL;
		break;
	case ACH5_QUEUE_IDX_8852BE:
		reg = R_AX_ACH5_BDRAM_CTRL;
		break;
	case ACH6_QUEUE_IDX_8852BE:
		reg = R_AX_ACH6_BDRAM_CTRL;
		break;
	case ACH7_QUEUE_IDX_8852BE:
		reg = R_AX_ACH7_BDRAM_CTRL;
		break;
	case MGQ_B0_QUEUE_IDX_8852BE:
		reg = R_AX_CH8_BDRAM_CTRL;
		break;
	case HIQ_B0_QUEUE_IDX_8852BE:
		reg = R_AX_CH9_BDRAM_CTRL;
		break;
	case MGQ_B1_QUEUE_IDX_8852BE:
		reg = R_AX_CH10_BDRAM_CTRL;
		break;
	case HIQ_B1_QUEUE_IDX_8852BE:
		reg = R_AX_CH11_BDRAM_CTRL;
		break;
	case FWCMD_QUEUE_IDX_8852BE:
		reg = R_AX_CH12_BDRAM_CTRL;
		break;
	default:
		PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "[WARNING]unknown channel (%d)\n",
			  dma_ch);
		reg = 0xFFFF;
		break;
	}

	return reg;
}


/**
 * Get target DMA channel's BD num hw register of rtl8852b
 * @dma_ch: input target dma channel with the hw definition of rtl8852BE
 * return the BD num hw register
 */
static u32 _hal_get_bd_num_reg_8852be(u8 dma_ch)
{
	u32 reg = 0;

	switch (dma_ch) {
	case ACH0_QUEUE_IDX_8852BE:
		reg = R_AX_ACH0_TXBD_NUM;
		break;
	case ACH1_QUEUE_IDX_8852BE:
		reg = R_AX_ACH1_TXBD_NUM;
		break;
	case ACH2_QUEUE_IDX_8852BE:
		reg = R_AX_ACH2_TXBD_NUM;
		break;
	case ACH3_QUEUE_IDX_8852BE:
		reg = R_AX_ACH3_TXBD_NUM;
		break;
	case ACH4_QUEUE_IDX_8852BE:
		reg = R_AX_ACH4_TXBD_NUM;
		break;
	case ACH5_QUEUE_IDX_8852BE:
		reg = R_AX_ACH5_TXBD_NUM;
		break;
	case ACH6_QUEUE_IDX_8852BE:
		reg = R_AX_ACH6_TXBD_NUM;
		break;
	case ACH7_QUEUE_IDX_8852BE:
		reg = R_AX_ACH7_TXBD_NUM;
		break;
	case MGQ_B0_QUEUE_IDX_8852BE:
		reg = R_AX_CH8_TXBD_NUM;
		break;
	case HIQ_B0_QUEUE_IDX_8852BE:
		reg = R_AX_CH9_TXBD_NUM;
		break;
	case MGQ_B1_QUEUE_IDX_8852BE:
		reg = R_AX_CH10_TXBD_NUM;
		break;
	case HIQ_B1_QUEUE_IDX_8852BE:
		reg = R_AX_CH11_TXBD_NUM;
		break;
	case FWCMD_QUEUE_IDX_8852BE:
		reg = R_AX_CH12_TXBD_NUM;
		break;
	case RX_QUEUE_IDX_8852BE:
		reg = R_AX_RXQ_RXBD_NUM;
		break;
	case RP_QUEUE_IDX_8852BE:
		reg = R_AX_RPQ_RXBD_NUM;
		break;
	default:
		PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "[WARNING]unknown channel (%d)\n",
			  dma_ch);
		reg = 0xFFFF;
		break;
	}

	return reg;
}


/**
 * Get target DMA channel's BD Desc hw registers of rtl8852b
 * @dma_ch: input target dma channel with the hw definition of rtl8852BE
 * the BD Desc hw registers are returned via @addr_l and @addr_h
 */
static void _hal_get_bd_desc_reg_8852be(u8 dma_ch, u32 *addr_l, u32 *addr_h)
{
	u32 reg = 0;

	switch (dma_ch) {
	case ACH0_QUEUE_IDX_8852BE:
		*addr_l = R_AX_ACH0_TXBD_DESA_L;
		*addr_h = R_AX_ACH0_TXBD_DESA_H;
		break;
	case ACH1_QUEUE_IDX_8852BE:
		*addr_l = R_AX_ACH1_TXBD_DESA_L;
		*addr_h = R_AX_ACH1_TXBD_DESA_H;
		break;
	case ACH2_QUEUE_IDX_8852BE:
		*addr_l = R_AX_ACH2_TXBD_DESA_L;
		*addr_h = R_AX_ACH2_TXBD_DESA_H;
		break;
	case ACH3_QUEUE_IDX_8852BE:
		*addr_l = R_AX_ACH3_TXBD_DESA_L;
		*addr_h = R_AX_ACH3_TXBD_DESA_H;
		break;
	case ACH4_QUEUE_IDX_8852BE:
		*addr_l = R_AX_ACH4_TXBD_DESA_L;
		*addr_h = R_AX_ACH4_TXBD_DESA_H;
		break;
	case ACH5_QUEUE_IDX_8852BE:
		*addr_l = R_AX_ACH5_TXBD_DESA_L;
		*addr_h = R_AX_ACH5_TXBD_DESA_H;
		break;
	case ACH6_QUEUE_IDX_8852BE:
		*addr_l = R_AX_ACH6_TXBD_DESA_L;
		*addr_h = R_AX_ACH6_TXBD_DESA_H;
		break;
	case ACH7_QUEUE_IDX_8852BE:
		*addr_l = R_AX_ACH7_TXBD_DESA_L;
		*addr_h = R_AX_ACH7_TXBD_DESA_H;
		break;
	case MGQ_B0_QUEUE_IDX_8852BE:
		*addr_l = R_AX_CH8_TXBD_DESA_L;
		*addr_h = R_AX_CH8_TXBD_DESA_H;
		break;
	case HIQ_B0_QUEUE_IDX_8852BE:
		*addr_l = R_AX_CH9_TXBD_DESA_L;
		*addr_h = R_AX_CH9_TXBD_DESA_H;
		break;
	case MGQ_B1_QUEUE_IDX_8852BE:
		*addr_l = R_AX_CH10_TXBD_DESA_L;
		*addr_h = R_AX_CH10_TXBD_DESA_H;
		break;
	case HIQ_B1_QUEUE_IDX_8852BE:
		*addr_l = R_AX_CH11_TXBD_DESA_L;
		*addr_h = R_AX_CH11_TXBD_DESA_H;
		break;
	case FWCMD_QUEUE_IDX_8852BE:
		*addr_l = R_AX_CH12_TXBD_DESA_L;
		*addr_h = R_AX_CH12_TXBD_DESA_H;
		break;
	case RX_QUEUE_IDX_8852BE:
		*addr_l = R_AX_RXQ_RXBD_DESA_L;
		*addr_h = R_AX_RXQ_RXBD_DESA_H;
		break;
	case RP_QUEUE_IDX_8852BE:
		*addr_l = R_AX_RPQ_RXBD_DESA_L;
		*addr_h = R_AX_RPQ_RXBD_DESA_H;
		break;
	default:
		PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "[WARNING]unknown channel (%d)\n",
			  dma_ch);
		reg = 0xFFFF;
		break;
	}

}



/**
 * Get target DMA channel's BD Index hw register of rtl8852b
 * @dma_ch: input target dma channel with the hw definition of rtl8852BE
 * return the BD Index hw register
 */
static u32 _hal_get_bd_idx_reg_8852be(u8 dma_ch)
{
	u32 reg = 0;

	switch (dma_ch) {
	case ACH0_QUEUE_IDX_8852BE:
		reg = R_AX_ACH0_TXBD_IDX;
		break;
	case ACH1_QUEUE_IDX_8852BE:
		reg = R_AX_ACH1_TXBD_IDX;
		break;
	case ACH2_QUEUE_IDX_8852BE:
		reg = R_AX_ACH2_TXBD_IDX;
		break;
	case ACH3_QUEUE_IDX_8852BE:
		reg = R_AX_ACH3_TXBD_IDX;
		break;
	case ACH4_QUEUE_IDX_8852BE:
		reg = R_AX_ACH4_TXBD_IDX;
		break;
	case ACH5_QUEUE_IDX_8852BE:
		reg = R_AX_ACH5_TXBD_IDX;
		break;
	case ACH6_QUEUE_IDX_8852BE:
		reg = R_AX_ACH6_TXBD_IDX;
		break;
	case ACH7_QUEUE_IDX_8852BE:
		reg = R_AX_ACH7_TXBD_IDX;
		break;
	case MGQ_B0_QUEUE_IDX_8852BE:
		reg = R_AX_CH8_TXBD_IDX;
		break;
	case HIQ_B0_QUEUE_IDX_8852BE:
		reg = R_AX_CH9_TXBD_IDX;
		break;
	case MGQ_B1_QUEUE_IDX_8852BE:
		reg = R_AX_CH10_TXBD_IDX;
		break;
	case HIQ_B1_QUEUE_IDX_8852BE:
		reg = R_AX_CH11_TXBD_IDX;
		break;
	case FWCMD_QUEUE_IDX_8852BE:
		reg = R_AX_CH12_TXBD_IDX;
		break;
	case RX_QUEUE_IDX_8852BE:
		reg = R_AX_RXQ_RXBD_IDX;
		break;
	case RP_QUEUE_IDX_8852BE:
		reg = R_AX_RPQ_RXBD_IDX;
		break;
	default:
		PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "[WARNING]unknown channel (%d)\n",
			  dma_ch);
		reg = 0xFFFF;
		break;
	}

	return reg;
}


/**
 * this function maps the sw xmit ring identified by macid, tid and band
 * to the rtl8852BE hw tx dma channel
 * @macid: input target macid, range is 0 ~ 127
 * @cat: input target packet category, see enum rtw_phl_ring_cat
 * @band: input target band, 0 for band 0 / 1 for band 1
 *
 * returns the mapping hw dma channel defined by XXX_QUEUE_IDX_8852BE
 * if an input parameter is an unknown value, returns ACH0_QUEUE_IDX_8852BE
 */
static u8 hal_mapping_hw_tx_chnl_8852be(u16 macid, enum rtw_phl_ring_cat cat,
					u8 band)
{
	u8 dma_ch = 0;

	/* hana_todo, decided by tid only currently,
	   we should consider more situations later */

	if (0 == band) {
		switch (cat) {
		case RTW_PHL_RING_CAT_TID0:
		case RTW_PHL_RING_CAT_TID3:
			dma_ch = ACH0_QUEUE_IDX_8852BE;
			break;
		case RTW_PHL_RING_CAT_TID1:
		case RTW_PHL_RING_CAT_TID2:
			dma_ch = ACH1_QUEUE_IDX_8852BE;
			break;
		case RTW_PHL_RING_CAT_TID4:
		case RTW_PHL_RING_CAT_TID5:
			dma_ch = ACH2_QUEUE_IDX_8852BE;
			break;
		case RTW_PHL_RING_CAT_TID6:
		case RTW_PHL_RING_CAT_TID7:
			dma_ch = ACH3_QUEUE_IDX_8852BE;
			break;
		case RTW_PHL_RING_CAT_MGNT:
			dma_ch = MGQ_B0_QUEUE_IDX_8852BE;
			break;
		case RTW_PHL_RING_CAT_HIQ:
			dma_ch = HIQ_B0_QUEUE_IDX_8852BE;
			break;
		default:
			dma_ch = ACH0_QUEUE_IDX_8852BE;
			PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "[WARNING]unknown category (%d)\n",
				  cat);
			break;
		}
	} else {
		dma_ch = ACH0_QUEUE_IDX_8852BE;
		PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "[WARNING]unknown band (%d)\n",
			  band);
	}

	return dma_ch;
}
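
/*
 * Resulting band-0 mapping (derived from the switch above):
 *   TID0/TID3 -> ACH0, TID1/TID2 -> ACH1, TID4/TID5 -> ACH2, TID6/TID7 -> ACH3,
 *   MGNT -> MGQ_B0, HIQ -> HIQ_B0.
 * Any other band value falls back to ACH0 with a warning, since 8852B only
 * services band 0 here.
 */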


/**
 * this function returns the available txbd number of the target dma channel
 * @ch_idx: input, target dma channel index
 * @host_idx: the ptr of current host index of this channel
 * @hw_idx: the ptr of current hw index of this channel
 *
 * NOTE, input host_idx and hw_idx ptr shall NOT be NULL
 */
static u16 hal_get_avail_txbd_8852be(struct rtw_hal_com_t *hal_com, u8 ch_idx,
				     u16 *host_idx, u16 *hw_idx)
{
	struct bus_cap_t *bus_cap = &hal_com->bus_cap;
	u16 avail_txbd = 0;
	u32 tmp32 = 0, reg = 0;
	u8 tx_dma_ch = 0;

	tx_dma_ch = ACH0_QUEUE_IDX_8852BE + ch_idx;

	reg = _hal_get_bd_idx_reg_8852be(tx_dma_ch);
	if (0xFFFF == reg) {
		PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_,
			  "[WARNING]get dma channel register fail\n");
		avail_txbd = 0;
	} else {
		tmp32 = hal_read32(hal_com, reg);

		*host_idx = (u16)(tmp32 & 0x0FFF);
		*hw_idx = (u16)((tmp32 >> 16) & 0x0FFF);

		avail_txbd = hal_calc_avail_wptr(*hw_idx, *host_idx,
						 (u16)bus_cap->txbd_num);
		PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
			  "hal_get_avail_txbd_8852be => dma_ch %d, host_idx %d, "
			  "hw_idx %d, avail_txbd %d\n",
			  ch_idx, *host_idx, *hw_idx, avail_txbd);
	}

	return avail_txbd;
}
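
/*
 * The hw packs both indexes of a BD ring into one 32-bit register: the host
 * (write) index in bits [11:0] and the hw (read) index in bits [27:16], as
 * read above. A minimal sketch of the ring arithmetic assumed for
 * hal_calc_avail_wptr() (one slot kept empty to tell "full" from "empty"):
 *   avail = (hw_idx > host_idx) ? (hw_idx - host_idx - 1)
 *                               : (bd_num - (host_idx - hw_idx) - 1);
 * The exact helper implementation lives elsewhere in the HAL.
 */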

/**
 * this function returns the available rxbd number of the target dma channel
 * @ch_idx: input, target dma channel index
 * @host_idx: the ptr of current host index of this channel
 * @hw_idx: the ptr of current hw index of this channel
 *
 * NOTE, input host_idx and hw_idx ptr shall NOT be NULL
 */
static u16 hal_get_avail_rxbd_8852be(struct rtw_hal_com_t *hal_com, u8 ch_idx,
				     u16 *host_idx, u16 *hw_idx)
{
	struct bus_cap_t *bus_cap = &hal_com->bus_cap;
	u16 avail_rxbd = 0;
	u32 tmp32 = 0, reg = 0;
	u8 rx_dma_ch = 0;

	rx_dma_ch = RX_QUEUE_IDX_8852BE + ch_idx;

	reg = _hal_get_bd_idx_reg_8852be(rx_dma_ch);
	if (0xFFFF == reg) {
		PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "[WARNING]get dma channel register fail\n");
		avail_rxbd = 0;
	} else {
		tmp32 = hal_read32(hal_com, reg);

		*host_idx = (u16)(tmp32 & 0x0FFF);
		*hw_idx = (u16)((tmp32 >> 16) & 0x0FFF);

		avail_rxbd = hal_calc_avail_rptr(*host_idx, *hw_idx,
						 (u16)bus_cap->rxbd_num);
	}

	return avail_rxbd;
}


void _hal_fill_wp_seq_field_8852be(u8 *seq_info, u16 wp_seq)
{
	/* update sw wp seq */
	SET_PCIE_SEQ_INFO_0(seq_info, wp_seq);
	SET_PCIE_SEQ_INFO_0_VALID(seq_info, 1);
}

void _hal_fill_wp_addr_info_8852be(struct rtw_hal_com_t *hal_com,
				   u8 *addr_info, struct rtw_pkt_buf_list *pkt,
				   u8 num, u8 mpdu_ls, u8 msdu_ls)
{
	SET_ADDR_INFO_LEN(addr_info, pkt->length);
	SET_ADDR_INFO_NUM(addr_info, num);
	SET_ADDR_INFO_MSDU_LS(addr_info, msdu_ls);
	SET_ADDR_INFO_ADDR_LOW(addr_info, pkt->phy_addr_l);
	SET_ADDR_INFO_ADDR_HIGH(addr_info, pkt->phy_addr_h);
}

u8 _hal_get_tid_indic_8852be(u8 tid)
{
	u8 tid_indic = 0;
	switch (tid) {
	case RTW_PHL_RING_CAT_TID0:
	case RTW_PHL_RING_CAT_TID1:
	case RTW_PHL_RING_CAT_TID4:
	case RTW_PHL_RING_CAT_TID6:
	case RTW_PHL_RING_CAT_MGNT:
	case RTW_PHL_RING_CAT_HIQ:
		tid_indic = 0;
		break;
	case RTW_PHL_RING_CAT_TID3:
	case RTW_PHL_RING_CAT_TID2:
	case RTW_PHL_RING_CAT_TID5:
	case RTW_PHL_RING_CAT_TID7:
		tid_indic = 1;
		break;
	default:
		PHL_ERR("unknown tid %d\n", tid);
		break;
	}

	return tid_indic;
}

#ifdef CONFIG_PHL_TXSC
static u8 qsel_tbl[] = {
	TID_0_QSEL/*0*/, TID_1_QSEL/*1*/, TID_2_QSEL/*1*/, TID_3_QSEL/*0*/,
	TID_4_QSEL/*2*/, TID_5_QSEL/*2*/, TID_6_QSEL/*3*/, TID_7_QSEL/*3*/
};

static u8 tid_ind[] = {
	TID_0_IND, TID_1_IND, TID_2_IND, TID_3_IND,
	TID_4_IND, TID_5_IND, TID_6_IND, TID_7_IND
};

static enum rtw_hal_status
_hal_txsc_update_wd(struct hal_info_t *hal,
		    struct rtw_phl_pkt_req *req, u32 *wd_len)
{
	struct rtw_xmit_req *tx_req = req ? req->tx_req : NULL;
	struct rtw_t_meta_data *mdata;
	u32 *wd_words;
	u32 w0, w2, w3;

	if (NULL == tx_req)
		return RTW_HAL_STATUS_FAILURE;

	if (req->wd_len == 0) {
		rtw_hal_mac_ax_fill_txdesc(hal->mac, tx_req, req->wd_page,
					   wd_len);
		req->wd_len = (u8)*wd_len;
	} else {
		mdata = &tx_req->mdata;
		mdata->dma_ch = hal_mapping_hw_tx_chnl_8852be(mdata->macid, mdata->tid, mdata->band);

		wd_words = (u32 *)req->wd_page;
		w0 = le32_to_cpu(wd_words[0])
		     & ~((AX_TXD_HW_SSN_SEL_MSK << AX_TXD_HW_SSN_SEL_SH)
			 | (AX_TXD_CH_DMA_MSK << AX_TXD_CH_DMA_SH));
		w2 = le32_to_cpu(wd_words[2])
		     & ~(AX_TXD_TID_IND
			 | (AX_TXD_QSEL_MSK << AX_TXD_QSEL_SH)
			 | (AX_TXD_TXPKTSIZE_MSK << AX_TXD_TXPKTSIZE_SH));

		/* Update SSN SEL, DMA CH, QSEL, and TID indicator in WD cache */
		w0 |= (((mdata->hw_ssn_sel & AX_TXD_HW_SSN_SEL_MSK) << AX_TXD_HW_SSN_SEL_SH)
		       | ((mdata->dma_ch & AX_TXD_CH_DMA_MSK) << AX_TXD_CH_DMA_SH));
		wd_words[0] = cpu_to_le32(w0);

		if (mdata->hw_seq_mode == 0) {
			w3 = cpu_to_le32((mdata->sw_seq & 0xFFF) |
					 (mdata->ampdu_en ? BIT(12) : 0) |
					 ((mdata->bk || mdata->ack_ch_info) ? BIT(13) : 0));
			wd_words[3] = w3;
		}

		if (tid_ind[mdata->tid])
			w2 |= AX_TXD_TID_IND;

		w2 |= (qsel_tbl[mdata->tid] & AX_TXD_QSEL_MSK) << AX_TXD_QSEL_SH;
		w2 |= (mdata->pktlen & AX_TXD_TXPKTSIZE_MSK) << AX_TXD_TXPKTSIZE_SH;
		wd_words[2] = cpu_to_le32(w2);

		*wd_len = req->wd_len;
	}

	return RTW_HAL_STATUS_SUCCESS;
}
#endif

/**
 * the function updates the wd page, including wd info, wd body, seq info, addr info
 * @hal: see struct hal_info_t
 * @req: see struct rtw_phl_pkt_req
 */
static enum rtw_hal_status
hal_update_wd_8852be(struct hal_info_t *hal,
		     struct rtw_phl_pkt_req *req)
{
	enum rtw_hal_status hstatus = RTW_HAL_STATUS_SUCCESS;
	struct rtw_hal_com_t *hal_com = hal->hal_com;
	struct bus_hw_cap_t *bus_hw_cap = &hal_com->bus_hw_cap;
	struct rtw_xmit_req *tx_req = NULL;
	struct rtw_pkt_buf_list *pkt_list = NULL;
	u32 wd_len = 0, seq_ofst = 0, addr_info_ofst = 0;
	u16 wp_seq = 0;
	u8 i = 0, wp_num = 0, mpdu_ls = 0, msdu_ls = 0, tid_indic = 0;
	FUNCIN_WSTS(hstatus);
	do {
		if (NULL == req)
			break;

		tx_req = req->tx_req;
		pkt_list = (struct rtw_pkt_buf_list *)tx_req->pkt_list;

#ifdef CONFIG_PHL_TXSC
		_hal_txsc_update_wd(hal, req, &wd_len);
#else
		/* connect with halmac */
		rtw_hal_mac_ax_fill_txdesc(hal->mac, tx_req, req->wd_page,
					   &wd_len);
#endif
		tid_indic = _hal_get_tid_indic_8852be(tx_req->mdata.tid);

		seq_ofst = wd_len;
		wp_seq = (1 == tid_indic) ?
			 (req->wp_seq | WP_TID_INDIC_RESERVED_BIT) : req->wp_seq;
		_hal_fill_wp_seq_field_8852be(req->wd_page + seq_ofst, wp_seq);

		addr_info_ofst = seq_ofst + bus_hw_cap->seq_info_size;
		for (i = 0; i < tx_req->mdata.addr_info_num; i++) {
			if (0 == i)
				wp_num = tx_req->mdata.addr_info_num;
			else
				wp_num = 0;

			if ((tx_req->mdata.addr_info_num - 1) == i)
				msdu_ls = 1;
			else
				msdu_ls = 0;

			_hal_fill_wp_addr_info_8852be(hal_com,
						      req->wd_page + addr_info_ofst,
						      &pkt_list[i], wp_num, mpdu_ls, msdu_ls);

			addr_info_ofst += bus_hw_cap->addr_info_size;

			//debug_dump_data(pkt_list[i].vir_addr, pkt_list[i].length, "dump wp");
		}

		/* 8852BE length in txbd (wd_len) should be fixed to 128 */
		if (addr_info_ofst != bus_hw_cap->max_wd_page_size)
			req->wd_len = bus_hw_cap->max_wd_page_size;

	} while (false);

#if 0 /* remove this for saving cpu cycle */
	if (RTW_HAL_STATUS_SUCCESS == hstatus) {
		debug_dump_data(req->wd_page, (u16)addr_info_ofst, "dump wd page");
	}
#endif
	FUNCOUT_WSTS(hstatus);
	return hstatus;
}
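
/*
 * Resulting wd_page layout written above (derived from the offsets used):
 *   [ wd body/info (wd_len bytes) ][ seq info ][ addr info * addr_info_num ]
 * req->wd_len is then padded up to bus_hw_cap->max_wd_page_size (128 bytes
 * on 8852BE) whenever the filled length is smaller, so every txbd entry
 * points at a fixed-size wd page.
 */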

/**
 * the function updates the txbd ring
 * @hal: see struct hal_info_t
 * @txbd_ring: the target txbd ring buffer going to be updated, see struct tx_base_desc
 * @wd_page: the wd page going to be filled in the txbd, see struct rtw_wd_page
 */
static enum rtw_hal_status
hal_update_txbd_8852be(struct hal_info_t *hal,
		       struct tx_base_desc *txbd_ring,
		       struct rtw_wd_page *wd_page,
		       u8 ch_idx, u16 wd_num)
{
	enum rtw_hal_status hstatus = RTW_HAL_STATUS_SUCCESS;
	struct rtw_hal_com_t *hal_com = hal->hal_com;
	struct bus_hw_cap_t *bus_hw_cap = &hal_com->bus_hw_cap;
	u8 *ring_head = 0;
	u8 *target_txbd = 0;
	u16 host_idx = 0;
	u16 txbd_num = (u16)hal_com->bus_cap.txbd_num;

	do {
		if (NULL == wd_page)
			break;
		if (NULL == txbd_ring)
			break;

		/* connect with halmac */
		host_idx = txbd_ring[ch_idx].host_idx;

		PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
			  "hal_update_txbd_8852be => ch_idx %d, wd_num %d\n",
			  ch_idx, wd_num);

		while (wd_num > 0) {

			ring_head = txbd_ring[ch_idx].vir_addr;
			target_txbd = ring_head + (host_idx *
						   bus_hw_cap->txbd_len);

			SET_TXBUFFER_DESC_LEN(target_txbd, wd_page->buf_len);
			SET_TXBUFFER_DESC_LS(target_txbd, wd_page->ls);
			SET_TXBUFFER_DESC_ADD_LOW(target_txbd,
						  wd_page->phy_addr_l);
			SET_TXBUFFER_DESC_ADD_HIGH(target_txbd,
						   wd_page->phy_addr_h);
			host_idx = (host_idx + 1) % txbd_num;
			wd_page->host_idx = host_idx;
			wd_num--;

			//multi wd page in one update txbd
#if 0 //S_TODO
			if (wd_num > 0) {
				wd_page = list_first_entry(wd_page->list,
							   struct rtw_wd_page,
							   wd_page->list);
				if (NULL == wd_page)
					break;
			}
#endif
		}

		txbd_ring[ch_idx].host_idx = host_idx;
	} while (false);

	return hstatus;
}
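
/*
 * Each txbd entry written above carries exactly one wd page:
 *   LEN = wd_page->buf_len, LS = wd_page->ls,
 *   ADDR = wd_page->phy_addr_h : wd_page->phy_addr_l
 * host_idx advances modulo bus_cap.txbd_num and is written back to
 * txbd_ring[ch_idx].host_idx once all requested wd pages are queued; the
 * hw is kicked later by hal_trigger_txdma_8852be().
 */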

/**
 * the function triggers tx start
 * @hal: see struct hal_info_t
 * @txbd_ring: the target txbd ring buffer going to be updated, see struct tx_base_desc
 * @ch_idx: the dma channel index of this txbd_ring
 */
static enum rtw_hal_status
hal_trigger_txdma_8852be(struct hal_info_t *hal,
			 struct tx_base_desc *txbd_ring, u8 ch_idx)
{
	enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;
	u8 tx_dma_ch;
	u32 txbd_reg;

	do {
		/* connect with halmac */
		tx_dma_ch = ACH0_QUEUE_IDX_8852BE + ch_idx;
		txbd_reg = _hal_get_bd_idx_reg_8852be(tx_dma_ch);
		if (0xFFFF == txbd_reg)
			break;
		PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
			  "hal_trigger_txdma_8852be => dma_ch %d, host_idx %d.\n",
			  ch_idx, txbd_ring[ch_idx].host_idx);
		hal_write16(hal->hal_com, txbd_reg, txbd_ring[ch_idx].host_idx);
#ifdef CONFIG_DBG_H2C_TX
		if (tx_dma_ch == FWCMD_QUEUE_IDX_8852BE)
			PHL_INFO("%s: 0x%x -> 0x%x\n", __func__,
				 _hal_get_bd_idx_reg_8852be(FWCMD_QUEUE_IDX_8852BE),
				 hal_read32(hal->hal_com, _hal_get_bd_idx_reg_8852be(FWCMD_QUEUE_IDX_8852BE)));
#endif /* CONFIG_DBG_H2C_TX */
		hstatus = RTW_HAL_STATUS_SUCCESS;
	} while (false);

	return hstatus;
}


static enum rtw_hal_status hal_pltfm_tx_8852be(void *hal, struct rtw_h2c_pkt *h2c_pkt)
{
	enum rtw_hal_status hstatus = RTW_HAL_STATUS_SUCCESS;
#if 0

	struct hal_info_t *hal_info = (struct hal_info_t *)hal;
	struct rtw_wd_page wd_page;
	struct tx_base_desc *txbd_ring = NULL;

	_os_mem_set(hal_to_drvpriv(hal_info), &wd_page, 0, sizeof(wd_page));

	txbd_ring = (struct tx_base_desc *)hal_info->hal_com->fw_txbd;
	wd_page.vir_addr = h2c_pkt->vir_addr;
	wd_page.phy_addr_l = h2c_pkt->phy_addr_l;
	wd_page.phy_addr_h = h2c_pkt->phy_addr_h;
	wd_page.buf_len = h2c_pkt->buf_len;
	wd_page.cache = 1;

	_os_spinlock(hal_to_drvpriv(hal_info), &txbd_ring[FWCMD_QUEUE_IDX_8852BE].txbd_lock, _ps, NULL);
	hstatus = hal_update_txbd_8852be(hal_info, txbd_ring, &wd_page,
					 FWCMD_QUEUE_IDX_8852BE, 1);
	_os_spinunlock(hal_to_drvpriv(hal_info), &txbd_ring[FWCMD_QUEUE_IDX_8852BE].txbd_lock, _ps, NULL);

	/* enqueue busy queue */

	hstatus = hal_trigger_txdma_8852be(hal_info, txbd_ring, FWCMD_QUEUE_IDX_8852BE);
#endif
	return hstatus;
}

u8 hal_get_fwcmd_queue_idx_8852be(void)
{
	return FWCMD_QUEUE_IDX_8852BE;
}

static u8 hal_check_rxrdy_8852be(struct rtw_phl_com_t *phl_com, u8 *rxbd_info,
				 u8 ch_idx)
{
	struct hal_spec_t *hal_spec = &phl_com->hal_spec;
	u8 res = false;
	u16 tag = 0, target_tag = 0;
	u16 read_cnt = 0;

	do {
		if (rxbd_info == NULL) {
			PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_,
				  "[WARNING] input rx bd info is NULL!\n");
			res = false;
			break;
		}

		tag = (u16)GET_RX_BD_INFO_TAG(rxbd_info);

		if (hal_spec->rx_tag[ch_idx] == 0x1fff ||
		    hal_spec->rx_tag[ch_idx] == 0)
			target_tag = 1;
		else
			target_tag = hal_spec->rx_tag[ch_idx] + 1;

		while (read_cnt < 10000) {

			read_cnt++;

			tag = (u16)GET_RX_BD_INFO_TAG(rxbd_info);

			if (tag == target_tag) {
				res = true;
				break;
			}
		}

		if (true == res) {
			hal_spec->rx_tag[ch_idx] = tag;
		} else {
			PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "[WARNING] polling Rx Tag fail, tag = %d, target_tag = %d\n",
				  tag, target_tag);
#ifdef RTW_WKARD_98D_RXTAG
			if (tag) {
				hal_spec->rx_tag[ch_idx] = tag;
				res = true;
			}
#endif
		}

	} while (false);

	return res;
}



u16 hal_handle_rx_report_8852be(struct hal_info_t *hal, u8 *rp, u16 len,
				u8 *sw_retry, u8 *dma_ch, u16 *wp_seq,
				u8 *macid, u8 *ac_queue, u8 *txsts)
{
	u8 polluted = false;
	u16 rsize = 0;
	u8 tid = 0, qsel_value = 0, band = 0, tid_indic = 0;

	do {
		if (len < RX_RP_PACKET_SIZE)
			break;

		*macid = (u8)GET_RX_RP_PKT_MAC_ID(rp);
		qsel_value = (u8)GET_RX_RP_PKT_QSEL(rp);
		*ac_queue = qsel_value % RTW_MAX_AC_QUEUE_NUM;
		*txsts = (u8)GET_RX_RP_PKT_TX_STS(rp);
		*wp_seq = (u16)GET_RX_RP_PKT_PCIE_SEQ(rp);
		polluted = (u8)GET_RX_RP_PKT_POLLUTED(rp);


		band = (qsel_value & BIT3) ? 1 : 0;
		tid_indic = (*wp_seq & WP_TID_INDIC_RESERVED_BIT) ? 1 : 0;
		*wp_seq &= (WP_RESERVED_SEQ);
		tid = hal_qsel_to_tid_8852be(hal, qsel_value, tid_indic);
		*dma_ch = hal_mapping_hw_tx_chnl_8852be(*macid, tid, band);

		PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "Get recycle report: qsel = %d, macid = %d, wp_seq = 0x%x, tid_indic = %d,"
			  " tid = %d, band = %d, dma_ch = %d\n",
			  qsel_value, *macid, *wp_seq, tid_indic, tid, band, *dma_ch);

		if (TX_STATUS_TX_DONE != *txsts) {
			*sw_retry = true;
			/* hana_todo handle sw retry */
		} else if (true == polluted) {
			PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "this wp is polluted\n");
			*sw_retry = true;
			/* hana_todo handle sw retry */
		} else {
			*sw_retry = false;
		}

		rsize = RX_RP_PACKET_SIZE;
	} while (false);

	return rsize;
}
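
/*
 * wp_seq encoding in the recycle report mirrors what hal_update_wd_8852be()
 * wrote into the seq info field: the WP_TID_INDIC_RESERVED_BIT carries the
 * tid indicator, and the remaining WP_RESERVED_SEQ bits carry the sw wp
 * sequence number used to recycle the original wd page.
 */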


/**
 * Process Rx PPDU Status with HALMAC API and PHYDM API
 */
static enum rtw_hal_status
hal_handle_ppdusts_8852be(void *hal, u8 *psbuf, u16 sz,
			  struct rtw_r_meta_data *mdata, struct rx_ppdu_status *rxps)
{
	enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;

	if (mdata->mac_info_vld) {
		/* Call HALMAC API */
		rxps->mac_info_length = 4; /* TODO */
	}

	rxps->phy_info_length = sz - rxps->mac_info_length;
	if (rxps->phy_info_length > 0) {
		/* if the remaining length is > 0, the phy info is valid */
		/* CALL PHYDM API Here */
		//rx_desc->mac_id

	}
	hstatus = RTW_HAL_STATUS_SUCCESS;
	return hstatus;
}

/**
 * SW Parsing Rx Desc
 **/
static enum rtw_hal_status
_hal_parsing_rx_wd_8852be(struct hal_info_t *hal, u8 *desc,
			  struct rtw_r_meta_data *mdata)
{
	enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;

	mdata->pktlen = GET_RX_AX_DESC_PKT_LEN_8852B(desc);
	mdata->shift = GET_RX_AX_DESC_SHIFT_8852B(desc);
	mdata->wl_hd_iv_len = GET_RX_AX_DESC_HDR_IV_L_8852B(desc);
	mdata->bb_sel = GET_RX_AX_DESC_BB_SEL_8852B(desc);
	mdata->mac_info_vld = GET_RX_AX_DESC_MAC_INFO_VLD_8852B(desc);
	mdata->rpkt_type = GET_RX_AX_DESC_RPKT_TYPE_8852B(desc);
	mdata->drv_info_size = GET_RX_AX_DESC_DRV_INFO_SIZE_8852B(desc);
	mdata->long_rxd = GET_RX_AX_DESC_LONG_RXD_8852B(desc);

	mdata->ppdu_type = GET_RX_AX_DESC_PPDU_TYPE_8852B(desc);
	mdata->ppdu_cnt = GET_RX_AX_DESC_PPDU_CNT_8852B(desc);
	mdata->sr_en = GET_RX_AX_DESC_SR_EN_8852B(desc);
	mdata->user_id = GET_RX_AX_DESC_USER_ID_8852B(desc);
	mdata->rx_rate = GET_RX_AX_DESC_RX_DATARATE_8852B(desc);
	mdata->rx_gi_ltf = GET_RX_AX_DESC_RX_GI_LTF_8852B(desc);
	mdata->non_srg_ppdu = GET_RX_AX_DESC_NON_SRG_PPDU_8852B(desc);
	mdata->inter_ppdu = GET_RX_AX_DESC_INTER_PPDU_8852B(desc);
	mdata->bw = GET_RX_AX_DESC_BW_8852B(desc);

	mdata->freerun_cnt = GET_RX_AX_DESC_FREERUN_CNT_8852B(desc);

	mdata->a1_match = GET_RX_AX_DESC_A1_MATCH_8852B(desc);
	mdata->sw_dec = GET_RX_AX_DESC_SW_DEC_8852B(desc);
	mdata->hw_dec = GET_RX_AX_DESC_HW_DEC_8852B(desc);
	mdata->ampdu = GET_RX_AX_DESC_AMPDU_8852B(desc);
	mdata->ampdu_end_pkt = GET_RX_AX_DESC_AMPDU_EDN_PKT_8852B(desc);
	mdata->amsdu = GET_RX_AX_DESC_AMSDU_8852B(desc);
	mdata->amsdu_cut = GET_RX_AX_DESC_AMSDU_CUT_8852B(desc);
	mdata->last_msdu = GET_RX_AX_DESC_LAST_MSDU_8852B(desc);
	mdata->bypass = GET_RX_AX_DESC_BYPASS_8852B(desc);
	mdata->crc32 = GET_RX_AX_DESC_CRC32_8852B(desc);
	mdata->icverr = GET_RX_AX_DESC_ICVERR_8852B(desc);
	mdata->magic_wake = GET_RX_AX_DESC_MAGIC_WAKE_8852B(desc);
	mdata->unicast_wake = GET_RX_AX_DESC_UNICAST_WAKE_8852B(desc);
	mdata->pattern_wake = GET_RX_AX_DESC_PATTERN_WAKE_8852B(desc);

	if (mdata->long_rxd == 1) {
		mdata->macid = GET_RX_AX_DESC_MACID_8852B(desc);
	}

	hstatus = RTW_HAL_STATUS_SUCCESS;
	return hstatus;
}

static u8 hal_handle_rxbd_info_8852be(struct hal_info_t *hal,
				      u8 *rxbd_info, u16 *size)
{
	u8 res = false;
	u16 pld_size = 0;
	u8 fs = 0, ls = 0;
	u8 pkt_rdy = false;

	do {
		if (NULL == rxbd_info)
			break;
		if (NULL == size)
			break;

		fs = (u8)GET_RX_BD_INFO_FS(rxbd_info);
		ls = (u8)GET_RX_BD_INFO_LS(rxbd_info);
		pld_size = (u16)GET_RX_BD_INFO_HW_W_SIZE(rxbd_info);

		if (fs == 1) {
			if (ls == 1)
				pkt_rdy = true;
			else
				pkt_rdy = false;

		} else if (fs == 0) {
			if (ls == 1)
				pkt_rdy = false;
			else
				pkt_rdy = false;
		}

		if (pkt_rdy) {
			*size = pld_size;
			res = true;
		} else {
			*size = 0;
			PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "[WARNING] need to handle RX FS/LS\n");
			res = false;
		}
	} while (false);
	return res;
}
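
/*
 * FS/LS handling above: a receive buffer is treated as a complete packet
 * only when both FS (first segment) and LS (last segment) are set, i.e. the
 * whole packet fits in a single rx buffer. Any combination describing a
 * segmented packet is currently reported as not ready and only logs the
 * "need to handle RX FS/LS" warning.
 */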



/**
 * the function updates the rxbd
 */
static enum rtw_hal_status
hal_update_rxbd_8852be(struct hal_info_t *hal, struct rx_base_desc *rxbd,
		       struct rtw_rx_buf *rx_buf)
{
	enum rtw_hal_status hstatus = RTW_HAL_STATUS_SUCCESS;
	struct rtw_hal_com_t *hal_com = hal->hal_com;
	struct bus_hw_cap_t *bus_hw_cap = &hal_com->bus_hw_cap;
	u8 *ring_head = NULL;
	u8 *target_rxbd = NULL;
	u16 rxbd_num = (u16)hal_com->bus_cap.rxbd_num;

	do {
		if (NULL == rxbd)
			break;
		if (NULL == rx_buf)
			break;

		ring_head = rxbd->vir_addr;
		target_rxbd = ring_head + (rxbd->host_idx *
					   bus_hw_cap->rxbd_len);
		/* connect with halmac */
		SET_RX_BD_RXBUFFSIZE(target_rxbd, rx_buf->buf_len);
		SET_RX_BD_PHYSICAL_ADDR_LOW(target_rxbd,
					    (u32)rx_buf->phy_addr_l);
		SET_RX_BD_PHYSICAL_ADDR_HIGH(target_rxbd,
					     (u32)rx_buf->phy_addr_h);
		rxbd->host_idx = (rxbd->host_idx + 1) % rxbd_num;
	} while (false);

	return hstatus;
}


/**
 * the function notifies the hw that rx is done
 */
static enum rtw_hal_status
hal_notify_rxdone_8852be(struct hal_info_t *hal,
			 struct rx_base_desc *rxbd, u8 ch, u16 rxcnt)
{
	enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;
	u32 reg = 0;
	u8 rx_dma_ch = 0;

	do {
		rx_dma_ch = RX_QUEUE_IDX_8852BE + ch;
		reg = _hal_get_bd_idx_reg_8852be(rx_dma_ch);
		/* connect with halmac */
		if (0xFFFF == reg)
			break;
		hal_write16(hal->hal_com, reg, rxbd->host_idx);
		hstatus = RTW_HAL_STATUS_SUCCESS;
	} while (false);

	return hstatus;
}


struct bd_ram bdram_table_8852b[] = {
	/* ACH0_QUEUE_IDX_8852BE */	{0, 25, 4},
	/* ACH1_QUEUE_IDX_8852BE */	{25, 25, 4},
	/* ACH2_QUEUE_IDX_8852BE */	{50, 25, 4},
	/* ACH3_QUEUE_IDX_8852BE */	{75, 25, 4},
	/* ACH4_QUEUE_IDX_8852BE */	{100, 25, 4},
	/* ACH5_QUEUE_IDX_8852BE */	{125, 25, 4},
	/* ACH6_QUEUE_IDX_8852BE */	{150, 25, 4},
	/* ACH7_QUEUE_IDX_8852BE */	{175, 25, 4},
	/* MGQ_B0_QUEUE_IDX_8852BE */	{185, 10, 4},
	/* HIQ_B0_QUEUE_IDX_8852BE */	{195, 10, 4},
	/* MGQ_B1_QUEUE_IDX_8852BE */	{205, 10, 4},
	/* HIQ_B1_QUEUE_IDX_8852BE */	{215, 10, 4},
	/* FWCMD_QUEUE_IDX_8852BE */	{225, 10, 4}
};
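
/*
 * Each bd_ram entry above is assumed to describe the per-channel BD RAM
 * window as {start index, max, min}; _hal_tx_init_bd_ram_8852be() packs it
 * into the channel's BDRAM_CTRL register as:
 *   sidx | ((max << 8) & 0xFF00) | ((min << 16) & 0xFF0000)
 */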

static void _hal_tx_init_bd_ram_8852be(struct hal_info_t *hal)
{
	u32 reg = 0;
	u32 value = 0;
	u8 i = 0;

	for (i = 0; i < TX_DMA_CHANNEL_ENTRY_8852BE; i++) {
		/*if (FWCMD_QUEUE_IDX_8852BE == i)
			continue;*/
		value = 0;
		value = bdram_table_8852b[i].sidx +
			((bdram_table_8852b[i].max << 8) & 0xFF00) +
			((bdram_table_8852b[i].min << 16) & 0xFF0000);

		reg = _hal_get_bd_ram_reg_8852be(i);
		if (0 != reg)
			hal_write32(hal->hal_com, reg, value);
		else
			PHL_ERR("query bd ram ctrl reg fail (ch_idx = %d)\n", i);
	}

	value = hal_read32(hal->hal_com, R_AX_PCIE_INIT_CFG1);
	value |= (B_AX_RST_BDRAM);
	hal_write32(hal->hal_com, R_AX_PCIE_INIT_CFG1, value);

}

static void _hal_trx_init_bd_num_8852be(struct hal_info_t *hal)
{
	struct rtw_hal_com_t *hal_com = hal->hal_com;
	u16 txbd_num = (u16)hal_com->bus_cap.txbd_num;
	u16 rxbd_num = (u16)hal_com->bus_cap.rxbd_num;
	u32 reg = 0;
	u16 value = 0;
	u8 i = 0;

	for (i = 0; i < TX_DMA_CHANNEL_ENTRY_8852BE; i++) {
		/*if (FWCMD_QUEUE_IDX_8852BE == i)
			continue;*/

		value = txbd_num & B_AX_DESC_NUM_MSK;

		reg = _hal_get_bd_num_reg_8852be(i);
		if (0 != reg)
			hal_write16(hal->hal_com, reg, value);
		else
			PHL_ERR("query txbd num reg fail (ch_idx = %d)\n", i);
	}

	for (i = RX_QUEUE_IDX_8852BE;
	     i < RX_QUEUE_IDX_8852BE + RX_DMA_CHANNEL_ENTRY_8852BE; i++) {
		/*if (FWCMD_QUEUE_IDX_8852BE == i)
			continue;*/

		value = rxbd_num & B_AX_DESC_NUM_MSK;

		reg = _hal_get_bd_num_reg_8852be(i);
		if (0 != reg)
			hal_write16(hal->hal_com, reg, value);
		else
			PHL_ERR("query rxbd num reg fail (ch_idx = %d)\n", i);
	}
}


static void _hal_trx_init_bd_8852be(struct hal_info_t *hal, u8 *txbd_buf, u8 *rxbd_buf)
{
	struct tx_base_desc *txbd = NULL;
	struct rx_base_desc *rxbd = NULL;
	u32 reg_addr_l = 0, reg_addr_h = 0;
	u8 i = 0, rxch_idx = 0;

	if (NULL != txbd_buf && NULL != rxbd_buf) {
		txbd = (struct tx_base_desc *)txbd_buf;
		rxbd = (struct rx_base_desc *)rxbd_buf;

		for (i = 0; i < TX_DMA_CHANNEL_ENTRY_8852BE; i++) {
			/*if (FWCMD_QUEUE_IDX_8852BE == i)
				continue;*/

			_hal_get_bd_desc_reg_8852be(i, &reg_addr_l, &reg_addr_h);
			if (0 != reg_addr_l)
				hal_write32(hal->hal_com, reg_addr_l, txbd[i].phy_addr_l);
			else
				PHL_ERR("query txbd desc reg_addr_l fail (ch_idx = %d)\n", i);

			if (0 != reg_addr_h)
				hal_write32(hal->hal_com, reg_addr_h, txbd[i].phy_addr_h);
			else
				PHL_ERR("query txbd desc reg_addr_h fail (ch_idx = %d)\n", i);
		}

		for (i = 0; i < RX_DMA_CHANNEL_ENTRY_8852BE; i++) {
			rxch_idx = i + RX_QUEUE_IDX_8852BE;
			_hal_get_bd_desc_reg_8852be(rxch_idx, &reg_addr_l, &reg_addr_h);
			if (0 != reg_addr_l)
				hal_write32(hal->hal_com, reg_addr_l, rxbd[i].phy_addr_l);
			else
				PHL_ERR("query rxbd desc reg_addr_l fail (ch_idx = %d)\n", i);
			if (0 != reg_addr_h)
				hal_write32(hal->hal_com, reg_addr_h, rxbd[i].phy_addr_h);
			else
				PHL_ERR("query rxbd desc reg_addr_h fail (ch_idx = %d)\n", i);
		}

	}
}

static void _hal_tx_enable_truncate_mode(struct hal_info_t *hal, u8 *txbd_buf,
					 u8 *rxbd_buf)
{
	struct tx_base_desc *txbd = NULL;
	struct rx_base_desc *rxbd = NULL;
	u32 value = 0;

	txbd = (struct tx_base_desc *)txbd_buf;
	rxbd = (struct rx_base_desc *)rxbd_buf;

	value = hal_read32(hal->hal_com, R_AX_PCIE_INIT_CFG1);
	value |= (B_AX_TX_TRUNC_MODE | B_AX_RX_TRUNC_MODE);
	hal_write32(hal->hal_com, R_AX_PCIE_INIT_CFG1, value);

	hal_write32(hal->hal_com, R_AX_TXDMA_ADDR_H, txbd->phy_addr_h);
	hal_write32(hal->hal_com, R_AX_RXDMA_ADDR_H, rxbd->phy_addr_h);
}

static void _hal_tx_disable_truncate_mode(struct hal_info_t *hal)
{
	u32 value = 0;

	value = hal_read32(hal->hal_com, R_AX_TX_ADDRESS_INFO_MODE_SETTING);
	value &= ~(B_AX_HOST_ADDR_INFO_8B_SEL);
	hal_write32(hal->hal_com, R_AX_TX_ADDRESS_INFO_MODE_SETTING, value);

	value = hal_read32(hal->hal_com, R_AX_PKTIN_SETTING);
	value |= (B_AX_WD_ADDR_INFO_LENGTH);
	hal_write32(hal->hal_com, R_AX_PKTIN_SETTING, value);
}


static void _hal_select_rxbd_mode(struct hal_info_t *hal,
				  enum rxbd_mode_8852BE mode)
{
	u32 value = 0;

	if (RXBD_MODE_PACKET == mode) {
		value = hal_read32(hal->hal_com, R_AX_PCIE_INIT_CFG1);
		value &= ~(B_AX_RXBD_MODE);
		hal_write32(hal->hal_com, R_AX_PCIE_INIT_CFG1, value);
	} else if (RXBD_MODE_SEPARATION == mode) {
		/*
		value = hal_read32(hal->hal_com, R_AX_PCIE_INIT_CFG1);
		value |= (B_AX_RXBD_MODE);
		hal_write32(hal->hal_com, R_AX_PCIE_INIT_CFG1, value);
		*/
		/* hana_todo, append length setting */
	} else {
		PHL_WARN("Unknown Rx BD mode (%d)\n", mode);
	}
}

static void hal_cfg_wow_txdma_8852be(struct hal_info_t *hal, u8 en)
{
	struct mac_ax_txdma_ch_map ch_map;

	ch_map.ch0 = en ? MAC_AX_PCIE_ENABLE : MAC_AX_PCIE_DISABLE;
	ch_map.ch1 = en ? MAC_AX_PCIE_ENABLE : MAC_AX_PCIE_DISABLE;
	ch_map.ch2 = en ? MAC_AX_PCIE_ENABLE : MAC_AX_PCIE_DISABLE;
	ch_map.ch3 = en ? MAC_AX_PCIE_ENABLE : MAC_AX_PCIE_DISABLE;
	ch_map.ch4 = MAC_AX_PCIE_IGNORE;
	ch_map.ch5 = MAC_AX_PCIE_IGNORE;
	ch_map.ch6 = MAC_AX_PCIE_IGNORE;
	ch_map.ch7 = MAC_AX_PCIE_IGNORE;
	ch_map.ch8 = en ? MAC_AX_PCIE_ENABLE : MAC_AX_PCIE_DISABLE;
	ch_map.ch9 = en ? MAC_AX_PCIE_ENABLE : MAC_AX_PCIE_DISABLE;
	ch_map.ch10 = MAC_AX_PCIE_IGNORE;
	ch_map.ch11 = MAC_AX_PCIE_IGNORE;
	ch_map.ch12 = MAC_AX_PCIE_IGNORE;

	if (RTW_HAL_STATUS_SUCCESS != rtw_hal_mac_cfg_txdma(hal, &ch_map))
		PHL_ERR("%s failure \n", __func__);

}
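
/*
 * chN in mac_ax_txdma_ch_map is assumed to follow the 8852BE dma channel
 * numbering used in this file (ch0-ch7 = ACH0-ACH7, ch8/ch9 = MGQ/HIQ of
 * band 0, ch10/ch11 = MGQ/HIQ of band 1, ch12 = FWCMD). Under that
 * assumption, WoW configuration above only toggles ACH0-ACH3 and the band-0
 * MGQ/HIQ channels and leaves the rest untouched.
 */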

static u8 hal_poll_txdma_idle_8852be(struct hal_info_t *hal)
{
	struct mac_ax_txdma_ch_map ch_map;

	ch_map.ch0 = MAC_AX_PCIE_ENABLE;
	ch_map.ch1 = MAC_AX_PCIE_ENABLE;
	ch_map.ch2 = MAC_AX_PCIE_ENABLE;
	ch_map.ch3 = MAC_AX_PCIE_ENABLE;
	ch_map.ch4 = MAC_AX_PCIE_IGNORE;
	ch_map.ch5 = MAC_AX_PCIE_IGNORE;
	ch_map.ch6 = MAC_AX_PCIE_IGNORE;
	ch_map.ch7 = MAC_AX_PCIE_IGNORE;
	ch_map.ch8 = MAC_AX_PCIE_ENABLE;
	ch_map.ch9 = MAC_AX_PCIE_ENABLE;
	ch_map.ch10 = MAC_AX_PCIE_IGNORE;
	ch_map.ch11 = MAC_AX_PCIE_IGNORE;
	ch_map.ch12 = MAC_AX_PCIE_ENABLE;

	if (RTW_HAL_STATUS_SUCCESS != rtw_hal_mac_poll_txdma_idle(hal, &ch_map)) {
		PHL_ERR("%s failure \n", __func__);
		return false;
	}
	return true;
}

static void _hal_clear_trx_state(struct hal_info_t *hal)
{
	u32 value = 0;

	/* disable hci */
	value = hal_read32(hal->hal_com, R_AX_PCIE_INIT_CFG1);
	value &= ~(B_AX_RXHCI_EN | B_AX_TXHCI_EN);
	hal_write32(hal->hal_com, R_AX_PCIE_INIT_CFG1, value);

	/* check mac power on status */

	/* clear hci idx */
	value = hal_read32(hal->hal_com, R_AX_TXBD_RWPTR_CLR1);
	value |= (B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX |
		  B_AX_CLR_ACH3_IDX | B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX |
		  B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX | B_AX_CLR_CH8_IDX |
		  B_AX_CLR_CH9_IDX | B_AX_CLR_CH12_IDX);
	hal_write32(hal->hal_com, R_AX_TXBD_RWPTR_CLR1, value);

	value = hal_read32(hal->hal_com, R_AX_TXBD_RWPTR_CLR2);
	value |= (B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX);
	hal_write32(hal->hal_com, R_AX_TXBD_RWPTR_CLR2, value);

	value = hal_read32(hal->hal_com, R_AX_RXBD_RWPTR_CLR);
	value |= (B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX);
	hal_write32(hal->hal_com, R_AX_RXBD_RWPTR_CLR, value);
}

/**
 * the function deinitializes 8852BE specific data and hw configuration
 */
static void hal_trx_deinit_8852be(struct hal_info_t *hal)
{
	/*struct rtw_hal_com_t *hal_com = hal->hal_com;*/
}

/**
 * the function initializes 8852BE specific data and hw configuration
 */
static enum rtw_hal_status hal_trx_init_8852be(struct hal_info_t *hal, u8 *txbd_buf, u8 *rxbd_buf)
{
	enum rtw_hal_status hstatus = RTW_HAL_STATUS_SUCCESS;
	/* Set AMPDU max agg number to 128 */
	/* CR setting */
	rtw_hal_mac_set_hw_ampdu_cfg(hal, 0, 0x7F, 0xAB);
	return hstatus;
}

static struct hal_trx_ops ops = {0};
void hal_trx_ops_init_8852be(void)
{
	ops.init = hal_trx_init_8852be;
	ops.deinit = hal_trx_deinit_8852be;
	ops.query_tx_res = hal_get_avail_txbd_8852be;
	ops.query_rx_res = hal_get_avail_rxbd_8852be;
	ops.cfg_wow_txdma = hal_cfg_wow_txdma_8852be;
	ops.poll_txdma_idle = hal_poll_txdma_idle_8852be;
	ops.map_hw_tx_chnl = hal_mapping_hw_tx_chnl_8852be;
	ops.qsel_to_tid = hal_qsel_to_tid_8852be;
	ops.query_txch_num = hal_query_txch_num_8852be;
	ops.query_rxch_num = hal_query_rxch_num_8852be;
	ops.update_wd = hal_update_wd_8852be;
	ops.update_txbd = hal_update_txbd_8852be;
	ops.tx_start = hal_trigger_txdma_8852be;
	ops.check_rxrdy = hal_check_rxrdy_8852be;
	ops.handle_rxbd_info = hal_handle_rxbd_info_8852be;
	ops.handle_rx_buffer = hal_handle_rx_buffer_8852b;
	ops.update_rxbd = hal_update_rxbd_8852be;
	ops.notify_rxdone = hal_notify_rxdone_8852be;
	ops.handle_wp_rpt = hal_handle_rx_report_8852be;
	ops.get_fwcmd_queue_idx = hal_get_fwcmd_queue_idx_8852be;
}


u32 hal_hook_trx_ops_8852be(struct rtw_phl_com_t *phl_com,
			    struct hal_info_t *hal_info)
{
	enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;

	if (NULL != hal_info) {
		hal_trx_ops_init_8852be();
		hal_info->trx_ops = &ops;
		hstatus = RTW_HAL_STATUS_SUCCESS;
	}

	return hstatus;
}