1 /******************************************************************************
2 *
3 * Copyright(c) 2007 - 2017 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26 #include "mp_precomp.h"
27 #if (DM_ODM_SUPPORT_TYPE == ODM_WIN)
28 #if RT_PLATFORM == PLATFORM_MACOSX
29 #include "phydm_precomp.h"
30 #else
31 #include "../phydm_precomp.h"
32 #endif
33 #else
34 #include "../../phydm_precomp.h"
35 #endif
36
37 #if (RTL8822B_SUPPORT == 1)
38
39 /*@---------------------------Define Local Constant---------------------------*/
40
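/* Spin until RF reg 0x8 on path A reports the IQK handshake value 0xabcde
 * (or ~300 ms elapse), then clear the register and log how long it took.
 */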
void phydm_get_read_counter_8822b(struct dm_struct *dm)
42 {
43 u32 counter = 0x0, rf_reg;
44
45 while (1) {
46 rf_reg = odm_get_rf_reg(dm, RF_PATH_A, RF_0x8, MASK20BITS);
47 if (rf_reg == 0xabcde || counter > 300)
48 break;
49 counter++;
50 ODM_delay_ms(1);
51 }
52
53 odm_set_rf_reg(dm, RF_PATH_A, RF_0x8, MASK20BITS, 0x0);
54 RF_DBG(dm, DBG_RF_IQK, "[IQK]counter = %d\n", counter);
55 }
56
57 /*@---------------------------Define Local Constant---------------------------*/
58
59 #if !(DM_ODM_SUPPORT_TYPE & ODM_AP)
void do_iqk_8822b(void *dm_void, u8 delta_thermal_index, u8 thermal_value,
61 u8 threshold)
62 {
63 struct dm_struct *dm = (struct dm_struct *)dm_void;
64 struct dm_iqk_info *iqk = &dm->IQK_info;
65
66 dm->rf_calibrate_info.thermal_value_iqk = thermal_value;
67 halrf_segment_iqk_trigger(dm, true, iqk->segment_iqk);
68 }
69 #else
/* Originally config->do_iqk was hooked to phy_iq_calibrate_8822b, */
/* but do_iqk_8822b and phy_iq_calibrate_8822b take different arguments. */
void do_iqk_8822b(void *dm_void, u8 delta_thermal_index, u8 thermal_value,
73 u8 threshold)
74 {
75 struct dm_struct *dm = (struct dm_struct *)dm_void;
76 struct dm_iqk_info *iqk = &dm->IQK_info;
77 boolean is_recovery = (boolean)delta_thermal_index;
78
79 halrf_segment_iqk_trigger(dm, true, iqk->segment_iqk);
80 }
81 #endif
82
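/* Indirect register read through the 0x1700 interface (used here for what
 * appears to be the BT/WLAN-coexistence GNT_WL control, given tmp_gntwl):
 * issue the read command, poll the ready bit at 0x1703[5], then return the
 * data latched in 0x1708.
 */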
u32 _iqk_ltec_read_8822b(struct dm_struct *dm, u16 reg_addr)
84 {
85 u32 j = 0;
86
87 /*wait for ready bit before access 0x1700*/
88 odm_write_4byte(dm, 0x1700, 0x800f0000 | reg_addr);
89
90 do {
91 j++;
92 } while (((odm_read_1byte(dm, 0x1703) & BIT(5)) == 0) && (j < 30000));
93
94 return odm_read_4byte(dm, 0x1708); /*get read data*/
95 }
96
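/* Indirect register write through the 0x1700 interface. A full-dword mask
 * writes the value directly; a partial mask does a read-modify-write, with
 * reg_value shifted up to the lowest set bit of bit_mask. In both cases the
 * ready bit at 0x1703[5] is polled before the write command is issued.
 */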
void _iqk_ltec_write_8822b(struct dm_struct *dm, u16 reg_addr, u32 bit_mask,
98 u32 reg_value)
99 {
100 u32 val, i = 0, j = 0, bitpos = 0;
101
102 if (bit_mask == 0x0)
103 return;
104 if (bit_mask == 0xffffffff) {
105 odm_write_4byte(dm, 0x1704, reg_value); /*put write data*/
106
107 /*wait for ready bit before access 0x1700*/
108 do {
109 j++;
110 } while (((odm_read_1byte(dm, 0x1703) & BIT(5)) == 0) && (j < 30000));
111
112 odm_write_4byte(dm, 0x1700, 0xc00f0000 | reg_addr);
113 } else {
114 for (i = 0; i <= 31; i++) {
115 if (((bit_mask >> i) & 0x1) == 0x1) {
116 bitpos = i;
117 break;
118 }
119 }
120
121 /*read back register value before write*/
122 val = _iqk_ltec_read_8822b(dm, reg_addr);
123 val = (val & (~bit_mask)) | (reg_value << bitpos);
124
125 odm_write_4byte(dm, 0x1704, val); /*put write data*/
126
127 /*wait for ready bit before access 0x1700*/
128 do {
129 j++;
130 } while (((odm_read_1byte(dm, 0x1703) & BIT(5)) == 0) && (j < 30000));
131
132 odm_write_4byte(dm, 0x1700, 0xc00f0000 | reg_addr);
133 }
134 }
135
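/* Write an RF register and read it back, re-writing (up to 100 tries,
 * 10 us apart) until the readback matches the requested value.
 */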
void _iqk_rf_set_check_8822b(struct dm_struct *dm, u8 path, u16 add, u32 data)
137 {
138 u32 i;
139
140 odm_set_rf_reg(dm, (enum rf_path)path, add, MASK20BITS, data);
141
142 for (i = 0; i < 100; i++) {
143 if (odm_get_rf_reg(dm, (enum rf_path)path,
144 add, MASK20BITS) == data)
145 break;
146
147 ODM_delay_us(10);
148 odm_set_rf_reg(dm, (enum rf_path)path, add, MASK20BITS, data);
149 }
150 }
151
void _iqk_rf0xb0_workaround_8822b(struct dm_struct *dm)
153 {
154 /*add 0xb8 control for the bad phase noise after switching channel*/
155 odm_set_rf_reg(dm, (enum rf_path)0x0, RF_0xb8, MASK20BITS, 0x00a00);
156 odm_set_rf_reg(dm, (enum rf_path)0x0, RF_0xb8, MASK20BITS, 0x80a00);
157 }
158
void _iqk_0xc94_workaround_8822b(struct dm_struct *dm)
160 {
161 if (odm_get_bb_reg(dm, R_0xc94, BIT(0)) == 0x1) {
162 odm_set_bb_reg(dm, R_0xc94, BIT(0), 0x0);
163 odm_set_bb_reg(dm, R_0xc94, BIT(0), 0x1);
164 }
165
166 if (odm_get_bb_reg(dm, R_0xe94, BIT(0)) == 0x1) {
167 odm_set_bb_reg(dm, R_0xe94, BIT(0), 0x0);
168 odm_set_bb_reg(dm, R_0xe94, BIT(0), 0x1);
169 }
170 }
171
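/* Pack the per-path TX/RX fail flags and RX fail codes for channel entry
 * 'ch' into 0x1bf0[15:0], and store the RX-IQK AGC results in 0x1be8/0x1bec.
 */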
void _iqk_fill_iqk_report_8822b(void *dm_void, u8 ch)
173 {
174 struct dm_struct *dm = (struct dm_struct *)dm_void;
175 struct dm_iqk_info *iqk = &dm->IQK_info;
176 u32 tmp1 = 0x0, tmp2 = 0x0, tmp3 = 0x0, data;
177 u8 i;
178
179 for (i = 0; i < SS_8822B; i++) {
180 tmp1 += ((iqk->iqk_fail_report[ch][i][TX_IQK] & 1) << i);
181 tmp2 += ((iqk->iqk_fail_report[ch][i][RX_IQK] & 1) << (i + 4));
182 tmp3 += ((iqk->rxiqk_fail_code[ch][i] & 0x3) << (i * 2 + 8));
183 }
184 odm_write_4byte(dm, 0x1b00, 0xf8000008);
185 odm_set_bb_reg(dm, R_0x1bf0, 0x0000ffff, tmp1 | tmp2 | tmp3);
186
187 for (i = 0; i < 2; i++) {
188 data = ((iqk->rxiqk_agc[ch][(i * 2) + 1] << 16) |
189 iqk->rxiqk_agc[ch][i * 2]);
190 odm_write_4byte(dm, 0x1be8 + (i * 4), data);
191 }
192 }
193
void _iqk_fail_count_8822b(void *dm_void)
195 {
196 struct dm_struct *dm = (struct dm_struct *)dm_void;
197 struct dm_iqk_info *iqk = &dm->IQK_info;
198 u8 i;
199
200 dm->n_iqk_cnt++;
201 if (odm_get_rf_reg(dm, RF_PATH_A, RF_0x1bf0, BIT(16)) == 1)
202 iqk->is_reload = true;
203 else
204 iqk->is_reload = false;
205
206 if (!iqk->is_reload) {
207 for (i = 0; i < 8; i++) {
208 if (odm_get_bb_reg(dm, R_0x1bf0, BIT(i)) == 1)
209 dm->n_iqk_fail_cnt++;
210 }
211 }
212 RF_DBG(dm, DBG_RF_IQK, "[IQK]All/Fail = %d %d\n", dm->n_iqk_cnt,
213 dm->n_iqk_fail_cnt);
214 }
215
void _iqk_iqk_fail_report_8822b(struct dm_struct *dm)
217 {
218 u32 tmp1bf0 = 0x0;
219 u8 i;
220
221 tmp1bf0 = odm_read_4byte(dm, 0x1bf0);
222
223 for (i = 0; i < 4; i++) {
224 if (tmp1bf0 & (0x1 << i))
225 #if !(DM_ODM_SUPPORT_TYPE & ODM_AP)
226 RF_DBG(dm, DBG_RF_IQK, "[IQK] please check S%d TXIQK\n",
227 i);
228 #else
229 panic_printk("[IQK] please check S%d TXIQK\n", i);
230 #endif
231 if (tmp1bf0 & (0x1 << (i + 12)))
232 #if !(DM_ODM_SUPPORT_TYPE & ODM_AP)
233 RF_DBG(dm, DBG_RF_IQK, "[IQK] please check S%d RXIQK\n",
234 i);
235 #else
236 panic_printk("[IQK] please check S%d RXIQK\n", i);
237 #endif
238 }
239 }
240
void _iqk_backup_mac_bb_8822b(struct dm_struct *dm, u32 *MAC_backup,
242 u32 *BB_backup, u32 *backup_mac_reg,
243 u32 *backup_bb_reg)
244 {
245 u32 i;
246
247 for (i = 0; i < MAC_REG_NUM_8822B; i++)
248 MAC_backup[i] = odm_read_4byte(dm, backup_mac_reg[i]);
249
250 for (i = 0; i < BB_REG_NUM_8822B; i++)
251 BB_backup[i] = odm_read_4byte(dm, backup_bb_reg[i]);
252 #if 0
253 /* RF_DBG(dm, DBG_RF_IQK, "[IQK]BackupMacBB Success!!!!\n"); */
254 #endif
255 }
256
void _iqk_backup_rf_8822b(struct dm_struct *dm, u32 RF_backup[][2],
258 u32 *bkup_reg)
259 {
260 u32 i;
261
262 for (i = 0; i < RF_REG_NUM_8822B; i++) {
263 RF_backup[i][RF_PATH_A] =
264 odm_get_rf_reg(dm, RF_PATH_A, bkup_reg[i], MASK20BITS);
265 RF_backup[i][RF_PATH_B] =
266 odm_get_rf_reg(dm, RF_PATH_B, bkup_reg[i], MASK20BITS);
267 }
268 #if 0
269 /* RF_DBG(dm, DBG_RF_IQK, "[IQK]BackupRF Success!!!!\n"); */
270 #endif
271 }
272
void _iqk_agc_bnd_int_8822b(struct dm_struct *dm)
274 {
275 /*initialize RX AGC bnd, it must do after bbreset*/
276 odm_write_4byte(dm, 0x1b00, 0xf8000008);
277 odm_write_4byte(dm, 0x1b00, 0xf80a7008);
278 odm_write_4byte(dm, 0x1b00, 0xf8015008);
279 odm_write_4byte(dm, 0x1b00, 0xf8000008);
280 #if 0
281 /*RF_DBG(dm, DBG_RF_IQK, "[IQK]init. rx agc bnd\n");*/
282 #endif
283 }
284
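/* Put both RF paths in standby, then wait (up to ~30 ms) for an ongoing CCA
 * to finish before switching the RX antenna and CCK RX paths off and pulsing
 * the BB reset bit (reg 0x0[16]).
 */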
void _iqk_bb_reset_8822b(struct dm_struct *dm)
286 {
287 boolean cca_ing = false;
288 u32 count = 0;
289 u32 bit_mask = (BIT(27) | BIT(26) | BIT(25) | BIT(24));
290
291 odm_set_rf_reg(dm, RF_PATH_A, RF_0x0, MASK20BITS, 0x10000);
292 odm_set_rf_reg(dm, RF_PATH_B, RF_0x0, MASK20BITS, 0x10000);
293 /*reset BB report*/
294 odm_set_bb_reg(dm, R_0x8f8, 0x0ff00000, 0x0);
295
296 while (1) {
297 odm_write_4byte(dm, 0x8fc, 0x0);
298 odm_set_bb_reg(dm, R_0x198c, 0x7, 0x7);
299 cca_ing = (boolean)odm_get_bb_reg(dm, R_0xfa0, BIT(3));
300
301 if (count > 30)
302 cca_ing = false;
303
304 if (cca_ing) {
305 ODM_delay_ms(1);
306 count++;
307 } else {
308 /*RX ant off*/
309 odm_write_1byte(dm, 0x808, 0x0);
310 /*CCK RX path off*/
311 odm_set_bb_reg(dm, R_0xa04, bit_mask, 0x0);
312
313 /*BBreset*/
314 odm_set_bb_reg(dm, R_0x0, BIT(16), 0x0);
315 odm_set_bb_reg(dm, R_0x0, BIT(16), 0x1);
316
317 if (odm_get_bb_reg(dm, R_0x660, BIT(16)))
318 odm_write_4byte(dm, 0x6b4, 0x89000006);
319 #if 0
320 /*RF_DBG(dm, DBG_RF_IQK, "[IQK]BBreset!!!!\n");*/
321 #endif
322 break;
323 }
324 }
325 }
326
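/* AFE/ADC clock setup: one register set for IQK mode (followed by a BB
 * reset) and another to return to normal operation.
 */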
void _iqk_afe_setting_8822b(struct dm_struct *dm, boolean do_iqk)
328 {
329 if (do_iqk) {
330 odm_write_4byte(dm, 0xc60, 0x50000000);
331 odm_write_4byte(dm, 0xc60, 0x70070040);
332 odm_write_4byte(dm, 0xe60, 0x50000000);
333 odm_write_4byte(dm, 0xe60, 0x70070040);
334 odm_write_4byte(dm, 0xc58, 0xd8000402);
335 odm_write_4byte(dm, 0xc5c, 0xd1000120);
336 odm_write_4byte(dm, 0xc6c, 0x00000a15);
337 odm_write_4byte(dm, 0xe58, 0xd8000402);
338 odm_write_4byte(dm, 0xe5c, 0xd1000120);
339 odm_write_4byte(dm, 0xe6c, 0x00000a15);
340 _iqk_bb_reset_8822b(dm);
341 #if 0
342 /* RF_DBG(dm, DBG_RF_IQK, "[IQK]AFE setting for IQK mode!!!!\n"); */
343 #endif
344 } else {
345 odm_write_4byte(dm, 0xc60, 0x50000000);
346 odm_write_4byte(dm, 0xc60, 0x70038040);
347 odm_write_4byte(dm, 0xe60, 0x50000000);
348 odm_write_4byte(dm, 0xe60, 0x70038040);
349 #if 0
350 /* RF_DBG(dm, DBG_RF_IQK, "[IQK]AFE setting for Normal mode!!!!\n"); */
351 #endif
352 }
353 /*0x9a4[31]=0: Select da clock*/
354 odm_set_bb_reg(dm, R_0x9a4, BIT(31), 0x0);
355 }
356
void _iqk_restore_mac_bb_8822b(struct dm_struct *dm, u32 *MAC_backup,
358 u32 *BB_backup, u32 *backup_mac_reg,
359 u32 *backup_bb_reg)
360 {
361 u32 i;
362
363 for (i = 0; i < MAC_REG_NUM_8822B; i++)
364 odm_write_4byte(dm, backup_mac_reg[i], MAC_backup[i]);
365 for (i = 0; i < BB_REG_NUM_8822B; i++)
366 odm_write_4byte(dm, backup_bb_reg[i], BB_backup[i]);
367 #if 0
368 /* RF_DBG(dm, DBG_RF_IQK, "[IQK]RestoreMacBB Success!!!!\n"); */
369 #endif
370 }
371
void _iqk_restore_rf_8822b(struct dm_struct *dm, u32 *backup_rf_reg,
373 u32 RF_backup[][2])
374 {
375 u32 i;
376
377 odm_set_rf_reg(dm, RF_PATH_A, RF_0xef, MASK20BITS, 0x0);
378 odm_set_rf_reg(dm, RF_PATH_B, RF_0xef, MASK20BITS, 0x0);
379 /*0xdf[4]=0*/
380 _iqk_rf_set_check_8822b(dm, RF_PATH_A, 0xdf,
381 RF_backup[0][RF_PATH_A] & (~BIT(4)));
382 _iqk_rf_set_check_8822b(dm, RF_PATH_B, 0xdf,
383 RF_backup[0][RF_PATH_B] & (~BIT(4)));
384
385 #if 0
386 /*odm_set_rf_reg(dm, RF_PATH_A, RF_0xdf, MASK20BITS, RF_backup[0][RF_PATH_A] & (~BIT(4)));*/
387 /*odm_set_rf_reg(dm, RF_PATH_B, RF_0xdf, MASK20BITS, RF_backup[0][RF_PATH_B] & (~BIT(4)));*/
388 #endif
389
390 for (i = 1; i < RF_REG_NUM_8822B; i++) {
391 odm_set_rf_reg(dm, RF_PATH_A, backup_rf_reg[i], MASK20BITS,
392 RF_backup[i][RF_PATH_A]);
393 odm_set_rf_reg(dm, RF_PATH_B, backup_rf_reg[i], MASK20BITS,
394 RF_backup[i][RF_PATH_B]);
395 }
396 #if 0
397 /* RF_DBG(dm, DBG_RF_IQK, "[IQK]RestoreRF Success!!!!\n"); */
398 #endif
399 }
400
void _iqk_backup_iqk_8822b_subfunction(struct dm_struct *dm)
402 {
403 struct dm_iqk_info *iqk = &dm->IQK_info;
404 u8 i, j, k;
405
406 iqk->iqk_channel[1] = iqk->iqk_channel[0];
407 for (i = 0; i < 2; i++) {
408 iqk->lok_idac[1][i] = iqk->lok_idac[0][i];
409 iqk->rxiqk_agc[1][i] = iqk->rxiqk_agc[0][i];
410 iqk->bypass_iqk[1][i] = iqk->bypass_iqk[0][i];
411 iqk->rxiqk_fail_code[1][i] = iqk->rxiqk_fail_code[0][i];
412 for (j = 0; j < 2; j++) {
413 iqk->iqk_fail_report[1][i][j] =
414 iqk->iqk_fail_report[0][i][j];
415 for (k = 0; k < 8; k++) {
416 iqk->iqk_cfir_real[1][i][j][k] =
417 iqk->iqk_cfir_real[0][i][j][k];
418 iqk->iqk_cfir_imag[1][i][j][k] =
419 iqk->iqk_cfir_imag[0][i][j][k];
420 }
421 }
422 }
423 }
424
void _iqk_backup_iqk_8822b(struct dm_struct *dm, u8 step, u8 path)
426 {
427 struct dm_iqk_info *iqk = &dm->IQK_info;
428 u8 i, j;
429
430 switch (step) {
431 case 0:
432 _iqk_backup_iqk_8822b_subfunction(dm);
433
434 for (i = 0; i < 4; i++) {
435 iqk->rxiqk_fail_code[0][i] = 0x0;
436 iqk->rxiqk_agc[0][i] = 0x0;
437 for (j = 0; j < 2; j++) {
438 iqk->iqk_fail_report[0][i][j] = true;
439 iqk->gs_retry_count[0][i][j] = 0x0;
440 }
441 for (j = 0; j < 3; j++)
442 iqk->retry_count[0][i][j] = 0x0;
443 }
444 /*backup channel*/
445 iqk->iqk_channel[0] = iqk->rf_reg18;
446 break;
447 case 1: /*LOK backup*/
448 iqk->lok_idac[0][path] = odm_get_rf_reg(dm, (enum rf_path)path,
449 RF_0x58, MASK20BITS);
450 break;
451 case 2: /*TXIQK backup*/
452 case 3: /*RXIQK backup*/
453 phydm_get_iqk_cfir(dm, (step - 2), path, false);
454 break;
455 }
456 }
457
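/* Re-program a previously stored IQK result for channel entry 'ch':
 * reload_idx = 1 restores only the TX CFIR coefficients, reload_idx = 2
 * also restores the LOK IDAC (RF 0x58) and the RX CFIR coefficients.
 */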
void _iqk_reload_iqk_setting_8822b(struct dm_struct *dm, u8 ch,
459 u8 reload_idx
460 /*1: reload TX, 2: reload LO, TX, RX*/)
461 {
462 struct dm_iqk_info *iqk = &dm->IQK_info;
463 u8 i, path, idx;
464 u16 iqk_apply[2] = {0xc94, 0xe94};
465 u32 tmp, data;
466 u32 bmask13_12 = (BIT(13) | BIT(12));
467 u32 bmask20_16 = (BIT(20) | BIT(19) | BIT(18) | BIT(17) | BIT(16));
468 boolean report;
469
470 for (path = 0; path < 2; path++) {
471 if (reload_idx == 2) {
472 #if 0
473 /*odm_set_rf_reg(dm, (enum rf_path)path, RF_0xdf, BIT(4), 0x1);*/
474 #endif
475 tmp = odm_get_rf_reg(dm, (enum rf_path)path,
476 RF_0xdf, MASK20BITS) | BIT(4);
477 _iqk_rf_set_check_8822b(dm, (enum rf_path)path,
478 0xdf, tmp);
479 odm_set_rf_reg(dm, (enum rf_path)path, RF_0x58,
480 MASK20BITS, iqk->lok_idac[ch][path]);
481 }
482
483 for (idx = 0; idx < reload_idx; idx++) {
484 odm_set_bb_reg(dm, R_0x1b00, MASKDWORD,
485 0xf8000008 | path << 1);
486 odm_set_bb_reg(dm, R_0x1b2c, MASKDWORD, 0x7);
487 odm_set_bb_reg(dm, R_0x1b38, MASKDWORD, 0x20000000);
488 odm_set_bb_reg(dm, R_0x1b3c, MASKDWORD, 0x20000000);
489 odm_set_bb_reg(dm, R_0x1bcc, MASKDWORD, 0x00000000);
490 if (idx == 0)
491 odm_set_bb_reg(dm, R_0x1b0c, bmask13_12, 0x3);
492 else
493 odm_set_bb_reg(dm, R_0x1b0c, bmask13_12, 0x1);
494 odm_set_bb_reg(dm, R_0x1bd4, bmask20_16, 0x10);
495 for (i = 0; i < 8; i++) {
496 data = ((0xc0000000 >> idx) + 0x3) + (i * 4) +
497 (iqk->iqk_cfir_real[ch][path][idx][i]
498 << 9);
499 odm_write_4byte(dm, 0x1bd8, data);
500 data = ((0xc0000000 >> idx) + 0x1) + (i * 4) +
501 (iqk->iqk_cfir_imag[ch][path][idx][i]
502 << 9);
503 odm_write_4byte(dm, 0x1bd8, data);
504 }
505 if (idx == 0) {
506 report = !(iqk->iqk_fail_report[ch][path][idx]);
507 odm_set_bb_reg(dm, iqk_apply[path],
508 BIT(0), report);
509 } else {
510 report = !(iqk->iqk_fail_report[ch][path][idx]);
511 odm_set_bb_reg(dm, iqk_apply[path],
512 BIT(10), report);
513 }
514 }
515 odm_set_bb_reg(dm, R_0x1bd8, MASKDWORD, 0x0);
516 odm_set_bb_reg(dm, R_0x1b0c, bmask13_12, 0x0);
517 }
518 }
519
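/* If RF 0x18 matches one of the two stored channel entries, reload that
 * result instead of running a new calibration; the decision is mirrored to
 * 0x1bf0[16] so halrf_iqk_check_if_reload and the fail counting can see it.
 */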
520 boolean
_iqk_reload_iqk_8822b(struct dm_struct *dm, boolean reset)
522 {
523 struct dm_iqk_info *iqk = &dm->IQK_info;
524 u8 i;
525
526 iqk->is_reload = false;
527
528 if (reset) {
529 for (i = 0; i < 2; i++)
530 iqk->iqk_channel[i] = 0x0;
531 } else {
532 iqk->rf_reg18 = odm_get_rf_reg(dm, RF_PATH_A,
533 RF_0x18, MASK20BITS);
534
535 for (i = 0; i < 2; i++) {
536 if (iqk->rf_reg18 == iqk->iqk_channel[i]) {
537 _iqk_reload_iqk_setting_8822b(dm, i, 2);
538 _iqk_fill_iqk_report_8822b(dm, i);
539 RF_DBG(dm, DBG_RF_IQK,
540 "[IQK]reload IQK result before!!!!\n");
541 iqk->is_reload = true;
542 }
543 }
544 }
545 /*report*/
546 odm_set_bb_reg(dm, R_0x1bf0, BIT(16), (u8)iqk->is_reload);
547 return iqk->is_reload;
548 }
549
void _iqk_rfe_setting_8822b(struct dm_struct *dm, boolean ext_pa_on)
551 {
552 if (ext_pa_on) {
553 /*RFE setting*/
554 odm_write_4byte(dm, 0xcb0, 0x77777777);
555 odm_write_4byte(dm, 0xcb4, 0x00007777);
556 odm_write_4byte(dm, 0xcbc, 0x0000083B);
557 odm_write_4byte(dm, 0xeb0, 0x77777777);
558 odm_write_4byte(dm, 0xeb4, 0x00007777);
559 odm_write_4byte(dm, 0xebc, 0x0000083B);
560 #if 0
561 /*odm_write_4byte(dm, 0x1990, 0x00000c30);*/
562 #endif
563 RF_DBG(dm, DBG_RF_IQK, "[IQK]external PA on!!!!\n");
564 } else {
565 /*RFE setting*/
566 odm_write_4byte(dm, 0xcb0, 0x77777777);
567 odm_write_4byte(dm, 0xcb4, 0x00007777);
568 odm_write_4byte(dm, 0xcbc, 0x00000100);
569 odm_write_4byte(dm, 0xeb0, 0x77777777);
570 odm_write_4byte(dm, 0xeb4, 0x00007777);
571 odm_write_4byte(dm, 0xebc, 0x00000100);
572 #if 0
573 /*odm_write_4byte(dm, 0x1990, 0x00000c30);*/
574 /*RF_DBG(dm, DBG_RF_IQK, "[IQK]external PA off!!!!\n");*/
575 #endif
576 }
577 }
578
void _iqk_rf_setting_8822b(struct dm_struct *dm)
580 {
581 u8 path;
582 u32 tmp;
583
584 odm_write_4byte(dm, 0x1b00, 0xf8000008);
585 odm_write_4byte(dm, 0x1bb8, 0x00000000);
586
587 for (path = 0; path < 2; path++) {
588 /*0xdf:B11 = 1,B4 = 0, B1 = 1*/
589 tmp = odm_get_rf_reg(dm, (enum rf_path)path,
590 RF_0xdf, MASK20BITS);
591 tmp = (tmp & (~BIT(4))) | BIT(1) | BIT(11);
592 _iqk_rf_set_check_8822b(dm, (enum rf_path)path, 0xdf, tmp);
593 #if 0
594 /*odm_set_rf_reg(dm, (enum rf_path)path, RF_0xdf, MASK20BITS, tmp);*/
595 #endif
596
597 /*release 0x56 TXBB*/
598 odm_set_rf_reg(dm, (enum rf_path)path, RF_0x65,
599 MASK20BITS, 0x09000);
600
601 if (*dm->band_type == ODM_BAND_5G) {
602 odm_set_rf_reg(dm, (enum rf_path)path,
603 RF_0xef, BIT(19), 0x1);
604 odm_set_rf_reg(dm, (enum rf_path)path,
605 RF_0x33, MASK20BITS, 0x00026);
606 odm_set_rf_reg(dm, (enum rf_path)path,
607 RF_0x3e, MASK20BITS, 0x00037);
608 odm_set_rf_reg(dm, (enum rf_path)path,
609 RF_0x3f, MASK20BITS, 0xdefce);
610 odm_set_rf_reg(dm, (enum rf_path)path,
611 RF_0xef, BIT(19), 0x0);
612 } else {
613 odm_set_rf_reg(dm, (enum rf_path)path,
614 RF_0xef, BIT(19), 0x1);
615 odm_set_rf_reg(dm, (enum rf_path)path,
616 RF_0x33, MASK20BITS, 0x00026);
617 odm_set_rf_reg(dm, (enum rf_path)path,
618 RF_0x3e, MASK20BITS, 0x00037);
619 odm_set_rf_reg(dm, (enum rf_path)path,
620 RF_0x3f, MASK20BITS, 0x5efce);
621 odm_set_rf_reg(dm, (enum rf_path)path,
622 RF_0xef, BIT(19), 0x0);
623 }
624 }
625 }
626
void _iqk_configure_macbb_8822b(struct dm_struct *dm)
628 {
629 /*MACBB register setting*/
630 odm_write_1byte(dm, 0x522, 0x7f);
631 odm_set_bb_reg(dm, R_0x550, BIT(11) | BIT(3), 0x0);
632 /*0x90c[15]=1: dac_buf reset selection*/
633 odm_set_bb_reg(dm, R_0x90c, BIT(15), 0x1);
634 /*0xc94[0]=1, 0xe94[0]=1: Let tx from IQK*/
635 odm_set_bb_reg(dm, R_0xc94, BIT(0), 0x1);
636 odm_set_bb_reg(dm, R_0xe94, BIT(0), 0x1);
637 odm_set_bb_reg(dm, R_0xc94, (BIT(11) | BIT(10)), 0x1);
638 odm_set_bb_reg(dm, R_0xe94, (BIT(11) | BIT(10)), 0x1);
639 /* 3-wire off*/
640 odm_write_4byte(dm, 0xc00, 0x00000004);
641 odm_write_4byte(dm, 0xe00, 0x00000004);
642 /*disable PMAC*/
643 odm_set_bb_reg(dm, R_0xb00, BIT(8), 0x0);
644 /*disable CCK block*/
645 odm_set_bb_reg(dm, R_0x808, BIT(28), 0x0);
646 /*disable OFDM CCA*/
647 odm_set_bb_reg(dm, R_0x838, BIT(3) | BIT(2) | BIT(1), 0x7);
648 #if 0
649 /*RF_DBG(dm, DBG_RF_IQK, "[IQK]Set MACBB setting for IQK!!!!\n");*/
650 #endif
651 }
652
void _iqk_lok_setting_8822b(struct dm_struct *dm, u8 path)
654 {
655 odm_write_4byte(dm, 0x1b00, 0xf8000008 | path << 1);
656 odm_write_4byte(dm, 0x1bcc, 0x9);
657 odm_write_1byte(dm, 0x1b23, 0x00);
658
659 switch (*dm->band_type) {
660 case ODM_BAND_2_4G:
661 odm_write_1byte(dm, 0x1b2b, 0x00);
662 odm_set_rf_reg(dm, (enum rf_path)path,
663 RF_0x56, MASK20BITS, 0x50df2);
664 odm_set_rf_reg(dm, (enum rf_path)path,
665 RF_0x8f, MASK20BITS, 0xadc00);
666 /* WE_LUT_TX_LOK*/
667 odm_set_rf_reg(dm, (enum rf_path)path,
668 RF_0xef, BIT(4), 0x1);
669 odm_set_rf_reg(dm, (enum rf_path)path,
670 RF_0x33, BIT(1) | BIT(0), 0x0);
671 break;
672 case ODM_BAND_5G:
673 odm_write_1byte(dm, 0x1b2b, 0x80);
674 odm_set_rf_reg(dm, (enum rf_path)path,
675 RF_0x56, MASK20BITS, 0x5086c);
676 odm_set_rf_reg(dm, (enum rf_path)path,
677 RF_0x8f, MASK20BITS, 0xa9c00);
678 /* WE_LUT_TX_LOK*/
679 odm_set_rf_reg(dm, (enum rf_path)path, RF_0xef, BIT(4), 0x1);
680 odm_set_rf_reg(dm, (enum rf_path)path, RF_0x33,
681 BIT(1) | BIT(0), 0x1);
682 break;
683 }
684 #if 0
685 /* RF_DBG(dm, DBG_RF_IQK, "[IQK]Set LOK setting!!!!\n");*/
686 #endif
687 }
688
void _iqk_txk_setting_8822b(struct dm_struct *dm, u8 path)
690 {
691 odm_write_4byte(dm, 0x1b00, 0xf8000008 | path << 1);
692 odm_write_4byte(dm, 0x1bcc, 0x9);
693 odm_write_4byte(dm, 0x1b20, 0x01440008);
694
695 if (path == 0x0)
696 odm_write_4byte(dm, 0x1b00, 0xf800000a);
697 else
698 odm_write_4byte(dm, 0x1b00, 0xf8000008);
699 odm_write_4byte(dm, 0x1bcc, 0x3f);
700
701 switch (*dm->band_type) {
702 case ODM_BAND_2_4G:
703 odm_set_rf_reg(dm, (enum rf_path)path,
704 RF_0x56, MASK20BITS, 0x50df2);
705 odm_set_rf_reg(dm, (enum rf_path)path,
706 RF_0x8f, MASK20BITS, 0xadc00);
707 odm_write_1byte(dm, 0x1b2b, 0x00);
708 break;
709 case ODM_BAND_5G:
710 odm_set_rf_reg(dm, (enum rf_path)path,
711 RF_0x56, MASK20BITS, 0x500ef);
712 odm_set_rf_reg(dm, (enum rf_path)path,
713 RF_0x8f, MASK20BITS, 0xa9c00);
714 odm_write_1byte(dm, 0x1b2b, 0x80);
715 break;
716 }
717 #if 0
718 /*RF_DBG(dm, DBG_RF_IQK, "[IQK]Set TXK setting!!!!\n");*/
719 #endif
720 }
721
void _iqk_rxk1_setting_8822b(struct dm_struct *dm, u8 path)
723 {
724 odm_write_4byte(dm, 0x1b00, 0xf8000008 | path << 1);
725
726 switch (*dm->band_type) {
727 case ODM_BAND_2_4G:
728 odm_write_1byte(dm, 0x1bcc, 0x9);
729 odm_write_1byte(dm, 0x1b2b, 0x00);
730 odm_write_4byte(dm, 0x1b20, 0x01450008);
731 odm_write_4byte(dm, 0x1b24, 0x01460c88);
732 odm_set_rf_reg(dm, (enum rf_path)path,
733 RF_0x56, MASK20BITS, 0x510e0);
734 odm_set_rf_reg(dm, (enum rf_path)path,
735 RF_0x8f, MASK20BITS, 0xacc00);
736 break;
737 case ODM_BAND_5G:
738 odm_write_1byte(dm, 0x1bcc, 0x09);
739 odm_write_1byte(dm, 0x1b2b, 0x80);
740 odm_write_4byte(dm, 0x1b20, 0x00850008);
741 odm_write_4byte(dm, 0x1b24, 0x00460048);
742 odm_set_rf_reg(dm, (enum rf_path)path,
743 RF_0x56, MASK20BITS, 0x510e0);
744 odm_set_rf_reg(dm, (enum rf_path)path,
745 RF_0x8f, MASK20BITS, 0xadc00);
746 break;
747 }
748 #if 0
749 /*RF_DBG(dm, DBG_RF_IQK, "[IQK]Set RXK setting!!!!\n");*/
750 #endif
751 }
752
void _iqk_rxk2_setting_8822b(struct dm_struct *dm, u8 path, boolean is_gs)
754 {
755 struct dm_iqk_info *iqk = &dm->IQK_info;
756
757 odm_write_4byte(dm, 0x1b00, 0xf8000008 | path << 1);
758
759 switch (*dm->band_type) {
760 case ODM_BAND_2_4G:
761 if (is_gs)
762 iqk->tmp1bcc = 0x12;
763 odm_write_1byte(dm, 0x1bcc, iqk->tmp1bcc);
764 odm_write_1byte(dm, 0x1b2b, 0x00);
765 odm_write_4byte(dm, 0x1b20, 0x01450008);
766 odm_write_4byte(dm, 0x1b24, 0x01460848);
767 odm_set_rf_reg(dm, (enum rf_path)path,
768 RF_0x56, MASK20BITS, 0x510e0);
769 odm_set_rf_reg(dm, (enum rf_path)path,
770 RF_0x8f, MASK20BITS, 0xa9c00);
771 break;
772 case ODM_BAND_5G:
773 if (is_gs) {
774 if (path == RF_PATH_A)
775 iqk->tmp1bcc = 0x12;
776 else
777 iqk->tmp1bcc = 0x09;
778 }
779 odm_write_1byte(dm, 0x1bcc, iqk->tmp1bcc);
780 odm_write_1byte(dm, 0x1b2b, 0x80);
781 odm_write_4byte(dm, 0x1b20, 0x00850008);
782 odm_write_4byte(dm, 0x1b24, 0x00460848);
783 odm_set_rf_reg(dm, (enum rf_path)path,
784 RF_0x56, MASK20BITS, 0x51060);
785 odm_set_rf_reg(dm, (enum rf_path)path,
786 RF_0x8f, MASK20BITS, 0xa9c00);
787 break;
788 }
789 #if 0
790 /* RF_DBG(dm, DBG_RF_IQK, "[IQK]Set RXK setting!!!!\n");*/
791 #endif
792 }
793
void halrf_iqk_set_rf0x8(struct dm_struct *dm, u8 path)
795 {
796 u16 c = 0x0;
797
798 while (c < 30000) {
799 odm_set_rf_reg(dm, (enum rf_path)path,
800 RF_0xef, MASK20BITS, 0x0);
801 odm_set_rf_reg(dm, (enum rf_path)path,
802 RF_0x8, MASK20BITS, 0x0);
803 if (odm_get_rf_reg(dm, (enum rf_path)path, RF_0x8, MASK20BITS)
804 == 0x0)
805 break;
806 c++;
807 }
808 }
809
void halrf_iqk_check_if_reload(struct dm_struct *dm)
811 {
812 struct dm_iqk_info *iqk = &dm->IQK_info;
813
814 iqk->is_reload = (boolean)odm_get_bb_reg(dm, R_0x1bf0, BIT(16));
815 }
816
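/* Poll RF reg 0x8 for the NCTL "done" token 0x12345 (up to ~50 ms).
 * For LOK (cmd == 0) reaching the token means success; for IQK commands
 * the pass/fail flag is taken from 0x1b08[26].
 */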
817 boolean
_iqk_check_cal_8822b(struct dm_struct *dm, u8 path, u8 cmd)
819 {
820 boolean notready = true, fail = true;
821 u32 delay_count = 0x0;
822
823 while (notready) {
824 if (odm_get_rf_reg(dm, (enum rf_path)path, RF_0x8, MASK20BITS)
825 == 0x12345) {
826 if (cmd == 0x0) /*LOK*/
827 fail = false;
828 else
829 fail = (boolean)
830 odm_get_bb_reg(dm, R_0x1b08, BIT(26));
831 notready = false;
832 } else {
833 ODM_delay_ms(1);
834 delay_count++;
835 }
836
837 if (delay_count >= 50) {
838 fail = true;
839 RF_DBG(dm, DBG_RF_IQK, "[IQK]IQK timeout!!!\n");
840 break;
841 }
842 }
843 halrf_iqk_set_rf0x8(dm, path);
844 RF_DBG(dm, DBG_RF_IQK, "[IQK]delay count = 0x%x!!!\n", delay_count);
845 return fail;
846 }
847
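/* RX gain search. Step RXIQK1 only triggers the gain-search command; step
 * RXIQK2 additionally inspects the resulting RF gain (RF 0x0) and nudges
 * the LNA index and the IQ-mux setting (0x1bcc) until the BB gain index
 * lands inside the target window, returning 'fail' until it does.
 */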
848 boolean
_iqk_rxk_gsearch_fail_8822b(struct dm_struct *dm, u8 path, u8 step)
850 {
851 struct dm_iqk_info *iqk = &dm->IQK_info;
852 boolean fail = true;
853 u32 IQK_CMD = 0x0, rf_reg0, tmp, bb_idx;
854 u8 IQMUX[4] = {0x9, 0x12, 0x1b, 0x24};
855 u8 idx;
856
857 if (step == RXIQK1) {
858 RF_DBG(dm, DBG_RF_IQK,
859 "[IQK]============ S%d RXIQK GainSearch ============\n",
860 path);
861 IQK_CMD = 0xf8000208 | (1 << (path + 4));
862 RF_DBG(dm, DBG_RF_IQK, "[IQK]S%d GS%d_Trigger = 0x%x\n", path,
863 step, IQK_CMD);
_iqk_ltec_write_8822b(dm, 0x38, 0xffff, 0x7700);
865 odm_write_4byte(dm, 0x1b00, IQK_CMD);
866 odm_write_4byte(dm, 0x1b00, IQK_CMD + 0x1);
867 ODM_delay_ms(GS_delay_8822B);
868 fail = _iqk_check_cal_8822b(dm, path, 0x1);
869 _iqk_ltec_write_8822b(dm, 0x38, MASKDWORD, iqk->tmp_gntwl);
870 } else if (step == RXIQK2) {
871 for (idx = 0; idx < 4; idx++) {
872 if (iqk->tmp1bcc == IQMUX[idx])
873 break;
874 }
875 odm_write_4byte(dm, 0x1b00, 0xf8000008 | path << 1);
876 odm_write_4byte(dm, 0x1bcc, iqk->tmp1bcc);
877
878 IQK_CMD = 0xf8000308 | (1 << (path + 4));
879 RF_DBG(dm, DBG_RF_IQK, "[IQK]S%d GS%d_Trigger = 0x%x\n", path,
880 step, IQK_CMD);
881
_iqk_ltec_write_8822b(dm, 0x38, 0xffff, 0x7700);
883 odm_write_4byte(dm, 0x1b00, IQK_CMD);
884 odm_write_4byte(dm, 0x1b00, IQK_CMD + 0x1);
885 ODM_delay_ms(GS_delay_8822B);
886 fail = _iqk_check_cal_8822b(dm, path, 0x1);
887 _iqk_ltec_write_8822b(dm, 0x38, MASKDWORD, iqk->tmp_gntwl);
888
889 rf_reg0 = odm_get_rf_reg(dm, (enum rf_path)path,
890 RF_0x0, MASK20BITS);
891 odm_write_4byte(dm, 0x1b00, 0xf8000008 | path << 1);
892 RF_DBG(dm, DBG_RF_IQK,
893 "[IQK]S%d RF0x0=0x%x tmp1bcc=0x%x idx=%d 0x1b3c=0x%x\n",
894 path, rf_reg0, iqk->tmp1bcc, idx,
895 odm_read_4byte(dm, 0x1b3c));
896 tmp = (rf_reg0 & 0x1fe0) >> 5;
897 iqk->lna_idx = tmp >> 5;
898 bb_idx = tmp & 0x1f;
899
900 if (bb_idx == 0x1) {
901 if (iqk->lna_idx != 0x0)
902 iqk->lna_idx--;
903 else if (idx != 3)
904 idx++;
905 else
906 iqk->isbnd = true;
907 fail = true;
908 } else if (bb_idx == 0xa) {
909 if (idx != 0)
910 idx--;
911 else if (iqk->lna_idx != 0x7)
912 iqk->lna_idx++;
913 else
914 iqk->isbnd = true;
915 fail = true;
916 } else {
917 fail = false;
918 }
919
920 if (iqk->isbnd)
921 fail = false;
922
923 iqk->tmp1bcc = IQMUX[idx];
924
925 if (fail) {
926 odm_write_4byte(dm, 0x1b00, 0xf8000008 | path << 1);
927 tmp = (odm_read_4byte(dm, 0x1b24) & 0xffffe3ff) |
928 (iqk->lna_idx << 10);
929 odm_write_4byte(dm, 0x1b24, tmp);
930 }
931 }
932 return fail;
933 }
934
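/* One-shot LOK trigger for one path; on success the resulting IDAC value
 * (RF 0x58) is backed up via _iqk_backup_iqk_8822b(dm, 0x1, path).
 */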
935 boolean
_lok_one_shot_8822b(void *dm_void, u8 path)
937 {
938 struct dm_struct *dm = (struct dm_struct *)dm_void;
939 struct dm_iqk_info *iqk = &dm->IQK_info;
940 boolean LOK_notready = false;
941 u32 LOK_temp = 0;
942 u32 IQK_CMD = 0x0;
943
944 RF_DBG(dm, DBG_RF_IQK, "[IQK]==========S%d LOK ==========\n", path);
945 IQK_CMD = 0xf8000008 | (1 << (4 + path));
946 RF_DBG(dm, DBG_RF_IQK, "[IQK]LOK_Trigger = 0x%x\n", IQK_CMD);
947
_iqk_ltec_write_8822b(dm, 0x38, 0xffff, 0x7700);
949 odm_write_4byte(dm, 0x1b00, IQK_CMD);
950 odm_write_4byte(dm, 0x1b00, IQK_CMD + 1);
951 /*LOK: CMD ID = 0 {0xf8000018, 0xf8000028}*/
952 /*LOK: CMD ID = 0 {0xf8000019, 0xf8000029}*/
953 ODM_delay_ms(LOK_delay_8822B);
954 LOK_notready = _iqk_check_cal_8822b(dm, path, 0x0);
955 _iqk_ltec_write_8822b(dm, 0x38, MASKDWORD, iqk->tmp_gntwl);
956
957 if (!LOK_notready)
958 _iqk_backup_iqk_8822b(dm, 0x1, path);
959 if (DBG_RF_IQK) {
960 if (!LOK_notready) {
961 LOK_temp = odm_get_rf_reg(dm, (enum rf_path)path,
962 RF_0x58, MASK20BITS);
963 RF_DBG(dm, DBG_RF_IQK, "[IQK]0x58 = 0x%x\n", LOK_temp);
964 } else {
965 RF_DBG(dm, DBG_RF_IQK, "[IQK]==>S%d LOK Fail!!!\n",
966 path);
967 }
968 }
969 iqk->lok_fail[path] = LOK_notready;
970 return LOK_notready;
971 }
972
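/* One-shot wideband TX/RX IQK trigger. The NCTL command encodes bandwidth
 * and path; on success the result is backed up and applied through
 * 0xc94/0xe94, on failure the corresponding apply bit is cleared.
 */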
973 boolean
_iqk_one_shot_8822b(void *dm_void, u8 path, u8 idx)
975 {
976 struct dm_struct *dm = (struct dm_struct *)dm_void;
977 struct dm_iqk_info *iqk = &dm->IQK_info;
978 u8 delay_count = 0;
979 boolean fail = true;
980 u32 IQK_CMD = 0x0, tmp;
981 u16 iqk_apply[2] = {0xc94, 0xe94};
982
983 if (idx == TXIQK)
984 RF_DBG(dm, DBG_RF_IQK,
985 "[IQK]============ S%d WBTXIQK ============\n", path);
986 else if (idx == RXIQK1)
987 RF_DBG(dm, DBG_RF_IQK,
988 "[IQK]============ S%d WBRXIQK STEP1============\n",
989 path);
990 else
991 RF_DBG(dm, DBG_RF_IQK,
992 "[IQK]============ S%d WBRXIQK STEP2============\n",
993 path);
994
995 if (idx == TXIQK) {
996 IQK_CMD = 0xf8000008 |
997 ((*dm->band_width + 4) << 8) | (1 << (path + 4));
998 RF_DBG(dm, DBG_RF_IQK, "[IQK]TXK_Trigger = 0x%x\n", IQK_CMD);
999 /*{0xf8000418, 0xf800042a} ==> 20 WBTXK (CMD = 4)*/
1000 /*{0xf8000518, 0xf800052a} ==> 40 WBTXK (CMD = 5)*/
1001 /*{0xf8000618, 0xf800062a} ==> 80 WBTXK (CMD = 6)*/
1002 } else if (idx == RXIQK1) {
1003 if (*dm->band_width == 2)
1004 IQK_CMD = 0xf8000808 | (1 << (path + 4));
1005 else
1006 IQK_CMD = 0xf8000708 | (1 << (path + 4));
1007 RF_DBG(dm, DBG_RF_IQK, "[IQK]RXK1_Trigger = 0x%x\n", IQK_CMD);
1008 /*{0xf8000718, 0xf800072a} ==> 20 WBTXK (CMD = 7)*/
1009 /*{0xf8000718, 0xf800072a} ==> 40 WBTXK (CMD = 7)*/
1010 /*{0xf8000818, 0xf800082a} ==> 80 WBTXK (CMD = 8)*/
1011 } else if (idx == RXIQK2) {
1012 IQK_CMD = 0xf8000008 |
1013 ((*dm->band_width + 9) << 8) | (1 << (path + 4));
1014 RF_DBG(dm, DBG_RF_IQK, "[IQK]RXK2_Trigger = 0x%x\n", IQK_CMD);
1015 /*{0xf8000918, 0xf800092a} ==> 20 WBRXK (CMD = 9)*/
1016 /*{0xf8000a18, 0xf8000a2a} ==> 40 WBRXK (CMD = 10)*/
1017 /*{0xf8000b18, 0xf8000b2a} ==> 80 WBRXK (CMD = 11)*/
1018 odm_write_4byte(dm, 0x1b00, 0xf8000008 | path << 1);
1019 tmp = (odm_read_4byte(dm, 0x1b24) & 0xffffe3ff) |
1020 ((iqk->lna_idx & 0x7) << 10);
1021 odm_write_4byte(dm, 0x1b24, tmp);
1022 }
_iqk_ltec_write_8822b(dm, 0x38, 0xffff, 0x7700);
1024 odm_write_4byte(dm, 0x1b00, IQK_CMD);
1025 odm_write_4byte(dm, 0x1b00, IQK_CMD + 0x1);
1026 ODM_delay_ms(WBIQK_delay_8822B);
1027 fail = _iqk_check_cal_8822b(dm, path, 0x1);
1028 _iqk_ltec_write_8822b(dm, 0x38, MASKDWORD, iqk->tmp_gntwl);
1029
1030 if (dm->debug_components & DBG_RF_IQK) {
1031 odm_write_4byte(dm, 0x1b00, 0xf8000008 | path << 1);
1032 RF_DBG(dm, DBG_RF_IQK,
1033 "[IQK]S%d ==> 0x1b00 = 0x%x, 0x1b08 = 0x%x\n", path,
1034 odm_read_4byte(dm, 0x1b00), odm_read_4byte(dm, 0x1b08));
1035 RF_DBG(dm, DBG_RF_IQK, "[IQK]S%d ==> delay_count = 0x%x\n",
1036 path, delay_count);
1037 if (idx != TXIQK)
1038 RF_DBG(dm, DBG_RF_IQK,
1039 "[IQK]S%d ==> RF0x0 = 0x%x, RF0x56 = 0x%x\n",
1040 path,
1041 odm_get_rf_reg(dm, (enum rf_path)path, RF_0x0,
1042 MASK20BITS),
1043 odm_get_rf_reg(dm, (enum rf_path)path, RF_0x56,
1044 MASK20BITS));
1045 }
1046
1047 odm_write_4byte(dm, 0x1b00, 0xf8000008 | path << 1);
1048
1049 if (idx == TXIQK) {
1050 if (fail)
1051 odm_set_bb_reg(dm, iqk_apply[path], BIT(0), 0x0);
1052 else
1053 _iqk_backup_iqk_8822b(dm, 0x2, path);
1054 }
1055
1056 if (idx == RXIQK2) {
1057 iqk->rxiqk_agc[0][path] =
1058 (u16)(((odm_get_rf_reg(dm, (enum rf_path)path,
1059 RF_0x0, MASK20BITS) >> 5) & 0xff) |
1060 (iqk->tmp1bcc << 8));
1061
1062 odm_write_4byte(dm, 0x1b38, 0x20000000);
1063
1064 if (fail)
1065 odm_set_bb_reg(dm, iqk_apply[path],
1066 (BIT(11) | BIT(10)), 0x0);
1067 else
1068 _iqk_backup_iqk_8822b(dm, 0x3, path);
1069 }
1070
1071 if (idx == TXIQK)
1072 iqk->iqk_fail_report[0][path][TXIQK] = fail;
1073 else
1074 iqk->iqk_fail_report[0][path][RXIQK] = fail;
1075
1076 return fail;
1077 }
1078
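/* RX IQK state machine for one path: steps 1/2 run the RXK1/RXK2 gain
 * searches and steps 3/4 the actual one-shot calibrations, each with its
 * own retry limit; rxiqk_step is forced to 5 on an unrecoverable failure.
 */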
1079 boolean
_iqk_rx_iqk_by_path_8822b(void *dm_void, u8 path)
1081 {
1082 struct dm_struct *dm = (struct dm_struct *)dm_void;
1083 struct dm_iqk_info *iqk = &dm->IQK_info;
1084 boolean KFAIL = true, gonext, gs_limit;
1085
1086 #if 1
1087 switch (iqk->rxiqk_step) {
1088 case 1: /*gain search_RXK1*/
1089 _iqk_rxk1_setting_8822b(dm, path);
1090 gonext = false;
1091 while (1) {
1092 KFAIL = _iqk_rxk_gsearch_fail_8822b(dm, path, RXIQK1);
1093 if (KFAIL && iqk->gs_retry_count[0][path][0] < 2) {
1094 iqk->gs_retry_count[0][path][0]++;
1095 } else if (KFAIL) {
1096 iqk->rxiqk_fail_code[0][path] = 0;
1097 iqk->rxiqk_step = 5;
1098 gonext = true;
1099 } else {
1100 iqk->rxiqk_step++;
1101 gonext = true;
1102 }
1103 if (gonext)
1104 break;
1105 }
1106 halrf_iqk_xym_read(dm, path, 0x2);
1107 break;
1108 case 2: /*gain search_RXK2*/
1109 _iqk_rxk2_setting_8822b(dm, path, true);
1110 iqk->isbnd = false;
1111 while (1) {
1112 KFAIL = _iqk_rxk_gsearch_fail_8822b(dm, path, RXIQK2);
1113 gs_limit = (iqk->gs_retry_count[0][path][1] <
1114 rxiqk_gs_limit);
1115 if (KFAIL && gs_limit) {
1116 iqk->gs_retry_count[0][path][1]++;
1117 } else {
1118 iqk->rxiqk_step++;
1119 break;
1120 }
1121 }
1122 halrf_iqk_xym_read(dm, path, 0x3);
1123 break;
1124 case 3: /*RXK1*/
1125 _iqk_rxk1_setting_8822b(dm, path);
1126 gonext = false;
1127 while (1) {
1128 KFAIL = _iqk_one_shot_8822b(dm, path, RXIQK1);
1129 if (KFAIL && iqk->retry_count[0][path][RXIQK1] < 2) {
1130 iqk->retry_count[0][path][RXIQK1]++;
1131 } else if (KFAIL) {
1132 iqk->rxiqk_fail_code[0][path] = 1;
1133 iqk->rxiqk_step = 5;
1134 gonext = true;
1135 } else {
1136 iqk->rxiqk_step++;
1137 gonext = true;
1138 }
1139 if (gonext)
1140 break;
1141 }
1142 halrf_iqk_xym_read(dm, path, 0x4);
1143 break;
1144 case 4: /*RXK2*/
1145 _iqk_rxk2_setting_8822b(dm, path, false);
1146 gonext = false;
1147 while (1) {
1148 KFAIL = _iqk_one_shot_8822b(dm, path, RXIQK2);
1149 if (KFAIL && iqk->retry_count[0][path][RXIQK2] < 2) {
1150 iqk->retry_count[0][path][RXIQK2]++;
1151 } else if (KFAIL) {
1152 iqk->rxiqk_fail_code[0][path] = 2;
1153 iqk->rxiqk_step = 5;
1154 gonext = true;
1155 } else {
1156 iqk->rxiqk_step++;
1157 gonext = true;
1158 }
1159 if (gonext)
1160 break;
1161 }
1162 halrf_iqk_xym_read(dm, path, 0x0);
1163 break;
1164 }
1165 return KFAIL;
1166 #endif
1167 }
1168
void _iqk_iqk_by_path_8822b_subfunction(void *dm_void, u8 rf_path)
1170 {
1171 struct dm_struct *dm = (struct dm_struct *)dm_void;
1172 struct dm_iqk_info *iqk = &dm->IQK_info;
1173 boolean KFAIL = true;
1174
1175 while (1) {
1176 KFAIL = _iqk_rx_iqk_by_path_8822b(dm, rf_path);
1177 RF_DBG(dm, DBG_RF_IQK,
1178 "[IQK]S%dRXK KFail = 0x%x\n", rf_path, KFAIL);
1179 if (iqk->rxiqk_step == 5) {
1180 dm->rf_calibrate_info.iqk_step++;
1181 iqk->rxiqk_step = 1;
1182 if (KFAIL)
1183 RF_DBG(dm, DBG_RF_IQK,
1184 "[IQK]S%dRXK fail code: %d!!!\n",
1185 rf_path,
1186 iqk->rxiqk_fail_code[0][rf_path]);
1187 break;
1188 }
1189 }
1190 iqk->kcount++;
1191 }
1192
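/* Main IQK state machine: steps 1-2 run LOK on S0/S1, steps 3-4 TX IQK on
 * S0/S1, steps 5-6 RX IQK on S0/S1, and step 7 prints the summary. With
 * segment_iqk set, the loop yields after kcount_limit calibrations per pass.
 */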
void _iqk_iqk_by_path_8822b(void *dm_void, boolean segment_iqk)
1194 {
1195 struct dm_struct *dm = (struct dm_struct *)dm_void;
1196 struct dm_iqk_info *iqk = &dm->IQK_info;
1197 boolean KFAIL = true;
1198 u8 i, kcount_limit;
1199
1200 #if 0
1201 /* RF_DBG(dm, DBG_RF_IQK, "[IQK]iqk_step = 0x%x\n", dm->rf_calibrate_info.iqk_step); */
1202 #endif
1203
1204 if (*dm->band_width == 2)
1205 kcount_limit = kcount_limit_80m;
1206 else
1207 kcount_limit = kcount_limit_others;
1208
1209 while (1) {
1210 #if 1
1211 switch (dm->rf_calibrate_info.iqk_step) {
1212 case 1: /*S0 LOK*/
1213 #if 1
1214 _iqk_lok_setting_8822b(dm, RF_PATH_A);
1215 _lok_one_shot_8822b(dm, RF_PATH_A);
1216 #endif
1217 dm->rf_calibrate_info.iqk_step++;
1218 break;
1219 case 2: /*S1 LOK*/
1220 #if 1
1221 _iqk_lok_setting_8822b(dm, RF_PATH_B);
1222 _lok_one_shot_8822b(dm, RF_PATH_B);
1223 #endif
1224 dm->rf_calibrate_info.iqk_step++;
1225 break;
1226 case 3: /*S0 TXIQK*/
1227 #if 1
1228 _iqk_txk_setting_8822b(dm, RF_PATH_A);
1229 KFAIL = _iqk_one_shot_8822b(dm, RF_PATH_A, TXIQK);
1230 iqk->kcount++;
1231 RF_DBG(dm, DBG_RF_IQK, "[IQK]S0TXK KFail = 0x%x\n",
1232 KFAIL);
1233
1234 if (KFAIL && iqk->retry_count[0][RF_PATH_A][TXIQK] < 3)
1235 iqk->retry_count[0][RF_PATH_A][TXIQK]++;
1236 else
1237 #endif
1238 dm->rf_calibrate_info.iqk_step++;
1239 halrf_iqk_xym_read(dm, RF_PATH_A, 0x1);
1240 break;
1241 case 4: /*S1 TXIQK*/
1242 #if 1
1243 _iqk_txk_setting_8822b(dm, RF_PATH_B);
1244 KFAIL = _iqk_one_shot_8822b(dm, RF_PATH_B, TXIQK);
1245 iqk->kcount++;
1246 RF_DBG(dm, DBG_RF_IQK, "[IQK]S1TXK KFail = 0x%x\n",
1247 KFAIL);
1248 if (KFAIL && iqk->retry_count[0][RF_PATH_B][TXIQK] < 3)
1249 iqk->retry_count[0][RF_PATH_B][TXIQK]++;
1250 else
1251 #endif
1252 dm->rf_calibrate_info.iqk_step++;
1253 halrf_iqk_xym_read(dm, RF_PATH_B, 0x1);
1254 break;
1255 case 5: /*S0 RXIQK*/
1256 _iqk_iqk_by_path_8822b_subfunction(dm, RF_PATH_A);
1257 break;
1258 case 6: /*S1 RXIQK*/
1259 _iqk_iqk_by_path_8822b_subfunction(dm, RF_PATH_B);
1260 break;
1261 }
1262
1263 if (dm->rf_calibrate_info.iqk_step == 7) {
1264 RF_DBG(dm, DBG_RF_IQK,
1265 "[IQK]==========LOK summary ==========\n");
1266 RF_DBG(dm, DBG_RF_IQK,
1267 "[IQK]A_LOK_notready = %d B_LOK_notready = %d\n",
1268 iqk->lok_fail[RF_PATH_A],
1269 iqk->lok_fail[RF_PATH_B]);
1270 RF_DBG(dm, DBG_RF_IQK,
1271 "[IQK]==========IQK summary ==========\n");
1272 RF_DBG(dm, DBG_RF_IQK,
1273 "[IQK]A_TXIQK_fail = %d, B_TXIQK_fail = %d\n",
1274 iqk->iqk_fail_report[0][RF_PATH_A][TXIQK],
1275 iqk->iqk_fail_report[0][RF_PATH_B][TXIQK]);
1276 RF_DBG(dm, DBG_RF_IQK,
1277 "[IQK]A_RXIQK_fail = %d, B_RXIQK_fail = %d\n",
1278 iqk->iqk_fail_report[0][RF_PATH_A][RXIQK],
1279 iqk->iqk_fail_report[0][RF_PATH_B][RXIQK]);
1280 RF_DBG(dm, DBG_RF_IQK,
1281 "[IQK]A_TXIQK_retry = %d, B_TXIQK_retry = %d\n",
1282 iqk->retry_count[0][RF_PATH_A][TXIQK],
1283 iqk->retry_count[0][RF_PATH_B][TXIQK]);
1284 RF_DBG(dm, DBG_RF_IQK,
1285 "[IQK]A_RXK1_retry = %d A_RXK2_retry = %d\n",
1286 iqk->retry_count[0][RF_PATH_A][RXIQK1],
1287 iqk->retry_count[0][RF_PATH_A][RXIQK2]);
1288 RF_DBG(dm, DBG_RF_IQK,
1289 "[IQK]B_RXK1_retry = %d B_RXK2_retry = %d\n",
1290 iqk->retry_count[0][RF_PATH_B][RXIQK1],
1291 iqk->retry_count[0][RF_PATH_B][RXIQK2]);
1292 RF_DBG(dm, DBG_RF_IQK,
1293 "[IQK]A_GS1_retry = %d A_GS2_retry = %d\n",
1294 iqk->gs_retry_count[0][RF_PATH_A][0],
1295 iqk->gs_retry_count[0][RF_PATH_A][1]);
1296 RF_DBG(dm, DBG_RF_IQK,
1297 "[IQK]B_GS1_retry = %d B_GS2_retry = %d\n",
1298 iqk->gs_retry_count[0][RF_PATH_B][0],
1299 iqk->gs_retry_count[0][RF_PATH_B][1]);
1300 for (i = 0; i < 2; i++) {
1301 odm_write_4byte(dm, 0x1b00,
1302 0xf8000008 | i << 1);
1303 odm_write_4byte(dm, 0x1b2c, 0x7);
1304 odm_write_4byte(dm, 0x1bcc, 0x0);
1305 odm_write_4byte(dm, 0x1b38, 0x20000000);
1306 }
1307 break;
1308 }
1309
1310 if (segment_iqk && iqk->kcount == kcount_limit)
1311 break;
1312 #endif
1313 }
1314 }
1315
void _iqk_start_iqk_8822b(struct dm_struct *dm, boolean segment_iqk)
1317 {
1318 u32 tmp;
1319
1320 /*GNT_WL = 1*/
1321 tmp = odm_get_rf_reg(dm, RF_PATH_A, RF_0x1, MASK20BITS);
1322 tmp = tmp | BIT(5) | BIT(0);
1323 odm_set_rf_reg(dm, RF_PATH_A, RF_0x1, MASK20BITS, tmp);
1324
1325 tmp = odm_get_rf_reg(dm, RF_PATH_B, RF_0x1, MASK20BITS);
1326 tmp = tmp | BIT(5) | BIT(0);
1327 odm_set_rf_reg(dm, RF_PATH_B, RF_0x1, MASK20BITS, tmp);
1328
1329 _iqk_iqk_by_path_8822b(dm, segment_iqk);
1330 }
1331
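/* One-time initialization of the IQK bookkeeping structures, plus the
 * 0x1b10 tolerance setting that is applied on every call.
 */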
void _iq_calibrate_8822b_init(struct dm_struct *dm)
1333 {
1334 struct dm_iqk_info *iqk = &dm->IQK_info;
1335 u8 i, j, k, m;
1336 static boolean firstrun = true;
1337
1338 if (firstrun) {
1339 firstrun = false;
1340 RF_DBG(dm, DBG_RF_IQK,
1341 "[IQK]=====>PHY_IQCalibrate_8822B_Init\n");
1342
1343 for (i = 0; i < SS_8822B; i++) {
1344 for (j = 0; j < 2; j++) {
1345 iqk->lok_fail[i] = true;
1346 iqk->iqk_fail[j][i] = true;
1347 iqk->iqc_matrix[j][i] = 0x20000000;
1348 }
1349 }
1350
1351 for (i = 0; i < 2; i++) {
1352 iqk->iqk_channel[i] = 0x0;
1353
1354 for (j = 0; j < SS_8822B; j++) {
1355 iqk->lok_idac[i][j] = 0x0;
1356 iqk->rxiqk_agc[i][j] = 0x0;
1357 iqk->bypass_iqk[i][j] = 0x0;
1358
1359 for (k = 0; k < 2; k++) {
1360 iqk->iqk_fail_report[i][j][k] = true;
1361 for (m = 0; m < 8; m++) {
1362 iqk->iqk_cfir_real[i][j][k][m]
1363 = 0x0;
1364 iqk->iqk_cfir_imag[i][j][k][m]
1365 = 0x0;
1366 }
1367 }
1368
1369 for (k = 0; k < 3; k++)
1370 iqk->retry_count[i][j][k] = 0x0;
1371 }
1372 }
1373 }
1374 /*parameters init.*/
1375 /*cu_distance (IQK result variation)=111*/
1376 odm_write_4byte(dm, 0x1b10, 0x88011c00);
1377 }
1378
1379 boolean
_iqk_rximr_rxk1_test_8822b(struct dm_struct *dm, u8 path, u32 tone_index)
1381 {
1382 boolean fail = true;
1383 u32 IQK_CMD, reg_1b20, reg_1b24;
1384
1385 odm_write_4byte(dm, 0x1b00, 0xf8000008 | path << 1);
1386 reg_1b20 = (odm_read_4byte(dm, 0x1b20) & 0x000fffff);
1387 odm_write_4byte(dm, 0x1b20, reg_1b20 | ((tone_index & 0xfff) << 20));
1388 reg_1b24 = (odm_read_4byte(dm, 0x1b24) & 0x000fffff);
1389 odm_write_4byte(dm, 0x1b24, reg_1b24 | ((tone_index & 0xfff) << 20));
1390
1391 IQK_CMD = 0xf8000208 | (1 << (path + 4));
1392 odm_write_4byte(dm, 0x1b00, IQK_CMD);
1393 odm_write_4byte(dm, 0x1b00, IQK_CMD + 0x1);
1394
1395 ODM_delay_ms(GS_delay_8822B);
1396 fail = _iqk_check_cal_8822b(dm, path, 0x1);
1397 return fail;
1398 }
1399
u32 _iqk_tximr_selfcheck_8822b(void *dm_void, u8 tone_index, u8 path)
1401 {
1402 u32 tx_ini_power_H[2], tx_ini_power_L[2];
1403 u32 tmp1, tmp2, tmp3, tmp4, tmp5;
1404 u32 IQK_CMD;
1405 u32 tximr = 0x0;
1406 u8 i;
1407
1408 struct dm_struct *dm = (struct dm_struct *)dm_void;
1409 /*backup*/
1410 odm_write_4byte(dm, 0x1b00, 0xf8000008 | path << 1);
1411 odm_write_4byte(dm, 0x1bc8, 0x80000000);
1412 odm_write_4byte(dm, 0x8f8, 0x41400080);
1413 tmp1 = odm_read_4byte(dm, 0x1b0c);
1414 tmp2 = odm_read_4byte(dm, 0x1b14);
1415 tmp3 = odm_read_4byte(dm, 0x1b1c);
1416 tmp4 = odm_read_4byte(dm, 0x1b20);
1417 tmp5 = odm_read_4byte(dm, 0x1b24);
1418 /*setup*/
1419 odm_write_4byte(dm, 0x1b0c, 0x00003000);
1420 odm_write_4byte(dm, 0x1b1c, 0xA2193C32);
1421 odm_write_1byte(dm, 0x1b15, 0x00);
1422 odm_write_4byte(dm, 0x1b20, (u32)(tone_index << 20 | 0x00040008));
1423 odm_write_4byte(dm, 0x1b24, (u32)(tone_index << 20 | 0x00060008));
1424 odm_write_4byte(dm, 0x1b2c, 0x07);
1425 odm_write_4byte(dm, 0x1b38, 0x20000000);
1426 odm_write_4byte(dm, 0x1b3c, 0x20000000);
1427 /* ======derive pwr1========*/
1428 for (i = 0; i < 2; i++) {
1429 odm_write_4byte(dm, 0x1b00, 0xf8000008 | path << 1);
1430 if (i == 0)
1431 odm_write_4byte(dm, 0x1bcc, 0x0f);
1432 else
1433 odm_write_4byte(dm, 0x1bcc, 0x09);
1434 /* One Shot*/
1435 IQK_CMD = 0x00000800;
1436 odm_write_4byte(dm, 0x1b34, IQK_CMD + 1);
1437 odm_write_4byte(dm, 0x1b34, IQK_CMD);
1438 ODM_delay_ms(1);
1439 odm_write_4byte(dm, 0x1bd4, 0x00040001);
1440 tx_ini_power_H[i] = odm_read_4byte(dm, 0x1bfc);
1441 odm_write_4byte(dm, 0x1bd4, 0x000C0001);
1442 tx_ini_power_L[i] = odm_read_4byte(dm, 0x1bfc);
1443 }
1444 /*restore*/
1445 odm_write_4byte(dm, 0x1b00, 0xf8000008 | path << 1);
1446 odm_write_4byte(dm, 0x1b0c, tmp1);
1447 odm_write_4byte(dm, 0x1b14, tmp2);
1448 odm_write_4byte(dm, 0x1b1c, tmp3);
1449 odm_write_4byte(dm, 0x1b20, tmp4);
1450 odm_write_4byte(dm, 0x1b24, tmp5);
1451
1452 if (tx_ini_power_H[1] == tx_ini_power_H[0])
1453 tximr = (3 * (halrf_psd_log2base(tx_ini_power_L[0] << 2) -
1454 halrf_psd_log2base(tx_ini_power_L[1]))) / 100;
1455 else
1456 tximr = 0;
1457 return tximr;
1458 }
1459
void _iqk_start_tximr_test_8822b(struct dm_struct *dm, u8 imr_limit)
1461 {
1462 boolean KFAIL;
1463 u8 path, i, tone_index;
1464 u32 imr_result;
1465
1466 for (path = 0; path < 2; path++) {
1467 _iqk_txk_setting_8822b(dm, path);
1468 KFAIL = _iqk_one_shot_8822b(dm, path, TXIQK);
1469 for (i = 0x0; i < imr_limit; i++) {
1470 tone_index = (u8)(0x08 | i << 4);
1471 imr_result = _iqk_tximr_selfcheck_8822b(dm, tone_index,
1472 path);
1473 RF_DBG(dm, DBG_RF_IQK,
1474 "[IQK]path=%x, toneindex = %x, TXIMR = %d\n",
1475 path, tone_index, imr_result);
1476 }
1477 RF_DBG(dm, DBG_RF_IQK, "\n");
1478 }
1479 }
1480
u32 _iqk_rximr_selfcheck_8822b(void *dm_void, u32 tone_index, u8 path,
1482 u32 tmp1b38)
1483 {
1484 /*[0]: psd tone; [1]: image tone*/
1485 u32 rx_ini_power_H[2], rx_ini_power_L[2];
1486 u32 tmp1, tmp2, tmp3, tmp4, tmp5;
1487 u32 IQK_CMD;
1488 u8 i, count = 0x0;
1489 u32 rximr = 0x0;
1490
1491 struct dm_struct *dm = (struct dm_struct *)dm_void;
1492
1493 /*backup*/
1494 odm_write_4byte(dm, 0x1b00, 0xf8000008 | path << 1);
1495 tmp1 = odm_read_4byte(dm, 0x1b0c);
1496 tmp2 = odm_read_4byte(dm, 0x1b14);
1497 tmp3 = odm_read_4byte(dm, 0x1b1c);
1498 tmp4 = odm_read_4byte(dm, 0x1b20);
1499 tmp5 = odm_read_4byte(dm, 0x1b24);
1500
1501 odm_write_4byte(dm, 0x1b0c, 0x00001000);
1502 odm_write_1byte(dm, 0x1b15, 0x00);
1503 odm_write_4byte(dm, 0x1b1c, 0x82193d31);
1504 odm_write_4byte(dm, 0x1b20, (u32)(tone_index << 20 | 0x00040008));
1505 odm_write_4byte(dm, 0x1b24, (u32)(tone_index << 20 | 0x00060048));
1506 odm_write_4byte(dm, 0x1b2c, 0x07);
1507 odm_write_4byte(dm, 0x1b38, tmp1b38);
1508 odm_write_4byte(dm, 0x1b3c, 0x20000000);
1509
1510 for (i = 0; i < 2; i++) {
1511 if (i == 0)
1512 odm_write_4byte(dm, 0x1b1c, 0x82193d31);
1513 else
1514 odm_write_4byte(dm, 0x1b1c, 0xa2193d31);
1515 IQK_CMD = 0x00000800;
1516 odm_write_4byte(dm, 0x1b34, IQK_CMD + 1);
1517 odm_write_4byte(dm, 0x1b34, IQK_CMD);
1518 ODM_delay_ms(2);
1519 odm_write_1byte(dm, 0x1bd6, 0xb);
1520 while (count < 100) {
1521 count++;
1522 if (odm_get_bb_reg(dm, R_0x1bfc, BIT(1)) == 1)
1523 break;
1524
1525 ODM_delay_ms(1);
1526 }
1527 if (1) {
1528 odm_write_1byte(dm, 0x1bd6, 0x5);
1529 rx_ini_power_H[i] = odm_read_4byte(dm, 0x1bfc);
1530 odm_write_1byte(dm, 0x1bd6, 0xe);
1531 rx_ini_power_L[i] = odm_read_4byte(dm, 0x1bfc);
1532 } else {
1533 rx_ini_power_H[i] = 0x0;
1534 rx_ini_power_L[i] = 0x0;
1535 }
1536 }
1537 /*restore*/
1538 odm_write_4byte(dm, 0x1b0c, tmp1);
1539 odm_write_4byte(dm, 0x1b14, tmp2);
1540 odm_write_4byte(dm, 0x1b1c, tmp3);
1541 odm_write_4byte(dm, 0x1b20, tmp4);
1542 odm_write_4byte(dm, 0x1b24, tmp5);
1543 for (i = 0; i < 2; i++)
1544 rx_ini_power_H[i] = (rx_ini_power_H[i] & 0xf8000000) >> 27;
1545
1546 if (rx_ini_power_H[0] != rx_ini_power_H[1])
1547 switch (rx_ini_power_H[0]) {
1548 case 1:
1549 rx_ini_power_L[0] =
1550 (u32)((rx_ini_power_L[0] >> 1) | 0x80000000);
1551 rx_ini_power_L[1] = (u32)rx_ini_power_L[1] >> 1;
1552 break;
1553 case 2:
1554 rx_ini_power_L[0] =
1555 (u32)((rx_ini_power_L[0] >> 2) | 0x80000000);
1556 rx_ini_power_L[1] = (u32)rx_ini_power_L[1] >> 2;
1557 break;
1558 case 3:
1559 rx_ini_power_L[0] =
1560 (u32)((rx_ini_power_L[0] >> 2) | 0xc0000000);
1561 rx_ini_power_L[1] = (u32)rx_ini_power_L[1] >> 2;
1562 break;
1563 case 4:
1564 rx_ini_power_L[0] =
1565 (u32)((rx_ini_power_L[0] >> 3) | 0x80000000);
1566 rx_ini_power_L[1] = (u32)rx_ini_power_L[1] >> 3;
1567 break;
1568 case 5:
1569 rx_ini_power_L[0] =
1570 (u32)((rx_ini_power_L[0] >> 3) | 0xa0000000);
1571 rx_ini_power_L[1] = (u32)rx_ini_power_L[1] >> 3;
1572 break;
1573 case 6:
1574 rx_ini_power_L[0] =
1575 (u32)((rx_ini_power_L[0] >> 3) | 0xc0000000);
1576 rx_ini_power_L[1] = (u32)rx_ini_power_L[1] >> 3;
1577 break;
1578 case 7:
1579 rx_ini_power_L[0] =
1580 (u32)((rx_ini_power_L[0] >> 3) | 0xe0000000);
1581 rx_ini_power_L[1] = (u32)rx_ini_power_L[1] >> 3;
1582 break;
1583 default:
1584 break;
1585 }
1586 rximr = (u32)(3 * ((halrf_psd_log2base(rx_ini_power_L[0] / 100) -
1587 halrf_psd_log2base(rx_ini_power_L[1] / 100))) / 100);
1588 #if 0
1589 /*
1590 RF_DBG(dm, DBG_RF_IQK, "%-20s: 0x%x, 0x%x, 0x%x, 0x%x,0x%x, tone_index=%x, rximr= %d\n",
1591 (path == 0) ? "PATH A RXIMR ": "PATH B RXIMR",
1592 rx_ini_power_H[0], rx_ini_power_L[0], rx_ini_power_H[1], rx_ini_power_L[1], tmp1bcc, tone_index, rximr);
1593 */
1594 #endif
1595 return rximr;
1596 }
1597
boolean _iqk_get_rxk1_8822b(struct dm_struct *dm, u8 path, u8 imr_limit,
1599 u8 side, u32 temp[][15])
1600 {
1601 struct dm_iqk_info *iqk = &dm->IQK_info;
1602 boolean kfail = true;
1603 u8 i, count = 0;
1604 u32 tone_index;
1605
1606 for (i = 0; i < imr_limit; i++) {
1607 if (side == 0)
1608 tone_index = 0xff8 - (i << 4);
1609 else
1610 tone_index = 0x08 | (i << 4);
1611 while (count < 3) {
1612 _iqk_rxk1_setting_8822b(dm, path);
1613 kfail = _iqk_rximr_rxk1_test_8822b(dm, path,
1614 tone_index);
1615 RF_DBG(dm,
1616 DBG_RF_IQK,
1617 "[IQK]path = %x, kfail = %x\n",
1618 path, kfail);
1619 if (kfail) {
1620 count++;
1621 if (count == 3) {
1622 temp[side][i] = 0x20000000;
1623 RF_DBG(dm,
1624 DBG_RF_IQK,
1625 "[IQK]path = %x",
1626 path);
1627 RF_DBG(dm,
1628 DBG_RF_IQK,
1629 "toneindex = %x rxk1 fail\n",
1630 tone_index);
1631 }
1632 } else {
1633 odm_write_4byte(dm, 0x1b00, 0xf8000008 |
1634 path << 1);
1635 odm_write_4byte(dm, 0x1b1c, 0xa2193c32);
1636 odm_write_4byte(dm, 0x1b14, 0xe5);
1637 odm_write_4byte(dm, 0x1b14, 0x0);
1638 temp[side][i] = odm_read_4byte(dm, 0x1b38);
1639 RF_DBG(dm,
1640 DBG_RF_IQK,
1641 "[IQK]path = 0x%x", path);
1642 RF_DBG(dm,
1643 DBG_RF_IQK,
1644 "[tone_idx = 0x%x", tone_index);
1645 RF_DBG(dm,
1646 DBG_RF_IQK,
1647 "[tmp1b38 = 0x%x\n", temp[side][i]);
1648 break;
1649 }
1650 }
1651 }
1652 return kfail;
1653 }
1654
void _iqk_get_rxk2_8822b(struct dm_struct *dm, u8 path, u8 imr_limit, u8 side,
1656 u32 temp[][15])
1657 {
1658 struct dm_iqk_info *iqk = &dm->IQK_info;
1659 u8 i;
1660 u32 tone_index, imr_result;
1661 char *freq[15] = {
1662 "1.25MHz", "3.75MHz", "6.25MHz", "8.75MHz", "11.25MHz",
1663 "13.75MHz", "16.25MHz", "18.75MHz", "21.25MHz", "23.75MHz",
1664 "26.25MHz", "28.75MHz", "31.25MHz", "33.75MHz", "36.25MHz"};
1665
1666 for (i = 0x0; i < imr_limit; i++) {
1667 if (side == 0)
1668 tone_index = 0xff8 - (i << 4);
1669 else
1670 tone_index = 0x08 | (i << 4);
1671 _iqk_rxk2_setting_8822b(dm, path, false);
1672 imr_result = _iqk_rximr_selfcheck_8822b(dm,
1673 tone_index,
1674 path,
1675 temp[side][i]);
1676 RF_DBG(dm,
1677 DBG_RF_IQK, "[IQK]tone_idx = 0x%5x,", tone_index);
1678 RF_DBG(dm,
1679 DBG_RF_IQK,
1680 "freq =%s%10s,",
1681 (side == 0) ? "-" : " ",
1682 freq[i]);
1683 RF_DBG(dm,
1684 DBG_RF_IQK,
1685 "RXIMR = %5d dB\n", imr_result);
1686 }
1687 }
1688
void _iqk_rximr_test_8822b(struct dm_struct *dm, u8 path, u8 imr_limit)
1690 {
1691 struct dm_iqk_info *iqk = &dm->IQK_info;
1692 boolean kfail;
1693 u8 i, step, count, side;
1694 u32 imr_result = 0, tone_index;
1695 u32 temp = 0, temp1b38[2][15];
1696 u32 cmd = 0xf8000008;
1697
1698 for (step = 1; step < 5; step++) {
1699 count = 0;
1700 switch (step) {
1701 case 1: /*gain search_RXK1*/
1702 _iqk_rxk1_setting_8822b(dm, path);
1703 while (count < 3) {
1704 kfail = _iqk_rxk_gsearch_fail_8822b(dm, path,
1705 RXIQK1);
1706 RF_DBG(dm, DBG_RF_IQK,
1707 "[IQK]path = %x, kfail = %x\n", path,
1708 kfail);
1709 if (kfail) {
1710 count++;
1711 if (count == 3)
1712 step = 5;
1713 } else {
1714 break;
1715 }
1716 }
1717 break;
1718 case 2: /*gain search_RXK2*/
1719 _iqk_rxk2_setting_8822b(dm, path, true);
1720 iqk->isbnd = false;
1721 while (count < 8) {
1722 kfail = _iqk_rxk_gsearch_fail_8822b(dm, path,
1723 RXIQK2);
1724 RF_DBG(dm, DBG_RF_IQK,
1725 "[IQK]path = %x, kfail = %x\n", path,
1726 kfail);
1727 if (kfail) {
1728 count++;
1729 if (count == 8)
1730 step = 5;
1731 } else {
1732 break;
1733 }
1734 }
1735 break;
1736 case 3: /*get RXK1 IQC*/
1737 odm_write_4byte(dm, 0x1b00, cmd | path << 1);
1738 temp = odm_read_4byte(dm, 0x1b1c);
1739 for (side = 0; side < 2; side++) {
1740 kfail = _iqk_get_rxk1_8822b(dm,
1741 path,
1742 imr_limit,
1743 side,
1744 temp1b38);
1745 if (kfail) {
1746 step = 5;
1747 break;
1748 }
1749 }
1750 break;
1751 case 4: /*get RX IMR*/
1752 for (side = 0; side < 2; side++) {
1753 _iqk_get_rxk2_8822b(dm, path, imr_limit, side,
1754 temp1b38);
1755 odm_write_4byte(dm, 0x1b00, cmd | path << 1);
1756 odm_write_4byte(dm, 0x1b1c, temp);
1757 odm_write_4byte(dm, 0x1b38, 0x20000000);
1758 }
1759 break;
1760 }
1761 }
1762 }
1763
void _iqk_start_rximr_test_8822b(struct dm_struct *dm, u8 imr_limit)
1765 {
1766 u8 path;
1767
1768 for (path = 0; path < 2; path++)
1769 _iqk_rximr_test_8822b(dm, path, imr_limit);
1770 }
1771
void _iqk_start_imr_test_8822b(void *dm_void)
1773 {
1774 u8 imr_limit;
1775
1776 struct dm_struct *dm = (struct dm_struct *)dm_void;
1777
1778 if (*dm->band_width == 2)
1779 imr_limit = 0xf;
1780 else if (*dm->band_width == 1)
1781 imr_limit = 0x8;
1782 else
1783 imr_limit = 0x4;
1784 #if 0
1785 /* _iqk_start_tximr_test_8822b(dm, imr_limit);*/
1786 #endif
1787 _iqk_start_rximr_test_8822b(dm, imr_limit);
1788 }
1789
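/* Top-level IQK flow: optionally reuse a stored result (non-MP mode), back
 * up MAC/BB/RF state, then loop (configure -> calibrate -> restore) until
 * iqk_step reaches 7, finally filling the report and applying the RF 0xb8
 * phase-noise workaround.
 */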
void _phy_iq_calibrate_8822b(struct dm_struct *dm, boolean reset,
1791 boolean segment_iqk)
1792 {
1793 u32 MAC_backup[MAC_REG_NUM_8822B], BB_backup[BB_REG_NUM_8822B];
1794 u32 RF_backup[RF_REG_NUM_8822B][SS_8822B];
1795 u32 backup_mac_reg[MAC_REG_NUM_8822B] = {0x520, 0x550};
1796 u32 backup_bb_reg[BB_REG_NUM_8822B] = {0x808, 0x90c, 0xc00, 0xcb0,
1797 0xcb4, 0xcbc, 0xe00, 0xeb0,
1798 0xeb4, 0xebc, 0x1990, 0x9a4,
1799 0xa04, 0xb00, 0x838, 0xc58,
1800 0xc5c, 0xc6c, 0xe58, 0xe5c,
1801 0xe6c};
1802 u32 backup_rf_reg[RF_REG_NUM_8822B] = {0xdf, 0x8f, 0x65, 0x0, 0x1};
1803 boolean is_mp = false;
1804
1805 struct dm_iqk_info *iqk = &dm->IQK_info;
1806
1807 if (*dm->mp_mode)
1808 is_mp = true;
1809
1810 if (!is_mp)
1811 if (_iqk_reload_iqk_8822b(dm, reset))
1812 return;
1813
1814 	RF_DBG(dm, DBG_RF_IQK, "[IQK]==========IQK start!!!!!==========\n");
1815
1816 RF_DBG(dm, DBG_RF_IQK,
1817 "[IQK]band_type=%s band_width=%d ExtPA2G=%d ext_pa_5g=%d\n",
1818 (*dm->band_type == ODM_BAND_5G) ? "5G" : "2G", *dm->band_width,
1819 dm->ext_pa, dm->ext_pa_5g);
1820 RF_DBG(dm, DBG_RF_IQK, "[IQK]Interface = %d, cut_version = %x\n",
1821 dm->support_interface, dm->cut_version);
1822
1823 iqk->iqk_times++;
1824 iqk->kcount = 0;
1825 dm->rf_calibrate_info.iqk_step = 1;
1826 iqk->rxiqk_step = 1;
1827
1828 iqk->tmp_gntwl = _iqk_ltec_read_8822b(dm, 0x38);
1829 _iqk_backup_iqk_8822b(dm, 0x0, 0x0);
1830 _iqk_backup_mac_bb_8822b(dm, MAC_backup, BB_backup,
1831 backup_mac_reg, backup_bb_reg);
1832 _iqk_backup_rf_8822b(dm, RF_backup, backup_rf_reg);
1833 #if 0
1834 _iqk_configure_macbb_8822b(dm);
1835 _iqk_afe_setting_8822b(dm, true);
1836 _iqk_rfe_setting_8822b(dm, false);
1837 _iqk_agc_bnd_int_8822b(dm);
1838 _iqk_rf_setting_8822b(dm);
1839 #endif
1840
1841 while (1) {
1842 _iqk_configure_macbb_8822b(dm);
1843 _iqk_afe_setting_8822b(dm, true);
1844 _iqk_rfe_setting_8822b(dm, false);
1845 _iqk_agc_bnd_int_8822b(dm);
1846 _iqk_rf_setting_8822b(dm);
1847 _iqk_start_iqk_8822b(dm, segment_iqk);
1848 _iqk_afe_setting_8822b(dm, false);
1849 _iqk_restore_mac_bb_8822b(dm, MAC_backup, BB_backup,
1850 backup_mac_reg, backup_bb_reg);
1851 _iqk_restore_rf_8822b(dm, backup_rf_reg, RF_backup);
1852 if (dm->rf_calibrate_info.iqk_step == 7)
1853 break;
1854 iqk->kcount = 0;
1855 RF_DBG(dm, DBG_RF_IQK, "[IQK]delay 50ms!!!\n");
1856 ODM_delay_ms(50);
1857 	}
1858 if (segment_iqk)
1859 _iqk_reload_iqk_setting_8822b(dm, 0x0, 0x1);
1860 #if 0
1861 _iqk_afe_setting_8822b(dm, false);
1862 _iqk_restore_mac_bb_8822b(dm, MAC_backup, BB_backup, backup_mac_reg, backup_bb_reg);
1863 _iqk_restore_rf_8822b(dm, backup_rf_reg, RF_backup);
1864 #endif
1865 _iqk_fill_iqk_report_8822b(dm, 0);
1866 _iqk_rf0xb0_workaround_8822b(dm);
1867 RF_DBG(dm, DBG_RF_IQK, "[IQK]==========IQK end!!!!!==========\n");
1868 }
1869
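/*@FW-offloaded IQK: the request is handed to firmware through
 * odm_iq_calibrate_by_fw(); in MP mode "clear" is forced to 0x1, and only
 * the returned status is logged here.
 */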
1870 void _phy_iq_calibrate_by_fw_8822b(void *dm_void, u8 clear, u8 segment_iqk)
1871 {
1872 struct dm_struct *dm = (struct dm_struct *)dm_void;
1873 struct dm_iqk_info *iqk = &dm->IQK_info;
1874 enum hal_status status = HAL_STATUS_FAILURE;
1875
1876 if (*dm->mp_mode)
1877 clear = 0x1;
1878 #if 0
1879 /* else if (dm->is_linked)*/
1880 /* segment_iqk = 0x1;*/
1881 #endif
1882
1883 iqk->iqk_times++;
1884 status = odm_iq_calibrate_by_fw(dm, clear, segment_iqk);
1885
1886 if (status == HAL_STATUS_SUCCESS)
1887 RF_DBG(dm, DBG_RF_IQK, "[IQK]FWIQK OK!!!\n");
1888 else
1889 RF_DBG(dm, DBG_RF_IQK, "[IQK]FWIQK fail!!!\n");
1890 }
1891
1892 /*IQK_version:0x2f, NCTL:0x8*/
1893 /*1. disable the CCK block and OFDM CCA block while IQK is in progress*/
1894 void phy_iq_calibrate_8822b(void *dm_void, boolean clear, boolean segment_iqk)
1895 {
1896 struct dm_struct *dm = (struct dm_struct *)dm_void;
1897
1898 if (*dm->mp_mode)
1899 halrf_iqk_hwtx_check(dm, true);
1900 /*FW IQK*/
1901 if (dm->fw_offload_ability & PHYDM_RF_IQK_OFFLOAD) {
1902 _phy_iq_calibrate_by_fw_8822b(dm, clear, (u8)(segment_iqk));
1903 phydm_get_read_counter_8822b(dm);
1904 halrf_iqk_check_if_reload(dm);
1905 } else {
1906 _iq_calibrate_8822b_init(dm);
1907 _phy_iq_calibrate_8822b(dm, clear, segment_iqk);
1908 }
1909 _iqk_0xc94_workaround_8822b(dm);
1910 _iqk_fail_count_8822b(dm);
1911 if (*dm->mp_mode)
1912 halrf_iqk_hwtx_check(dm, false);
1913 #if (DM_ODM_SUPPORT_TYPE & ODM_AP)
1914 _iqk_iqk_fail_report_8822b(dm);
1915 #endif
1916 halrf_iqk_dbg(dm);
1917 }
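/*@A minimal, illustrative-only sketch of how the entry point above might be
 * triggered (e.g. after a channel switch). The trigger context and the
 * choice of arguments are assumptions; only the function name and its
 * signature come from this file.
 */
#if 0
static void example_trigger_full_iqk(struct dm_struct *dm)
{
	/*clear = true presumably asks for a fresh calibration instead of a
	 *reload; segment_iqk = false runs it in one pass (assumption)
	 */
	phy_iq_calibrate_8822b(dm, true, false);
}
#endif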
1918
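/*@_phy_imr_measure_8822b() reuses the same backup -> configure -> measure ->
 * restore scaffolding as the IQK path above, but runs the RX IMR sweep
 * (_iqk_start_imr_test_8822b) instead of the calibration itself.
 */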
1919 void _phy_imr_measure_8822b(struct dm_struct *dm)
1920 {
1921 u32 MAC_backup[MAC_REG_NUM_8822B], BB_backup[BB_REG_NUM_8822B];
1922 u32 RF_backup[RF_REG_NUM_8822B][SS_8822B];
1923 u32 backup_mac_reg[MAC_REG_NUM_8822B] = {0x520, 0x550};
1924 u32 backup_bb_reg[BB_REG_NUM_8822B] = {0x808, 0x90c, 0xc00, 0xcb0,
1925 0xcb4, 0xcbc, 0xe00, 0xeb0,
1926 0xeb4, 0xebc, 0x1990, 0x9a4,
1927 0xa04, 0xb00, 0x838, 0xc58,
1928 0xc5c, 0xc6c, 0xe58, 0xe5c,
1929 0xe6c};
1930 u32 backup_rf_reg[RF_REG_NUM_8822B] = {0xdf, 0x8f, 0x65, 0x0, 0x1};
1931
1932 _iqk_backup_iqk_8822b(dm, 0x0, 0x0);
1933 _iqk_backup_mac_bb_8822b(dm, MAC_backup, BB_backup,
1934 backup_mac_reg, backup_bb_reg);
1935 _iqk_backup_rf_8822b(dm, RF_backup, backup_rf_reg);
1936 _iqk_configure_macbb_8822b(dm);
1937 _iqk_afe_setting_8822b(dm, true);
1938 _iqk_rfe_setting_8822b(dm, false);
1939 _iqk_agc_bnd_int_8822b(dm);
1940 _iqk_rf_setting_8822b(dm);
1941
1942 _iqk_start_imr_test_8822b(dm);
1943
1944 _iqk_afe_setting_8822b(dm, false);
1945 _iqk_restore_mac_bb_8822b(dm, MAC_backup, BB_backup,
1946 backup_mac_reg, backup_bb_reg);
1947 _iqk_restore_rf_8822b(dm, backup_rf_reg, RF_backup);
1948 }
1949
1950 void do_imr_test_8822b(void *dm_void)
1951 {
1952 struct dm_struct *dm = (struct dm_struct *)dm_void;
1953
1954 RF_DBG(dm, DBG_RF_IQK,
1955 "[IQK] ************IMR Test *****************\n");
1956 _phy_imr_measure_8822b(dm);
1957 RF_DBG(dm, DBG_RF_IQK,
1958 "[IQK] **********End IMR Test *******************\n");
1959 }
1960
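/*@CFIR readback: 0x1b00 selects the path, 0x1b0c[13:12] selects TX (0x3) or
 * RX (0x1), and each of the 8 taps is fetched indirectly by writing an
 * address word to 0x1bd8 and reading it back from 0x1bfc. The real part is
 * taken from bits [27:16] and the imaginary part from bits [11:0]; results
 * land in iqk_cfir_real/_imag[2][...] when debug is set, in index [0]
 * otherwise.
 */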
1961 void phy_get_iqk_cfir_8822b(void *dm_void, u8 idx, u8 path, boolean debug)
1962 {
1963 struct dm_struct *dm = (struct dm_struct *)dm_void;
1964 struct dm_iqk_info *iqk_info = &dm->IQK_info;
1965
1966 u8 i, ch;
1967 u32 tmp;
1968 u32 bit_mask_20_16 = BIT(20) | BIT(19) | BIT(18) | BIT(17) | BIT(16);
1969
1970 if (debug)
1971 ch = 2;
1972 else
1973 ch = 0;
1974
1975 odm_set_bb_reg(dm, R_0x1b00, MASKDWORD, 0xf8000008 | path << 1);
1976 if (idx == 0)
1977 odm_set_bb_reg(dm, R_0x1b0c, BIT(13) | BIT(12), 0x3);
1978 else
1979 odm_set_bb_reg(dm, R_0x1b0c, BIT(13) | BIT(12), 0x1);
1980 odm_set_bb_reg(dm, R_0x1bd4, bit_mask_20_16, 0x10);
1981 for (i = 0; i < 8; i++) {
1982 odm_set_bb_reg(dm, R_0x1bd8, MASKDWORD, 0xe0000001 + (i * 4));
1983 tmp = odm_get_bb_reg(dm, R_0x1bfc, MASKDWORD);
1984 iqk_info->iqk_cfir_real[ch][path][idx][i] =
1985 (tmp & 0x0fff0000) >> 16;
1986 iqk_info->iqk_cfir_imag[ch][path][idx][i] = tmp & 0xfff;
1987 }
1988 odm_set_bb_reg(dm, R_0x1bd8, MASKDWORD, 0x0);
1989 odm_set_bb_reg(dm, R_0x1b0c, BIT(13) | BIT(12), 0x0);
1990 }
1991
1992 void phy_iqk_dbg_cfir_backup_8822b(void *dm_void)
1993 {
1994 struct dm_struct *dm = (struct dm_struct *)dm_void;
1995 struct dm_iqk_info *iqk_info = &dm->IQK_info;
1996 u8 path, idx, i;
1997
1998 RF_DBG(dm, DBG_RF_IQK, "[IQK]%-20s\n", "backup TX/RX CFIR");
1999
2000 for (path = 0; path < 2; path++)
2001 for (idx = 0; idx < 2; idx++)
2002 phydm_get_iqk_cfir(dm, idx, path, true);
2003
2004 for (path = 0; path < 2; path++) {
2005 for (idx = 0; idx < 2; idx++) {
2006 for (i = 0; i < 8; i++) {
2007 RF_DBG(dm, DBG_RF_IQK,
2008 "[IQK]%-7s %-3s CFIR_real: %-2d: 0x%x\n",
2009 (path == 0) ? "PATH A" : "PATH B",
2010 (idx == 0) ? "TX" : "RX", i,
2011 				       iqk_info->iqk_cfir_real[2][path][idx][i]);
2013 }
2014 for (i = 0; i < 8; i++) {
2015 RF_DBG(dm, DBG_RF_IQK,
2016 "[IQK]%-7s %-3s CFIR_img:%-2d: 0x%x\n",
2017 (path == 0) ? "PATH A" : "PATH B",
2018 (idx == 0) ? "TX" : "RX", i,
2019 				       iqk_info->iqk_cfir_imag[2][path][idx][i]);
2021 }
2022 }
2023 }
2024 }
2025
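/*@CFIR write-back of the debug copy (index [2]): for each tap i the real
 * part is written to 0x1bd8 as ((0xc0000000 >> idx) + 0x3) + i * 4 with the
 * coefficient shifted left by 9, and the imaginary part with the same
 * pattern using + 0x1. A zero first tap is treated as "no valid backup" and
 * the update is skipped.
 */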
2026 void phy_iqk_dbg_cfir_backup_update_8822b(void *dm_void)
2027 {
2028 struct dm_struct *dm = (struct dm_struct *)dm_void;
2029 struct dm_iqk_info *iqk = &dm->IQK_info;
2030 u8 i, path, idx;
2031 u32 bmask13_12 = BIT(13) | BIT(12);
2032 u32 bmask20_16 = BIT(20) | BIT(19) | BIT(18) | BIT(17) | BIT(16);
2033 u32 data;
2034
2035 if (iqk->iqk_cfir_real[2][0][0][0] == 0) {
2036 RF_DBG(dm, DBG_RF_IQK, "[IQK]%-20s\n", "CFIR is invalid");
2037 return;
2038 }
2039 for (path = 0; path < 2; path++) {
2040 for (idx = 0; idx < 2; idx++) {
2041 odm_set_bb_reg(dm, R_0x1b00, MASKDWORD,
2042 0xf8000008 | path << 1);
2043 odm_set_bb_reg(dm, R_0x1b2c, MASKDWORD, 0x7);
2044 odm_set_bb_reg(dm, R_0x1b38, MASKDWORD, 0x20000000);
2045 odm_set_bb_reg(dm, R_0x1b3c, MASKDWORD, 0x20000000);
2046 odm_set_bb_reg(dm, R_0x1bcc, MASKDWORD, 0x00000000);
2047 if (idx == 0)
2048 odm_set_bb_reg(dm, R_0x1b0c, bmask13_12, 0x3);
2049 else
2050 odm_set_bb_reg(dm, R_0x1b0c, bmask13_12, 0x1);
2051 odm_set_bb_reg(dm, R_0x1bd4, bmask20_16, 0x10);
2052 for (i = 0; i < 8; i++) {
2053 data = ((0xc0000000 >> idx) + 0x3) + (i * 4) +
2054 (iqk->iqk_cfir_real[2][path][idx][i]
2055 << 9);
2056 odm_write_4byte(dm, 0x1bd8, data);
2057 data = ((0xc0000000 >> idx) + 0x1) + (i * 4) +
2058 (iqk->iqk_cfir_imag[2][path][idx][i]
2059 << 9);
2060 odm_write_4byte(dm, 0x1bd8, data);
2061 #if 0
2062 /*odm_write_4byte(dm, 0x1bd8, iqk->iqk_cfir_real[2][path][idx][i]);*/
2063 /*odm_write_4byte(dm, 0x1bd8, iqk->iqk_cfir_imag[2][path][idx][i]);*/
2064 #endif
2065 }
2066 }
2067 odm_set_bb_reg(dm, R_0x1bd8, MASKDWORD, 0x0);
2068 odm_set_bb_reg(dm, R_0x1b0c, bmask13_12, 0x0);
2069 }
2070 RF_DBG(dm, DBG_RF_IQK, "[IQK]%-20s\n", "update new CFIR");
2071 }
2072
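/*@Same write sequence as above, but sourced from index [0] (the copy taken
 * with debug == false), i.e. it restores the stored default CFIR instead of
 * the debug backup.
 */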
2073 void phy_iqk_dbg_cfir_reload_8822b(void *dm_void)
2074 {
2075 struct dm_struct *dm = (struct dm_struct *)dm_void;
2076 struct dm_iqk_info *iqk = &dm->IQK_info;
2077 u8 i, path, idx;
2078 u32 bmask13_12 = BIT(13) | BIT(12);
2079 u32 bmask20_16 = BIT(20) | BIT(19) | BIT(18) | BIT(17) | BIT(16);
2080 u32 data;
2081
2082 if (iqk->iqk_cfir_real[0][0][0][0] == 0) {
2083 RF_DBG(dm, DBG_RF_IQK, "[IQK]%-20s\n", "CFIR is invalid");
2084 return;
2085 }
2086 for (path = 0; path < 2; path++) {
2087 for (idx = 0; idx < 2; idx++) {
2088 odm_set_bb_reg(dm, R_0x1b00, MASKDWORD,
2089 0xf8000008 | path << 1);
2090 odm_set_bb_reg(dm, R_0x1b2c, MASKDWORD, 0x7);
2091 odm_set_bb_reg(dm, R_0x1b38, MASKDWORD, 0x20000000);
2092 odm_set_bb_reg(dm, R_0x1b3c, MASKDWORD, 0x20000000);
2093 odm_set_bb_reg(dm, R_0x1bcc, MASKDWORD, 0x00000000);
2094 if (idx == 0)
2095 odm_set_bb_reg(dm, R_0x1b0c, bmask13_12, 0x3);
2096 else
2097 odm_set_bb_reg(dm, R_0x1b0c, bmask13_12, 0x1);
2098 odm_set_bb_reg(dm, R_0x1bd4, bmask20_16, 0x10);
2099 for (i = 0; i < 8; i++) {
2100 #if 0
2101 /*odm_write_4byte(dm, 0x1bd8, iqk->iqk_cfir_real[0][path][idx][i]);*/
2102 /*odm_write_4byte(dm, 0x1bd8, iqk->iqk_cfir_imag[0][path][idx][i]);*/
2103 #endif
2104 data = ((0xc0000000 >> idx) + 0x3) + (i * 4) +
2105 (iqk->iqk_cfir_real[0][path][idx][i]
2106 << 9);
2107 odm_write_4byte(dm, 0x1bd8, data);
2108 data = ((0xc0000000 >> idx) + 0x1) + (i * 4) +
2109 (iqk->iqk_cfir_imag[0][path][idx][i]
2110 << 9);
2111 odm_write_4byte(dm, 0x1bd8, data);
2112 }
2113 }
2114 odm_set_bb_reg(dm, R_0x1bd8, MASKDWORD, 0x0);
2115 odm_set_bb_reg(dm, R_0x1b0c, bmask13_12, 0x0);
2116 }
2117 RF_DBG(dm, DBG_RF_IQK, "[IQK]%-20s\n", "write CFIR with default value");
2118 }
2119
2120 void phy_iqk_dbg_cfir_write_8822b(void *dm_void, u8 type, u32 path, u32 idx,
2121 u32 i, u32 data)
2122 {
2123 struct dm_struct *dm = (struct dm_struct *)dm_void;
2124 struct dm_iqk_info *iqk_info = &dm->IQK_info;
2125
2126 if (type == 0)
2127 iqk_info->iqk_cfir_real[2][path][idx][i] = (u16)data;
2128 else
2129 iqk_info->iqk_cfir_imag[2][path][idx][i] = (u16)data;
2130 }
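/*@Illustrative-only debug sequence (values are made up): patch one tap of
 * the path-A TX CFIR in the debug copy, then push the whole copy back into
 * hardware with phy_iqk_dbg_cfir_backup_update_8822b(). Only the function
 * names and argument order come from this file.
 */
#if 0
static void example_patch_cfir_tap(struct dm_struct *dm)
{
	/*type 0 = real part, path 0 = A, idx 0 = TX, tap 3, value 0x100*/
	phy_iqk_dbg_cfir_write_8822b(dm, 0, 0, 0, 3, 0x100);
	phy_iqk_dbg_cfir_backup_update_8822b(dm);
}
#endif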
2131
2132 void phy_iqk_dbg_cfir_backup_show_8822b(void *dm_void)
2133 {
2134 struct dm_struct *dm = (struct dm_struct *)dm_void;
2135 struct dm_iqk_info *iqk_info = &dm->IQK_info;
2136 u8 path, idx, i;
2137
2138 RF_DBG(dm, DBG_RF_IQK, "[IQK]%-20s\n", "backup TX/RX CFIR");
2139
2140 for (path = 0; path < 2; path++) {
2141 for (idx = 0; idx < 2; idx++) {
2142 for (i = 0; i < 8; i++) {
2143 RF_DBG(dm, DBG_RF_IQK,
2144 "[IQK]%-10s %-3s CFIR_real:%-2d: 0x%x\n",
2145 (path == 0) ? "PATH A" : "PATH B",
2146 (idx == 0) ? "TX" : "RX", i,
2147 				       iqk_info->iqk_cfir_real[2][path][idx][i]);
2149 }
2150 for (i = 0; i < 8; i++) {
2151 RF_DBG(dm, DBG_RF_IQK,
2152 "[IQK]%-10s %-3s CFIR_img:%-2d: 0x%x\n",
2153 (path == 0) ? "PATH A" : "PATH B",
2154 (idx == 0) ? "TX" : "RX", i,
2155 				       iqk_info->iqk_cfir_imag[2][path][idx][i]);
2157 }
2158 }
2159 }
2160 }
2161
2162 #endif
2163