/*
 * Copyright 2020-2026 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <errno.h>

#include <assert.h>
#include <common/debug.h>
#include <ddr_utils.h>
#include <mmio_poll.h>
#include <s32cc-clk-drv.h>

static uint32_t enable_axi_ports(void);
static uint32_t get_mail(uint32_t *mail);
static uint32_t ack_mail(void);
static uint8_t get_max_cdd(const uint32_t cdd_addr[], size_t size);
static uint16_t get_max_delay(const uint32_t delay_addr[], size_t size);
static uint8_t get_avg_vref(const uint32_t vref_addr[], size_t size);
static uint32_t adjust_ddrc_config(void);
static bool is_lpddr4(void);

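/*
 * Training results collected from the PHY message block by the read_* helpers
 * below and consumed by adjust_ddrc_config() after training completes.
 */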
static struct space_timing_params tr_res = {
	.cdd = {.rr = 0, .rw = 0, .wr = 0, .ww = 0},
	.vref_ca = 0,
	.vref_dq = 0,
	.tphy_wrdata_delay = 0
};

/* Modify bitfield value with delta, given bitfield position and mask */
bool update_bf(uint32_t *v, uint8_t pos, uint32_t mask, int32_t delta)
{
	uint32_t bf_val;
	int64_t new_val;

	bf_val = (*v >> pos) & mask;
	new_val = (int64_t)bf_val + delta;

	/* Check if new value is within valid range [0, mask] */
	if ((new_val < 0) || (new_val > (int64_t)mask)) {
		return false;
	}

	*v = (*v & ~(mask << pos)) | ((uint32_t)new_val << pos);
	return true;
}
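
/*
 * Illustrative use of update_bf() with hypothetical values: for *v = 0x30,
 * pos = 4 and mask = 0xF, the current field value is 0x3; delta = +2 updates
 * the field to 0x5 and *v becomes 0x50. A delta that would push the field
 * below 0 or above mask leaves *v untouched and makes the call return false.
 */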

/* Sets default AXI parity. */
uint32_t set_axi_parity(void)
{
	uint32_t swstat_reg, timeout = DEFAULT_TIMEOUT_US;
	int err;

	/* Enable Parity For All AXI Interfaces */
	mmio_setbits_32(DDR_SS_REG, DDR_SS_AXI_PARITY_ENABLE_MASK);

	/* Set AXI_PARITY_TYPE to 0x1ff; 0-even, 1-odd */
	mmio_setbits_32(DDR_SS_REG, DDR_SS_AXI_PARITY_TYPE_MASK);

	/* For LPDDR4 Set DFI1_ENABLED to 0x1 */
	if (is_lpddr4()) {
		mmio_setbits_32(DDR_SS_REG, DDR_SS_DFI_1_ENABLED);
	}

	if (plat_deassert_ddr_reset() != 0) {
		return DEASSERT_FAILED;
	}

	/* Enable HIF, CAM Queueing */
	mmio_write_32(DDRC_BASE + OFFSET_DDRC_DBG1, DBG1_DISABLE_DE_QUEUEING);

	/* Disable auto-refresh: RFSHCTL3.dis_auto_refresh = 1 */
	mmio_setbits_32(DDRC_BASE + OFFSET_DDRC_RFSHCTL3, RFSHCTL3_DISABLE_AUTO_REFRESH);

	/* Disable power down: PWRCTL.powerdown_en = 0 */
	mmio_clrbits_32(DDRC_BASE + OFFSET_DDRC_PWRCTL, PWRCTL_POWER_DOWN_ENABLE_MASK);

	/* Disable self-refresh: PWRCTL.selfref_en = 0 */
	mmio_clrbits_32(DDRC_BASE + OFFSET_DDRC_PWRCTL, PWRCTL_SELF_REFRESH_ENABLE_MASK);

	/*
	 * Disable assertion of dfi_dram_clk_disable:
	 * PWRCTL.en_dfi_dram_clk_disable = 0
	 */
	mmio_clrbits_32(DDRC_BASE + OFFSET_DDRC_PWRCTL, PWRCTL_EN_DFI_DRAM_CLOCK_DIS_MASK);

	/* Enable Quasi-Dynamic Programming */
	mmio_write_32(DDRC_BASE + OFFSET_DDRC_SWCTL, SWCTL_SWDONE_ENABLE);

	/* Confirm Register Programming Done Ack is Cleared */
	err = mmio_read_32_poll_timeout(DDRC_BASE + OFFSET_DDRC_SWSTAT, swstat_reg,
					(swstat_reg & SWSTAT_SWDONE_ACK_MASK) != SWSTAT_SW_DONE,
					timeout);
	if (err != 0) {
		ERROR("Failed to clear register programming done ACK\n");
		return TIMEOUT_ERR;
	}

	/* DFI_INIT_COMPLETE_EN set to 0 */
	mmio_clrbits_32(DDRC_BASE + OFFSET_DDRC_DFIMISC, DFIMISC_DFI_INIT_COMPLETE_EN_MASK);

	/* Set SWCTL.sw_done to 1 */
	mmio_write_32(DDRC_BASE + OFFSET_DDRC_SWCTL, SWCTL_SWDONE_DONE);

	err = mmio_read_32_poll_timeout(DDRC_BASE + OFFSET_DDRC_SWSTAT, swstat_reg,
					(swstat_reg & SWSTAT_SWDONE_ACK_MASK) != SWSTAT_SW_NOT_DONE,
					timeout);
	if (err != 0) {
		ERROR("Failed to confirm DDRC SWSTAT switch done ACK\n");
		return TIMEOUT_ERR;
	}

	return NO_ERR;
}

/* Enables AXI port n. Programming Mode: Dynamic */
static uint32_t enable_axi_ports(void)
{
	/* Port 0 Control Register */
	mmio_write_32(DDRC_UMCTL2_MP_BASE + OFFSET_DDRC_PCTRL_0, ENABLE_AXI_PORT);
	/* Port 1 Control Register */
	mmio_write_32(DDRC_UMCTL2_MP_BASE + OFFSET_DDRC_PCTRL_1, ENABLE_AXI_PORT);
	/* Port 2 Control Register */
	mmio_write_32(DDRC_UMCTL2_MP_BASE + OFFSET_DDRC_PCTRL_2, ENABLE_AXI_PORT);

	return NO_ERR;
}

/*
 * Post PHY training setup - complementary settings that need to be
 * performed after running the firmware.
 * @param options - various flags controlling post training actions
 */
uint32_t post_train_setup(uint8_t options)
{
	uint32_t calbusy_reg, swstat_reg, swctl_reg, phymstr_reg;
	uint32_t umctl2_reg, dfistat_reg;
	uint32_t ret = NO_ERR, timeout = DEFAULT_TIMEOUT_US;
	int err;

	/*
	 * CalBusy.0 = 1, indicates the calibrator is actively calibrating.
	 * Wait Calibrating done.
	 */
	err = mmio_read_32_poll_timeout(DDR_PHYA_MASTER0_CALBUSY, calbusy_reg,
					(calbusy_reg & MASTER0_CAL_ACTIVE) == MASTER0_CAL_DONE,
					timeout);
	if (err != 0) {
		ERROR("PHY Master0 calibrator did not complete\n");
		return TIMEOUT_ERR;
	}

	/* Set SWCTL.sw_done to 0 */
	mmio_write_32(DDRC_BASE + OFFSET_DDRC_SWCTL, SWCTL_SWDONE_ENABLE);
	err = mmio_read_32_poll_timeout(DDRC_BASE + OFFSET_DDRC_SWSTAT, swctl_reg,
					(swctl_reg & SWSTAT_SWDONE_ACK_MASK) == SWSTAT_SW_NOT_DONE,
					timeout);
	if (err != 0) {
		ERROR("Failed to clear register DDRC SWCTL.sw_done\n");
		return TIMEOUT_ERR;
	}

	/* Disable PHY Master. */
	mmio_clrbits_32(DDRC_BASE + OFFSET_DFIPHYMSTR, DFIPHYMSTR_ENABLE);

	/* Wait for PHY Master to be disabled. */
	err = mmio_read_32_poll_timeout(DDRC_BASE + OFFSET_DFIPHYMSTR, phymstr_reg,
					(phymstr_reg & DFIPHYMSTR_ENABLE) == DFIPHYMSTR_DISABLED,
					timeout);
	if (err != 0) {
		ERROR("Failed to disable PHY Master\n");
		return TIMEOUT_ERR;
	}

	/* Wait for PHY Master request to be finished. */
	err = mmio_read_32_poll_timeout(DDRC_BASE + OFFSET_DDRC_STAT, phymstr_reg,
					(((phymstr_reg & SELFREF_TYPE_MASK) >> SELFREF_TYPE_POS)
					 != PHY_MASTER_REQUEST),
					timeout);
	if (err != 0) {
		ERROR("Failed to finish PHY Master request\n");
		return TIMEOUT_ERR;
	}

	/* Set DFIMISC.dfi_init_start to 1 */
	mmio_setbits_32(DDRC_BASE + OFFSET_DDRC_DFIMISC, DFIMISC_DFI_INIT_START_MASK);

	/* Set SWCTL.sw_done to 1 */
	mmio_write_32(DDRC_BASE + OFFSET_DDRC_SWCTL, SWCTL_SWDONE_DONE);

	/* Wait SWSTAT.sw_done_ack to 1 */
	err = mmio_read_32_poll_timeout(DDRC_BASE + OFFSET_DDRC_SWSTAT, swstat_reg,
					(swstat_reg & SWSTAT_SWDONE_ACK_MASK) != SWSTAT_SW_NOT_DONE,
					timeout);
	if (err != 0) {
		ERROR("Failed to wait for SWSTAT.sw_done\n");
		return TIMEOUT_ERR;
	}

	/* Wait DFISTAT.dfi_init_complete to 1 */
	err = mmio_read_32_poll_timeout(DDRC_BASE + OFFSET_DDRC_DFISTAT, dfistat_reg,
					(dfistat_reg & DFISTAT_DFI_INIT_DONE) != DFISTAT_DFI_INIT_INCOMPLETE,
					timeout);
	if (err != 0) {
		ERROR("DDRC DFI initialization not complete\n");
		return TIMEOUT_ERR;
	}

	/* Set SWCTL.sw_done to 0 */
	mmio_write_32(DDRC_BASE + OFFSET_DDRC_SWCTL, SWCTL_SWDONE_ENABLE);
	err = mmio_read_32_poll_timeout(DDRC_BASE + OFFSET_DDRC_SWSTAT, swctl_reg,
					(swctl_reg & SWSTAT_SWDONE_ACK_MASK) == SWSTAT_SW_NOT_DONE,
					timeout);
	if (err != 0) {
		ERROR("Failed to clear register DDRC SWCTL.sw_done\n");
		return TIMEOUT_ERR;
	}

	/* Set dfi_init_start to 0 */
	mmio_clrbits_32(DDRC_BASE + OFFSET_DDRC_DFIMISC, DFIMISC_DFI_INIT_START_MASK);

	/* Enable PHY Master. */
	mmio_setbits_32(DDRC_BASE + OFFSET_DFIPHYMSTR, DFIPHYMSTR_ENABLE);

	/* Wait for PHY Master to be enabled. */
	err = mmio_read_32_poll_timeout(DDRC_BASE + OFFSET_DFIPHYMSTR, phymstr_reg,
					(phymstr_reg & DFIPHYMSTR_ENABLE) == DFIPHYMSTR_ENABLE,
					timeout);
	if (err != 0) {
		ERROR("Failed to enable PHY Master\n");
		return TIMEOUT_ERR;
	}

	if ((options & ADJUST_DDRC_MASK) != ADJUST_DDRC_DISABLED) {
		/* Overwrite DDRC registers based on post-training results */
		ret = adjust_ddrc_config();
		if (ret != NO_ERR) {
			return ret;
		}
	}

	/* Set dfi_complete_en to 1 */
	mmio_setbits_32(DDRC_BASE + OFFSET_DDRC_DFIMISC, DFIMISC_DFI_INIT_COMPLETE_EN_MASK);

	/* Set PWRCTL.selfref_sw to 0 */
	mmio_clrbits_32(DDRC_BASE + OFFSET_DDRC_PWRCTL, PWRCTL_SELFREF_SW_MASK);

	/* Set SWCTL.sw_done to 1 */
	mmio_write_32(DDRC_BASE + OFFSET_DDRC_SWCTL, SWCTL_SWDONE_DONE);
	err = mmio_read_32_poll_timeout(DDRC_BASE + OFFSET_DDRC_SWSTAT, swctl_reg,
					(swctl_reg & SWSTAT_SWDONE_ACK_MASK)
					!= SWSTAT_SW_NOT_DONE, timeout);
	if (err != 0) {
		ERROR("Failed to set SWCTL.sw_done to 1\n");
		return TIMEOUT_ERR;
	}

	/* Wait for DWC_ddr_umctl2 to move to normal operating mode */
	err = mmio_read_32_poll_timeout(DDRC_BASE + OFFSET_DDRC_STAT, umctl2_reg,
					(umctl2_reg & STAT_OPERATING_MODE_MASK)
					!= STAT_OPERATING_MODE_INIT, timeout);
	if (err != 0) {
		ERROR("DWC_ddr_umctl2 did not reach normal operating mode\n");
		return TIMEOUT_ERR;
	}

	/* Enable auto-refresh: RFSHCTL3.dis_auto_refresh = 0 */
	mmio_clrbits_32(DDRC_BASE + OFFSET_DDRC_RFSHCTL3, RFSHCTL3_DIS_AUTO_REFRESH_MASK);

	/* Enable power down: PWRCTL.powerdown_en = 1 */
	mmio_setbits_32(DDRC_BASE + OFFSET_DDRC_PWRCTL, PWRCTL_POWER_DOWN_ENABLE_MASK);

	/* Enable self-refresh: PWRCTL.selfref_en = 1 */
	mmio_setbits_32(DDRC_BASE + OFFSET_DDRC_PWRCTL, PWRCTL_SELF_REFRESH_ENABLE_MASK);

	/*
	 * Enable assertion of dfi_dram_clk_disable:
	 * PWRCTL.en_dfi_dram_clk_disable = 1
	 */
	mmio_setbits_32(DDRC_BASE + OFFSET_DDRC_PWRCTL, PWRCTL_EN_DFI_DRAM_CLOCK_DIS_MASK);

	/*
	 * Each platform has a different number of AXI ports so this
	 * method should be implemented in hardware specific source
	 */
	ret = enable_axi_ports();

	return ret;
}

/* Wait until firmware finishes execution and return training result */
uint32_t wait_firmware_execution(void)
{
	uint32_t timeout_us = DEFAULT_TIMEOUT_US, ret = NO_ERR, mail = 0;
	uint64_t timeout = timeout_init_us(timeout_us);
	bool loop_continue = true;
	bool timeout_expired;

	do {
		ret = get_mail(&mail);
		if (ret != NO_ERR) {
			loop_continue = false;
		} else if (mail == TRAINING_FAILED_MSG) {
			/* Training stage failed */
			ret = TRAINING_FAILED;
			loop_continue = false;
		} else if (mail == TRAINING_OK_MSG) {
			loop_continue = false;
		} else {
			/* Continue waiting for training result */
		}
		timeout_expired = timeout_elapsed(timeout);
		if (timeout_expired) {
			ret = TRAINING_FAILED;
			loop_continue = false;
		}
		/* Continue loop if no exit condition met and timeout not elapsed */
	} while (loop_continue);

	return ret;
}

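/*
 * Mailbox handshake with the PHY training firmware, as implemented below:
 * get_mail() polls the UCT shadow register until a message is pending and
 * reads it from the write-only shadow register, then ack_mail() toggles
 * DCTWRITEPROT to acknowledge the message and waits for the PHY to withdraw it.
 */
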
/* Acknowledge received message */
static uint32_t ack_mail(void)
{
	uint32_t timeout = DEFAULT_TIMEOUT_US;
	uint32_t uct_reg;
	int err;

	/* ACK message */
	mmio_write_32(DDR_PHYA_DCTWRITEPROT, APBONLY_DCTWRITEPROT_ACK_EN);

	err = mmio_read_32_poll_timeout(DDR_PHYA_APBONLY_UCTSHADOWREGS, uct_reg,
					(uct_reg & UCT_WRITE_PROT_SHADOW_MASK) !=
					UCT_WRITE_PROT_SHADOW_ACK,
					timeout);
	if (err != 0) {
		ERROR("DDR PHY did not acknowledge write protection\n");
		return TIMEOUT_ERR;
	}

	mmio_write_32(DDR_PHYA_DCTWRITEPROT, APBONLY_DCTWRITEPROT_ACK_DIS);

	return NO_ERR;
}

/* Read available message from DDR PHY microcontroller */
static uint32_t get_mail(uint32_t *mail)
{
	uint32_t uct_reg, timeout = DEFAULT_TIMEOUT_US;
	int err;

	err = mmio_read_32_poll_timeout(DDR_PHYA_APBONLY_UCTSHADOWREGS, uct_reg,
					(uct_reg & UCT_WRITE_PROT_SHADOW_MASK) ==
					UCT_WRITE_PROT_SHADOW_ACK,
					timeout);
	if (err != 0) {
		ERROR("DDR PHY did not acknowledge UCT write protection\n");
		return TIMEOUT_ERR;
	}

	*mail = mmio_read_32(DDR_PHYA_APBONLY_UCTWRITEONLYSHADOW);
	/* ACK */
	return ack_mail();
}

/* Read Critical Delay Differences from message block and store max values */
void read_cdds(void)
{
	const uint32_t rank0_rw_addr[] = {CDD_CHA_RW_0_0, CDD_CHB_RW_0_0};
	const uint32_t rank0_wr_addr[] = {CDD_CHA_WR_0_0, CDD_CHB_WR_0_0};
	uint8_t cdd_rr = 0, cdd_ww = 0, cdd_wr = 0, cdd_rw = 0;
	uint32_t mstr;

	/* Max CDD values for single-rank */
	tr_res.cdd.rr = cdd_rr;
	tr_res.cdd.ww = cdd_ww;
	tr_res.cdd.rw = is_lpddr4() ?
			get_max_cdd(rank0_rw_addr, ARRAY_SIZE(rank0_rw_addr)) :
			mmio_read_8(CDD_CHA_RW_0_0_DDR3);
	tr_res.cdd.wr = is_lpddr4() ?
			get_max_cdd(rank0_wr_addr, ARRAY_SIZE(rank0_wr_addr)) :
			mmio_read_8(CDD_CHA_WR_0_0_DDR3);

	/* Check MSTR.active_ranks to identify multi-rank configurations */
	mstr = mmio_read_32(DDRC_BASE);
	if ((mstr & MSTR_ACT_RANKS_MASK) == MSTR_DUAL_RANK_VAL) {
		/* Compute max CDDs for both ranks depending on memory type */
		if (is_lpddr4()) {
			const uint32_t rr_addr[] = {
				CDD_CHA_RR_1_0, CDD_CHA_RR_0_1,
				CDD_CHB_RR_1_0, CDD_CHB_RR_0_1
			};
			const uint32_t ww_addr[] = {
				CDD_CHA_WW_1_0, CDD_CHA_WW_0_1,
				CDD_CHB_WW_1_0, CDD_CHB_WW_0_1
			};
			const uint32_t rw_addr[] = {
				CDD_CHA_RW_1_1, CDD_CHA_RW_1_0,
				CDD_CHA_RW_0_1, CDD_CHB_RW_1_1,
				CDD_CHB_RW_1_0, CDD_CHB_RW_0_1
			};
			const uint32_t wr_addr[] = {
				CDD_CHA_WR_1_1, CDD_CHA_WR_1_0,
				CDD_CHA_WR_0_1, CDD_CHB_WR_1_1,
				CDD_CHB_WR_1_0, CDD_CHB_WR_0_1
			};

			cdd_rr = get_max_cdd(rr_addr, ARRAY_SIZE(rr_addr));
			cdd_rw = get_max_cdd(rw_addr, ARRAY_SIZE(rw_addr));
			cdd_wr = get_max_cdd(wr_addr, ARRAY_SIZE(wr_addr));
			cdd_ww = get_max_cdd(ww_addr, ARRAY_SIZE(ww_addr));
		} else {
			const uint32_t rr_addr[] = {CDD_CHA_RR_1_0_DDR3,
						    CDD_CHA_RR_0_1_DDR3};
			const uint32_t ww_addr[] = {CDD_CHA_WW_1_0_DDR3,
						    CDD_CHA_WW_0_1_DDR3};
			const uint32_t rw_addr[] = {CDD_CHA_RW_1_1_DDR3,
						    CDD_CHA_RW_1_0_DDR3,
						    CDD_CHA_RW_0_1_DDR3};
			const uint32_t wr_addr[] = {CDD_CHA_WR_1_1_DDR3,
						    CDD_CHA_WR_1_0_DDR3,
						    CDD_CHA_WR_0_1_DDR3};

			cdd_rr = get_max_cdd(rr_addr, ARRAY_SIZE(rr_addr));
			cdd_rw = get_max_cdd(rw_addr, ARRAY_SIZE(rw_addr));
			cdd_wr = get_max_cdd(wr_addr, ARRAY_SIZE(wr_addr));
			cdd_ww = get_max_cdd(ww_addr, ARRAY_SIZE(ww_addr));
		}

		/* Update max CDD values if needed */
		if (cdd_rr > tr_res.cdd.rr) {
			tr_res.cdd.rr = cdd_rr;
		}
		if (cdd_rw > tr_res.cdd.rw) {
			tr_res.cdd.rw = cdd_rw;
		}
		if (cdd_wr > tr_res.cdd.wr) {
			tr_res.cdd.wr = cdd_wr;
		}
		if (cdd_ww > tr_res.cdd.ww) {
			tr_res.cdd.ww = cdd_ww;
		}
	}
}

/* Read trained VrefCA from message block and store average value */
void read_vref_ca(void)
{
	const uint32_t rank0_vref_addr[] = {VREF_CA_A0, VREF_CA_B0};
	const uint32_t rank01_vref_addr[] = {VREF_CA_A0, VREF_CA_A1,
					     VREF_CA_B0, VREF_CA_B1};
	uint32_t mstr;

	/* Check MSTR.active_ranks to identify multi-rank configurations */
	mstr = mmio_read_32(DDRC_BASE);
	if ((mstr & MSTR_ACT_RANKS_MASK) == MSTR_DUAL_RANK_VAL) {
		tr_res.vref_ca = get_avg_vref(rank01_vref_addr,
					      ARRAY_SIZE(rank01_vref_addr));
	} else {
		tr_res.vref_ca = get_avg_vref(rank0_vref_addr,
					      ARRAY_SIZE(rank0_vref_addr));
	}
}

/* Read trained VrefDQ from message block and store average value */
void read_vref_dq(void)
{
	const uint32_t rank0_vref_addr[] = {VREF_DQ_A0, VREF_DQ_B0};
	const uint32_t rank01_vref_addr[] = {VREF_DQ_A0, VREF_DQ_A1,
					     VREF_DQ_B0, VREF_DQ_B1};
	uint32_t mstr;

	/* Check MSTR.active_ranks to identify multi-rank configurations */
	mstr = mmio_read_32(DDRC_BASE);
	if ((mstr & MSTR_ACT_RANKS_MASK) == MSTR_DUAL_RANK_VAL) {
		tr_res.vref_dq = get_avg_vref(rank01_vref_addr,
					      ARRAY_SIZE(rank01_vref_addr));
	} else {
		tr_res.vref_dq = get_avg_vref(rank0_vref_addr,
					      ARRAY_SIZE(rank0_vref_addr));
	}
}

/* Calculate DFITMG1.dfi_t_wrdata_delay */
void compute_tphy_wrdata_delay(void)
{
	uint16_t tx_dqsdly, tx_dqsdly_tg1, tctrl_delay, burst_length,
		 wrdata_use_dfi_phy_clk;

	const uint32_t single_rank_dly_addr[] = {
		DBYTE0_TXDQSDLYTG0_U0, DBYTE0_TXDQSDLYTG0_U1,
		DBYTE1_TXDQSDLYTG0_U0, DBYTE1_TXDQSDLYTG0_U1,
		DBYTE2_TXDQSDLYTG0_U0, DBYTE2_TXDQSDLYTG0_U1,
		DBYTE3_TXDQSDLYTG0_U0, DBYTE3_TXDQSDLYTG0_U1
	};

	const uint32_t dual_rank_dly_addr[] = {
		DBYTE0_TXDQSDLYTG1_U0, DBYTE0_TXDQSDLYTG1_U1,
		DBYTE1_TXDQSDLYTG1_U0, DBYTE1_TXDQSDLYTG1_U1,
		DBYTE2_TXDQSDLYTG1_U0, DBYTE2_TXDQSDLYTG1_U1,
		DBYTE3_TXDQSDLYTG1_U0, DBYTE3_TXDQSDLYTG1_U1
	};

	uint32_t mstr, dfitmg0;

	/* Compute max tx_dqsdly for rank 0 */
	tx_dqsdly = get_max_delay(single_rank_dly_addr,
				  ARRAY_SIZE(single_rank_dly_addr));

	/* Check MSTR.active_ranks to identify multi-rank configurations */
	mstr = mmio_read_32(DDRC_BASE);
	if ((mstr & MSTR_ACT_RANKS_MASK) == MSTR_DUAL_RANK_VAL) {
		/* Compute max tx_dqsdly for rank 1 */
		tx_dqsdly_tg1 = get_max_delay(dual_rank_dly_addr,
					      ARRAY_SIZE(dual_rank_dly_addr));
		if (tx_dqsdly_tg1 > tx_dqsdly) {
			tx_dqsdly = tx_dqsdly_tg1;
		}
	}

	/* Extract the coarse delay value and add 1 to account for the fine delay */
	tx_dqsdly = (tx_dqsdly >> TXDQDLY_COARSE) + 1U;

	/* Compute tctrl_delay */
	tctrl_delay = (uint16_t)((mmio_read_16(ARDPTR_INITVAL_ADDR) / 2U) +
				 (DDRPHY_PIPE_DFI_MISC * 2U) + 3U);

	burst_length = (uint16_t)(mstr >> MSTR_BURST_RDWR_POS) &
			MSTR_BURST_RDWR_MASK;
	dfitmg0 = mmio_read_32(DDRC_BASE + OFFSET_DDRC_DFITMG0);
	wrdata_use_dfi_phy_clk = (uint16_t)(dfitmg0 >> DFITMG0_PHY_CLK_POS) &
				  DFITMG0_PHY_CLK_MASK;

	/* Program dfi_t_wrdata_delay: sum the delays, then halve and round up */
	tr_res.tphy_wrdata_delay = tctrl_delay + 6U + burst_length +
				   wrdata_use_dfi_phy_clk + tx_dqsdly;
	tr_res.tphy_wrdata_delay = (tr_res.tphy_wrdata_delay / 2U) +
				   (tr_res.tphy_wrdata_delay % 2U);
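	/*
	 * Worked example with hypothetical values: tctrl_delay = 9,
	 * burst_length = 8, wrdata_use_dfi_phy_clk = 2 and tx_dqsdly = 3 sum
	 * to 9 + 6 + 8 + 2 + 3 = 28 above, so the round-up halving yields a
	 * dfi_t_wrdata_delay of 14 (a sum of 29 would yield 15).
	 */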
}

/* Re-program some of the DDRC registers based on post-training results. */
static uint32_t adjust_ddrc_config(void)
{
	uint8_t wr_gap_ddr3 = 3, min_lp4 = 7, min_ddr3 = 0xe, max = 0xf;
	uint8_t rd_gap, wr_gap, rd_gap_new, wr_gap_new, delta, min;
	uint8_t rd_gap_lp4 = 4, rd_gap_ddr3 = 2, wr_gap_lp4 = 5;
	uint32_t dramtmg2_reg, rankctl_reg, mstr_reg;
	uint32_t ret = NO_ERR;

	/* DRAMTMG2.rd2wr & DRAMTMG2.wr2rd */
	dramtmg2_reg = mmio_read_32(DDRC_BASE + OFFSET_DDRC_DRAMTMG2);
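	/*
	 * The delta added to each field is the corresponding CDD rounded up
	 * to an even value and halved; e.g. a hypothetical cdd.rw of 5 gives
	 * delta = (5 + 1) / 2 = 3, while a cdd.rw of 4 gives delta = 2.
	 */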
	delta = (uint8_t)((tr_res.cdd.rw + (tr_res.cdd.rw % 2U)) / 2U);
	if (!update_bf(&dramtmg2_reg, DRAMTMG2_RD_WR_POS, DRAMTMG2_RD_WR_MASK,
		       (int32_t)delta)) {
		return BITFIELD_EXCEEDED;
	}
	delta = (uint8_t)((tr_res.cdd.ww + (tr_res.cdd.ww % 2U)) / 2U);
	if (!update_bf(&dramtmg2_reg, DRAMTMG2_WR_RD_POS, DRAMTMG2_WR_RD_MASK,
		       (int32_t)delta)) {
		return BITFIELD_EXCEEDED;
	}
	mmio_write_32(DDRC_BASE + OFFSET_DDRC_DRAMTMG2, dramtmg2_reg);

	/* For LPDDR4 overwrite INIT6 and INIT7 DDRC registers. */
	if (is_lpddr4()) {
		/* INIT6.mr5 */
		mmio_clrsetbits_32(DDRC_BASE + OFFSET_DDRC_INIT6, INIT6_MR5_MASK, tr_res.vref_ca);

		/* INIT7.mr6 */
		mmio_clrsetbits_32(DDRC_BASE + OFFSET_DDRC_INIT7, INIT7_MR6_MASK, tr_res.vref_dq);
	}

	/* DFITMG1.dfi_t_wrdata_delay */
	mmio_clrsetbits_32(DDRC_BASE + OFFSET_DDRC_DFITMG1,
			   (DFITMG1_WRDATA_DELAY_MASK << DFITMG1_WRDATA_DELAY_POS),
			   (((uint32_t)tr_res.tphy_wrdata_delay) << DFITMG1_WRDATA_DELAY_POS));

	/* For multi-rank systems */
	mstr_reg = mmio_read_32(DDRC_BASE);
	if ((mstr_reg & MSTR_ACT_RANKS_MASK) == MSTR_DUAL_RANK_VAL) {
		uint8_t rd_gap_ct = is_lpddr4() ? rd_gap_lp4 : rd_gap_ddr3;
		uint8_t wr_gap_ct = is_lpddr4() ? wr_gap_lp4 : wr_gap_ddr3;

		min = is_lpddr4() ? min_lp4 : min_ddr3;
		rankctl_reg = mmio_read_32(DDRC_BASE + OFFSET_DDRC_RANKCTL);
		/* RANKCTL.diff_rank_rd_gap */
		rd_gap = (uint8_t)((rankctl_reg >> RANKCTL_RD_GAP_POS) &
				   RANKCTL_RD_GAP_MASK);
		rd_gap_new = (uint8_t)((rd_gap_ct + tr_res.cdd.rr +
					(tr_res.cdd.rr % 2U)) / 2U);
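		/*
		 * For illustration (hypothetical value): cdd.rr = 3 with the
		 * LPDDR4 constant rd_gap_ct = 4 gives rd_gap_new =
		 * (4 + 3 + 1) / 2 = 4, which the clamp below then raises to
		 * min_lp4 = 7.
		 */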

		/* ensure min and max of rd_gap field */
		rd_gap_new = (rd_gap_new < min) ? min : ((rd_gap_new > max) ?
			     max : rd_gap_new);
		if (rd_gap_new > rd_gap) {
			delta = (uint8_t)(rd_gap_new - rd_gap);
			if (!update_bf(&rankctl_reg, RANKCTL_RD_GAP_POS,
				       RANKCTL_RD_GAP_MASK, (int32_t)delta)) {
				return BITFIELD_EXCEEDED;
			}
		}

		/* RANKCTL.diff_rank_wr_gap */
		wr_gap = (uint8_t)((rankctl_reg >> RANKCTL_WR_GAP_POS) &
				   RANKCTL_WR_GAP_MASK);
		wr_gap_new = (uint8_t)((wr_gap_ct + tr_res.cdd.ww +
					(tr_res.cdd.ww % 2U)) / 2U);

		/* ensure min and max of wr_gap field */
		wr_gap_new = (wr_gap_new < min) ? min : ((wr_gap_new > max) ?
			     max : wr_gap_new);
		if (wr_gap_new > wr_gap) {
			delta = (uint8_t)(wr_gap_new - wr_gap);
			if (!update_bf(&rankctl_reg, RANKCTL_WR_GAP_POS,
				       RANKCTL_WR_GAP_MASK, (int32_t)delta)) {
				return BITFIELD_EXCEEDED;
			}
		}

		if ((rd_gap_new > rd_gap) || (wr_gap_new > wr_gap)) {
			mmio_write_32(DDRC_BASE + OFFSET_DDRC_RANKCTL, rankctl_reg);
		}
	}

	return ret;
}

/* Check if memory type is LPDDR4 using MSTR register */
static bool is_lpddr4(void)
{
	uint32_t mstr;

	mstr = mmio_read_32(DDRC_BASE);
	return ((mstr & MSTR_DRAM_MASK) == MSTR_LPDDR4_VAL);
}

/*
 * Get maximum critical delay difference value.
 * @param cdd_addr[] - list of CDD memory addresses
 * @param size - number of CDDs to be read
 * @return max CDD value
 */
static uint8_t get_max_cdd(const uint32_t cdd_addr[], size_t size)
{
	uint8_t cdd, max = 0;
	int8_t signed_cdd;
	size_t i;

	for (i = 0; i < size; i++) {
		/* CDD has type int8_t - read as unsigned and cast to signed */
		signed_cdd = (int8_t)(mmio_read_8(cdd_addr[i]));
		/* We need to use absolute value */
		cdd = (uint8_t)((signed_cdd >= 0) ? signed_cdd : -signed_cdd);
		max = MAX(cdd, max);
	}
	return max;
}

/*
 * Get maximum delay value.
 * @param delay_addr[] - list of delay memory addresses
 * @param size - number of values to be read
 * @return max delay value
 */
static uint16_t get_max_delay(const uint32_t delay_addr[], size_t size)
{
	uint16_t value, max = 0;
	size_t i;

	for (i = 0; i < size; i++) {
		value = mmio_read_16(delay_addr[i]);
		max = MAX(value, max);
	}
	return max;
}

/*
 * Compute average vref value.
 * @param vref_addr[] - list of vref memory addresses
 * @param size - number of values to be read
 * @return average vref value
 */
static uint8_t get_avg_vref(const uint32_t vref_addr[], size_t size)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < size; i++) {
		sum += mmio_read_8(vref_addr[i]);
	}

	assert((sum / size) <= UINT8_MAX);

	return (uint8_t)(sum / size);
}