/*
 * Copyright 2020-2026 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <errno.h>

#include <assert.h>
#include <common/debug.h>
#include <ddr_utils.h>
#include <mmio_poll.h>

static uint32_t enable_axi_ports(void);
static uint32_t get_mail(uint32_t *mail);
static uint32_t ack_mail(void);
static uint8_t get_max_cdd(const uint32_t cdd_addr[], size_t size);
static uint16_t get_max_delay(const uint32_t delay_addr[], size_t size);
static uint8_t get_avg_vref(const uint32_t vref_addr[], size_t size);
static uint32_t adjust_ddrc_config(void);
static bool is_lpddr4(void);

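/*
 * Worst-case timings and trained voltage references collected from the
 * PHY message block; consumed by adjust_ddrc_config().
 */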
static struct space_timing_params tr_res = {
		.cdd = {.rr = 0, .rw = 0, .wr = 0, .ww = 0},
		.vref_ca = 0,
		.vref_dq = 0,
		.tphy_wrdata_delay = 0
};

/* Modify bitfield value with delta, given bitfield position and mask */
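/*
 * Example (hypothetical values): with *v = 0x30, pos = 4 and mask = 0xF,
 * the current field value is 0x3; delta = +2 stores 0x5, making *v 0x50,
 * while delta = -4 would take the field below 0 and return false.
 */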
bool update_bf(uint32_t *v, uint8_t pos, uint32_t mask, int32_t delta)
{
	uint32_t bf_val;
	int64_t new_val;

	bf_val = (*v >> pos) & mask;
	new_val = (int64_t)bf_val + delta;

	/* Check if new value is within valid range [0, mask] */
	if ((new_val < 0) || (new_val > (int64_t)mask)) {
		return false;
	}

	*v = (*v & ~(mask << pos)) | ((uint32_t)new_val << pos);
	return true;
}

/* Sets default AXI parity. */
uint32_t set_axi_parity(void)
{
	uint32_t swstat_reg, timeout = DEFAULT_TIMEOUT_US;
	int err;

	/* Enable parity for all AXI interfaces */
	mmio_setbits_32(DDR_SS_REG, DDR_SS_AXI_PARITY_ENABLE_MASK);

	/* Set AXI_PARITY_TYPE to 0x1ff (0 - even, 1 - odd) */
	mmio_setbits_32(DDR_SS_REG, DDR_SS_AXI_PARITY_TYPE_MASK);

	/* For LPDDR4, set DFI1_ENABLED to 0x1 */
	if (is_lpddr4()) {
		mmio_setbits_32(DDR_SS_REG, DDR_SS_DFI_1_ENABLED);
	}

	/* Enable HIF, CAM queueing */
	mmio_write_32(DDRC_BASE + OFFSET_DDRC_DBG1, DBG1_DISABLE_DE_QUEUEING);

	/* Disable auto-refresh: RFSHCTL3.dis_auto_refresh = 1 */
	mmio_setbits_32(DDRC_BASE + OFFSET_DDRC_RFSHCTL3, RFSHCTL3_DISABLE_AUTO_REFRESH);

	/* Disable power down: PWRCTL.powerdown_en = 0 */
	mmio_clrbits_32(DDRC_BASE + OFFSET_DDRC_PWRCTL, PWRCTL_POWER_DOWN_ENABLE_MASK);

	/* Disable self-refresh: PWRCTL.selfref_en = 0 */
	mmio_clrbits_32(DDRC_BASE + OFFSET_DDRC_PWRCTL, PWRCTL_SELF_REFRESH_ENABLE_MASK);

	/*
	 * Disable assertion of dfi_dram_clk_disable:
	 * PWRCTL.en_dfi_dram_clk_disable = 0
	 */
	mmio_clrbits_32(DDRC_BASE + OFFSET_DDRC_PWRCTL, PWRCTL_EN_DFI_DRAM_CLOCK_DIS_MASK);

	/* Enable Quasi-Dynamic Programming */
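	/*
	 * Quasi-dynamic DDRC registers may only be written while
	 * SWCTL.sw_done is 0; the write below opens such a programming
	 * window, which is closed again further down by writing
	 * SWCTL_SWDONE_DONE and polling SWSTAT.sw_done_ack.
	 */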
	mmio_write_32(DDRC_BASE + OFFSET_DDRC_SWCTL, SWCTL_SWDONE_ENABLE);

	/* Confirm Register Programming Done Ack is cleared */
	err = mmio_read_32_poll_timeout(DDRC_BASE + OFFSET_DDRC_SWSTAT, swstat_reg,
					(swstat_reg & SWSTAT_SWDONE_ACK_MASK) != SWSTAT_SW_DONE,
					timeout);
	if (err != 0) {
		ERROR("Failed to clear register programming done ACK\n");
		return TIMEOUT_ERR;
	}

	/* Set DFIMISC.dfi_init_complete_en to 0 */
	mmio_clrbits_32(DDRC_BASE + OFFSET_DDRC_DFIMISC, DFIMISC_DFI_INIT_COMPLETE_EN_MASK);

	/* Set SWCTL.sw_done to 1 */
	mmio_write_32(DDRC_BASE + OFFSET_DDRC_SWCTL, SWCTL_SWDONE_DONE);

	err = mmio_read_32_poll_timeout(DDRC_BASE + OFFSET_DDRC_SWSTAT, swstat_reg,
					(swstat_reg & SWSTAT_SWDONE_ACK_MASK) != SWSTAT_SW_NOT_DONE,
					timeout);
	if (err != 0) {
		ERROR("Failed to confirm DDRC SWSTAT switch done ACK\n");
		return TIMEOUT_ERR;
	}

	return NO_ERR;
}

/* Enables AXI ports 0-2. Programming Mode: Dynamic */
static uint32_t enable_axi_ports(void)
{
	/* Port 0 Control Register */
	mmio_write_32(DDRC_UMCTL2_MP_BASE + OFFSET_DDRC_PCTRL_0, ENABLE_AXI_PORT);
	/* Port 1 Control Register */
	mmio_write_32(DDRC_UMCTL2_MP_BASE + OFFSET_DDRC_PCTRL_1, ENABLE_AXI_PORT);
	/* Port 2 Control Register */
	mmio_write_32(DDRC_UMCTL2_MP_BASE + OFFSET_DDRC_PCTRL_2, ENABLE_AXI_PORT);

	return NO_ERR;
}

/*
 * Post PHY training setup - complementary settings that need to be
 * performed after running the firmware.
 * @param options - various flags controlling post training actions
 */
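/*
 * The sequence below follows the controller/PHY DFI handshake: wait for
 * PHY calibration to finish, quiesce the PHY Master interface, pulse
 * DFIMISC.dfi_init_start and wait for DFISTAT.dfi_init_complete, then
 * restore refresh and power management and re-enable the AXI ports.
 */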
uint32_t post_train_setup(uint8_t options)
{
	uint32_t calbusy_reg, swstat_reg, swctl_reg, phymstr_reg;
	uint32_t umctl2_reg, dfistat_reg;
	uint32_t ret = NO_ERR, timeout = DEFAULT_TIMEOUT_US;
	int err;

	/*
	 * CalBusy.0 = 1 indicates that the calibrator is actively
	 * calibrating. Wait for calibration to complete.
	 */
	err = mmio_read_32_poll_timeout(DDR_PHYA_MASTER0_CALBUSY, calbusy_reg,
				  (calbusy_reg & MASTER0_CAL_ACTIVE) == MASTER0_CAL_DONE,
				  timeout);
	if (err != 0) {
		ERROR("PHY Master0 calibrator did not complete\n");
		return TIMEOUT_ERR;
	}

	/* Set SWCTL.sw_done to 0 */
	mmio_write_32(DDRC_BASE + OFFSET_DDRC_SWCTL, SWCTL_SWDONE_ENABLE);
	err = mmio_read_32_poll_timeout(DDRC_BASE + OFFSET_DDRC_SWSTAT, swctl_reg,
				  (swctl_reg & SWSTAT_SWDONE_ACK_MASK) == SWSTAT_SW_NOT_DONE,
				  timeout);
	if (err != 0) {
		ERROR("Failed to clear register DDRC SWCTL.sw_done\n");
		return TIMEOUT_ERR;
	}

	/* Disable PHY Master. */
	mmio_clrbits_32(DDRC_BASE + OFFSET_DFIPHYMSTR, DFIPHYMSTR_ENABLE);

	/* Wait for PHY Master to be disabled. */
	err = mmio_read_32_poll_timeout(DDRC_BASE + OFFSET_DFIPHYMSTR, phymstr_reg,
				  (phymstr_reg & DFIPHYMSTR_ENABLE) == DFIPHYMSTR_DISABLED,
				  timeout);
	if (err != 0) {
		ERROR("Failed to disable PHY Master\n");
		return TIMEOUT_ERR;
	}

	/* Wait for any outstanding PHY Master request to finish. */
	err = mmio_read_32_poll_timeout(DDRC_BASE + OFFSET_DDRC_STAT, phymstr_reg,
				  (((phymstr_reg & SELFREF_TYPE_MASK) >> SELFREF_TYPE_POS)
				  != PHY_MASTER_REQUEST),
				  timeout);
	if (err != 0) {
		ERROR("Failed to finish PHY Master request\n");
		return TIMEOUT_ERR;
	}

	/* Set DFIMISC.dfi_init_start to 1 */
	mmio_setbits_32(DDRC_BASE + OFFSET_DDRC_DFIMISC, DFIMISC_DFI_INIT_START_MASK);

	/* Set SWCTL.sw_done to 1 */
	mmio_write_32(DDRC_BASE + OFFSET_DDRC_SWCTL, SWCTL_SWDONE_DONE);

	/* Wait for SWSTAT.sw_done_ack to be set to 1 */
	err = mmio_read_32_poll_timeout(DDRC_BASE + OFFSET_DDRC_SWSTAT, swstat_reg,
				  (swstat_reg & SWSTAT_SWDONE_ACK_MASK) != SWSTAT_SW_NOT_DONE,
				  timeout);
	if (err != 0) {
		ERROR("Failed to wait for SWSTAT.sw_done_ack\n");
		return TIMEOUT_ERR;
	}

	/* Wait for DFISTAT.dfi_init_complete to be set to 1 */
	err = mmio_read_32_poll_timeout(DDRC_BASE + OFFSET_DDRC_DFISTAT, dfistat_reg,
				  (dfistat_reg & DFISTAT_DFI_INIT_DONE) != DFISTAT_DFI_INIT_INCOMPLETE,
				  timeout);
	if (err != 0) {
		ERROR("DDRC DFI initialization not complete\n");
		return TIMEOUT_ERR;
	}

	/* Set SWCTL.sw_done to 0 */
	mmio_write_32(DDRC_BASE + OFFSET_DDRC_SWCTL, SWCTL_SWDONE_ENABLE);
	err = mmio_read_32_poll_timeout(DDRC_BASE + OFFSET_DDRC_SWSTAT, swctl_reg,
				  (swctl_reg & SWSTAT_SWDONE_ACK_MASK) == SWSTAT_SW_NOT_DONE,
				  timeout);
	if (err != 0) {
		ERROR("Failed to clear register DDRC SWCTL.sw_done\n");
		return TIMEOUT_ERR;
	}

	/* Set DFIMISC.dfi_init_start to 0 */
	mmio_clrbits_32(DDRC_BASE + OFFSET_DDRC_DFIMISC, DFIMISC_DFI_INIT_START_MASK);

	/* Enable PHY Master. */
	mmio_setbits_32(DDRC_BASE + OFFSET_DFIPHYMSTR, DFIPHYMSTR_ENABLE);

	/* Wait for PHY Master to be enabled. */
	err = mmio_read_32_poll_timeout(DDRC_BASE + OFFSET_DFIPHYMSTR, phymstr_reg,
				  (phymstr_reg & DFIPHYMSTR_ENABLE) == DFIPHYMSTR_ENABLE,
				  timeout);
	if (err != 0) {
		ERROR("Failed to enable PHY Master\n");
		return TIMEOUT_ERR;
	}

	if ((options & ADJUST_DDRC_MASK) != ADJUST_DDRC_DISABLED) {
		/* Overwrite DDRC registers based on post-training results */
		ret = adjust_ddrc_config();
		if (ret != NO_ERR) {
			return ret;
		}
	}

	/* Set DFIMISC.dfi_init_complete_en to 1 */
	mmio_setbits_32(DDRC_BASE + OFFSET_DDRC_DFIMISC, DFIMISC_DFI_INIT_COMPLETE_EN_MASK);

	/* Set PWRCTL.selfref_sw to 0 */
	mmio_clrbits_32(DDRC_BASE + OFFSET_DDRC_PWRCTL, PWRCTL_SELFREF_SW_MASK);

	/* Set SWCTL.sw_done to 1 */
	mmio_write_32(DDRC_BASE + OFFSET_DDRC_SWCTL, SWCTL_SWDONE_DONE);
	err = mmio_read_32_poll_timeout(DDRC_BASE + OFFSET_DDRC_SWSTAT, swctl_reg,
				  (swctl_reg & SWSTAT_SWDONE_ACK_MASK)
				  != SWSTAT_SW_NOT_DONE, timeout);
	if (err != 0) {
		ERROR("Failed to set SWCTL.sw_done to 1\n");
		return TIMEOUT_ERR;
	}

	/* Wait for DWC_ddr_umctl2 to move to normal operating mode */
	err = mmio_read_32_poll_timeout(DDRC_BASE + OFFSET_DDRC_STAT, umctl2_reg,
				  (umctl2_reg & STAT_OPERATING_MODE_MASK)
				  != STAT_OPERATING_MODE_INIT, timeout);
	if (err != 0) {
		ERROR("DWC_ddr_umctl2 did not reach normal operating mode\n");
		return TIMEOUT_ERR;
	}

	/* Enable auto-refresh: RFSHCTL3.dis_auto_refresh = 0 */
	mmio_clrbits_32(DDRC_BASE + OFFSET_DDRC_RFSHCTL3, RFSHCTL3_DIS_AUTO_REFRESH_MASK);

	/* Enable power down: PWRCTL.powerdown_en = 1 */
	mmio_setbits_32(DDRC_BASE + OFFSET_DDRC_PWRCTL, PWRCTL_POWER_DOWN_ENABLE_MASK);

	/* Enable self-refresh: PWRCTL.selfref_en = 1 */
	mmio_setbits_32(DDRC_BASE + OFFSET_DDRC_PWRCTL, PWRCTL_SELF_REFRESH_ENABLE_MASK);

	/*
	 * Enable assertion of dfi_dram_clk_disable:
	 * PWRCTL.en_dfi_dram_clk_disable = 1
	 */
	mmio_setbits_32(DDRC_BASE + OFFSET_DDRC_PWRCTL, PWRCTL_EN_DFI_DRAM_CLOCK_DIS_MASK);

	/*
	 * Each platform has a different number of AXI ports, so this
	 * method should be implemented in a hardware-specific source.
	 */
	ret = enable_axi_ports();

	return ret;
}

/* Wait until firmware finishes execution and return training result */
uint32_t wait_firmware_execution(void)
{
	uint32_t timeout_us = DEFAULT_TIMEOUT_US, ret = NO_ERR, mail = 0;
	uint64_t timeout = timeout_init_us(timeout_us);
	bool loop_continue = true;
	bool timeout_expired;

	do {
		ret = get_mail(&mail);
		if (ret != NO_ERR) {
			loop_continue = false;
		} else if (mail == TRAINING_FAILED_MSG) {
			/* Training stage failed */
			ret = TRAINING_FAILED;
			loop_continue = false;
		} else if (mail == TRAINING_OK_MSG) {
			loop_continue = false;
		} else {
			/* Continue waiting for training result */
		}
		timeout_expired = timeout_elapsed(timeout);
		if (timeout_expired) {
			ret = TRAINING_FAILED;
			loop_continue = false;
		}
		/* Continue loop if no exit condition met and timeout not elapsed */
	} while (loop_continue);

	return ret;
}

/* Acknowledge received message */
static uint32_t ack_mail(void)
{
	uint32_t timeout = DEFAULT_TIMEOUT_US;
	uint32_t uct_reg;
	int err;

	/* ACK message */
	mmio_write_32(DDR_PHYA_DCTWRITEPROT, APBONLY_DCTWRITEPROT_ACK_EN);

	err = mmio_read_32_poll_timeout(DDR_PHYA_APBONLY_UCTSHADOWREGS, uct_reg,
					(uct_reg & UCT_WRITE_PROT_SHADOW_MASK) !=
					UCT_WRITE_PROT_SHADOW_ACK,
					timeout);
	if (err != 0) {
		ERROR("DDR PHY did not acknowledge write protection\n");
		return TIMEOUT_ERR;
	}

	mmio_write_32(DDR_PHYA_DCTWRITEPROT, APBONLY_DCTWRITEPROT_ACK_DIS);

	return NO_ERR;
}

/* Read available message from DDR PHY microcontroller */
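/*
 * Mailbox handshake with the PHY microcontroller: wait until
 * UctWriteProtShadow signals a pending message, read it from
 * UctWriteOnlyShadow, then acknowledge it through DctWriteProt so the
 * firmware can post the next message.
 */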
static uint32_t get_mail(uint32_t *mail)
{
	uint32_t uct_reg, timeout = DEFAULT_TIMEOUT_US;
	int err;

	err = mmio_read_32_poll_timeout(DDR_PHYA_APBONLY_UCTSHADOWREGS, uct_reg,
					(uct_reg & UCT_WRITE_PROT_SHADOW_MASK) ==
					UCT_WRITE_PROT_SHADOW_ACK,
					timeout);
	if (err != 0) {
		ERROR("DDR PHY did not acknowledge UCT write protection\n");
		return TIMEOUT_ERR;
	}

	*mail = mmio_read_32(DDR_PHYA_APBONLY_UCTWRITEONLYSHADOW);
	/* ACK */
	return ack_mail();
}

/* Read Critical Delay Differences from message block and store max values */
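/*
 * CDDs are signed byte values reported by the training firmware for each
 * rank/channel pair; only the worst-case magnitude per access pattern
 * (rd-rd, rd-wr, wr-rd, wr-wr) is kept, to be folded into the DDRC
 * timing registers by adjust_ddrc_config().
 */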
void read_cdds(void)
{
	const uint32_t rank0_rw_addr[] = {CDD_CHA_RW_0_0, CDD_CHB_RW_0_0};
	const uint32_t rank0_wr_addr[] = {CDD_CHA_WR_0_0, CDD_CHB_WR_0_0};
	uint8_t cdd_rr = 0, cdd_ww = 0, cdd_wr = 0, cdd_rw = 0;
	uint32_t mstr;

	/* Max CDD values for single-rank */
	tr_res.cdd.rr = cdd_rr;
	tr_res.cdd.ww = cdd_ww;
	tr_res.cdd.rw = is_lpddr4() ?
			get_max_cdd(rank0_rw_addr, ARRAY_SIZE(rank0_rw_addr)) :
			mmio_read_8(CDD_CHA_RW_0_0_DDR3);
	tr_res.cdd.wr = is_lpddr4() ?
			get_max_cdd(rank0_wr_addr, ARRAY_SIZE(rank0_wr_addr)) :
			mmio_read_8(CDD_CHA_WR_0_0_DDR3);

	/* Check MSTR.active_ranks to identify multi-rank configurations */
	mstr = mmio_read_32(DDRC_BASE);
	if ((mstr & MSTR_ACT_RANKS_MASK) == MSTR_DUAL_RANK_VAL) {
		/* Compute max CDDs for both ranks depending on memory type */
		if (is_lpddr4()) {
			const uint32_t rr_addr[] = {
				CDD_CHA_RR_1_0, CDD_CHA_RR_0_1,
				CDD_CHB_RR_1_0, CDD_CHB_RR_0_1
			};
			const uint32_t ww_addr[] = {
				CDD_CHA_WW_1_0, CDD_CHA_WW_0_1,
				CDD_CHB_WW_1_0, CDD_CHB_WW_0_1
			};
			const uint32_t rw_addr[] = {
				CDD_CHA_RW_1_1, CDD_CHA_RW_1_0,
				CDD_CHA_RW_0_1, CDD_CHB_RW_1_1,
				CDD_CHB_RW_1_0, CDD_CHB_RW_0_1
			};
			const uint32_t wr_addr[] = {
				CDD_CHA_WR_1_1, CDD_CHA_WR_1_0,
				CDD_CHA_WR_0_1, CDD_CHB_WR_1_1,
				CDD_CHB_WR_1_0, CDD_CHB_WR_0_1
			};

			cdd_rr = get_max_cdd(rr_addr, ARRAY_SIZE(rr_addr));
			cdd_rw = get_max_cdd(rw_addr, ARRAY_SIZE(rw_addr));
			cdd_wr = get_max_cdd(wr_addr, ARRAY_SIZE(wr_addr));
			cdd_ww = get_max_cdd(ww_addr, ARRAY_SIZE(ww_addr));
		} else {
			const uint32_t rr_addr[] = {CDD_CHA_RR_1_0_DDR3,
						    CDD_CHA_RR_0_1_DDR3};
			const uint32_t ww_addr[] = {CDD_CHA_WW_1_0_DDR3,
						    CDD_CHA_WW_0_1_DDR3};
			const uint32_t rw_addr[] = {CDD_CHA_RW_1_1_DDR3,
						    CDD_CHA_RW_1_0_DDR3,
						    CDD_CHA_RW_0_1_DDR3};
			const uint32_t wr_addr[] = {CDD_CHA_WR_1_1_DDR3,
						    CDD_CHA_WR_1_0_DDR3,
						    CDD_CHA_WR_0_1_DDR3};

			cdd_rr = get_max_cdd(rr_addr, ARRAY_SIZE(rr_addr));
			cdd_rw = get_max_cdd(rw_addr, ARRAY_SIZE(rw_addr));
			cdd_wr = get_max_cdd(wr_addr, ARRAY_SIZE(wr_addr));
			cdd_ww = get_max_cdd(ww_addr, ARRAY_SIZE(ww_addr));
		}

		/* Update max CDD values if needed */
		if (cdd_rr > tr_res.cdd.rr) {
			tr_res.cdd.rr = cdd_rr;
		}
		if (cdd_rw > tr_res.cdd.rw) {
			tr_res.cdd.rw = cdd_rw;
		}
		if (cdd_wr > tr_res.cdd.wr) {
			tr_res.cdd.wr = cdd_wr;
		}
		if (cdd_ww > tr_res.cdd.ww) {
			tr_res.cdd.ww = cdd_ww;
		}
	}
}

/* Read trained VrefCA from message block and store average value */
void read_vref_ca(void)
{
	const uint32_t rank0_vref_addr[] = {VREF_CA_A0, VREF_CA_B0};
	const uint32_t rank01_vref_addr[] = {VREF_CA_A0, VREF_CA_A1,
					     VREF_CA_B0, VREF_CA_B1};
	uint32_t mstr;

	/* Check MSTR.active_ranks to identify multi-rank configurations */
	mstr = mmio_read_32(DDRC_BASE);
	if ((mstr & MSTR_ACT_RANKS_MASK) == MSTR_DUAL_RANK_VAL) {
		tr_res.vref_ca = get_avg_vref(rank01_vref_addr,
					      ARRAY_SIZE(rank01_vref_addr));
	} else {
		tr_res.vref_ca = get_avg_vref(rank0_vref_addr,
					      ARRAY_SIZE(rank0_vref_addr));
	}
}

/* Read trained VrefDQ from message block and store average value */
void read_vref_dq(void)
{
	const uint32_t rank0_vref_addr[] = {VREF_DQ_A0, VREF_DQ_B0};
	const uint32_t rank01_vref_addr[] = {VREF_DQ_A0, VREF_DQ_A1,
					     VREF_DQ_B0, VREF_DQ_B1};
	uint32_t mstr;

	/* Check MSTR.active_ranks to identify multi-rank configurations */
	mstr = mmio_read_32(DDRC_BASE);
	if ((mstr & MSTR_ACT_RANKS_MASK) == MSTR_DUAL_RANK_VAL) {
		tr_res.vref_dq = get_avg_vref(rank01_vref_addr,
					      ARRAY_SIZE(rank01_vref_addr));
	} else {
		tr_res.vref_dq = get_avg_vref(rank0_vref_addr,
					      ARRAY_SIZE(rank0_vref_addr));
	}
}

/* Calculate DFITMG1.dfi_t_wrdata_delay */
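/*
 * Effectively tphy_wrdata_delay = ceil((tctrl_delay + 6 + burst_length +
 * wrdata_use_dfi_phy_clk + tx_dqsdly) / 2), where tx_dqsdly is the
 * worst-case trained TxDqsDly across all dbytes (and both ranks, when a
 * second rank is present).
 */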
void compute_tphy_wrdata_delay(void)
{
	uint16_t tx_dqsdly, tx_dqsdly_tg1, tctrl_delay, burst_length,
		 wrdata_use_dfi_phy_clk;

	const uint32_t single_rank_dly_addr[] = {
		DBYTE0_TXDQSDLYTG0_U0, DBYTE0_TXDQSDLYTG0_U1,
		DBYTE1_TXDQSDLYTG0_U0, DBYTE1_TXDQSDLYTG0_U1,
		DBYTE2_TXDQSDLYTG0_U0, DBYTE2_TXDQSDLYTG0_U1,
		DBYTE3_TXDQSDLYTG0_U0, DBYTE3_TXDQSDLYTG0_U1
	};

	const uint32_t dual_rank_dly_addr[] = {
		DBYTE0_TXDQSDLYTG1_U0, DBYTE0_TXDQSDLYTG1_U1,
		DBYTE1_TXDQSDLYTG1_U0, DBYTE1_TXDQSDLYTG1_U1,
		DBYTE2_TXDQSDLYTG1_U0, DBYTE2_TXDQSDLYTG1_U1,
		DBYTE3_TXDQSDLYTG1_U0, DBYTE3_TXDQSDLYTG1_U1
	};

	uint32_t mstr, dfitmg0;

	/* Compute max tx_dqsdly for rank 0 */
	tx_dqsdly = get_max_delay(single_rank_dly_addr,
				  ARRAY_SIZE(single_rank_dly_addr));

	/* Check MSTR.active_ranks to identify multi-rank configurations */
	mstr = mmio_read_32(DDRC_BASE);
	if ((mstr & MSTR_ACT_RANKS_MASK) == MSTR_DUAL_RANK_VAL) {
		/* Compute max tx_dqsdly for rank 1 */
		tx_dqsdly_tg1 = get_max_delay(dual_rank_dly_addr,
					      ARRAY_SIZE(dual_rank_dly_addr));
		if (tx_dqsdly_tg1 > tx_dqsdly) {
			tx_dqsdly = tx_dqsdly_tg1;
		}
	}

	/* Extract the coarse delay value and add 1 to account for the fine delay */
	tx_dqsdly = (tx_dqsdly >> TXDQDLY_COARSE) + 1U;

	/* Compute tctrl_delay */
	tctrl_delay = (uint16_t)((mmio_read_16(ARDPTR_INITVAL_ADDR) / 2U) +
				 (DDRPHY_PIPE_DFI_MISC * 2U) + 3U);

	burst_length = (uint16_t)(mstr >> MSTR_BURST_RDWR_POS) &
		       MSTR_BURST_RDWR_MASK;
	dfitmg0 = mmio_read_16(DDRC_BASE + OFFSET_DDRC_DFITMG0);
	wrdata_use_dfi_phy_clk = (uint16_t)(dfitmg0 >> DFITMG0_PHY_CLK_POS) &
				 DFITMG0_PHY_CLK_MASK;

	/* Sum the delay components and round up the final division by 2 */
	tr_res.tphy_wrdata_delay = tctrl_delay + 6U + burst_length +
				   wrdata_use_dfi_phy_clk + tx_dqsdly;
	tr_res.tphy_wrdata_delay = (tr_res.tphy_wrdata_delay / 2U) +
				   (tr_res.tphy_wrdata_delay % 2U);
}

/* Re-program some of the DDRC registers based on post-training results. */
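/*
 * Touched fields: DRAMTMG2.rd2wr/wr2rd (widened by half the worst-case
 * rw/ww CDDs), INIT6.mr5/INIT7.mr6 (trained VrefCA/VrefDQ, LPDDR4 only),
 * DFITMG1.dfi_t_wrdata_delay, and the RANKCTL rank-to-rank gaps on
 * dual-rank configurations.
 */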
static uint32_t adjust_ddrc_config(void)
{
	uint8_t wr_gap_ddr3 = 3, min_lp4 = 7, min_ddr3 = 0xe, max = 0xf;
	uint8_t rd_gap, wr_gap, rd_gap_new, wr_gap_new, delta, min;
	uint8_t rd_gap_lp4 = 4, rd_gap_ddr3 = 2, wr_gap_lp4 = 5;
	uint32_t dramtmg2_reg, rankctl_reg, mstr_reg;
	uint32_t ret = NO_ERR;

	/* DRAMTMG2.rd2wr & DRAMTMG2.wr2rd */
	dramtmg2_reg = mmio_read_32(DDRC_BASE + OFFSET_DDRC_DRAMTMG2);
	delta = (uint8_t)((tr_res.cdd.rw + (tr_res.cdd.rw % 2U)) / 2U);
	if (!update_bf(&dramtmg2_reg, DRAMTMG2_RD_WR_POS, DRAMTMG2_RD_WR_MASK,
		       (int32_t)delta)) {
		return BITFIELD_EXCEEDED;
	}
	delta = (uint8_t)((tr_res.cdd.ww + (tr_res.cdd.ww % 2U)) / 2U);
	if (!update_bf(&dramtmg2_reg, DRAMTMG2_WR_RD_POS, DRAMTMG2_WR_RD_MASK,
		       (int32_t)delta)) {
		return BITFIELD_EXCEEDED;
	}
	mmio_write_32(DDRC_BASE + OFFSET_DDRC_DRAMTMG2, dramtmg2_reg);

	/* For LPDDR4, overwrite the INIT6 and INIT7 DDRC registers. */
	if (is_lpddr4()) {
		/* INIT6.mr5 */
		mmio_clrsetbits_32(DDRC_BASE + OFFSET_DDRC_INIT6, INIT6_MR5_MASK, tr_res.vref_ca);

		/* INIT7.mr6 */
		mmio_clrsetbits_32(DDRC_BASE + OFFSET_DDRC_INIT7, INIT7_MR6_MASK, tr_res.vref_dq);
	}

	/* DFITMG1.dfi_t_wrdata_delay */
	mmio_clrsetbits_32(DDRC_BASE + OFFSET_DDRC_DFITMG1,
			   (DFITMG1_WRDATA_DELAY_MASK << DFITMG1_WRDATA_DELAY_POS),
			   (((uint32_t)tr_res.tphy_wrdata_delay) << DFITMG1_WRDATA_DELAY_POS));

	/* For multi-rank systems */
	mstr_reg = mmio_read_32(DDRC_BASE);
	if ((mstr_reg & MSTR_ACT_RANKS_MASK) == MSTR_DUAL_RANK_VAL) {
		uint8_t rd_gap_ct = is_lpddr4() ? rd_gap_lp4 : rd_gap_ddr3;
		uint8_t wr_gap_ct = is_lpddr4() ? wr_gap_lp4 : wr_gap_ddr3;

		min = is_lpddr4() ? min_lp4 : min_ddr3;
		rankctl_reg = mmio_read_32(DDRC_BASE + OFFSET_DDRC_RANKCTL);
		/* RANKCTL.diff_rank_rd_gap */
		rd_gap = (uint8_t)((rankctl_reg >> RANKCTL_RD_GAP_POS) &
				   RANKCTL_RD_GAP_MASK);
		rd_gap_new = (uint8_t)((rd_gap_ct + tr_res.cdd.rr +
					(tr_res.cdd.rr % 2U)) / 2U);

		/* Clamp the new rd_gap value to the [min, max] field range */
		rd_gap_new = (rd_gap_new < min) ? min : ((rd_gap_new > max) ?
							 max : rd_gap_new);
		if (rd_gap_new > rd_gap) {
			delta = (uint8_t)(rd_gap_new - rd_gap);
			if (!update_bf(&rankctl_reg, RANKCTL_RD_GAP_POS,
				       RANKCTL_RD_GAP_MASK, (int32_t)delta)) {
				return BITFIELD_EXCEEDED;
			}
		}

		/* RANKCTL.diff_rank_wr_gap */
		wr_gap = (uint8_t)((rankctl_reg >> RANKCTL_WR_GAP_POS) &
				   RANKCTL_WR_GAP_MASK);
		wr_gap_new = (uint8_t)((wr_gap_ct + tr_res.cdd.ww +
					(tr_res.cdd.ww % 2U)) / 2U);

		/* Clamp the new wr_gap value to the [min, max] field range */
		wr_gap_new = (wr_gap_new < min) ? min : ((wr_gap_new > max) ?
							 max : wr_gap_new);
		if (wr_gap_new > wr_gap) {
			delta = (uint8_t)(wr_gap_new - wr_gap);
			if (!update_bf(&rankctl_reg, RANKCTL_WR_GAP_POS,
				       RANKCTL_WR_GAP_MASK, (int32_t)delta)) {
				return BITFIELD_EXCEEDED;
			}
		}

		if ((rd_gap_new > rd_gap) || (wr_gap_new > wr_gap)) {
			mmio_write_32(DDRC_BASE + OFFSET_DDRC_RANKCTL, rankctl_reg);
		}
	}

	return ret;
}

/* Check if memory type is LPDDR4 using MSTR register */
static bool is_lpddr4(void)
{
	uint32_t mstr;

	mstr = mmio_read_32(DDRC_BASE);
	return ((mstr & MSTR_DRAM_MASK) == MSTR_LPDDR4_VAL);
}

/*
 * Get maximum critical delay difference value.
 * @param cdd_addr[] - list of CDD memory addresses
 * @param size - number of CDDs to be read
 * @return max CDD value
 */
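/* E.g. a stored byte of 0xFE is read back as -2 and contributes 2 to the max. */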
static uint8_t get_max_cdd(const uint32_t cdd_addr[], size_t size)
{
	uint8_t cdd, max = 0;
	int8_t signed_cdd;
	size_t i;

	for (i = 0; i < size; i++) {
		/* CDD has type int8_t - read as unsigned and cast to signed */
		signed_cdd = (int8_t)(mmio_read_8(cdd_addr[i]));
		/* We need to use the absolute value */
		cdd = (uint8_t)((signed_cdd >= 0) ? signed_cdd : -signed_cdd);
		max = MAX(cdd, max);
	}
	return max;
}

/*
 * Get maximum delay value.
 * @param delay_addr[] - list of delay register addresses
 * @param size - number of values to be read
 * @return max delay value
 */
static uint16_t get_max_delay(const uint32_t delay_addr[], size_t size)
{
	uint16_t value, max = 0;
	size_t i;

	for (i = 0; i < size; i++) {
		value = mmio_read_16(delay_addr[i]);
		max = MAX(value, max);
	}
	return max;
}

/*
 * Compute average vref value.
 * @param vref_addr[] - list of vref memory addresses
 * @param size - number of values to be read
 * @return average vref value
 */
static uint8_t get_avg_vref(const uint32_t vref_addr[], size_t size)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < size; i++) {
		sum += mmio_read_8(vref_addr[i]);
	}

	assert((sum / size) <= UINT8_MAX);

	return (uint8_t)(sum / size);
}