/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * RK3399 DRAM suspend/resume support.
 *
 * dmc_save() runs from DRAM before suspend and snapshots the DDR
 * controller (CTL), PI and PHY register state plus the DPLL settings into
 * the global sdram_config.  dmc_restore() and everything it calls are
 * marked __sramfunc: they execute from SRAM on resume, while DRAM is not
 * yet usable, to reset and reprogram both DDR channels and rerun training.
 */

#include <debug.h>
#include <arch_helpers.h>
#include <platform_def.h>
#include <plat_private.h>
#include <dram.h>
#include <pmu_regs.h>
#include <rk3399_def.h>
#include <secure.h>
#include <soc.h>
#include <suspend.h>

#define PMUGRF_OS_REG0		0x300
#define PMUGRF_OS_REG1		0x304
#define PMUGRF_OS_REG2		0x308
#define PMUGRF_OS_REG3		0x30c

/*
 * CRU soft-reset write values for one DDR channel.  The upper 16 bits of
 * the CRU register are a write-enable mask, so each macro pairs the mask
 * bit with the reset value @n (1 = hold in reset, 0 = release) for the
 * controller resp. PHY of channel @ch.
 */
#define CRU_SFTRST_DDR_CTRL(ch, n)	((0x1 << (8 + 16 + (ch) * 4)) | \
					 ((n) << (8 + (ch) * 4)))
#define CRU_SFTRST_DDR_PHY(ch, n)	((0x1 << (9 + 16 + (ch) * 4)) | \
					 ((n) << (9 + (ch) * 4)))

/* Field encode/decode helpers for the DPLL CRU_PLL_CON registers. */
#define FBDIV_ENC(n)		((n) << 16)
#define FBDIV_DEC(n)		(((n) >> 16) & 0xfff)
#define POSTDIV2_ENC(n)		((n) << 12)
#define POSTDIV2_DEC(n)		(((n) >> 12) & 0x7)
#define POSTDIV1_ENC(n)		((n) << 8)
#define POSTDIV1_DEC(n)		(((n) >> 8) & 0x7)
#define REFDIV_ENC(n)		(n)
#define REFDIV_DEC(n)		((n) & 0x3f)

/* PMU CRU */
#define PMUCRU_RSTNHOLD_CON0	0x120
#define PMUCRU_RSTNHOLD_CON1	0x124

#define PRESET_GPIO0_HOLD(n)	(((n) << 7) | WMSK_BIT(7))
#define PRESET_GPIO1_HOLD(n)	(((n) << 8) | WMSK_BIT(8))

#define SYS_COUNTER_FREQ_IN_MHZ	(SYS_COUNTER_FREQ_IN_TICKS / 1000000)

/*
 * Copy @num registers from @src to @dst
 *
 * Both addresses are MMIO; the copy is done one 32-bit word at a time so
 * it is safe for device memory (no memcpy, which may use wider accesses).
 */
__sramfunc void sram_regcpy(uintptr_t dst, uintptr_t src, uint32_t num)
{
	while (num--) {
		mmio_write_32(dst, mmio_read_32(src));
		dst += sizeof(uint32_t);
		src += sizeof(uint32_t);
	}
}

static __sramfunc uint32_t sram_get_timer_value(void)
{
	/*
	 * Generic delay timer implementation expects the timer to be a down
	 * counter. We apply bitwise NOT operator to the tick values returned
	 * by read_cntpct_el0() to simulate the down counter.
	 */
	return (uint32_t)(~read_cntpct_el0());
}

/*
 * Busy-wait for at least @usec microseconds using the architectural
 * counter.  Runs from SRAM, so it must not touch DRAM or call into
 * generic timer code.
 */
static __sramfunc void sram_udelay(uint32_t usec)
{
	uint32_t start, cnt, delta, delta_us;

	/* counter is decreasing */
	start = sram_get_timer_value();
	do {
		cnt = sram_get_timer_value();
		if (cnt > start) {
			/*
			 * 32-bit wrap-around of the (simulated) down
			 * counter.  NOTE(review): this under-counts the
			 * elapsed ticks by one (UINT32_MAX rather than
			 * 2^32); negligible for a udelay, but confirm
			 * before reusing this pattern elsewhere.
			 */
			delta = UINT32_MAX - cnt;
			delta += start;
		} else
			delta = start - cnt;
		delta_us = (delta * SYS_COUNTER_FREQ_IN_MHZ);
	} while (delta_us < usec);
}

static __sramfunc void configure_sgrf(void)
{
	/*
	 * SGRF_DDR_RGN_DPLL_CLK and SGRF_DDR_RGN_RTC_CLK:
	 * IC ECO bug, need to set this register.
	 *
	 * SGRF_DDR_RGN_BYPS:
	 * After the PD_CENTER suspend/resume, the DDR region
	 * related registers in the SGRF will be reset, we
	 * need to re-initialize them.
	 */
	mmio_write_32(SGRF_BASE + SGRF_DDRRGN_CON0_16(16),
		      SGRF_DDR_RGN_DPLL_CLK |
		      SGRF_DDR_RGN_RTC_CLK |
		      SGRF_DDR_RGN_BYPS);
}

/*
 * Drive the CRU soft resets for one DDR channel.
 * @ctl / @phy: 1 = assert reset, 0 = release (masked to a single bit).
 */
static __sramfunc void rkclk_ddr_reset(uint32_t channel, uint32_t ctl,
		uint32_t phy)
{
	channel &= 0x1;
	ctl &= 0x1;
	phy &= 0x1;
	mmio_write_32(CRU_BASE + CRU_SOFTRST_CON(4),
		      CRU_SFTRST_DDR_CTRL(channel, ctl) |
		      CRU_SFTRST_DDR_PHY(channel, phy));
}

/*
 * Reset sequence for channel @ch: assert controller+PHY reset, release
 * the PHY first, then the controller, with 10us settle time per step.
 */
static __sramfunc void phy_pctrl_reset(uint32_t ch)
{
	rkclk_ddr_reset(ch, 1, 1);
	sram_udelay(10);
	rkclk_ddr_reset(ch, 1, 0);
	sram_udelay(10);
	rkclk_ddr_reset(ch, 0, 0);
	sram_udelay(10);
}

/* Point the per-byte-lane training index registers at chip-select @rank. */
static __sramfunc void set_cs_training_index(uint32_t ch, uint32_t rank)
{
	/* PHY_8/136/264/392 phy_per_cs_training_index_X 1bit offset_24 */
	mmio_clrsetbits_32(PHY_REG(ch, 8), 0x1 << 24, rank << 24);
	mmio_clrsetbits_32(PHY_REG(ch, 136), 0x1 << 24, rank << 24);
	mmio_clrsetbits_32(PHY_REG(ch, 264), 0x1 << 24, rank << 24);
	mmio_clrsetbits_32(PHY_REG(ch, 392), 0x1 << 24, rank << 24);
}

/*
 * Select training index @rank, but only when per-CS training is enabled
 * in the PHY (otherwise the index registers are left untouched).
 */
static __sramfunc void select_per_cs_training_index(uint32_t ch, uint32_t rank)
{
	/* PHY_84 PHY_PER_CS_TRAINING_EN_0 1bit offset_16 */
	if ((mmio_read_32(PHY_REG(ch, 84)) >> 16) & 1)
		set_cs_training_index(ch, rank);
}

/*
 * After write leveling, force a fixed write-leveling delay (0x200) into
 * all four byte lanes via the multicast path, then request a controller
 * update so the new values take effect.
 *
 * NOTE(review): this function is not __sramfunc although it is called
 * from data_training() (which is); presumably the linker still places it
 * reachable during resume -- confirm against the platform linker script.
 */
static void override_write_leveling_value(uint32_t ch)
{
	uint32_t byte;

	/* PHY_896 PHY_FREQ_SEL_MULTICAST_EN 1bit offset_0 */
	mmio_setbits_32(PHY_REG(ch, 896), 1);

	/*
	 * PHY_8/136/264/392
	 * phy_per_cs_training_multicast_en_X 1bit offset_16
	 */
	mmio_clrsetbits_32(PHY_REG(ch, 8), 0x1 << 16, 1 << 16);
	mmio_clrsetbits_32(PHY_REG(ch, 136), 0x1 << 16, 1 << 16);
	mmio_clrsetbits_32(PHY_REG(ch, 264), 0x1 << 16, 1 << 16);
	mmio_clrsetbits_32(PHY_REG(ch, 392), 0x1 << 16, 1 << 16);

	for (byte = 0; byte < 4; byte++)
		mmio_clrsetbits_32(PHY_REG(ch, 63 + (128 * byte)),
				   0xffff << 16,
				   0x200 << 16);

	/* PHY_896 PHY_FREQ_SEL_MULTICAST_EN 1bit offset_0 */
	mmio_clrbits_32(PHY_REG(ch, 896), 1);

	/* CTL_200 ctrlupd_req 1bit offset_8 */
	mmio_clrsetbits_32(CTL_REG(ch, 200), 0x1 << 8, 0x1 << 8);
}

/*
 * Run the PI-driven training sequences selected by @training_flag on
 * channel @ch.  PI_FULL_TRAINING is expanded to the set of steps valid
 * for the DRAM type.  Each step is requested per rank (or per CS-mask
 * bit for CA/WDQ leveling on LPDDR4) and polled to completion via the
 * PI interrupt status register, cross-checked against the PHY
 * observation registers.
 *
 * Returns 0 on success, -1 as soon as any step reports an error.
 */
static __sramfunc int data_training(uint32_t ch,
		struct rk3399_sdram_params *sdram_params,
		uint32_t training_flag)
{
	uint32_t obs_0, obs_1, obs_2, obs_3, obs_err = 0;
	uint32_t rank = sdram_params->ch[ch].rank;
	uint32_t rank_mask;
	uint32_t i, tmp;

	/*
	 * LPDDR4 uses a 4-bit CS mask (ranks at bits 0/2 when single-rank);
	 * other types use a plain 1- or 2-bit rank mask.
	 */
	if (sdram_params->dramtype == LPDDR4)
		rank_mask = (rank == 1) ? 0x5 : 0xf;
	else
		rank_mask = (rank == 1) ? 0x1 : 0x3;

	/* PHY_927 PHY_PAD_DQS_DRIVE RPULL offset_22 */
	mmio_setbits_32(PHY_REG(ch, 927), (1 << 22));

	if (training_flag == PI_FULL_TRAINING) {
		if (sdram_params->dramtype == LPDDR4) {
			training_flag = PI_WRITE_LEVELING |
					PI_READ_GATE_TRAINING |
					PI_READ_LEVELING |
					PI_WDQ_LEVELING;
		} else if (sdram_params->dramtype == LPDDR3) {
			training_flag = PI_CA_TRAINING | PI_WRITE_LEVELING |
					PI_READ_GATE_TRAINING;
		} else if (sdram_params->dramtype == DDR3) {
			training_flag = PI_WRITE_LEVELING |
					PI_READ_GATE_TRAINING |
					PI_READ_LEVELING;
		}
	}

	/* ca training(LPDDR4,LPDDR3 support) */
	if ((training_flag & PI_CA_TRAINING) == PI_CA_TRAINING) {
		for (i = 0; i < 4; i++) {
			if (!(rank_mask & (1 << i)))
				continue;

			select_per_cs_training_index(ch, i);
			/* PI_100 PI_CALVL_EN:RW:8:2 */
			mmio_clrsetbits_32(PI_REG(ch, 100), 0x3 << 8, 0x2 << 8);

			/* PI_92 PI_CALVL_REQ:WR:16:1,PI_CALVL_CS:RW:24:2 */
			mmio_clrsetbits_32(PI_REG(ch, 92),
					   (0x1 << 16) | (0x3 << 24),
					   (0x1 << 16) | (i << 24));
			/*
			 * Poll until the PI reports leveling-done without
			 * the error bit, or bail out on error/bad obs.
			 */
			while (1) {
				/* PI_174 PI_INT_STATUS:RD:8:18 */
				tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;

				/*
				 * check status obs
				 * PHY_532/660/788 phy_adr_calvl_obs1_:0:32
				 */
				obs_0 = mmio_read_32(PHY_REG(ch, 532));
				obs_1 = mmio_read_32(PHY_REG(ch, 660));
				obs_2 = mmio_read_32(PHY_REG(ch, 788));
				if (((obs_0 >> 30) & 0x3) ||
				    ((obs_1 >> 30) & 0x3) ||
				    ((obs_2 >> 30) & 0x3))
					obs_err = 1;
				if ((((tmp >> 11) & 0x1) == 0x1) &&
				    (((tmp >> 13) & 0x1) == 0x1) &&
				    (((tmp >> 5) & 0x1) == 0x0) &&
				    (obs_err == 0))
					break;
				else if ((((tmp >> 5) & 0x1) == 0x1) ||
					 (obs_err == 1))
					return -1;
			}
			/* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */
			mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
		}
		mmio_clrbits_32(PI_REG(ch, 100), 0x3 << 8);
	}

	/* write leveling(LPDDR4,LPDDR3,DDR3 support) */
	if ((training_flag & PI_WRITE_LEVELING) == PI_WRITE_LEVELING) {
		for (i = 0; i < rank; i++) {
			select_per_cs_training_index(ch, i);
			/* PI_60 PI_WRLVL_EN:RW:8:2 */
			mmio_clrsetbits_32(PI_REG(ch, 60), 0x3 << 8, 0x2 << 8);
			/* PI_59 PI_WRLVL_REQ:WR:8:1,PI_WRLVL_CS:RW:16:2 */
			mmio_clrsetbits_32(PI_REG(ch, 59),
					   (0x1 << 8) | (0x3 << 16),
					   (0x1 << 8) | (i << 16));

			while (1) {
				/* PI_174 PI_INT_STATUS:RD:8:18 */
				tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;

				/*
				 * check status obs, if error maybe can not
				 * get leveling done PHY_40/168/296/424
				 * phy_wrlvl_status_obs_X:0:13
				 */
				obs_0 = mmio_read_32(PHY_REG(ch, 40));
				obs_1 = mmio_read_32(PHY_REG(ch, 168));
				obs_2 = mmio_read_32(PHY_REG(ch, 296));
				obs_3 = mmio_read_32(PHY_REG(ch, 424));
				if (((obs_0 >> 12) & 0x1) ||
				    ((obs_1 >> 12) & 0x1) ||
				    ((obs_2 >> 12) & 0x1) ||
				    ((obs_3 >> 12) & 0x1))
					obs_err = 1;
				if ((((tmp >> 10) & 0x1) == 0x1) &&
				    (((tmp >> 13) & 0x1) == 0x1) &&
				    (((tmp >> 4) & 0x1) == 0x0) &&
				    (obs_err == 0))
					break;
				else if ((((tmp >> 4) & 0x1) == 0x1) ||
					 (obs_err == 1))
					return -1;
			}

			/* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */
			mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
		}
		override_write_leveling_value(ch);
		mmio_clrbits_32(PI_REG(ch, 60), 0x3 << 8);
	}

	/* read gate training(LPDDR4,LPDDR3,DDR3 support) */
	if ((training_flag & PI_READ_GATE_TRAINING) == PI_READ_GATE_TRAINING) {
		for (i = 0; i < rank; i++) {
			select_per_cs_training_index(ch, i);
			/* PI_80 PI_RDLVL_GATE_EN:RW:24:2 */
			mmio_clrsetbits_32(PI_REG(ch, 80), 0x3 << 24,
					   0x2 << 24);
			/*
			 * PI_74 PI_RDLVL_GATE_REQ:WR:16:1
			 * PI_RDLVL_CS:RW:24:2
			 */
			mmio_clrsetbits_32(PI_REG(ch, 74),
					   (0x1 << 16) | (0x3 << 24),
					   (0x1 << 16) | (i << 24));

			while (1) {
				/* PI_174 PI_INT_STATUS:RD:8:18 */
				tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;

				/*
				 * check status obs
				 * PHY_43/171/299/427
				 * PHY_GTLVL_STATUS_OBS_x:16:8
				 */
				obs_0 = mmio_read_32(PHY_REG(ch, 43));
				obs_1 = mmio_read_32(PHY_REG(ch, 171));
				obs_2 = mmio_read_32(PHY_REG(ch, 299));
				obs_3 = mmio_read_32(PHY_REG(ch, 427));
				if (((obs_0 >> (16 + 6)) & 0x3) ||
				    ((obs_1 >> (16 + 6)) & 0x3) ||
				    ((obs_2 >> (16 + 6)) & 0x3) ||
				    ((obs_3 >> (16 + 6)) & 0x3))
					obs_err = 1;
				if ((((tmp >> 9) & 0x1) == 0x1) &&
				    (((tmp >> 13) & 0x1) == 0x1) &&
				    (((tmp >> 3) & 0x1) == 0x0) &&
				    (obs_err == 0))
					break;
				else if ((((tmp >> 3) & 0x1) == 0x1) ||
					 (obs_err == 1))
					return -1;
			}
			/* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */
			mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
		}
		mmio_clrbits_32(PI_REG(ch, 80), 0x3 << 24);
	}

	/* read leveling(LPDDR4,LPDDR3,DDR3 support) */
	if ((training_flag & PI_READ_LEVELING) == PI_READ_LEVELING) {
		for (i = 0; i < rank; i++) {
			select_per_cs_training_index(ch, i);
			/* PI_80 PI_RDLVL_EN:RW:16:2 */
			mmio_clrsetbits_32(PI_REG(ch, 80), 0x3 << 16,
					   0x2 << 16);
			/* PI_74 PI_RDLVL_REQ:WR:8:1,PI_RDLVL_CS:RW:24:2 */
			mmio_clrsetbits_32(PI_REG(ch, 74),
					   (0x1 << 8) | (0x3 << 24),
					   (0x1 << 8) | (i << 24));
			while (1) {
				/* PI_174 PI_INT_STATUS:RD:8:18 */
				tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;

				/*
				 * make sure status obs not report error bit
				 * PHY_46/174/302/430
				 * phy_rdlvl_status_obs_X:16:8
				 */
				if ((((tmp >> 8) & 0x1) == 0x1) &&
				    (((tmp >> 13) & 0x1) == 0x1) &&
				    (((tmp >> 2) & 0x1) == 0x0))
					break;
				else if (((tmp >> 2) & 0x1) == 0x1)
					return -1;
			}
			/* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */
			mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
		}
		mmio_clrbits_32(PI_REG(ch, 80), 0x3 << 16);
	}

	/* wdq leveling(LPDDR4 support) */
	if ((training_flag & PI_WDQ_LEVELING) == PI_WDQ_LEVELING) {
		for (i = 0; i < 4; i++) {
			if (!(rank_mask & (1 << i)))
				continue;

			select_per_cs_training_index(ch, i);
			/*
			 * disable PI_WDQLVL_VREF_EN before wdq leveling?
			 * PI_181 PI_WDQLVL_VREF_EN:RW:8:1
			 */
			mmio_clrbits_32(PI_REG(ch, 181), 0x1 << 8);
			/* PI_124 PI_WDQLVL_EN:RW:16:2 */
			mmio_clrsetbits_32(PI_REG(ch, 124), 0x3 << 16,
					   0x2 << 16);
			/* PI_121 PI_WDQLVL_REQ:WR:8:1,PI_WDQLVL_CS:RW:16:2 */
			mmio_clrsetbits_32(PI_REG(ch, 121),
					   (0x1 << 8) | (0x3 << 16),
					   (0x1 << 8) | (i << 16));
			while (1) {
				/* PI_174 PI_INT_STATUS:RD:8:18 */
				tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;
				if ((((tmp >> 12) & 0x1) == 0x1) &&
				    (((tmp >> 13) & 0x1) == 0x1) &&
				    (((tmp >> 6) & 0x1) == 0x0))
					break;
				else if (((tmp >> 6) & 0x1) == 0x1)
					return -1;
			}
			/* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */
			mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
		}
		mmio_clrbits_32(PI_REG(ch, 124), 0x3 << 16);
	}

	/* PHY_927 PHY_PAD_DQS_DRIVE RPULL offset_22 */
	mmio_clrbits_32(PHY_REG(ch, 927), (1 << 22));

	return 0;
}

/*
 * Program the MSCH (memory scheduler) device configuration and the
 * per-CS capacity (in 32MB units) for @channel from the saved channel
 * geometry.  cs0_cap is derived from row/col/bank/buswidth bits; the
 * "- 20" converts the address-bit count into MB.
 */
static __sramfunc void set_ddrconfig(struct rk3399_sdram_params *sdram_params,
		unsigned char channel, uint32_t ddrconfig)
{
	/* only need to set ddrconfig */
	struct rk3399_sdram_channel *ch = &sdram_params->ch[channel];
	unsigned int cs0_cap = 0;
	unsigned int cs1_cap = 0;

	cs0_cap = (1 << (ch->cs0_row + ch->col + ch->bk + ch->bw - 20));
	if (ch->rank > 1)
		cs1_cap = cs0_cap >> (ch->cs0_row - ch->cs1_row);
	if (ch->row_3_4) {
		/* 3/4-row parts (e.g. 6Gb dies) have 3/4 of the capacity */
		cs0_cap = cs0_cap * 3 / 4;
		cs1_cap = cs1_cap * 3 / 4;
	}

	mmio_write_32(MSCH_BASE(channel) + MSCH_DEVICECONF,
		      ddrconfig | (ddrconfig << 6));
	mmio_write_32(MSCH_BASE(channel) + MSCH_DEVICESIZE,
		      ((cs0_cap / 32) & 0xff) | (((cs1_cap / 32) & 0xff) << 8));
}

/*
 * Restore the MSCH NoC timing registers for every populated channel
 * (col == 0 marks an unpopulated channel), set the channel stride, and
 * re-arm the reboot-hold / global-reset configuration.
 */
static __sramfunc void dram_all_config(struct rk3399_sdram_params *sdram_params)
{
	unsigned int i;

	for (i = 0; i < 2; i++) {
		struct rk3399_sdram_channel *info = &sdram_params->ch[i];
		struct rk3399_msch_timings *noc = &info->noc_timings;

		if (sdram_params->ch[i].col == 0)
			continue;

		mmio_write_32(MSCH_BASE(i) + MSCH_DDRTIMINGA0,
			      noc->ddrtiminga0.d32);
		mmio_write_32(MSCH_BASE(i) + MSCH_DDRTIMINGB0,
			      noc->ddrtimingb0.d32);
		mmio_write_32(MSCH_BASE(i) + MSCH_DDRTIMINGC0,
			      noc->ddrtimingc0.d32);
		mmio_write_32(MSCH_BASE(i) + MSCH_DEVTODEV0,
			      noc->devtodev0.d32);
		mmio_write_32(MSCH_BASE(i) + MSCH_DDRMODE, noc->ddrmode.d32);

		/* rank 1 memory clock disable (dfi_dram_clk_disable = 1) */
		if (sdram_params->ch[i].rank == 1)
			mmio_setbits_32(CTL_REG(i, 276), 1 << 17);
	}

	DDR_STRIDE(sdram_params->stride);

	/* reboot hold register set */
	mmio_write_32(PMUCRU_BASE + CRU_PMU_RSTHOLD_CON(1),
		      CRU_PMU_SGRF_RST_RLS |
		      PRESET_GPIO0_HOLD(1) |
		      PRESET_GPIO1_HOLD(1));
	mmio_clrsetbits_32(CRU_BASE + CRU_GLB_RST_CON, 0x3, 0x3);
}

/*
 * Reload the saved CTL/PI/PHY register images for channel @ch, start the
 * PI and controller, then wait for the PHY PLL lock indications
 * (PHY_920/921/922) unless the PLL bypass bit (PHY_911 bit 0) is set.
 * The remaining PHY register groups are restored only after lock.
 */
static __sramfunc void pctl_cfg(uint32_t ch,
		struct rk3399_sdram_params *sdram_params)
{
	const uint32_t *params_ctl = sdram_params->pctl_regs.denali_ctl;
	const uint32_t *params_pi = sdram_params->pi_regs.denali_pi;
	const struct rk3399_ddr_publ_regs *phy_regs = &sdram_params->phy_regs;
	uint32_t tmp, tmp1, tmp2, i;

	/*
	 * Workaround controller bug:
	 * Do not program DRAM_CLASS until NO_PHY_IND_TRAIN_INT is programmed
	 */
	sram_regcpy(CTL_REG(ch, 1), (uintptr_t)&params_ctl[1],
		    CTL_REG_NUM - 1);
	mmio_write_32(CTL_REG(ch, 0), params_ctl[0]);
	sram_regcpy(PI_REG(ch, 0), (uintptr_t)&params_pi[0],
		    PI_REG_NUM);

	/* PHY_910..912 must be in place before starting the controller */
	sram_regcpy(PHY_REG(ch, 910), (uintptr_t)&phy_regs->phy896[910 - 896],
		    3);

	mmio_clrsetbits_32(CTL_REG(ch, 68), PWRUP_SREFRESH_EXIT,
			   PWRUP_SREFRESH_EXIT);

	/* PHY_DLL_RST_EN */
	mmio_clrsetbits_32(PHY_REG(ch, 957), 0x3 << 24, 1 << 24);
	/* ensure all prior register writes land before START */
	dmbst();

	mmio_setbits_32(PI_REG(ch, 0), START);
	mmio_setbits_32(CTL_REG(ch, 0), START);

	/* wait lock */
	while (1) {
		tmp = mmio_read_32(PHY_REG(ch, 920));
		tmp1 = mmio_read_32(PHY_REG(ch, 921));
		tmp2 = mmio_read_32(PHY_REG(ch, 922));
		if ((((tmp >> 16) & 0x1) == 0x1) &&
		    (((tmp1 >> 16) & 0x1) == 0x1) &&
		    (((tmp1 >> 0) & 0x1) == 0x1) &&
		    (((tmp2 >> 0) & 0x1) == 0x1))
			break;
		/* if PLL bypass,don't need wait lock */
		if (mmio_read_32(PHY_REG(ch, 911)) & 0x1)
			break;
	}

	sram_regcpy(PHY_REG(ch, 896), (uintptr_t)&phy_regs->phy896[0], 63);

	/* per-byte-lane groups: PHY_0/128/256/384, 91 registers each */
	for (i = 0; i < 4; i++)
		sram_regcpy(PHY_REG(ch, 128 * i),
			    (uintptr_t)&phy_regs->phy0[i][0], 91);

	/* address/command groups: PHY_512/640/768, 38 registers each */
	for (i = 0; i < 3; i++)
		sram_regcpy(PHY_REG(ch, 512 + 128 * i),
			    (uintptr_t)&phy_regs->phy512[i][0], 38);
}

/*
 * Toggle the DRAM frequency to the other of the two prepared indices via
 * the CIC (clock interchange controller) handshake, then rerun full
 * training on every channel at the new frequency.
 *
 * Returns 0 on success, -1 if training fails on any channel.
 */
static __sramfunc int dram_switch_to_next_index(
		struct rk3399_sdram_params *sdram_params)
{
	uint32_t ch, ch_count;
	/* fn = the index we are NOT currently running (CTL_111 bit 16) */
	uint32_t fn = ((mmio_read_32(CTL_REG(0, 111)) >> 16) + 1) & 0x1;

	mmio_write_32(CIC_BASE + CIC_CTRL0,
		      (((0x3 << 4) | (1 << 2) | 1) << 16) |
		      (fn << 4) | (1 << 2) | 1);
	while (!(mmio_read_32(CIC_BASE + CIC_STATUS0) & (1 << 2)))
		;

	mmio_write_32(CIC_BASE + CIC_CTRL0, 0x20002);
	while (!(mmio_read_32(CIC_BASE + CIC_STATUS0) & (1 << 0)))
		;

	ch_count = sdram_params->num_channels;

	/* LPDDR4 f2 cann't do training, all training will fail */
	for (ch = 0; ch < ch_count; ch++) {
		mmio_clrsetbits_32(PHY_REG(ch, 896), (0x3 << 8) | 1,
				   fn << 8);

		/* data_training failed */
		if (data_training(ch, sdram_params, PI_FULL_TRAINING))
			return -1;
	}

	return 0;
}

/*
 * Needs to be done for both channels at once in case of a shared reset signal
 * between channels.
 *
 * Releases IO retention, pulses PHY_DLL_RST_EN, waits (up to ~10ms per
 * channel) for the controller to report CKE high (CTL_203 bit 3), then
 * restores the saved per-byte PHY_RX_CAL_DQS values.
 *
 * Returns 0 on success, -1 if a channel's CKE never comes up.
 */
static __sramfunc int pctl_start(uint32_t channel_mask,
		struct rk3399_sdram_params *sdram_params)
{
	uint32_t count;
	uint32_t byte;

	mmio_setbits_32(CTL_REG(0, 68), PWRUP_SREFRESH_EXIT);
	mmio_setbits_32(CTL_REG(1, 68), PWRUP_SREFRESH_EXIT);

	/* need de-access IO retention before controller START */
	if (channel_mask & (1 << 0))
		mmio_setbits_32(PMU_BASE + PMU_PWRMODE_CON, (1 << 19));
	if (channel_mask & (1 << 1))
		mmio_setbits_32(PMU_BASE + PMU_PWRMODE_CON, (1 << 23));

	/* PHY_DLL_RST_EN */
	if (channel_mask & (1 << 0))
		mmio_clrsetbits_32(PHY_REG(0, 957), 0x3 << 24,
				   0x2 << 24);
	if (channel_mask & (1 << 1))
		mmio_clrsetbits_32(PHY_REG(1, 957), 0x3 << 24,
				   0x2 << 24);

	/* check ERROR bit */
	if (channel_mask & (1 << 0)) {
		count = 0;
		while (!(mmio_read_32(CTL_REG(0, 203)) & (1 << 3))) {
			/* CKE is low, loop 10ms */
			if (count > 100)
				return -1;

			sram_udelay(100);
			count++;
		}

		mmio_clrbits_32(CTL_REG(0, 68), PWRUP_SREFRESH_EXIT);

		/* Restore the PHY_RX_CAL_DQS value */
		for (byte = 0; byte < 4; byte++)
			mmio_clrsetbits_32(PHY_REG(0, 57 + 128 * byte),
					   0xfff << 16,
					   sdram_params->rx_cal_dqs[0][byte]);
	}
	if (channel_mask & (1 << 1)) {
		count = 0;
		while (!(mmio_read_32(CTL_REG(1, 203)) & (1 << 3))) {
			/* CKE is low, loop 10ms */
			if (count > 100)
				return -1;

			sram_udelay(100);
			count++;
		}

		mmio_clrbits_32(CTL_REG(1, 68), PWRUP_SREFRESH_EXIT);

		/* Restore the PHY_RX_CAL_DQS value */
		for (byte = 0; byte < 4; byte++)
			mmio_clrsetbits_32(PHY_REG(1, 57 + 128 * byte),
					   0xfff << 16,
					   sdram_params->rx_cal_dqs[1][byte]);
	}

	return 0;
}

/*
 * Snapshot the DRAM state before suspend: decode the DPLL divisors into
 * ddr_freq, record the ODT setting, and copy the CTL/PI/PHY register
 * images (with the START bits masked off so they only fire when
 * dmc_restore() triggers them) plus the per-byte RX_CAL_DQS values into
 * sdram_config.  Channel 0's registers are used as the template for the
 * PHY image; the saved image is pre-tweaked for the restore path
 * (DLL reset asserted, multicast enabled, frequency-select cleared).
 */
void dmc_save(void)
{
	struct rk3399_sdram_params *sdram_params = &sdram_config;
	struct rk3399_ddr_publ_regs *phy_regs;
	uint32_t *params_ctl;
	uint32_t *params_pi;
	uint32_t refdiv, postdiv2, postdiv1, fbdiv;
	uint32_t tmp, ch, byte, i;

	phy_regs = &sdram_params->phy_regs;
	params_ctl = sdram_params->pctl_regs.denali_ctl;
	params_pi = sdram_params->pi_regs.denali_pi;

	fbdiv = mmio_read_32(CRU_BASE + CRU_PLL_CON(DPLL_ID, 0)) & 0xfff;
	tmp = mmio_read_32(CRU_BASE + CRU_PLL_CON(DPLL_ID, 1));
	postdiv2 = POSTDIV2_DEC(tmp);
	postdiv1 = POSTDIV1_DEC(tmp);
	refdiv = REFDIV_DEC(tmp);

	/* DPLL output = 24MHz * fbdiv / (refdiv * postdiv1 * postdiv2) */
	sdram_params->ddr_freq = ((fbdiv * 24) /
				(refdiv * postdiv1 * postdiv2)) * MHz;

	INFO("sdram_params->ddr_freq = %d\n", sdram_params->ddr_freq);
	/* ODT is considered enabled if PHY_5 bits [18:16] are non-zero */
	sdram_params->odt = (((mmio_read_32(PHY_REG(0, 5)) >> 16) &
			      0x7) != 0) ? 1 : 0;

	/* copy the registers CTL PI and PHY */
	sram_regcpy((uintptr_t)&params_ctl[0], CTL_REG(0, 0), CTL_REG_NUM);

	/* mask DENALI_CTL_00_DATA.START, only copy here, will trigger later */
	params_ctl[0] &= ~(0x1 << 0);

	sram_regcpy((uintptr_t)&params_pi[0], PI_REG(0, 0),
		    PI_REG_NUM);

	/* mask DENALI_PI_00_DATA.START, only copy here, will trigger later*/
	params_pi[0] &= ~(0x1 << 0);

	for (i = 0; i < 4; i++)
		sram_regcpy((uintptr_t)&phy_regs->phy0[i][0],
			    PHY_REG(0, 128 * i), 91);

	for (i = 0; i < 3; i++)
		sram_regcpy((uintptr_t)&phy_regs->phy512[i][0],
			    PHY_REG(0, 512 + 128 * i), 38);

	sram_regcpy((uintptr_t)&phy_regs->phy896[0], PHY_REG(0, 896), 63);

	for (ch = 0; ch < sdram_params->num_channels; ch++) {
		for (byte = 0; byte < 4; byte++)
			sdram_params->rx_cal_dqs[ch][byte] = (0xfff << 16) &
				mmio_read_32(PHY_REG(ch, 57 + byte * 128));
	}

	/* set DENALI_PHY_957_DATA.PHY_DLL_RST_EN = 0x1 */
	phy_regs->phy896[957 - 896] &= ~(0x3 << 24);
	phy_regs->phy896[957 - 896] |= 1 << 24;
	phy_regs->phy896[0] |= 1;
	phy_regs->phy896[0] &= ~(0x3 << 8);
}

/*
 * SRAM-resident resume entry point: re-initialize the SGRF DDR region,
 * reset and reprogram each channel from the saved images, start the
 * controllers, rerun full training, restore the NoC configuration, and
 * finally switch to the other frequency index.  Any controller-start or
 * training failure restarts the whole sequence from the "retry" label.
 */
__sramfunc void dmc_restore(void)
{
	struct rk3399_sdram_params *sdram_params = &sdram_config;
	uint32_t channel_mask = 0;
	uint32_t channel;

	configure_sgrf();

retry:
	for (channel = 0; channel < sdram_params->num_channels; channel++) {
		phy_pctrl_reset(channel);
		/*
		 * NOTE(review): dead check -- the loop condition already
		 * guarantees channel < num_channels; presumably left over
		 * from a fixed-bound loop.  Harmless; confirm upstream
		 * before removing.
		 */
		if (channel >= sdram_params->num_channels)
			continue;

		pctl_cfg(channel, sdram_params);
	}

	for (channel = 0; channel < 2; channel++) {
		if (sdram_params->ch[channel].col)
			channel_mask |= 1 << channel;
	}

	if (pctl_start(channel_mask, sdram_params) < 0)
		goto retry;

	for (channel = 0; channel < sdram_params->num_channels; channel++) {
		/* LPDDR2/LPDDR3 need to wait DAI complete, max 10us */
		if (sdram_params->dramtype == LPDDR3)
			sram_udelay(10);

		/* If traning fail, retry to do it again. */
		if (data_training(channel, sdram_params, PI_FULL_TRAINING))
			goto retry;

		set_ddrconfig(sdram_params, channel,
			      sdram_params->ch[channel].ddrconfig);
	}

	dram_all_config(sdram_params);

	/* Switch to index 1 and prepare for DDR frequency switch. */
	dram_switch_to_next_index(sdram_params);
}