/*
 * Copyright (C) 2022-2024, STMicroelectronics - All Rights Reserved
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <drivers/st/stm32mp_ddr.h>
#include <drivers/st/stm32mp_ddrctrl_regs.h>
#include <lib/mmio.h>

#include <platform_def.h>

#define INVALID_OFFSET	0xFFU

static bool axi_port_reenable_request;
static bool host_interface_reenable_request;

static uintptr_t get_base_addr(const struct stm32mp_ddr_priv *priv, enum stm32mp_ddr_base_type base)
{
	if (base == DDRPHY_BASE) {
		return (uintptr_t)priv->phy;
	} else {
		return (uintptr_t)priv->ctl;
	}
}

void stm32mp_ddr_set_reg(const struct stm32mp_ddr_priv *priv, enum stm32mp_ddr_reg_type type,
			 const void *param, const struct stm32mp_ddr_reg_info *ddr_registers)
{
	unsigned int i;
	unsigned int value;
	enum stm32mp_ddr_base_type base = ddr_registers[type].base;
	uintptr_t base_addr = get_base_addr(priv, base);
	const struct stm32mp_ddr_reg_desc *desc = ddr_registers[type].desc;

	VERBOSE("init %s\n", ddr_registers[type].name);
	for (i = 0; i < ddr_registers[type].size; i++) {
		uintptr_t ptr = base_addr + desc[i].offset;

		if (desc[i].par_offset == INVALID_OFFSET) {
			ERROR("invalid parameter offset for %s - index %u\n",
			      ddr_registers[type].name, i);
			panic();
		} else {
#if !STM32MP13 && !STM32MP15
			if (desc[i].qd) {
				stm32mp_ddr_start_sw_done(priv->ctl);
			}
#endif
			value = *((uint32_t *)((uintptr_t)param +
					       desc[i].par_offset));
			mmio_write_32(ptr, value);
#if !STM32MP13 && !STM32MP15
			if (desc[i].qd) {
				stm32mp_ddr_wait_sw_done_ack(priv->ctl);
			}
#endif
		}
	}
}

/* Start quasi dynamic register update */
void stm32mp_ddr_start_sw_done(struct stm32mp_ddrctl *ctl)
{
	mmio_clrbits_32((uintptr_t)&ctl->swctl, DDRCTRL_SWCTL_SW_DONE);
	VERBOSE("[0x%lx] swctl = 0x%x\n",
		(uintptr_t)&ctl->swctl, mmio_read_32((uintptr_t)&ctl->swctl));
}

/* Wait quasi dynamic register update */
void stm32mp_ddr_wait_sw_done_ack(struct stm32mp_ddrctl *ctl)
{
	uint64_t timeout;
	uint32_t swstat;

	mmio_setbits_32((uintptr_t)&ctl->swctl, DDRCTRL_SWCTL_SW_DONE);
	VERBOSE("[0x%lx] swctl = 0x%x\n",
		(uintptr_t)&ctl->swctl, mmio_read_32((uintptr_t)&ctl->swctl));

	timeout = timeout_init_us(DDR_TIMEOUT_US_1S);
	do {
		swstat = mmio_read_32((uintptr_t)&ctl->swstat);
		VERBOSE("[0x%lx] swstat = 0x%x ",
			(uintptr_t)&ctl->swstat, swstat);
		if (timeout_elapsed(timeout)) {
			panic();
		}
	} while ((swstat & DDRCTRL_SWSTAT_SW_DONE_ACK) == 0U);

	VERBOSE("[0x%lx] swstat = 0x%x\n",
		(uintptr_t)&ctl->swstat, swstat);
}

void stm32mp_ddr_enable_axi_port(struct stm32mp_ddrctl *ctl)
{
	/* Enable uMCTL2 AXI port 0 */
	mmio_setbits_32((uintptr_t)&ctl->pctrl_0, DDRCTRL_PCTRL_N_PORT_EN);
	VERBOSE("[0x%lx] pctrl_0 = 0x%x\n", (uintptr_t)&ctl->pctrl_0,
		mmio_read_32((uintptr_t)&ctl->pctrl_0));

#if STM32MP_DDR_DUAL_AXI_PORT
	/* Enable uMCTL2 AXI port 1 */
	mmio_setbits_32((uintptr_t)&ctl->pctrl_1, DDRCTRL_PCTRL_N_PORT_EN);
	VERBOSE("[0x%lx] pctrl_1 = 0x%x\n", (uintptr_t)&ctl->pctrl_1,
		mmio_read_32((uintptr_t)&ctl->pctrl_1));
#endif
}

int stm32mp_ddr_disable_axi_port(struct stm32mp_ddrctl *ctl)
{
	uint64_t timeout;
	uint32_t pstat;

	/* Disable uMCTL2 AXI port 0 */
	mmio_clrbits_32((uintptr_t)&ctl->pctrl_0, DDRCTRL_PCTRL_N_PORT_EN);
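	/*
	 * Clearing the port-enable bit only blocks new AXI transactions;
	 * transactions already in flight still drain, which is why PSTAT
	 * is polled for idleness further down.
	 */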
VERBOSE("[0x%lx] pctrl_0 = 0x%x\n", (uintptr_t)&ctl->pctrl_0, 119 mmio_read_32((uintptr_t)&ctl->pctrl_0)); 120 121 #if STM32MP_DDR_DUAL_AXI_PORT 122 /* Disable uMCTL2 AXI port 1 */ 123 mmio_clrbits_32((uintptr_t)&ctl->pctrl_1, DDRCTRL_PCTRL_N_PORT_EN); 124 VERBOSE("[0x%lx] pctrl_1 = 0x%x\n", (uintptr_t)&ctl->pctrl_1, 125 mmio_read_32((uintptr_t)&ctl->pctrl_1)); 126 #endif 127 128 /* 129 * Waits until all AXI ports are idle 130 * Poll PSTAT.rd_port_busy_n = 0 131 * Poll PSTAT.wr_port_busy_n = 0 132 */ 133 timeout = timeout_init_us(DDR_TIMEOUT_US_1S); 134 do { 135 pstat = mmio_read_32((uintptr_t)&ctl->pstat); 136 VERBOSE("[0x%lx] pstat = 0x%x ", 137 (uintptr_t)&ctl->pstat, pstat); 138 if (timeout_elapsed(timeout)) { 139 return -1; 140 } 141 } while (pstat != 0U); 142 143 return 0; 144 } 145 146 static bool ddr_is_axi_port_enabled(struct stm32mp_ddrctl *ctl) 147 { 148 return (mmio_read_32((uintptr_t)&ctl->pctrl_0) & DDRCTRL_PCTRL_N_PORT_EN) != 0U; 149 } 150 151 void stm32mp_ddr_enable_host_interface(struct stm32mp_ddrctl *ctl) 152 { 153 mmio_clrbits_32((uintptr_t)&ctl->dbg1, DDRCTRL_DBG1_DIS_HIF); 154 VERBOSE("[0x%lx] dbg1 = 0x%x\n", 155 (uintptr_t)&ctl->dbg1, 156 mmio_read_32((uintptr_t)&ctl->dbg1)); 157 } 158 159 void stm32mp_ddr_disable_host_interface(struct stm32mp_ddrctl *ctl) 160 { 161 uint64_t timeout; 162 uint32_t dbgcam; 163 int count = 0; 164 165 mmio_setbits_32((uintptr_t)&ctl->dbg1, DDRCTRL_DBG1_DIS_HIF); 166 VERBOSE("[0x%lx] dbg1 = 0x%x\n", 167 (uintptr_t)&ctl->dbg1, 168 mmio_read_32((uintptr_t)&ctl->dbg1)); 169 170 /* 171 * Waits until all queues and pipelines are empty 172 * Poll DBGCAM.dbg_wr_q_empty = 1 173 * Poll DBGCAM.dbg_rd_q_empty = 1 174 * Poll DBGCAM.dbg_wr_data_pipeline_empty = 1 175 * Poll DBGCAM.dbg_rd_data_pipeline_empty = 1 176 * 177 * data_pipeline fields must be polled twice to ensure 178 * value propoagation, so count is added to loop condition. 179 */ 180 timeout = timeout_init_us(DDR_TIMEOUT_US_1S); 181 do { 182 dbgcam = mmio_read_32((uintptr_t)&ctl->dbgcam); 183 VERBOSE("[0x%lx] dbgcam = 0x%x ", 184 (uintptr_t)&ctl->dbgcam, dbgcam); 185 if (timeout_elapsed(timeout)) { 186 panic(); 187 } 188 count++; 189 } while (((dbgcam & DDRCTRL_DBG_Q_AND_DATA_PIPELINE_EMPTY) != 190 DDRCTRL_DBG_Q_AND_DATA_PIPELINE_EMPTY) || (count < 2)); 191 } 192 193 static bool ddr_is_host_interface_enabled(struct stm32mp_ddrctl *ctl) 194 { 195 return (mmio_read_32((uintptr_t)&ctl->dbg1) & DDRCTRL_DBG1_DIS_HIF) == 0U; 196 } 197 198 int stm32mp_ddr_sw_selfref_entry(struct stm32mp_ddrctl *ctl) 199 { 200 uint64_t timeout; 201 uint32_t stat; 202 uint32_t operating_mode; 203 uint32_t selref_type; 204 205 mmio_setbits_32((uintptr_t)&ctl->pwrctl, DDRCTRL_PWRCTL_SELFREF_SW); 206 VERBOSE("[0x%lx] pwrctl = 0x%x\n", 207 (uintptr_t)&ctl->pwrctl, 208 mmio_read_32((uintptr_t)&ctl->pwrctl)); 209 210 /* 211 * Wait operating mode change in self-refresh mode 212 * with STAT.operating_mode[1:0]==11. 213 * Ensure transition to self-refresh was due to software 214 * by checking also that STAT.selfref_type[1:0]=2. 
	 */
	timeout = timeout_init_us(DDR_TIMEOUT_500US);
	while (!timeout_elapsed(timeout)) {
		stat = mmio_read_32((uintptr_t)&ctl->stat);
		operating_mode = stat & DDRCTRL_STAT_OPERATING_MODE_MASK;
		selref_type = stat & DDRCTRL_STAT_SELFREF_TYPE_MASK;

		if ((operating_mode == DDRCTRL_STAT_OPERATING_MODE_SR) &&
		    (selref_type == DDRCTRL_STAT_SELFREF_TYPE_SR)) {
			return 0;
		}
	}

	return -1;
}

void stm32mp_ddr_sw_selfref_exit(struct stm32mp_ddrctl *ctl)
{
	mmio_clrbits_32((uintptr_t)&ctl->pwrctl, DDRCTRL_PWRCTL_SELFREF_SW);
	VERBOSE("[0x%lx] pwrctl = 0x%x\n",
		(uintptr_t)&ctl->pwrctl,
		mmio_read_32((uintptr_t)&ctl->pwrctl));
}

void stm32mp_ddr_set_qd3_update_conditions(struct stm32mp_ddrctl *ctl)
{
	if (ddr_is_axi_port_enabled(ctl)) {
		if (stm32mp_ddr_disable_axi_port(ctl) != 0) {
			panic();
		}
		axi_port_reenable_request = true;
	}

	if (ddr_is_host_interface_enabled(ctl)) {
		stm32mp_ddr_disable_host_interface(ctl);
		host_interface_reenable_request = true;
	}

	stm32mp_ddr_start_sw_done(ctl);
}

void stm32mp_ddr_unset_qd3_update_conditions(struct stm32mp_ddrctl *ctl)
{
	stm32mp_ddr_wait_sw_done_ack(ctl);

	if (host_interface_reenable_request) {
		stm32mp_ddr_enable_host_interface(ctl);
		host_interface_reenable_request = false;
	}

	if (axi_port_reenable_request) {
		stm32mp_ddr_enable_axi_port(ctl);
		axi_port_reenable_request = false;
	}
}

void stm32mp_ddr_wait_refresh_update_done_ack(struct stm32mp_ddrctl *ctl)
{
	uint64_t timeout;
	uint32_t rfshctl3;
	uint32_t refresh_update_level = DDRCTRL_RFSHCTL3_REFRESH_UPDATE_LEVEL;

	/*
	 * Toggle rfshctl3.refresh_update_level: clear the bit if it is set,
	 * set it otherwise. refresh_update_level then holds the value the
	 * register is expected to read back once the update is acknowledged.
	 */
	rfshctl3 = mmio_read_32((uintptr_t)&ctl->rfshctl3);
	if ((rfshctl3 & refresh_update_level) == refresh_update_level) {
		mmio_clrbits_32((uintptr_t)&ctl->rfshctl3, refresh_update_level);
		refresh_update_level = 0U;
	} else {
		mmio_setbits_32((uintptr_t)&ctl->rfshctl3, refresh_update_level);
	}

	VERBOSE("[0x%lx] rfshctl3 = 0x%x\n",
		(uintptr_t)&ctl->rfshctl3, mmio_read_32((uintptr_t)&ctl->rfshctl3));

	timeout = timeout_init_us(DDR_TIMEOUT_US_1S);
	do {
		rfshctl3 = mmio_read_32((uintptr_t)&ctl->rfshctl3);
		VERBOSE("[0x%lx] rfshctl3 = 0x%x ", (uintptr_t)&ctl->rfshctl3, rfshctl3);
		if (timeout_elapsed(timeout)) {
			panic();
		}
	} while ((rfshctl3 & DDRCTRL_RFSHCTL3_REFRESH_UPDATE_LEVEL) != refresh_update_level);

	VERBOSE("[0x%lx] rfshctl3 = 0x%x\n", (uintptr_t)&ctl->rfshctl3, rfshctl3);
}
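
/*
 * Usage sketch (illustration only, not part of the driver): writes to the
 * controller's quasi-dynamic group 3 registers must be framed by the two
 * helpers above, which quiesce the AXI ports and the host interface and run
 * the SWCTL.sw_done / SWSTAT.sw_done_ack handshake. The function name and
 * the __unused marker are assumptions made for this example only.
 */
static void __unused example_qd3_register_write(struct stm32mp_ddrctl *ctl,
						uintptr_t reg, uint32_t value)
{
	/* Stop AXI/HIF traffic and clear SWCTL.sw_done */
	stm32mp_ddr_set_qd3_update_conditions(ctl);

	/* Program the quasi-dynamic register while updates are permitted */
	mmio_write_32(reg, value);

	/* Set SWCTL.sw_done, wait for the ack, then restore HIF and ports */
	stm32mp_ddr_unset_qd3_update_conditions(ctl);
}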