/*
 * Copyright 2019-2022 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <bl31/interrupt_mgmt.h>
#include <common/runtime_svc.h>
#include <lib/mmio.h>
#include <lib/spinlock.h>
#include <plat/common/platform.h>

#include <dram.h>

#define IMX_SIP_DDR_DVFS_GET_FREQ_COUNT	0x10
#define IMX_SIP_DDR_DVFS_GET_FREQ_INFO	0x11

struct dram_info dram_info;

/* lock used for DDR DVFS */
spinlock_t dfs_lock;

static volatile uint32_t wfe_done;
static volatile bool wait_ddrc_hwffc_done = true;
static unsigned int dev_fsp = 0x1;

static uint32_t fsp_init_reg[3][4] = {
	{ DDRC_INIT3(0), DDRC_INIT4(0), DDRC_INIT6(0), DDRC_INIT7(0) },
	{ DDRC_FREQ1_INIT3(0), DDRC_FREQ1_INIT4(0), DDRC_FREQ1_INIT6(0), DDRC_FREQ1_INIT7(0) },
	{ DDRC_FREQ2_INIT3(0), DDRC_FREQ2_INIT4(0), DDRC_FREQ2_INIT6(0), DDRC_FREQ2_INIT7(0) },
};

static void get_mr_values(uint32_t (*mr_value)[8])
{
	uint32_t init_val;
	unsigned int i, fsp_index;

	for (fsp_index = 0U; fsp_index < 3U; fsp_index++) {
		for (i = 0U; i < 4U; i++) {
			/* each INITx register packs two 16-bit MR values */
			init_val = mmio_read_32(fsp_init_reg[fsp_index][i]);
			mr_value[fsp_index][2 * i] = init_val >> 16;
			mr_value[fsp_index][2 * i + 1] = init_val & 0xFFFF;
		}
	}
}

/* Restore the ddrc configs */
void dram_umctl2_init(struct dram_timing_info *timing)
{
	struct dram_cfg_param *ddrc_cfg = timing->ddrc_cfg;
	unsigned int i;

	for (i = 0U; i < timing->ddrc_cfg_num; i++) {
		mmio_write_32(ddrc_cfg->reg, ddrc_cfg->val);
		ddrc_cfg++;
	}

	/* set the default fsp to P0 */
	mmio_write_32(DDRC_MSTR2(0), 0x0);
}

/* Restore the dram PHY config */
void dram_phy_init(struct dram_timing_info *timing)
{
	struct dram_cfg_param *cfg = timing->ddrphy_cfg;
	unsigned int i;

	/* Restore the PHY init config */
	cfg = timing->ddrphy_cfg;
	for (i = 0U; i < timing->ddrphy_cfg_num; i++) {
		dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
		cfg++;
	}

	/* Restore the DDR PHY CSRs */
	cfg = timing->ddrphy_trained_csr;
	for (i = 0U; i < timing->ddrphy_trained_csr_num; i++) {
		dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
		cfg++;
	}

	/* Load the PIE image */
	cfg = timing->ddrphy_pie;
	for (i = 0U; i < timing->ddrphy_pie_num; i++) {
		dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
		cfg++;
	}
}

/* EL3 SGI-8 IPI handler for DDR dynamic frequency scaling */
static uint64_t waiting_dvfs(uint32_t id, uint32_t flags,
			     void *handle, void *cookie)
{
	uint64_t mpidr = read_mpidr_el1();
	unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
	uint32_t irq;

	irq = plat_ic_acknowledge_interrupt();
	/* IDs 1022 and above are special/spurious; skip the EOI for them */
	if (irq < 1022U) {
		plat_ic_end_of_interrupt(irq);
	}

	/* set the WFE done status */
	spin_lock(&dfs_lock);
	wfe_done |= (1 << (cpu_id * 8));
	dsb();
	spin_unlock(&dfs_lock);

	while (1) {
		/* ddr frequency change done */
		if (!wait_ddrc_hwffc_done) {
			break;
		}

		wfe();
	}

	return 0;
}

void dram_info_init(unsigned long dram_timing_base)
{
	uint32_t ddrc_mstr, current_fsp;
	unsigned int idx = 0;
	uint32_t flags = 0;
	int32_t rc;
	unsigned int i;

	/* Get the dram type & rank */
	ddrc_mstr = mmio_read_32(DDRC_MSTR(0));

	dram_info.dram_type = ddrc_mstr & DDR_TYPE_MASK;
	dram_info.num_rank = (ddrc_mstr >> 24) & ACTIVE_RANK_MASK;

	/* Get current fsp info */
	current_fsp = mmio_read_32(DDRC_DFIMISC(0)) & 0xf;
	dram_info.boot_fsp = current_fsp;
	dram_info.current_fsp = current_fsp;

	get_mr_values(dram_info.mr_table);

	dram_info.timing_info = (struct dram_timing_info *)dram_timing_base;

	/* get the number of supported fsp */
	for (i = 0U; i < 4U; ++i) {
		if (!dram_info.timing_info->fsp_table[i]) {
			break;
		}
		idx = i;
	}
	dram_info.num_fsp = i;

	/* check whether bypass mode is supported (lowest setpoint below 666MTS) */
	if (dram_info.timing_info->fsp_table[idx] < 666) {
		dram_info.bypass_mode = true;
	} else {
		dram_info.bypass_mode = false;
	}

	/* Register the EL3 handler for DDR DVFS */
	set_interrupt_rm_flag(flags, NON_SECURE);
	rc = register_interrupt_type_handler(INTR_TYPE_EL3, waiting_dvfs, flags);
	if (rc != 0) {
		panic();
	}
}

/*
 * For each freq return the following info:
 *
 * r1: data rate
 * r2: 1 + dram_core parent index
 * r3: 1 + dram_alt parent index
 * r4: 1 + dram_apb parent index
 *
 * The parent indices can be used by an OS that manages the source clocks to
 * enable them ahead of the switch.
 *
 * A parent value of "0" means "don't care".
 *
 * The current implementation of the freq switch is hardcoded in
 * plat/imx/common/imx8m/clock.c, but in theory this can be enhanced to
 * support a wide variety of rates.
 */
int dram_dvfs_get_freq_info(void *handle, u_register_t index)
{
	switch (index) {
	case 0:
		SMC_RET4(handle, dram_info.timing_info->fsp_table[0],
			 1, 0, 5);
	case 1:
		if (!dram_info.bypass_mode) {
			SMC_RET4(handle, dram_info.timing_info->fsp_table[1],
				 1, 0, 0);
		}
		SMC_RET4(handle, dram_info.timing_info->fsp_table[1],
			 2, 2, 4);
	case 2:
		if (!dram_info.bypass_mode) {
			SMC_RET4(handle, dram_info.timing_info->fsp_table[2],
				 1, 0, 0);
		}
		SMC_RET4(handle, dram_info.timing_info->fsp_table[2],
			 2, 3, 3);
	case 3:
		SMC_RET4(handle, dram_info.timing_info->fsp_table[3],
			 1, 0, 0);
	default:
		SMC_RET1(handle, -3);
	}
}

int dram_dvfs_handler(uint32_t smc_fid, void *handle,
		      u_register_t x1, u_register_t x2, u_register_t x3)
{
	uint64_t mpidr = read_mpidr_el1();
	unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
	unsigned int fsp_index = x1;
	uint32_t online_cores = x2;

	if (x1 == IMX_SIP_DDR_DVFS_GET_FREQ_COUNT) {
		SMC_RET1(handle, dram_info.num_fsp);
	} else if (x1 == IMX_SIP_DDR_DVFS_GET_FREQ_INFO) {
		return dram_dvfs_get_freq_info(handle, x2);
	} else if (x1 < 4U) {
		wait_ddrc_hwffc_done = true;
		dsb();

		/* trigger the SGI IPI to inform the other cores */
		for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
			if (cpu_id != i && (online_cores & (0x1 << (i * 8)))) {
				plat_ic_raise_el3_sgi(0x8, i);
			}
		}

		/* make sure all the other online cores are in WFE */
		online_cores &= ~(0x1 << (cpu_id * 8));
		while (1) {
			if (online_cores == wfe_done) {
				break;
			}
		}

		/* flush the L1/L2 cache */
		dcsw_op_all(DCCSW);

		if (dram_info.dram_type == DDRC_LPDDR4) {
			lpddr4_swffc(&dram_info, dev_fsp, fsp_index);
			dev_fsp = (~dev_fsp) & 0x1;
		} else if (dram_info.dram_type == DDRC_DDR4) {
			ddr4_swffc(&dram_info, fsp_index);
		}

		dram_info.current_fsp = fsp_index;
		wait_ddrc_hwffc_done = false;
		wfe_done = 0;
		dsb();
		sev();
		isb();
	}

	SMC_RET1(handle, 0);
}
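
/*
 * Caller-side sketch (illustration only, kept in a comment so it is not
 * compiled into this EL3 image): one way a non-secure OS could drive this
 * service over SMCCC. The SIP function ID IMX_SIP_DDR_DVFS and the Linux
 * arm_smccc_smc() helper are assumptions about the caller environment;
 * the x1/x2 argument layout matches dram_dvfs_handler() above, where x1
 * selects the sub-command or setpoint and x2 carries the online-core mask
 * (one bit at position cpu_id * 8 per core, as checked against wfe_done).
 *
 *	#include <linux/arm-smccc.h>
 *
 *	#define IMX_SIP_DDR_DVFS	0xc2000004UL	// assumed SiP FID
 *
 *	static unsigned long ddr_dvfs_get_freq_count(void)
 *	{
 *		struct arm_smccc_res res;
 *
 *		// x1 = IMX_SIP_DDR_DVFS_GET_FREQ_COUNT (0x10)
 *		arm_smccc_smc(IMX_SIP_DDR_DVFS, 0x10,
 *			      0, 0, 0, 0, 0, 0, &res);
 *		return res.a0;
 *	}
 *
 *	static void ddr_dvfs_set_fsp(unsigned long fsp,
 *				     unsigned long online_cores)
 *	{
 *		struct arm_smccc_res res;
 *
 *		// x1 < 4 requests a switch to that setpoint; the caller
 *		// must pass the mask of currently online cores so EL3
 *		// can park them in WFE before the frequency change.
 *		arm_smccc_smc(IMX_SIP_DDR_DVFS, fsp, online_cores,
 *			      0, 0, 0, 0, 0, &res);
 *	}
 */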