xref: /rk3399_ARM-atf/plat/imx/imx8m/ddr/dram.c (revision 0331b1c6111d198195298a2885dbd93cac1ad26a)
/*
 * Copyright 2019-2023 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <bl31/interrupt_mgmt.h>
#include <common/runtime_svc.h>
#include <lib/mmio.h>
#include <lib/spinlock.h>
#include <plat/common/platform.h>

#include <dram.h>

#define IMX_SIP_DDR_DVFS_GET_FREQ_COUNT		0x10
#define IMX_SIP_DDR_DVFS_GET_FREQ_INFO		0x11

struct dram_info dram_info;

/* lock used for DDR DVFS */
spinlock_t dfs_lock;

static volatile uint32_t wfe_done;
static volatile bool wait_ddrc_hwffc_done = true;
static unsigned int dev_fsp = 0x1;
static uint32_t fsp_init_reg[3][4] = {
	{ DDRC_INIT3(0), DDRC_INIT4(0), DDRC_INIT6(0), DDRC_INIT7(0) },
	{ DDRC_FREQ1_INIT3(0), DDRC_FREQ1_INIT4(0), DDRC_FREQ1_INIT6(0), DDRC_FREQ1_INIT7(0) },
	{ DDRC_FREQ2_INIT3(0), DDRC_FREQ2_INIT4(0), DDRC_FREQ2_INIT6(0), DDRC_FREQ2_INIT7(0) },
};

static void get_mr_values(uint32_t (*mr_value)[8])
{
	uint32_t init_val;
	unsigned int i, fsp_index;

	for (fsp_index = 0U; fsp_index < 3U; fsp_index++) {
		for (i = 0U; i < 4U; i++) {
			init_val = mmio_read_32(fsp_init_reg[fsp_index][i]);
			mr_value[fsp_index][2 * i] = init_val >> 16;
			mr_value[fsp_index][2 * i + 1] = init_val & 0xFFFF;
		}
	}
}
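
/*
 * Example (illustrative values): each INITx register packs two mode-register
 * values, one per 16-bit half. If DDRC_INIT3(0) reads 0x00540012, the loop
 * above stores mr_value[0][0] = 0x0054 and mr_value[0][1] = 0x0012.
 */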

static void save_rank_setting(void)
{
	uint32_t i, offset;
	uint32_t pstate_num = dram_info.num_fsp;

	/* support a maximum of 3 setpoints */
	pstate_num = (pstate_num > MAX_FSP_NUM) ? MAX_FSP_NUM : pstate_num;

	for (i = 0U; i < pstate_num; i++) {
		offset = i ? (i + 1) * 0x1000 : 0U;
		dram_info.rank_setting[i][0] = mmio_read_32(DDRC_DRAMTMG2(0) + offset);
		if (dram_info.dram_type != DDRC_LPDDR4) {
			dram_info.rank_setting[i][1] = mmio_read_32(DDRC_DRAMTMG9(0) + offset);
		}
#if !defined(PLAT_imx8mq)
		dram_info.rank_setting[i][2] = mmio_read_32(DDRC_RANKCTL(0) + offset);
#endif
	}
#if defined(PLAT_imx8mq)
	dram_info.rank_setting[0][2] = mmio_read_32(DDRC_RANKCTL(0));
#endif
}
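
/*
 * Note on the offset computation above: setpoint P0 reads the base registers
 * (offset 0), while P1 and P2 read from offsets 0x2000 and 0x3000, which
 * correspond to the FREQ1/FREQ2 shadow register blocks of the DDRC (per the
 * i.MX8M DDRC register layout). On i.MX8MQ only the P0 RANKCTL value is
 * saved.
 */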

/* Restore the DDRC configuration */
void dram_umctl2_init(struct dram_timing_info *timing)
{
	struct dram_cfg_param *ddrc_cfg = timing->ddrc_cfg;
	unsigned int i;

	for (i = 0U; i < timing->ddrc_cfg_num; i++) {
		mmio_write_32(ddrc_cfg->reg, ddrc_cfg->val);
		ddrc_cfg++;
	}

	/* set the default fsp to P0 */
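	/*
	 * Assumption (not verified against the controller databook): MSTR2 is
	 * understood to carry the DDRC target frequency selection, so writing
	 * 0 here keeps the controller on setpoint P0 after the restore.
	 */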
	mmio_write_32(DDRC_MSTR2(0), 0x0);
}

/* Restore the dram PHY config */
void dram_phy_init(struct dram_timing_info *timing)
{
	struct dram_cfg_param *cfg = timing->ddrphy_cfg;
	unsigned int i;

	/* Restore the PHY init config */
	cfg = timing->ddrphy_cfg;
	for (i = 0U; i < timing->ddrphy_cfg_num; i++) {
		dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
		cfg++;
	}

	/* Restore the DDR PHY CSRs */
	cfg = timing->ddrphy_trained_csr;
	for (i = 0U; i < timing->ddrphy_trained_csr_num; i++) {
		dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
		cfg++;
	}

	/* Load the PIE image */
	cfg = timing->ddrphy_pie;
	for (i = 0U; i < timing->ddrphy_pie_num; i++) {
		dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
		cfg++;
	}
}
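
/*
 * Note on dwc_ddrphy_apb_wr(): on the i.MX8M platforms this helper is
 * expected to expand to an MMIO write into the DDR PHY APB window, roughly
 * mmio_write_32(IMX_DDRPHY_BASE + 4 * (addr), data) (assumption based on the
 * platform headers; check <dram.h> for the exact definition).
 */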

/* EL3 SGI-8 IPI handler for DDR dynamic frequency scaling */
static uint64_t waiting_dvfs(uint32_t id, uint32_t flags,
				void *handle, void *cookie)
{
	uint64_t mpidr = read_mpidr_el1();
	unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
	uint32_t irq;

	irq = plat_ic_acknowledge_interrupt();
	if (irq < 1022U) {
		plat_ic_end_of_interrupt(irq);
	}

	/* set the WFE done status */
	spin_lock(&dfs_lock);
	wfe_done |= (1 << (cpu_id * 8));
	dsb();
	spin_unlock(&dfs_lock);

	while (1) {
		/* ddr frequency change done */
		if (!wait_ddrc_hwffc_done) {
			break;
		}

		wfe();
	}

	return 0;
}
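
/*
 * Each core records completion at bit (cpu_id * 8) of wfe_done, i.e. one byte
 * per core: core 0 sets bit 0, core 1 sets bit 8, core 2 sets bit 16, and so
 * on. This byte-per-core layout matches the online_cores mask the OS passes
 * to dram_dvfs_handler() below.
 */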

void dram_info_init(unsigned long dram_timing_base)
{
	uint32_t ddrc_mstr, current_fsp;
	unsigned int idx = 0;
	uint32_t flags = 0;
	uint32_t rc;
	unsigned int i;

	/* Get the dram type & rank */
	ddrc_mstr = mmio_read_32(DDRC_MSTR(0));

	dram_info.dram_type = ddrc_mstr & DDR_TYPE_MASK;
	dram_info.num_rank = ((ddrc_mstr >> 24) & ACTIVE_RANK_MASK) == 0x3 ?
		DDRC_ACTIVE_TWO_RANK : DDRC_ACTIVE_ONE_RANK;

	/* Get the current fsp info */
	current_fsp = mmio_read_32(DDRC_DFIMISC(0));
	current_fsp = (current_fsp >> 8) & 0xf;
	dram_info.boot_fsp = current_fsp;
	dram_info.current_fsp = current_fsp;

	get_mr_values(dram_info.mr_table);

	dram_info.timing_info = (struct dram_timing_info *)dram_timing_base;

	/* get the number of supported fsp */
	for (i = 0U; i < 4U; ++i) {
		if (!dram_info.timing_info->fsp_table[i]) {
			break;
		}
		idx = i;
	}

	/* support a maximum of 3 setpoints */
	dram_info.num_fsp = (i > MAX_FSP_NUM) ? MAX_FSP_NUM : i;

	/* no valid fsp table, return directly */
	if (i == 0U) {
		return;
	}

	/* save the DRAMTMG2/9 and RANKCTL settings for the rank-to-rank workaround */
	save_rank_setting();

	/* check whether bypass mode is supported */
	if (dram_info.timing_info->fsp_table[idx] < 666) {
		dram_info.bypass_mode = true;
	} else {
		dram_info.bypass_mode = false;
	}

	/* Register the EL3 handler for DDR DVFS */
	set_interrupt_rm_flag(flags, NON_SECURE);
	rc = register_interrupt_type_handler(INTR_TYPE_EL3, waiting_dvfs, flags);
	if (rc != 0) {
		panic();
	}
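
	/*
	 * If the boot stage left the DDRC on a setpoint other than P0
	 * (current_fsp != 0), switch back to P0 now so that runtime DVFS
	 * starts from a known setpoint. The caches are flushed first,
	 * presumably so that no dirty lines need to be written back to DRAM
	 * while the frequency change is in progress.
	 */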
	if (dram_info.dram_type == DDRC_LPDDR4 && current_fsp != 0x0) {
		/* flush the L1/L2 cache */
		dcsw_op_all(DCCSW);
		lpddr4_swffc(&dram_info, dev_fsp, 0x0);
		dev_fsp = (~dev_fsp) & 0x1;
	} else if (current_fsp != 0x0) {
		/* flush the L1/L2 cache */
		dcsw_op_all(DCCSW);
		ddr4_swffc(&dram_info, 0x0);
	}
}

/*
 * For each freq return the following info:
 *
 * r1: data rate
 * r2: 1 + dram_core parent index
 * r3: 1 + dram_alt parent index
 * r4: 1 + dram_apb parent index
 *
 * The parent indices can be used by an OS that manages source clocks to
 * enable them ahead of the switch.
 *
 * A parent value of "0" means "don't care".
 *
 * The current implementation of the frequency switch is hardcoded in
 * plat/imx/common/imx8m/clock.c but in theory this can be enhanced to support
 * a wide variety of rates.
 */
int dram_dvfs_get_freq_info(void *handle, u_register_t index)
{
	switch (index) {
	case 0:
		SMC_RET4(handle, dram_info.timing_info->fsp_table[0],
			1, 0, 5);
	case 1:
		if (!dram_info.bypass_mode) {
			SMC_RET4(handle, dram_info.timing_info->fsp_table[1],
				1, 0, 0);
		}
		SMC_RET4(handle, dram_info.timing_info->fsp_table[1],
			2, 2, 4);
	case 2:
		if (!dram_info.bypass_mode) {
			SMC_RET4(handle, dram_info.timing_info->fsp_table[2],
				1, 0, 0);
		}
		SMC_RET4(handle, dram_info.timing_info->fsp_table[2],
			2, 3, 3);
	case 3:
		SMC_RET4(handle, dram_info.timing_info->fsp_table[3],
			1, 0, 0);
	default:
		SMC_RET1(handle, -3);
	}
}
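
/*
 * Worked example of the encoding above: for index 0 the call returns
 * fsp_table[0] as the data rate, r2 = 1 (dram_core parent index 0),
 * r3 = 0 (dram_alt "don't care") and r4 = 5 (dram_apb parent index 4).
 */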

int dram_dvfs_handler(uint32_t smc_fid, void *handle,
	u_register_t x1, u_register_t x2, u_register_t x3)
{
	uint64_t mpidr = read_mpidr_el1();
	unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
	unsigned int fsp_index = x1;
	uint32_t online_cores = x2;

	if (x1 == IMX_SIP_DDR_DVFS_GET_FREQ_COUNT) {
		SMC_RET1(handle, dram_info.num_fsp);
	} else if (x1 == IMX_SIP_DDR_DVFS_GET_FREQ_INFO) {
		return dram_dvfs_get_freq_info(handle, x2);
	} else if (x1 < 3U) {
		wait_ddrc_hwffc_done = true;
		dsb();

		/* trigger the SGI IPI to inform the other online cores */
		for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
			if (cpu_id != i && (online_cores & (0x1 << (i * 8)))) {
				plat_ic_raise_el3_sgi(0x8, i);
			}
		}

		/* make sure all the other online cores are in WFE */
		online_cores &= ~(0x1 << (cpu_id * 8));
		while (1) {
			if (online_cores == wfe_done) {
				break;
			}
		}
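
		/*
		 * The handshake above, by example: with cores 0-3 online and
		 * core 0 initiating, the caller passes online_cores =
		 * 0x01010101; the initiator clears its own byte and the loop
		 * exits once wfe_done == 0x01010100, i.e. cores 1-3 are
		 * parked in waiting_dvfs().
		 */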

		/* flush the L1/L2 cache */
		dcsw_op_all(DCCSW);

		if (dram_info.dram_type == DDRC_LPDDR4) {
			lpddr4_swffc(&dram_info, dev_fsp, fsp_index);
			dev_fsp = (~dev_fsp) & 0x1;
		} else {
			ddr4_swffc(&dram_info, fsp_index);
		}
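
		/*
		 * Switch done: record the new setpoint, clear the flag and
		 * the WFE bookkeeping, then sev() wakes the secondary cores
		 * out of wfe() in waiting_dvfs() so they observe
		 * wait_ddrc_hwffc_done == false and return to the interrupted
		 * context.
		 */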

		dram_info.current_fsp = fsp_index;
		wait_ddrc_hwffc_done = false;
		wfe_done = 0;
		dsb();
		sev();
		isb();
	}

	SMC_RET1(handle, 0);
}