/*
 * Copyright 2019-2023 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <bl31/interrupt_mgmt.h>
#include <common/runtime_svc.h>
#include <lib/mmio.h>
#include <lib/spinlock.h>
#include <plat/common/platform.h>

#include <dram.h>

#define IMX_SIP_DDR_DVFS_GET_FREQ_COUNT		0x10
#define IMX_SIP_DDR_DVFS_GET_FREQ_INFO		0x11

struct dram_info dram_info;

/* lock used for DDR DVFS */
spinlock_t dfs_lock;

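/* one flag bit per core (byte-spaced), set in waiting_dvfs() before the core parks in WFE */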
static volatile uint32_t wfe_done;
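/* set before a frequency switch, cleared by dram_dvfs_handler() once the switch is done */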
static volatile bool wait_ddrc_hwffc_done = true;
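/* current LPDDR4 device frequency set point (FSP), toggled between 0 and 1 after each switch */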
static unsigned int dev_fsp = 0x1;

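/* INIT3/4/6/7 register addresses of each controller set point, read back by get_mr_values() */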
static uint32_t fsp_init_reg[3][4] = {
	{ DDRC_INIT3(0), DDRC_INIT4(0), DDRC_INIT6(0), DDRC_INIT7(0) },
	{ DDRC_FREQ1_INIT3(0), DDRC_FREQ1_INIT4(0), DDRC_FREQ1_INIT6(0), DDRC_FREQ1_INIT7(0) },
	{ DDRC_FREQ2_INIT3(0), DDRC_FREQ2_INIT4(0), DDRC_FREQ2_INIT6(0), DDRC_FREQ2_INIT7(0) },
};

#if defined(PLAT_imx8mp)
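/*
 * Read a mode register from the LPDDR4 device: the MRR command is issued
 * through the DDRC and the returned data is captured by the DRC perf monitor.
 */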
static uint32_t lpddr4_mr_read(unsigned int mr_rank, unsigned int mr_addr)
{
	unsigned int tmp, drate_byte;

	tmp = mmio_read_32(DRC_PERF_MON_MRR0_DAT(0));
	mmio_write_32(DRC_PERF_MON_MRR0_DAT(0), tmp | 0x1);
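	/* wait until no mode register transaction is pending in the controller */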
	do {
		tmp = mmio_read_32(DDRC_MRSTAT(0));
	} while (tmp & 0x1);

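	/* select the rank and MR address, then set the trigger bit to issue the read */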
	mmio_write_32(DDRC_MRCTRL0(0), (mr_rank << 4) | 0x1);
	mmio_write_32(DDRC_MRCTRL1(0), (mr_addr << 8));
	mmio_write_32(DDRC_MRCTRL0(0), (mr_rank << 4) | BIT(31) | 0x1);

	/* Workaround for SNPS STAR 9001549457 */
	do {
		tmp = mmio_read_32(DDRC_MRSTAT(0));
	} while (tmp & 0x1);

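	/* wait for the perf monitor to flag that the MRR data is available, then read it */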
	do {
		tmp = mmio_read_32(DRC_PERF_MON_MRR0_DAT(0));
	} while (!(tmp & 0x8));
	tmp = mmio_read_32(DRC_PERF_MON_MRR1_DAT(0));

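	/* the MR data is carried on the byte lane selected by the DERATEEN derate_byte field */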
	drate_byte = (mmio_read_32(DDRC_DERATEEN(0)) >> 4) & 0xff;
	tmp = (tmp >> (drate_byte * 8)) & 0xff;
	mmio_write_32(DRC_PERF_MON_MRR0_DAT(0), 0x4);

	return tmp;
}
#endif

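/*
 * Collect the mode register values programmed into the INIT3/4/6/7 registers
 * of every frequency set point; on i.MX8MP with LPDDR4 the MR12/MR14 values
 * are read back from the DRAM device instead.
 */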
static void get_mr_values(uint32_t (*mr_value)[8])
{
	uint32_t init_val;
	unsigned int i, fsp_index;

	for (fsp_index = 0U; fsp_index < 3U; fsp_index++) {
		for (i = 0U; i < 4U; i++) {
			init_val = mmio_read_32(fsp_init_reg[fsp_index][i]);
			mr_value[fsp_index][2*i] = init_val >> 16;
			mr_value[fsp_index][2*i + 1] = init_val & 0xFFFF;
		}

#if defined(PLAT_imx8mp)
		if (dram_info.dram_type == DDRC_LPDDR4) {
			mr_value[fsp_index][5] = lpddr4_mr_read(1, MR12); /* read MR12 from DRAM */
			mr_value[fsp_index][7] = lpddr4_mr_read(1, MR14); /* read MR14 from DRAM */
		}
#endif
	}
}

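/*
 * Save the DRAMTMG2/DRAMTMG9/RANKCTL value of each set point for the
 * rank-to-rank space workaround.
 */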
static void save_rank_setting(void)
{
	uint32_t i, offset;
	uint32_t pstate_num = dram_info.num_fsp;

	/* only a maximum of 3 set points is supported */
	pstate_num = (pstate_num > MAX_FSP_NUM) ? MAX_FSP_NUM : pstate_num;

	for (i = 0U; i < pstate_num; i++) {
		offset = i ? (i + 1) * 0x1000 : 0U;
		dram_info.rank_setting[i][0] = mmio_read_32(DDRC_DRAMTMG2(0) + offset);
		if (dram_info.dram_type != DDRC_LPDDR4) {
			dram_info.rank_setting[i][1] = mmio_read_32(DDRC_DRAMTMG9(0) + offset);
		}
#if !defined(PLAT_imx8mq)
		dram_info.rank_setting[i][2] = mmio_read_32(DDRC_RANKCTL(0) + offset);
#endif
	}
#if defined(PLAT_imx8mq)
	dram_info.rank_setting[0][2] = mmio_read_32(DDRC_RANKCTL(0));
#endif
}

/* Restore the DDRC configuration */
void dram_umctl2_init(struct dram_timing_info *timing)
{
	struct dram_cfg_param *ddrc_cfg = timing->ddrc_cfg;
	unsigned int i;

	for (i = 0U; i < timing->ddrc_cfg_num; i++) {
		mmio_write_32(ddrc_cfg->reg, ddrc_cfg->val);
		ddrc_cfg++;
	}

	/* set the default fsp to P0 */
	mmio_write_32(DDRC_MSTR2(0), 0x0);
}

/* Restore the dram PHY config */
void dram_phy_init(struct dram_timing_info *timing)
{
	struct dram_cfg_param *cfg = timing->ddrphy_cfg;
	unsigned int i;

	/* Restore the PHY init config */
	cfg = timing->ddrphy_cfg;
	for (i = 0U; i < timing->ddrphy_cfg_num; i++) {
		dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
		cfg++;
	}

	/* Restore the DDR PHY CSRs */
	cfg = timing->ddrphy_trained_csr;
	for (i = 0U; i < timing->ddrphy_trained_csr_num; i++) {
		dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
		cfg++;
	}

	/* Load the PIE image */
	cfg = timing->ddrphy_pie;
	for (i = 0U; i < timing->ddrphy_pie_num; i++) {
		dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
		cfg++;
	}
}

/* EL3 SGI-8 IPI handler for DDR dynamic frequency scaling */
static uint64_t waiting_dvfs(uint32_t id, uint32_t flags,
				void *handle, void *cookie)
{
	uint64_t mpidr = read_mpidr_el1();
	unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
	uint32_t irq;

	irq = plat_ic_acknowledge_interrupt();
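	/* interrupt IDs 1022 and above are special and do not need an EOI */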
	if (irq < 1022U) {
		plat_ic_end_of_interrupt(irq);
	}

	/* set the WFE done status */
	spin_lock(&dfs_lock);
	wfe_done |= (1 << cpu_id * 8);
	dsb();
	spin_unlock(&dfs_lock);

	while (1) {
		/* ddr frequency change done */
		if (!wait_ddrc_hwffc_done)
			break;

		wfe();
	}

	return 0;
}

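/*
 * Probe the DDRC at boot, fill in dram_info (type, rank, set points, saved
 * mode registers) and register the EL3 interrupt handler used for DVFS.
 */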
void dram_info_init(unsigned long dram_timing_base)
{
	uint32_t ddrc_mstr, current_fsp;
	unsigned int idx = 0;
	uint32_t flags = 0;
	uint32_t rc;
	unsigned int i;

	/* Get the dram type & rank */
	ddrc_mstr = mmio_read_32(DDRC_MSTR(0));

	dram_info.dram_type = ddrc_mstr & DDR_TYPE_MASK;
	dram_info.num_rank = ((ddrc_mstr >> 24) & ACTIVE_RANK_MASK) == 0x3 ?
		DDRC_ACTIVE_TWO_RANK : DDRC_ACTIVE_ONE_RANK;

	/* Get current fsp info */
	current_fsp = mmio_read_32(DDRC_DFIMISC(0));
	current_fsp = (current_fsp >> 8) & 0xf;
	dram_info.boot_fsp = current_fsp;
	dram_info.current_fsp = current_fsp;

	get_mr_values(dram_info.mr_table);

	dram_info.timing_info = (struct dram_timing_info *)dram_timing_base;

	/* get the number of supported frequency set points */
	for (i = 0U; i < 4U; ++i) {
		if (!dram_info.timing_info->fsp_table[i]) {
			break;
		}
		idx = i;
	}

	/* only a maximum of 3 set points is supported */
	dram_info.num_fsp = (i > MAX_FSP_NUM) ? MAX_FSP_NUM : i;

	/* no valid fsp table, return directly */
	if (i == 0U) {
		return;
	}

	/* save the DRAMTMG2/9 and RANKCTL settings for the rank-to-rank space workaround */
	save_rank_setting();

	/* check whether bypass mode is needed (lowest set point data rate below 666) */
	if (dram_info.timing_info->fsp_table[idx] < 666) {
		dram_info.bypass_mode = true;
	} else {
		dram_info.bypass_mode = false;
	}

	/* Register the EL3 handler for DDR DVFS */
	set_interrupt_rm_flag(flags, NON_SECURE);
	rc = register_interrupt_type_handler(INTR_TYPE_EL3, waiting_dvfs, flags);
	if (rc != 0) {
		panic();
	}

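	/* if the boot stage left the DDRC on a non-default set point, switch back to P0 */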
	if (dram_info.dram_type == DDRC_LPDDR4 && current_fsp != 0x0) {
		/* flush the L1/L2 cache */
		dcsw_op_all(DCCSW);
		lpddr4_swffc(&dram_info, dev_fsp, 0x0);
		dev_fsp = (~dev_fsp) & 0x1;
	} else if (current_fsp != 0x0) {
		/* flush the L1/L2 cache */
		dcsw_op_all(DCCSW);
		ddr4_swffc(&dram_info, 0x0);
	}
}

/*
 * For each freq return the following info:
 *
 * r1: data rate
 * r2: 1 + dram_core parent
 * r3: 1 + dram_alt parent index
 * r4: 1 + dram_apb parent index
 *
 * The parent indices can be used by an OS that manages the source clocks to
 * enable them ahead of the switch.
 *
 * A parent value of "0" means "don't care".
 *
 * Current implementation of freq switch is hardcoded in
 * plat/imx/common/imx8m/clock.c but in theory this can be enhanced to support
 * a wide variety of rates.
 */
int dram_dvfs_get_freq_info(void *handle, u_register_t index)
{
	switch (index) {
	case 0:
		SMC_RET4(handle, dram_info.timing_info->fsp_table[0],
			1, 0, 5);
	case 1:
		if (!dram_info.bypass_mode) {
			SMC_RET4(handle, dram_info.timing_info->fsp_table[1],
				1, 0, 0);
		}
		SMC_RET4(handle, dram_info.timing_info->fsp_table[1],
			2, 2, 4);
	case 2:
		if (!dram_info.bypass_mode) {
			SMC_RET4(handle, dram_info.timing_info->fsp_table[2],
				1, 0, 0);
		}
		SMC_RET4(handle, dram_info.timing_info->fsp_table[2],
			2, 3, 3);
	case 3:
		SMC_RET4(handle, dram_info.timing_info->fsp_table[3],
			1, 0, 0);
	default:
		SMC_RET1(handle, -3);
	}
}

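/*
 * SiP SMC handler for DDR DVFS: x1 selects either a query
 * (IMX_SIP_DDR_DVFS_GET_FREQ_COUNT/INFO) or the target set point index;
 * x2 carries the freq index for the info query, or the mask of online
 * cores that must be parked in WFE before a switch.
 */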
int dram_dvfs_handler(uint32_t smc_fid, void *handle,
	u_register_t x1, u_register_t x2, u_register_t x3)
{
	uint64_t mpidr = read_mpidr_el1();
	unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
	unsigned int fsp_index = x1;
	uint32_t online_cores = x2;

	if (x1 == IMX_SIP_DDR_DVFS_GET_FREQ_COUNT) {
		SMC_RET1(handle, dram_info.num_fsp);
	} else if (x1 == IMX_SIP_DDR_DVFS_GET_FREQ_INFO) {
		return dram_dvfs_get_freq_info(handle, x2);
	} else if (x1 < 3U) {
		wait_ddrc_hwffc_done = true;
		dsb();

		/* trigger the EL3 SGI to inform the other online cores */
		for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
			if (cpu_id != i && (online_cores & (0x1 << (i * 8)))) {
				plat_ic_raise_el3_sgi(0x8, i);
			}
		}

		/* make sure all the other online cores are in WFE */
		online_cores &= ~(0x1 << (cpu_id * 8));
		while (1) {
			if (online_cores == wfe_done) {
				break;
			}
		}

		/* flush the L1/L2 cache */
		dcsw_op_all(DCCSW);

		if (dram_info.dram_type == DDRC_LPDDR4) {
			lpddr4_swffc(&dram_info, dev_fsp, fsp_index);
			dev_fsp = (~dev_fsp) & 0x1;
		} else {
			ddr4_swffc(&dram_info, fsp_index);
		}

		dram_info.current_fsp = fsp_index;
		wait_ddrc_hwffc_done = false;
		wfe_done = 0;
		dsb();
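		/* wake up the cores spinning in waiting_dvfs() */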
		sev();
		isb();
	}

	SMC_RET1(handle, 0);
}