xref: /rk3399_ARM-atf/plat/imx/imx8m/ddr/dram.c (revision dd108c3c1fe3f958a38ae255e57b41e5453d077f)
1 /*
2  * Copyright 2019-2023 NXP
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <bl31/interrupt_mgmt.h>
8 #include <common/runtime_svc.h>
9 #include <lib/mmio.h>
10 #include <lib/spinlock.h>
11 #include <plat/common/platform.h>
12 
13 #include <dram.h>
14 
15 #define IMX_SIP_DDR_DVFS_GET_FREQ_COUNT		0x10
16 #define IMX_SIP_DDR_DVFS_GET_FREQ_INFO		0x11
17 
18 struct dram_info dram_info;
19 
20 /* lock used for DDR DVFS */
21 spinlock_t dfs_lock;
22 
23 #if defined(PLAT_imx8mq)
24 /* ocram buffer used to hold a copy of the dram timing info */
25 static uint8_t dram_timing_saved[13 * 1024] __aligned(8);
26 #endif
27 
28 static volatile uint32_t wfe_done;
29 static volatile bool wait_ddrc_hwffc_done = true;
30 static unsigned int dev_fsp = 0x1;
31 
32 static uint32_t fsp_init_reg[3][4] = {
33 	{ DDRC_INIT3(0), DDRC_INIT4(0), DDRC_INIT6(0), DDRC_INIT7(0) },
34 	{ DDRC_FREQ1_INIT3(0), DDRC_FREQ1_INIT4(0), DDRC_FREQ1_INIT6(0), DDRC_FREQ1_INIT7(0) },
35 	{ DDRC_FREQ2_INIT3(0), DDRC_FREQ2_INIT4(0), DDRC_FREQ2_INIT6(0), DDRC_FREQ2_INIT7(0) },
36 };
37 
38 #if defined(PLAT_imx8mq)
39 static inline struct dram_cfg_param *get_cfg_ptr(void *ptr,
40 		void *old_base, void *new_base)
41 {
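	/*
	 * Relocate a pointer from the original timing blob into the OCRAM
	 * copy: masking with ~old_base strips the (power-of-two aligned) old
	 * base address and leaves the field's offset within the blob, which
	 * is then rebased onto new_base. This assumes the timing data does
	 * not cross the alignment boundary of old_base.
	 */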
42 	uintptr_t offset = (uintptr_t)ptr & ~((uintptr_t)old_base);
43 
44 	return (struct dram_cfg_param *)(offset + new_base);
45 }
46 
47 /* copy the dram timing info from DRAM to OCRAM */
48 void imx8mq_dram_timing_copy(struct dram_timing_info *from)
49 {
50 	struct dram_timing_info *info = (struct dram_timing_info *)dram_timing_saved;
51 
52 	/* copy the whole 13KB content used for dram timing info */
53 	memcpy(dram_timing_saved, from, sizeof(dram_timing_saved));
54 
55 	/* fix up the header pointers after the copy into ocram */
56 	info->ddrc_cfg = get_cfg_ptr(info->ddrc_cfg, from, dram_timing_saved);
57 	info->ddrphy_cfg = get_cfg_ptr(info->ddrphy_cfg, from, dram_timing_saved);
58 	info->ddrphy_trained_csr = get_cfg_ptr(info->ddrphy_trained_csr, from, dram_timing_saved);
59 	info->ddrphy_pie = get_cfg_ptr(info->ddrphy_pie, from, dram_timing_saved);
60 }
61 #endif
62 
63 #if defined(PLAT_imx8mp)
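/*
 * Read a mode register back from the LPDDR4 device through the DDRC MRR
 * interface: arm the MRR data capture logic in the DDRC performance monitor,
 * wait for any pending MR access to finish (MRSTAT), program the rank and MR
 * address via MRCTRL0/MRCTRL1 and trigger the read with MRCTRL0[31], then
 * wait for the captured data and extract the byte lane selected by the
 * derate_byte field of DERATEEN.
 */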
64 static uint32_t lpddr4_mr_read(unsigned int mr_rank, unsigned int mr_addr)
65 {
66 	unsigned int tmp, drate_byte;
67 
68 	tmp = mmio_read_32(DRC_PERF_MON_MRR0_DAT(0));
69 	mmio_write_32(DRC_PERF_MON_MRR0_DAT(0), tmp | 0x1);
70 	do {
71 		tmp = mmio_read_32(DDRC_MRSTAT(0));
72 	} while (tmp & 0x1);
73 
74 	mmio_write_32(DDRC_MRCTRL0(0), (mr_rank << 4) | 0x1);
75 	mmio_write_32(DDRC_MRCTRL1(0), (mr_addr << 8));
76 	mmio_write_32(DDRC_MRCTRL0(0), (mr_rank << 4) | BIT(31) | 0x1);
77 
78 	/* Workaround for SNPS STAR 9001549457 */
79 	do {
80 		tmp = mmio_read_32(DDRC_MRSTAT(0));
81 	} while (tmp & 0x1);
82 
83 	do {
84 		tmp = mmio_read_32(DRC_PERF_MON_MRR0_DAT(0));
85 	} while (!(tmp & 0x8));
86 	tmp = mmio_read_32(DRC_PERF_MON_MRR1_DAT(0));
87 
88 	drate_byte = (mmio_read_32(DDRC_DERATEEN(0)) >> 4) & 0xff;
89 	tmp = (tmp >> (drate_byte * 8)) & 0xff;
90 	mmio_write_32(DRC_PERF_MON_MRR0_DAT(0), 0x4);
91 
92 	return tmp;
93 }
94 #endif
95 
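/*
 * Build the per-setpoint mode register table from the DDRC INIT3/4/6/7
 * registers: each register packs two MR values (upper and lower halfword),
 * giving eight entries per frequency setpoint. On i.MX8MP with LPDDR4,
 * MR12 (VREF-CA) and MR14 (VREF-DQ) are instead read back from the DRAM
 * device so that the values left by training are used.
 */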
96 static void get_mr_values(uint32_t (*mr_value)[8])
97 {
98 	uint32_t init_val;
99 	unsigned int i, fsp_index;
100 
101 	for (fsp_index = 0U; fsp_index < 3U; fsp_index++) {
102 		for (i = 0U; i < 4U; i++) {
103 			init_val = mmio_read_32(fsp_init_reg[fsp_index][i]);
104 			mr_value[fsp_index][2*i] = init_val >> 16;
105 			mr_value[fsp_index][2*i + 1] = init_val & 0xFFFF;
106 		}
107 
108 #if defined(PLAT_imx8mp)
109 		if (dram_info.dram_type == DDRC_LPDDR4) {
110 			mr_value[fsp_index][5] = lpddr4_mr_read(1, MR12); /* read MR12 from DRAM */
111 			mr_value[fsp_index][7] = lpddr4_mr_read(1, MR14); /* read MR14 from DRAM */
112 		}
113 #endif
114 	}
115 }
116 
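/*
 * Save the DRAMTMG2 (and, depending on the SoC/DRAM type, DRAMTMG9 and
 * RANKCTL) settings of each supported setpoint so the rank-to-rank space
 * workaround can restore them during frequency switches. The FREQ1/FREQ2
 * shadow copies of the P0 registers sit at offsets 0x2000 and 0x3000.
 */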
117 static void save_rank_setting(void)
118 {
119 	uint32_t i, offset;
120 	uint32_t pstate_num = dram_info.num_fsp;
121 
122 	/* only a maximum of 3 setpoints is supported */
123 	pstate_num = (pstate_num > MAX_FSP_NUM) ? MAX_FSP_NUM : pstate_num;
124 
125 	for (i = 0U; i < pstate_num; i++) {
126 		offset = i ? (i + 1) * 0x1000 : 0U;
127 		dram_info.rank_setting[i][0] = mmio_read_32(DDRC_DRAMTMG2(0) + offset);
128 		if (dram_info.dram_type != DDRC_LPDDR4) {
129 			dram_info.rank_setting[i][1] = mmio_read_32(DDRC_DRAMTMG9(0) + offset);
130 		}
131 #if !defined(PLAT_imx8mq)
132 		dram_info.rank_setting[i][2] = mmio_read_32(DDRC_RANKCTL(0) + offset);
133 #endif
134 	}
135 #if defined(PLAT_imx8mq)
136 	dram_info.rank_setting[0][2] = mmio_read_32(DDRC_RANKCTL(0));
137 #endif
138 }
139 /* Restore the ddrc configs */
140 void dram_umctl2_init(struct dram_timing_info *timing)
141 {
142 	struct dram_cfg_param *ddrc_cfg = timing->ddrc_cfg;
143 	unsigned int i;
144 
145 	for (i = 0U; i < timing->ddrc_cfg_num; i++) {
146 		mmio_write_32(ddrc_cfg->reg, ddrc_cfg->val);
147 		ddrc_cfg++;
148 	}
149 
150 	/* set the default fsp to P0 */
151 	mmio_write_32(DDRC_MSTR2(0), 0x0);
152 }
153 
154 /* Restore the dram PHY config */
155 void dram_phy_init(struct dram_timing_info *timing)
156 {
157 	struct dram_cfg_param *cfg;
158 	unsigned int i;
159 
160 	/* Restore the PHY init config */
161 	cfg = timing->ddrphy_cfg;
162 	for (i = 0U; i < timing->ddrphy_cfg_num; i++) {
163 		dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
164 		cfg++;
165 	}
166 
167 	/* Restore the DDR PHY CSRs */
168 	cfg = timing->ddrphy_trained_csr;
169 	for (i = 0U; i < timing->ddrphy_trained_csr_num; i++) {
170 		dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
171 		cfg++;
172 	}
173 
174 	/* Load the PIE image */
175 	cfg = timing->ddrphy_pie;
176 	for (i = 0U; i < timing->ddrphy_pie_num; i++) {
177 		dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
178 		cfg++;
179 	}
180 }
181 
182 /* EL3 SGI-8 IPI handler: park the other cores during DDR dynamic frequency scaling */
183 static uint64_t waiting_dvfs(uint32_t id, uint32_t flags,
184 				void *handle, void *cookie)
185 {
186 	uint64_t mpidr = read_mpidr_el1();
187 	unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
188 	uint32_t irq;
189 
190 	irq = plat_ic_acknowledge_interrupt();
191 	if (irq < 1022U) {
192 		plat_ic_end_of_interrupt(irq);
193 	}
194 
195 	/* set the WFE done status */
196 	spin_lock(&dfs_lock);
197 	wfe_done |= (1 << (cpu_id * 8));
198 	dsb();
199 	spin_unlock(&dfs_lock);
200 
201 	while (1) {
202 		/* ddr frequency change done */
203 		if (!wait_ddrc_hwffc_done)
204 			break;
205 
206 		wfe();
207 	}
208 
209 	return 0;
210 }
211 
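/*
 * Probe the DDRC for the DRAM type, the number of active ranks and the
 * setpoint the boot stage left the controller on, snapshot the mode
 * register and rank timing settings needed for later switches, register
 * the EL3 SGI handler used to park the secondary cores during DVFS and,
 * if the controller is not already on setpoint 0, switch back to it so
 * runtime DVFS starts from a known state.
 */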
212 void dram_info_init(unsigned long dram_timing_base)
213 {
214 	uint32_t ddrc_mstr, current_fsp;
215 	unsigned int idx = 0;
216 	uint32_t flags = 0;
217 	uint32_t rc;
218 	unsigned int i;
219 
220 	/* Get the dram type & rank */
221 	ddrc_mstr = mmio_read_32(DDRC_MSTR(0));
222 
223 	dram_info.dram_type = ddrc_mstr & DDR_TYPE_MASK;
224 	dram_info.num_rank = ((ddrc_mstr >> 24) & ACTIVE_RANK_MASK) == 0x3 ?
225 		DDRC_ACTIVE_TWO_RANK : DDRC_ACTIVE_ONE_RANK;
226 
227 	/* Get current fsp info */
228 	current_fsp = mmio_read_32(DDRC_DFIMISC(0));
229 	current_fsp = (current_fsp >> 8) & 0xf;
230 	dram_info.boot_fsp = current_fsp;
231 	dram_info.current_fsp = current_fsp;
232 
233 #if defined(PLAT_imx8mq)
234 	imx8mq_dram_timing_copy((struct dram_timing_info *)dram_timing_base);
235 	dram_timing_base = (unsigned long) dram_timing_saved;
236 #endif
237 	get_mr_values(dram_info.mr_table);
238 
239 	dram_info.timing_info = (struct dram_timing_info *)dram_timing_base;
240 
241 	/* get the number of supported frequency setpoints */
242 	for (i = 0U; i < 4U; ++i) {
243 		if (!dram_info.timing_info->fsp_table[i]) {
244 			break;
245 		}
246 		idx = i;
247 	}
248 
249 	/* only a maximum of 3 setpoints is supported */
250 	dram_info.num_fsp = (i > MAX_FSP_NUM) ? MAX_FSP_NUM : i;
251 
252 	/* no valid fsp table, return directly */
253 	if (i == 0U) {
254 		return;
255 	}
256 
257 	/* save the DRAMTMG2/9 & RANKCTL settings for the rank-to-rank workaround */
258 	save_rank_setting();
259 
260 	/* check if bypass mode is supported */
261 	if (dram_info.timing_info->fsp_table[idx] < 666) {
262 		dram_info.bypass_mode = true;
263 	} else {
264 		dram_info.bypass_mode = false;
265 	}
266 
267 	/* Register the EL3 handler for DDR DVFS */
268 	set_interrupt_rm_flag(flags, NON_SECURE);
269 	rc = register_interrupt_type_handler(INTR_TYPE_EL3, waiting_dvfs, flags);
270 	if (rc != 0) {
271 		panic();
272 	}
273 
274 	if (dram_info.dram_type == DDRC_LPDDR4 && current_fsp != 0x0) {
275 		/* flush the L1/L2 cache */
276 		dcsw_op_all(DCCSW);
277 		lpddr4_swffc(&dram_info, dev_fsp, 0x0);
278 		dev_fsp = (~dev_fsp) & 0x1;
279 	} else if (current_fsp != 0x0) {
280 		/* flush the L1/L2 cache */
281 		dcsw_op_all(DCCSW);
282 		ddr4_swffc(&dram_info, 0x0);
283 	}
284 }
285 
286 /*
287  * For each freq return the following info:
288  *
289  * r1: data rate
290  * r2: 1 + dram_core parent
291  * r3: 1 + dram_alt parent index
292  * r4: 1 + dram_apb parent index
293  *
294  * The parent indices can be used by an OS that manages the source clocks to
295  * enable them ahead of the switch.
296  *
297  * A parent value of "0" means "don't care".
298  *
299  * The current implementation of the freq switch is hardcoded in
300  * plat/imx/common/imx8m/clock.c but in theory this can be enhanced to support
301  * a wider variety of rates.
302  */
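/*
 * A minimal sketch of how a rich-OS caller might use this sub-command;
 * the outer SIP function ID and the SMCCC helper below are illustrative
 * and not defined in this file:
 *
 *	struct arm_smccc_res res;
 *
 *	arm_smccc_smc(IMX_SIP_DDR_DVFS, IMX_SIP_DDR_DVFS_GET_FREQ_INFO,
 *		      index, 0, 0, 0, 0, 0, &res);
 *	rate = res.a0;		// fsp_table[index] data rate
 *	core = res.a1;		// 1 + dram_core parent (0 = don't care)
 *	alt  = res.a2;		// 1 + dram_alt parent index
 *	apb  = res.a3;		// 1 + dram_apb parent index
 */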
303 int dram_dvfs_get_freq_info(void *handle, u_register_t index)
304 {
305 	switch (index) {
306 	case 0:
307 		 SMC_RET4(handle, dram_info.timing_info->fsp_table[0],
308 			1, 0, 5);
309 	case 1:
310 		if (!dram_info.bypass_mode) {
311 			SMC_RET4(handle, dram_info.timing_info->fsp_table[1],
312 				1, 0, 0);
313 		}
314 		SMC_RET4(handle, dram_info.timing_info->fsp_table[1],
315 			2, 2, 4);
316 	case 2:
317 		if (!dram_info.bypass_mode) {
318 			SMC_RET4(handle, dram_info.timing_info->fsp_table[2],
319 				1, 0, 0);
320 		}
321 		SMC_RET4(handle, dram_info.timing_info->fsp_table[2],
322 			2, 3, 3);
323 	case 3:
324 		SMC_RET4(handle, dram_info.timing_info->fsp_table[3],
325 			1, 0, 0);
326 	default:
327 		SMC_RET1(handle, -3);
328 	}
329 }
330 
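/*
 * SiP runtime handler for DDR DVFS. x1 selects either a query
 * (IMX_SIP_DDR_DVFS_GET_FREQ_COUNT, or _GET_FREQ_INFO with the setpoint
 * index in x2) or the target setpoint index for a frequency switch, in
 * which case x2 carries the caller's online-core mask, one byte per core
 * (bit cpu_id * 8), matching the wfe_done bookkeeping in waiting_dvfs().
 * For a switch, every other online core is sent SGI-8 so it parks in EL3,
 * the calling core waits until all of them have checked in, flushes the
 * data caches, performs the software frequency change and finally
 * releases the parked cores with sev().
 */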
331 int dram_dvfs_handler(uint32_t smc_fid, void *handle,
332 	u_register_t x1, u_register_t x2, u_register_t x3)
333 {
334 	uint64_t mpidr = read_mpidr_el1();
335 	unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
336 	unsigned int fsp_index = x1;
337 	uint32_t online_cores = x2;
338 
339 	if (x1 == IMX_SIP_DDR_DVFS_GET_FREQ_COUNT) {
340 		SMC_RET1(handle, dram_info.num_fsp);
341 	} else if (x1 == IMX_SIP_DDR_DVFS_GET_FREQ_INFO) {
342 		return dram_dvfs_get_freq_info(handle, x2);
343 	} else if (x1 < 3U) {
344 		wait_ddrc_hwffc_done = true;
345 		dsb();
346 
347 		/* trigger the EL3 SGI to inform the other online cores */
348 		for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
349 			if (cpu_id != i && (online_cores & (0x1 << (i * 8)))) {
350 				plat_ic_raise_el3_sgi(0x8, i);
351 			}
352 		}
353 
354 		/* make sure all the other online cores are in WFE */
355 		online_cores &= ~(0x1 << (cpu_id * 8));
356 		while (1) {
357 			if (online_cores == wfe_done) {
358 				break;
359 			}
360 		}
361 
362 		/* flush the L1/L2 cache */
363 		dcsw_op_all(DCCSW);
364 
365 		if (dram_info.dram_type == DDRC_LPDDR4) {
366 			lpddr4_swffc(&dram_info, dev_fsp, fsp_index);
367 			dev_fsp = (~dev_fsp) & 0x1;
368 		} else {
369 			ddr4_swffc(&dram_info, fsp_index);
370 		}
371 
372 		dram_info.current_fsp = fsp_index;
373 		wait_ddrc_hwffc_done = false;
374 		wfe_done = 0;
375 		dsb();
376 		sev();
377 		isb();
378 	}
379 
380 	SMC_RET1(handle, 0);
381 }
382