xref: /rk3399_ARM-atf/drivers/st/ddr/stm32mp_ddr.c (revision d596023bff6780b8c33e33923f356b7e71b79e56)
/*
 * Copyright (C) 2022-2024, STMicroelectronics - All Rights Reserved
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <drivers/st/stm32mp_ddr.h>
#include <drivers/st/stm32mp_ddrctrl_regs.h>
#include <lib/mmio.h>

#include <platform_def.h>

#define INVALID_OFFSET	0xFFU

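/*
 * Set when the AXI port(s) or the host interface are disabled for a
 * quasi-dynamic register update, so they can be re-enabled afterwards.
 */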
static bool axi_port_reenable_request;
static bool host_interface_reenable_request;

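/* Return the base address of either the DDR PHY or the DDR controller */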
static uintptr_t get_base_addr(const struct stm32mp_ddr_priv *priv, enum stm32mp_ddr_base_type base)
{
	if (base == DDRPHY_BASE) {
		return (uintptr_t)priv->phy;
	} else {
		return (uintptr_t)priv->ctl;
	}
}

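/*
 * Program a set of controller or PHY registers, reading each value from the
 * parameter structure at the offset given by the register descriptor.
 */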
void stm32mp_ddr_set_reg(const struct stm32mp_ddr_priv *priv, enum stm32mp_ddr_reg_type type,
			 const void *param, const struct stm32mp_ddr_reg_info *ddr_registers)
{
	unsigned int i;
	unsigned int value;
	enum stm32mp_ddr_base_type base = ddr_registers[type].base;
	uintptr_t base_addr = get_base_addr(priv, base);
	const struct stm32mp_ddr_reg_desc *desc = ddr_registers[type].desc;

	VERBOSE("init %s\n", ddr_registers[type].name);
	for (i = 0; i < ddr_registers[type].size; i++) {
		uintptr_t ptr = base_addr + desc[i].offset;

		if (desc[i].par_offset == INVALID_OFFSET) {
			ERROR("invalid parameter offset for %s - index %u\n",
			      ddr_registers[type].name, i);
			panic();
		} else {
			value = *((uint32_t *)((uintptr_t)param +
					       desc[i].par_offset));
			mmio_write_32(ptr, value);
		}
	}
}

/* Start a quasi-dynamic register update */
void stm32mp_ddr_start_sw_done(struct stm32mp_ddrctl *ctl)
{
	mmio_clrbits_32((uintptr_t)&ctl->swctl, DDRCTRL_SWCTL_SW_DONE);
	VERBOSE("[0x%lx] swctl = 0x%x\n",
		(uintptr_t)&ctl->swctl, mmio_read_32((uintptr_t)&ctl->swctl));
}

/* Wait for the quasi-dynamic register update to be acknowledged */
void stm32mp_ddr_wait_sw_done_ack(struct stm32mp_ddrctl *ctl)
{
	uint64_t timeout;
	uint32_t swstat;

	mmio_setbits_32((uintptr_t)&ctl->swctl, DDRCTRL_SWCTL_SW_DONE);
	VERBOSE("[0x%lx] swctl = 0x%x\n",
		(uintptr_t)&ctl->swctl, mmio_read_32((uintptr_t)&ctl->swctl));

	timeout = timeout_init_us(DDR_TIMEOUT_US_1S);
	do {
		swstat = mmio_read_32((uintptr_t)&ctl->swstat);
		VERBOSE("[0x%lx] swstat = 0x%x ",
			(uintptr_t)&ctl->swstat, swstat);
		if (timeout_elapsed(timeout)) {
			panic();
		}
	} while ((swstat & DDRCTRL_SWSTAT_SW_DONE_ACK) == 0U);

	VERBOSE("[0x%lx] swstat = 0x%x\n",
		(uintptr_t)&ctl->swstat, swstat);
}

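/* Enable the uMCTL2 AXI port(s), allowing masters to access the DDR */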
void stm32mp_ddr_enable_axi_port(struct stm32mp_ddrctl *ctl)
{
	/* Enable uMCTL2 AXI port 0 */
	mmio_setbits_32((uintptr_t)&ctl->pctrl_0, DDRCTRL_PCTRL_N_PORT_EN);
	VERBOSE("[0x%lx] pctrl_0 = 0x%x\n", (uintptr_t)&ctl->pctrl_0,
		mmio_read_32((uintptr_t)&ctl->pctrl_0));

#if STM32MP_DDR_DUAL_AXI_PORT
	/* Enable uMCTL2 AXI port 1 */
	mmio_setbits_32((uintptr_t)&ctl->pctrl_1, DDRCTRL_PCTRL_N_PORT_EN);
	VERBOSE("[0x%lx] pctrl_1 = 0x%x\n", (uintptr_t)&ctl->pctrl_1,
		mmio_read_32((uintptr_t)&ctl->pctrl_1));
#endif
}

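/*
 * Disable the uMCTL2 AXI port(s) and wait until they are idle.
 * Return 0 on success, -1 if the ports do not become idle in time.
 */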
int stm32mp_ddr_disable_axi_port(struct stm32mp_ddrctl *ctl)
{
	uint64_t timeout;
	uint32_t pstat;

	/* Disable uMCTL2 AXI port 0 */
	mmio_clrbits_32((uintptr_t)&ctl->pctrl_0, DDRCTRL_PCTRL_N_PORT_EN);
	VERBOSE("[0x%lx] pctrl_0 = 0x%x\n", (uintptr_t)&ctl->pctrl_0,
		mmio_read_32((uintptr_t)&ctl->pctrl_0));

#if STM32MP_DDR_DUAL_AXI_PORT
	/* Disable uMCTL2 AXI port 1 */
	mmio_clrbits_32((uintptr_t)&ctl->pctrl_1, DDRCTRL_PCTRL_N_PORT_EN);
	VERBOSE("[0x%lx] pctrl_1 = 0x%x\n", (uintptr_t)&ctl->pctrl_1,
		mmio_read_32((uintptr_t)&ctl->pctrl_1));
#endif

	/*
	 * Wait until all AXI ports are idle:
	 * Poll PSTAT.rd_port_busy_n = 0
	 * Poll PSTAT.wr_port_busy_n = 0
	 */
	timeout = timeout_init_us(DDR_TIMEOUT_US_1S);
	do {
		pstat = mmio_read_32((uintptr_t)&ctl->pstat);
		VERBOSE("[0x%lx] pstat = 0x%x ",
			(uintptr_t)&ctl->pstat, pstat);
		if (timeout_elapsed(timeout)) {
			return -1;
		}
	} while (pstat != 0U);

	return 0;
}

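/* Report whether uMCTL2 AXI port 0 is currently enabled */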
static bool ddr_is_axi_port_enabled(struct stm32mp_ddrctl *ctl)
{
	return (mmio_read_32((uintptr_t)&ctl->pctrl_0) & DDRCTRL_PCTRL_N_PORT_EN) != 0U;
}

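/* Re-enable the host interface (HIF) by clearing the dis_hif debug bit */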
void stm32mp_ddr_enable_host_interface(struct stm32mp_ddrctl *ctl)
{
	mmio_clrbits_32((uintptr_t)&ctl->dbg1, DDRCTRL_DBG1_DIS_HIF);
	VERBOSE("[0x%lx] dbg1 = 0x%x\n",
		(uintptr_t)&ctl->dbg1,
		mmio_read_32((uintptr_t)&ctl->dbg1));
}

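/*
 * Disable the host interface (HIF) and wait until all read/write queues
 * and data pipelines are empty.
 */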
void stm32mp_ddr_disable_host_interface(struct stm32mp_ddrctl *ctl)
{
	uint64_t timeout;
	uint32_t dbgcam;
	int count = 0;

	mmio_setbits_32((uintptr_t)&ctl->dbg1, DDRCTRL_DBG1_DIS_HIF);
	VERBOSE("[0x%lx] dbg1 = 0x%x\n",
		(uintptr_t)&ctl->dbg1,
		mmio_read_32((uintptr_t)&ctl->dbg1));

	/*
	 * Wait until all queues and pipelines are empty:
	 * Poll DBGCAM.dbg_wr_q_empty = 1
	 * Poll DBGCAM.dbg_rd_q_empty = 1
	 * Poll DBGCAM.dbg_wr_data_pipeline_empty = 1
	 * Poll DBGCAM.dbg_rd_data_pipeline_empty = 1
	 *
	 * data_pipeline fields must be polled twice to ensure
	 * value propagation, so count is added to the loop condition.
	 */
	timeout = timeout_init_us(DDR_TIMEOUT_US_1S);
	do {
		dbgcam = mmio_read_32((uintptr_t)&ctl->dbgcam);
		VERBOSE("[0x%lx] dbgcam = 0x%x ",
			(uintptr_t)&ctl->dbgcam, dbgcam);
		if (timeout_elapsed(timeout)) {
			panic();
		}
		count++;
	} while (((dbgcam & DDRCTRL_DBG_Q_AND_DATA_PIPELINE_EMPTY) !=
		  DDRCTRL_DBG_Q_AND_DATA_PIPELINE_EMPTY) || (count < 2));
}

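/* Report whether the host interface (HIF) is currently enabled */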
static bool ddr_is_host_interface_enabled(struct stm32mp_ddrctl *ctl)
{
	return (mmio_read_32((uintptr_t)&ctl->dbg1) & DDRCTRL_DBG1_DIS_HIF) == 0U;
}

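/*
 * Request software-driven self-refresh entry and wait until it takes effect.
 * Return 0 on success, -1 if self-refresh is not reached in time.
 */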
int stm32mp_ddr_sw_selfref_entry(struct stm32mp_ddrctl *ctl)
{
	uint64_t timeout;
	uint32_t stat;
	uint32_t operating_mode;
	uint32_t selfref_type;

	mmio_setbits_32((uintptr_t)&ctl->pwrctl, DDRCTRL_PWRCTL_SELFREF_SW);
	VERBOSE("[0x%lx] pwrctl = 0x%x\n",
		(uintptr_t)&ctl->pwrctl,
		mmio_read_32((uintptr_t)&ctl->pwrctl));

	/*
	 * Wait for the operating mode to switch to self-refresh,
	 * i.e. STAT.operating_mode[1:0] == 0b11.
	 * Also check STAT.selfref_type[1:0] == 0b10 to ensure the
	 * transition to self-refresh was caused by software.
	 */
	timeout = timeout_init_us(DDR_TIMEOUT_500US);
	while (!timeout_elapsed(timeout)) {
		stat = mmio_read_32((uintptr_t)&ctl->stat);
		operating_mode = stat & DDRCTRL_STAT_OPERATING_MODE_MASK;
		selfref_type = stat & DDRCTRL_STAT_SELFREF_TYPE_MASK;

		if ((operating_mode == DDRCTRL_STAT_OPERATING_MODE_SR) &&
		    (selfref_type == DDRCTRL_STAT_SELFREF_TYPE_SR)) {
			return 0;
		}
	}

	return -1;
}

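/* Release the software self-refresh request */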
void stm32mp_ddr_sw_selfref_exit(struct stm32mp_ddrctl *ctl)
{
	mmio_clrbits_32((uintptr_t)&ctl->pwrctl, DDRCTRL_PWRCTL_SELFREF_SW);
	VERBOSE("[0x%lx] pwrctl = 0x%x\n",
		(uintptr_t)&ctl->pwrctl,
		mmio_read_32((uintptr_t)&ctl->pwrctl));
}

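/*
 * Prepare a group 3 quasi-dynamic (QD3) register update: stop traffic by
 * disabling the AXI port(s) and the host interface if they are active,
 * record what was disabled, then start the software-done sequence.
 */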
void stm32mp_ddr_set_qd3_update_conditions(struct stm32mp_ddrctl *ctl)
{
	if (ddr_is_axi_port_enabled(ctl)) {
		if (stm32mp_ddr_disable_axi_port(ctl) != 0) {
			panic();
		}
		axi_port_reenable_request = true;
	}

	if (ddr_is_host_interface_enabled(ctl)) {
		stm32mp_ddr_disable_host_interface(ctl);
		host_interface_reenable_request = true;
	}

	stm32mp_ddr_start_sw_done(ctl);
}

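/*
 * Complete a group 3 quasi-dynamic (QD3) register update: wait for the
 * software-done acknowledgment, then re-enable whatever interfaces were
 * disabled by stm32mp_ddr_set_qd3_update_conditions().
 */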
void stm32mp_ddr_unset_qd3_update_conditions(struct stm32mp_ddrctl *ctl)
{
	stm32mp_ddr_wait_sw_done_ack(ctl);

	if (host_interface_reenable_request) {
		stm32mp_ddr_enable_host_interface(ctl);
		host_interface_reenable_request = false;
	}

	if (axi_port_reenable_request) {
		stm32mp_ddr_enable_axi_port(ctl);
		axi_port_reenable_request = false;
	}
}

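/*
 * Update rfshctl3.refresh_update_level and poll rfshctl3 until the
 * controller reflects the expected level, which acknowledges that the
 * refresh registers have been taken into account.
 */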
void stm32mp_ddr_wait_refresh_update_done_ack(struct stm32mp_ddrctl *ctl)
{
	uint64_t timeout;
	uint32_t rfshctl3;
	uint32_t refresh_update_level = DDRCTRL_RFSHCTL3_REFRESH_UPDATE_LEVEL;

	/* Toggle rfshctl3.refresh_update_level */
	rfshctl3 = mmio_read_32((uintptr_t)&ctl->rfshctl3);
	if ((rfshctl3 & refresh_update_level) == refresh_update_level) {
		mmio_setbits_32((uintptr_t)&ctl->rfshctl3, refresh_update_level);
	} else {
		mmio_clrbits_32((uintptr_t)&ctl->rfshctl3, refresh_update_level);
		refresh_update_level = 0U;
	}

	VERBOSE("[0x%lx] rfshctl3 = 0x%x\n",
		(uintptr_t)&ctl->rfshctl3, mmio_read_32((uintptr_t)&ctl->rfshctl3));

	timeout = timeout_init_us(DDR_TIMEOUT_US_1S);
	do {
		rfshctl3 = mmio_read_32((uintptr_t)&ctl->rfshctl3);
		VERBOSE("[0x%lx] rfshctl3 = 0x%x ", (uintptr_t)&ctl->rfshctl3, rfshctl3);
		if (timeout_elapsed(timeout)) {
			panic();
		}
	} while ((rfshctl3 & DDRCTRL_RFSHCTL3_REFRESH_UPDATE_LEVEL) != refresh_update_level);

	VERBOSE("[0x%lx] rfshctl3 = 0x%x\n", (uintptr_t)&ctl->rfshctl3, rfshctl3);
}