/*
 * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>

#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
#include <lib/smccc.h>
#include <platform_def.h>
#include <services/std_svc.h>

#include <gpc.h>
#include <imx_sip_svc.h>

#define MIPI_PWR_REQ		BIT(0)
#define PCIE_PWR_REQ		BIT(1)
#define OTG1_PWR_REQ		BIT(2)
#define OTG2_PWR_REQ		BIT(3)
#define HSIOMIX_PWR_REQ		BIT(4)
#define GPU2D_PWR_REQ		BIT(6)
#define GPUMIX_PWR_REQ		BIT(7)
#define VPUMIX_PWR_REQ		BIT(8)
#define GPU3D_PWR_REQ		BIT(9)
#define DISPMIX_PWR_REQ		BIT(10)
#define VPU_G1_PWR_REQ		BIT(11)
#define VPU_G2_PWR_REQ		BIT(12)
#define VPU_H1_PWR_REQ		BIT(13)

#define HSIOMIX_ADB400_SYNC	(0x3 << 5)
#define DISPMIX_ADB400_SYNC	BIT(7)
#define VPUMIX_ADB400_SYNC	BIT(8)
#define GPU3D_ADB400_SYNC	BIT(9)
#define GPU2D_ADB400_SYNC	BIT(10)
#define GPUMIX_ADB400_SYNC	BIT(11)
#define HSIOMIX_ADB400_ACK	(0x3 << 23)
#define DISPMIX_ADB400_ACK	BIT(25)
#define VPUMIX_ADB400_ACK	BIT(26)
#define GPU3D_ADB400_ACK	BIT(27)
#define GPU2D_ADB400_ACK	BIT(28)
#define GPUMIX_ADB400_ACK	BIT(29)

#define MIPI_PGC		0xc00
#define PCIE_PGC		0xc40
#define OTG1_PGC		0xc80
#define OTG2_PGC		0xcc0
#define HSIOMIX_PGC		0xd00
#define GPU2D_PGC		0xd80
#define GPUMIX_PGC		0xdc0
#define VPUMIX_PGC		0xe00
#define GPU3D_PGC		0xe40
#define DISPMIX_PGC		0xe80
#define VPU_G1_PGC		0xec0
#define VPU_G2_PGC		0xf00
#define VPU_H1_PGC		0xf40

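/*
 * Note: the order of this enum must match the pu_domains[] table below,
 * since the domain ID is used directly as the index into that array.
 */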
enum pu_domain_id {
	HSIOMIX,
	PCIE,
	OTG1,
	OTG2,
	GPUMIX,
	VPUMIX,
	VPU_G1,
	VPU_G2,
	VPU_H1,
	DISPMIX,
	MIPI,
	/* the two domains below are only for ATF internal use */
	GPU2D,
	GPU3D,
	MAX_DOMAINS,
};

/* PU domain */
static struct imx_pwr_domain pu_domains[] = {
	IMX_MIX_DOMAIN(HSIOMIX, false),
	IMX_PD_DOMAIN(PCIE, false),
	IMX_PD_DOMAIN(OTG1, true),
	IMX_PD_DOMAIN(OTG2, true),
	IMX_MIX_DOMAIN(GPUMIX, false),
	IMX_MIX_DOMAIN(VPUMIX, false),
	IMX_PD_DOMAIN(VPU_G1, false),
	IMX_PD_DOMAIN(VPU_G2, false),
	IMX_PD_DOMAIN(VPU_H1, false),
	IMX_MIX_DOMAIN(DISPMIX, false),
	IMX_PD_DOMAIN(MIPI, false),
	/* the two domains below are only for ATF internal use */
	IMX_MIX_DOMAIN(GPU2D, false),
	IMX_MIX_DOMAIN(GPU3D, false),
};

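/* bitmask tracking which PU domains are currently powered on */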
static unsigned int pu_domain_status;

#define GPU_RCR		0x40
#define VPU_RCR		0x44

#define VPU_CTL_BASE		0x38330000
#define BLK_SFT_RSTN_CSR	0x0
#define H1_SFT_RSTN		BIT(2)
#define G1_SFT_RSTN		BIT(1)
#define G2_SFT_RSTN		BIT(0)

#define DISP_CTL_BASE		0x32e28000

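/*
 * Assert/de-assert the per-core soft reset bits (G1/G2/H1) in the VPU
 * block control register (BLK_SFT_RSTN_CSR) while the corresponding
 * VPU power domain is switched.
 */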
void vpu_sft_reset_assert(uint32_t domain_id)
{
	uint32_t val;

	val = mmio_read_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR);

	switch (domain_id) {
	case VPU_G1:
		val &= ~G1_SFT_RSTN;
		mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
		break;
	case VPU_G2:
		val &= ~G2_SFT_RSTN;
		mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
		break;
	case VPU_H1:
		val &= ~H1_SFT_RSTN;
		mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
		break;
	default:
		break;
	}
}

void vpu_sft_reset_deassert(uint32_t domain_id)
{
	uint32_t val;

	val = mmio_read_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR);

	switch (domain_id) {
	case VPU_G1:
		val |= G1_SFT_RSTN;
		mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
		break;
	case VPU_G2:
		val |= G2_SFT_RSTN;
		mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
		break;
	case VPU_H1:
		val |= H1_SFT_RSTN;
		mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
		break;
	default:
		break;
	}
}

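/*
 * Power a PU domain up ("on" == true) or down ("on" == false):
 * program the domain's PGC, trigger the power up/down request in the
 * GPC and wait for it to complete, and handle the extra VPU/GPU/DISPMIX
 * reset, clock and ADB400 handshake steps that some domains require.
 */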
void imx_gpc_pm_domain_enable(uint32_t domain_id, bool on)
{
	if (domain_id >= MAX_DOMAINS) {
		return;
	}

	struct imx_pwr_domain *pwr_domain = &pu_domains[domain_id];

	if (on) {
		pu_domain_status |= (1 << domain_id);

		if (domain_id == VPU_G1 || domain_id == VPU_G2 ||
		    domain_id == VPU_H1) {
			vpu_sft_reset_assert(domain_id);
		}

		/* HSIOMIX has no PU bit, so skip it */
		if (domain_id != HSIOMIX) {
			/* clear the PGC bit */
			mmio_clrbits_32(IMX_GPC_BASE + pwr_domain->pgc_offset, 0x1);

			/* power up the domain */
			mmio_setbits_32(IMX_GPC_BASE + PU_PGC_UP_TRG, pwr_domain->pwr_req);

			/* wait for power request done */
			while (mmio_read_32(IMX_GPC_BASE + PU_PGC_UP_TRG) & pwr_domain->pwr_req) {
				;
			}
		}

		if (domain_id == VPU_G1 || domain_id == VPU_G2 ||
		    domain_id == VPU_H1) {
			vpu_sft_reset_deassert(domain_id);
			/* delay for a while to make sure the reset is done */
			udelay(100);
		}

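		/*
		 * GPU2D and GPU3D are internal-only sub-domains (see
		 * pu_domain_id above), so they are powered up here as
		 * part of the GPUMIX power-up sequence.
		 */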
		if (domain_id == GPUMIX) {
			/* assert reset */
			mmio_write_32(IMX_SRC_BASE + GPU_RCR, 0x1);

			/* power up GPU2D */
			mmio_clrbits_32(IMX_GPC_BASE + GPU2D_PGC, 0x1);

			mmio_setbits_32(IMX_GPC_BASE + PU_PGC_UP_TRG, GPU2D_PWR_REQ);

			/* wait for power request done */
			while (mmio_read_32(IMX_GPC_BASE + PU_PGC_UP_TRG) & GPU2D_PWR_REQ) {
				;
			}

			udelay(1);

			/* power up GPU3D */
			mmio_clrbits_32(IMX_GPC_BASE + GPU3D_PGC, 0x1);

			mmio_setbits_32(IMX_GPC_BASE + PU_PGC_UP_TRG, GPU3D_PWR_REQ);

			/* wait for power request done */
			while (mmio_read_32(IMX_GPC_BASE + PU_PGC_UP_TRG) & GPU3D_PWR_REQ) {
				;
			}

			udelay(10);
			/* release the gpumix reset */
			mmio_write_32(IMX_SRC_BASE + GPU_RCR, 0x0);
			udelay(10);
		}

		/* VPU soft reset and clock enable */
		if (domain_id == VPUMIX) {
			mmio_write_32(IMX_SRC_BASE + VPU_RCR, 0x1);
			udelay(5);
			mmio_write_32(IMX_SRC_BASE + VPU_RCR, 0x0);
			udelay(5);

			/* enable all clocks */
			mmio_write_32(VPU_CTL_BASE + 0x4, 0x7);
		}

		if (domain_id == DISPMIX) {
			/* special setting for DISPMIX */
			mmio_write_32(DISP_CTL_BASE + 0x4, 0x1fff);
			mmio_write_32(DISP_CTL_BASE, 0x7f);
			mmio_write_32(DISP_CTL_BASE + 0x8, 0x30000);
		}

		/* handle the ADB400 sync */
		if (pwr_domain->need_sync) {
			/* clear adb power down request */
			mmio_setbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, pwr_domain->adb400_sync);

			/* wait for adb power request ack */
			while (!(mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & pwr_domain->adb400_ack)) {
				;
			}
		}

		if (domain_id == GPUMIX) {
			/* power up GPU2D ADB */
			mmio_setbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, GPU2D_ADB400_SYNC);

			/* wait for adb power request ack */
			while (!(mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & GPU2D_ADB400_ACK)) {
				;
			}

			/* power up GPU3D ADB */
			mmio_setbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, GPU3D_ADB400_SYNC);

			/* wait for adb power request ack */
			while (!(mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & GPU3D_ADB400_ACK)) {
				;
			}
		}
	} else {
		pu_domain_status &= ~(1 << domain_id);

		if (domain_id == OTG1 || domain_id == OTG2) {
			return;
		}

		/* GPU2D & GPU3D ADB power down */
		if (domain_id == GPUMIX) {
			mmio_clrbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, GPU2D_ADB400_SYNC);

			/* wait for adb power request ack */
			while ((mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & GPU2D_ADB400_ACK)) {
				;
			}

			mmio_clrbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, GPU3D_ADB400_SYNC);

			/* wait for adb power request ack */
			while ((mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & GPU3D_ADB400_ACK)) {
				;
			}
		}

		/* handle the ADB400 sync */
		if (pwr_domain->need_sync) {
			/* set adb power down request */
			mmio_clrbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, pwr_domain->adb400_sync);

			/* wait for adb power request ack */
			while ((mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & pwr_domain->adb400_ack)) {
				;
			}
		}

		if (domain_id == GPUMIX) {
			/* power down GPU2D */
			mmio_setbits_32(IMX_GPC_BASE + GPU2D_PGC, 0x1);

			mmio_setbits_32(IMX_GPC_BASE + PU_PGC_DN_TRG, GPU2D_PWR_REQ);

			/* wait for power request done */
			while (mmio_read_32(IMX_GPC_BASE + PU_PGC_DN_TRG) & GPU2D_PWR_REQ) {
				;
			}

			/* power down GPU3D */
			mmio_setbits_32(IMX_GPC_BASE + GPU3D_PGC, 0x1);

			mmio_setbits_32(IMX_GPC_BASE + PU_PGC_DN_TRG, GPU3D_PWR_REQ);

			/* wait for power request done */
			while (mmio_read_32(IMX_GPC_BASE + PU_PGC_DN_TRG) & GPU3D_PWR_REQ) {
				;
			}
		}

		/* HSIOMIX has no PU bit, so skip it */
		if (domain_id != HSIOMIX) {
			/* set the PGC bit */
			mmio_setbits_32(IMX_GPC_BASE + pwr_domain->pgc_offset, 0x1);

			/* power down the domain */
			mmio_setbits_32(IMX_GPC_BASE + PU_PGC_DN_TRG, pwr_domain->pwr_req);

			/* wait for power request done */
			while (mmio_read_32(IMX_GPC_BASE + PU_PGC_DN_TRG) & pwr_domain->pwr_req) {
				;
			}
		}
	}
}

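/*
 * One-time GPC setup: mask all wakeup IRQs, configure the A53 LPM
 * behaviour and CPU/SCU power-up timing, select the dummy PGC ACKs for
 * the A53 domain and program the default SLPCR low-power settings.
 */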
void imx_gpc_init(void)
{
	unsigned int val;
	int i;

	/* mask all the wakeup irq by default */
	for (i = 0; i < 4; i++) {
		mmio_write_32(IMX_GPC_BASE + IMR1_CORE0_A53 + i * 4, ~0x0);
		mmio_write_32(IMX_GPC_BASE + IMR1_CORE1_A53 + i * 4, ~0x0);
		mmio_write_32(IMX_GPC_BASE + IMR1_CORE2_A53 + i * 4, ~0x0);
		mmio_write_32(IMX_GPC_BASE + IMR1_CORE3_A53 + i * 4, ~0x0);
		mmio_write_32(IMX_GPC_BASE + IMR1_CORE0_M4 + i * 4, ~0x0);
	}

	val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC);
	/* use GIC wake_request to wake up C0~C3 from LPM */
	val |= 0x30c00000;
	/* clear the MASTER0 LPM handshake */
	val &= ~(1 << 6);
	mmio_write_32(IMX_GPC_BASE + LPCR_A53_BSC, val);

	/* clear MASTER1 & MASTER2 mapping in CPU0(A53) */
	mmio_clrbits_32(IMX_GPC_BASE + MST_CPU_MAPPING, (MASTER1_MAPPING |
		MASTER2_MAPPING));

	/* set all mix/PU in A53 domain */
	mmio_write_32(IMX_GPC_BASE + PGC_CPU_0_1_MAPPING, 0xffff);

	/*
	 * Set the CORE & SCU power up timing:
	 * SW = 0x1, SW2ISO = 0x1;
	 * the CPU CORE and SCU power up timing counter
	 * is driven by the 32K OSC, each domain's power up
	 * latency is (SW + SW2ISO) / 32768
	 */
	mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(0) + 0x4, 0x81);
	mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(1) + 0x4, 0x81);
	mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(2) + 0x4, 0x81);
	mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(3) + 0x4, 0x81);
	mmio_write_32(IMX_GPC_BASE + PLAT_PGC_PCR + 0x4, 0x81);
	mmio_write_32(IMX_GPC_BASE + PGC_SCU_TIMING,
		      (0x59 << 10) | 0x5B | (0x2 << 20));

	/* set DUMMY PDN/PUP ACK by default for A53 domain */
	mmio_write_32(IMX_GPC_BASE + PGC_ACK_SEL_A53,
		      A53_DUMMY_PUP_ACK | A53_DUMMY_PDN_ACK);

	/* clear DSM by default */
	val = mmio_read_32(IMX_GPC_BASE + SLPCR);
	val &= ~SLPCR_EN_DSM;
	/* enable the fast wakeup wait mode */
	val |= SLPCR_A53_FASTWUP_WAIT_MODE;
	/* clear the RBC */
	val &= ~(0x3f << SLPCR_RBC_COUNT_SHIFT);
	/* set the STBY_COUNT to 0x5, (128 * 30)us */
	val &= ~(0x7 << SLPCR_STBY_COUNT_SHFT);
	val |= (0x5 << SLPCR_STBY_COUNT_SHFT);
	mmio_write_32(IMX_GPC_BASE + SLPCR, val);

	/*
	 * USB PHY power up needs to make sure the RESET bit in SRC is clear,
	 * otherwise the PU power up bit in GPC will NOT self-clear.
	 * This only needs to be done once.
	 */
	mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG1PHY_SCR, 0x1);
	mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG2PHY_SCR, 0x1);
}
415