/*
 * Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <plat/common/platform.h>
#include <services/std_svc.h>

#include <gpc.h>
#include <platform_def.h>

#define FSL_SIP_CONFIG_GPC_MASK	U(0x00)
#define FSL_SIP_CONFIG_GPC_UNMASK	U(0x01)
#define FSL_SIP_CONFIG_GPC_SET_WAKE	U(0x02)
#define FSL_SIP_CONFIG_GPC_PM_DOMAIN	U(0x03)
#define FSL_SIP_CONFIG_GPC_SET_AFF	U(0x04)
#define FSL_SIP_CONFIG_GPC_CORE_WAKE	U(0x05)

#define MAX_HW_IRQ_NUM		U(128)
#define MAX_IMR_NUM		U(4)

static uint32_t gpc_saved_imrs[16];
static uint32_t gpc_wake_irqs[4];
static uint32_t gpc_imr_offset[] = {
	IMX_GPC_BASE + IMR1_CORE0_A53,
	IMX_GPC_BASE + IMR1_CORE1_A53,
	IMX_GPC_BASE + IMR1_CORE2_A53,
	IMX_GPC_BASE + IMR1_CORE3_A53,
	IMX_GPC_BASE + IMR1_CORE0_M4,
};

spinlock_t gpc_imr_lock[4];

static void gpc_imr_core_spin_lock(unsigned int core_id)
{
	spin_lock(&gpc_imr_lock[core_id]);
}

static void gpc_imr_core_spin_unlock(unsigned int core_id)
{
	spin_unlock(&gpc_imr_lock[core_id]);
}

/* save one IMR of the core and program it with the configured wake sources */
static void gpc_save_imr_lpm(unsigned int core_id, unsigned int imr_idx)
{
	uintptr_t reg = gpc_imr_offset[core_id] + imr_idx * 4;

	gpc_imr_core_spin_lock(core_id);

	gpc_saved_imrs[core_id + imr_idx * 4] = mmio_read_32(reg);
	mmio_write_32(reg, ~gpc_wake_irqs[imr_idx]);

	gpc_imr_core_spin_unlock(core_id);
}

/* restore the saved IMR value of the core */
static void gpc_restore_imr_lpm(unsigned int core_id, unsigned int imr_idx)
{
	uintptr_t reg = gpc_imr_offset[core_id] + imr_idx * 4;
	uint32_t val = gpc_saved_imrs[core_id + imr_idx * 4];

	gpc_imr_core_spin_lock(core_id);

	mmio_write_32(reg, val);

	gpc_imr_core_spin_unlock(core_id);
}
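/*
 * Illustrative note (not part of the original driver): each core has
 * four 32-bit IMRs, and the save area above is laid out as
 * gpc_saved_imrs[core_id + imr_idx * 4]. For example, IMR3 of core 2
 * (core_id = 2, imr_idx = 2) sits at MMIO address
 * gpc_imr_offset[2] + 2 * 4 and is saved in gpc_saved_imrs[10].
 */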
/*
 * On i.MX8MQ, the A53 cluster can only enter LPM mode and shut down
 * the A53 PLAT power domain in system suspend mode, so LPM wakeup is
 * only used for system suspend. When the system enters suspend, any
 * A53 core can be the last core to suspend the system, but LPM wakeup
 * can only use C0's IMRs to wake the A53 cluster from LPM. So program
 * the wake sources into the IMRs before suspend and restore the saved
 * values after resume.
 */
void imx_set_sys_wakeup(unsigned int last_core, bool pdn)
{
	unsigned int imr, core;

	if (pdn) {
		for (imr = 0U; imr < MAX_IMR_NUM; imr++) {
			for (core = 0U; core < PLATFORM_CORE_COUNT; core++) {
				gpc_save_imr_lpm(core, imr);
			}
		}
	} else {
		for (imr = 0U; imr < MAX_IMR_NUM; imr++) {
			for (core = 0U; core < PLATFORM_CORE_COUNT; core++) {
				gpc_restore_imr_lpm(core, imr);
			}
		}
	}
}

static void imx_gpc_hwirq_mask(unsigned int hwirq)
{
	uintptr_t reg;
	unsigned int val;

	if (hwirq >= MAX_HW_IRQ_NUM) {
		return;
	}

	gpc_imr_core_spin_lock(0);
	reg = gpc_imr_offset[0] + (hwirq / 32) * 4;
	val = mmio_read_32(reg);
	val |= 1U << (hwirq % 32);
	mmio_write_32(reg, val);
	gpc_imr_core_spin_unlock(0);
}

static void imx_gpc_hwirq_unmask(unsigned int hwirq)
{
	uintptr_t reg;
	unsigned int val;

	if (hwirq >= MAX_HW_IRQ_NUM) {
		return;
	}

	gpc_imr_core_spin_lock(0);
	reg = gpc_imr_offset[0] + (hwirq / 32) * 4;
	val = mmio_read_32(reg);
	val &= ~(1U << (hwirq % 32));
	mmio_write_32(reg, val);
	gpc_imr_core_spin_unlock(0);
}

static void imx_gpc_set_wake(uint32_t hwirq, bool on)
{
	uint32_t mask, idx;

	if (hwirq >= MAX_HW_IRQ_NUM) {
		return;
	}

	mask = 1U << (hwirq % 32);
	idx = hwirq / 32;
	gpc_wake_irqs[idx] = on ? gpc_wake_irqs[idx] | mask :
				  gpc_wake_irqs[idx] & ~mask;
}
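/*
 * Worked example (illustrative, not from the original source): for
 * hwirq 67, idx = 67 / 32 = 2 and mask = 1U << (67 % 32) = 1U << 3,
 * so the wake setting lands in bit 3 of gpc_wake_irqs[2], and the
 * matching IMR word sits at byte offset 2 * 4 within each core's
 * IMR bank.
 */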
static void imx_gpc_mask_irq0(uint32_t core_id, uint32_t mask)
{
	gpc_imr_core_spin_lock(core_id);
	if (mask) {
		mmio_setbits_32(gpc_imr_offset[core_id], 1);
	} else {
		mmio_clrbits_32(gpc_imr_offset[core_id], 1);
	}

	dsb();
	gpc_imr_core_spin_unlock(core_id);
}

void imx_gpc_core_wake(uint32_t cpumask)
{
	for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
		if (cpumask & (1 << i)) {
			imx_gpc_mask_irq0(i, false);
		}
	}
}

void imx_gpc_set_a53_core_awake(uint32_t core_id)
{
	imx_gpc_mask_irq0(core_id, true);
}

static void imx_gpc_set_affinity(uint32_t hwirq, unsigned int cpu_idx)
{
	uintptr_t reg;
	unsigned int val;

	if (hwirq >= MAX_HW_IRQ_NUM || cpu_idx >= 4) {
		return;
	}

	/*
	 * Use the IMR mask/unmask bits as the affinity function: unmask
	 * the IMR bit to enable IRQ wakeup for this core.
	 */
	gpc_imr_core_spin_lock(cpu_idx);
	reg = gpc_imr_offset[cpu_idx] + (hwirq / 32) * 4;
	val = mmio_read_32(reg);
	val &= ~(1U << (hwirq % 32));
	mmio_write_32(reg, val);
	gpc_imr_core_spin_unlock(cpu_idx);

	/* clear the affinity of the other cores by masking the IMR bit */
	for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
		if (cpu_idx != i) {
			gpc_imr_core_spin_lock(i);
			reg = gpc_imr_offset[i] + (hwirq / 32) * 4;
			val = mmio_read_32(reg);
			val |= (1U << (hwirq % 32));
			mmio_write_32(reg, val);
			gpc_imr_core_spin_unlock(i);
		}
	}
}

/* use wfi to power down the core */
void imx_set_cpu_pwr_off(unsigned int core_id)
{
	bakery_lock_get(&gpc_lock);

	/* enable the wfi power down of the core */
	mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id) |
			(1 << (core_id + 20)));

	bakery_lock_release(&gpc_lock);

	/* assert the pcg pcr bit of the core */
	mmio_setbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1);
}

/* if out of lpm, we need to do the reverse steps */
void imx_set_cpu_lpm(unsigned int core_id, bool pdn)
{
	bakery_lock_get(&gpc_lock);

	if (pdn) {
		/* enable the core WFI PDN & IRQ PUP */
		mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id) |
				(1 << (core_id + 20)) | COREx_IRQ_WUP(core_id));
		/* assert the pcg pcr bit of the core */
		mmio_setbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1);
	} else {
		/* disable the core WFI PDN & IRQ PUP */
		mmio_clrbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id) |
				COREx_IRQ_WUP(core_id));
		/* deassert the pcg pcr bit of the core */
		mmio_clrbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1);
	}

	bakery_lock_release(&gpc_lock);
}
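/*
 * Usage sketch (assumption, based on the usual TF-A PSCI flow): the
 * platform PSCI hooks are expected to call the helpers above, e.g.
 *
 *	imx_set_cpu_lpm(core_id, true);   -- when entering a core idle state
 *	imx_set_cpu_lpm(core_id, false);  -- on wakeup, reversing the steps
 *
 * so the GPC is armed before the core executes wfi and disarmed again
 * on resume.
 */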
void imx_pup_pdn_slot_config(int last_core, bool pdn)
{
	if (pdn) {
		/* SLOT0 is used for A53 PLAT power down */
		mmio_setbits_32(IMX_GPC_BASE + SLTx_CFG(0), SLT_PLAT_PDN);
		/* SLOT1 is used for A53 PLAT power up */
		mmio_setbits_32(IMX_GPC_BASE + SLTx_CFG(1), SLT_PLAT_PUP);
		/* SLOT2 is used for A53 primary core power up */
		mmio_setbits_32(IMX_GPC_BASE + SLTx_CFG(2), SLT_COREx_PUP(last_core));
		/* ACK setting: PLAT ACK for PDN, CORE ACK for PUP */
		mmio_clrsetbits_32(IMX_GPC_BASE + PGC_ACK_SEL_A53, 0xFFFFFFFF,
				   A53_PLAT_PDN_ACK | A53_PLAT_PUP_ACK);
	} else {
		mmio_clrbits_32(IMX_GPC_BASE + SLTx_CFG(0), 0xFFFFFFFF);
		mmio_clrbits_32(IMX_GPC_BASE + SLTx_CFG(1), 0xFFFFFFFF);
		mmio_clrbits_32(IMX_GPC_BASE + SLTx_CFG(2), 0xFFFFFFFF);
		mmio_clrsetbits_32(IMX_GPC_BASE + PGC_ACK_SEL_A53, 0xFFFFFFFF,
				   A53_DUMMY_PDN_ACK | A53_DUMMY_PUP_ACK);
	}
}

void imx_set_cluster_powerdown(unsigned int last_core, uint8_t power_state)
{
	uint32_t val;

	if (is_local_state_off(power_state)) {
		val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC);
		val |= A53_LPM_STOP; /* enable C0~C1's STOP mode */
		val &= ~CPU_CLOCK_ON_LPM; /* disable the CPU clock in LPM mode */
		mmio_write_32(IMX_GPC_BASE + LPCR_A53_BSC, val);

		/* enable C2~C3's STOP mode */
		mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_BSC2, A53_LPM_STOP);

		/* enable PLAT/SCU power down */
		val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_AD);
		val &= ~EN_L2_WFI_PDN;
		val |= L2PGE | EN_PLAT_PDN;
		val &= ~COREx_IRQ_WUP(last_core); /* disable IRQ PUP for the last core */
		val |= COREx_LPM_PUP(last_core); /* enable LPM PUP for the last core */
		mmio_write_32(IMX_GPC_BASE + LPCR_A53_AD, val);

		imx_pup_pdn_slot_config(last_core, true);

		/* enable PLAT PGC */
		mmio_setbits_32(IMX_GPC_BASE + A53_PLAT_PGC, 0x1);
	} else {
		/* clear PLAT PGC */
		mmio_clrbits_32(IMX_GPC_BASE + A53_PLAT_PGC, 0x1);

		/* clear the slot and ack settings for cluster power down */
		imx_pup_pdn_slot_config(last_core, false);

		val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC);
		val &= ~A53_LPM_MASK; /* clear C0~C1's LPM mode */
		val |= CPU_CLOCK_ON_LPM; /* keep the CPU clock on in LPM */
		mmio_write_32(IMX_GPC_BASE + LPCR_A53_BSC, val);

		/* set C2~C3's LPM mode back to RUN */
		mmio_clrbits_32(IMX_GPC_BASE + LPCR_A53_BSC2, A53_LPM_MASK);

		/* clear PLAT/SCU power down */
		val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_AD);
		val |= EN_L2_WFI_PDN;
		val &= ~(L2PGE | EN_PLAT_PDN);
		val &= ~COREx_LPM_PUP(last_core); /* disable LPM PUP for the last core */
		mmio_write_32(IMX_GPC_BASE + LPCR_A53_AD, val);
	}
}

int imx_gpc_handler(uint32_t smc_fid,
		    u_register_t x1,
		    u_register_t x2,
		    u_register_t x3)
{
	switch (x1) {
	case FSL_SIP_CONFIG_GPC_CORE_WAKE:
		imx_gpc_core_wake(x2);
		break;
	case FSL_SIP_CONFIG_GPC_SET_WAKE:
		imx_gpc_set_wake(x2, x3);
		break;
	case FSL_SIP_CONFIG_GPC_MASK:
		imx_gpc_hwirq_mask(x2);
		break;
	case FSL_SIP_CONFIG_GPC_UNMASK:
		imx_gpc_hwirq_unmask(x2);
		break;
	case FSL_SIP_CONFIG_GPC_SET_AFF:
		imx_gpc_set_affinity(x2, x3);
		break;
	default:
		return SMC_UNK;
	}

	return 0;
}
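/*
 * Illustrative call flow (assumption, not part of this file): the
 * handler above is reached through the vendor SiP SMC service, with
 * the GPC sub-function ID in x1. From the non-secure OS, a request to
 * add hwirq 67 as a wake source would look roughly like:
 *
 *	struct arm_smccc_res res;
 *
 *	arm_smccc_smc(IMX_SIP_GPC, FSL_SIP_CONFIG_GPC_SET_WAKE,
 *		      67, 1, 0, 0, 0, 0, &res);
 *
 * where IMX_SIP_GPC is the SiP function ID that the platform's SMC
 * dispatcher routes to imx_gpc_handler().
 */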
void imx_gpc_init(void)
{
	uint32_t val;
	unsigned int i, j;

	/* mask all interrupts by default */
	for (i = 0U; i < MAX_IMR_NUM; i++) {
		for (j = 0U; j < ARRAY_SIZE(gpc_imr_offset); j++) {
			mmio_write_32(gpc_imr_offset[j] + i * 4, ~0x0);
		}
	}

	/*
	 * Due to a hardware design requirement, the GPR interrupt (#32)
	 * must be kept unmasked during RUN mode to avoid entering DSM
	 * mode by mistake.
	 */
	for (i = 0U; i < PLATFORM_CORE_COUNT; i++) {
		mmio_write_32(gpc_imr_offset[i], ~0x1);
	}

	/* leave IOMUX_GPC bit 12 on for core wakeup */
	mmio_setbits_32(IMX_IOMUX_GPR_BASE + 0x4, 1 << 12);

	/* use external IRQs to wake up C0~C3 from LPM */
	val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC);
	val |= IRQ_SRC_A53_WUP;
	/* clear the MASTER0 LPM handshake */
	val &= ~MASTER0_LPM_HSK;
	mmio_write_32(IMX_GPC_BASE + LPCR_A53_BSC, val);

	/* mask the M4 DSM trigger if M4 is NOT enabled */
	mmio_setbits_32(IMX_GPC_BASE + LPCR_M4, DSM_MODE_MASK);

	/* set all mix/PU in the A53 domain */
	mmio_write_32(IMX_GPC_BASE + PGC_CPU_0_1_MAPPING, 0xfffd);

	/* set the SCU timing */
	mmio_write_32(IMX_GPC_BASE + PGC_SCU_TIMING,
		      (0x59 << 10) | 0x5B | (0x2 << 20));

	/* set DUMMY PDN/PUP ACK by default for the A53 domain */
	mmio_write_32(IMX_GPC_BASE + PGC_ACK_SEL_A53, A53_DUMMY_PUP_ACK |
		      A53_DUMMY_PDN_ACK);

	/* disable DSM mode by default */
	mmio_clrbits_32(IMX_GPC_BASE + SLPCR, DSM_MODE_MASK);

	/*
	 * USB PHY power up needs to make sure the RESET bit in SRC is
	 * cleared, otherwise the PU power up bit in GPC will NOT
	 * self-clear. This only needs to be done once.
	 */
	mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG1PHY_SCR, 0x1);
	mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG2PHY_SCR, 0x1);

	/*
	 * For USB OTG, the limitations are:
	 * 1. before system clock configuration, the IPG clock runs at
	 *    12.5MHz, so the delay time should be longer than 82us.
	 * 2. after system clock configuration, the IPG clock runs at
	 *    66.5MHz, so the delay time should be longer than 15.3us.
	 * Use a 100us delay to make sure the USB OTG SRC bits are
	 * cleared safely.
	 */
	udelay(100);
}
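/*
 * Usage note (assumption, based on the usual i.MX8M BL31 flow):
 * imx_gpc_init() is expected to run once during BL31 platform setup
 * (e.g. from bl31_platform_setup()), before any of the power
 * management helpers above are exercised, so the IMRs, LPM settings
 * and ACK selections start from a known state.
 */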