/*
 * Copyright 2025 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include "../drivers/arm/gic/v3/gicv3_private.h"

#include <drivers/arm/gic.h>
#include <lib/mmio.h>
#include <scmi_imx9.h>

#include <ele_api.h>
#include <imx9_sys_sleep.h>
#include <imx_scmi_client.h>
#include <plat_imx8.h>

#define IRQ_MASK(x)	irq_mask[(x) / 32U]
#define IRQ_SHIFT(x)	(1U << ((x) % 32U))

static uint32_t irq_mask[IMR_NUM] = { 0x0 };
static struct scmi_per_lpm_config per_lpm[PER_NUM];

static const uint32_t gpio_ctrl_offset[GPIO_CTRL_REG_NUM] = {
	0xc, 0x10, 0x14, 0x18, 0x1c, 0x40, 0x54, 0x58
};

bool has_netc_irq;
static bool has_wakeup_irq;
static bool gpio_wakeup;
bool keep_wakeupmix_on;

#if HAS_XSPI_SUPPORT
static uint32_t xspi_mto[2];

static void xspi_save(void)
{
	/* Save the XSPI MTO registers */
	xspi_mto[0] = mmio_read_32(XSPI1_BASE + XSPI_MTO);
	xspi_mto[1] = mmio_read_32(XSPI2_BASE + XSPI_MTO);
}

static void xspi_restore(void)
{
	/* Request ELE to release the GMID first */
	ele_release_gmid();
	mmio_write_32(XSPI1_BASE + XSPI_MTO, xspi_mto[0]);
	mmio_write_32(XSPI2_BASE + XSPI_MTO, xspi_mto[1]);
}
#endif

static void gpio_save(struct gpio_ctx *ctx)
{
	for (uint32_t i = 0U; i < GPIO_CTRL_REG_NUM; i++) {
		/* The first 4 regs are for permission control */
		if (i < 4U) {
			ctx->port_ctrl[i] = mmio_read_32(ctx->base + gpio_ctrl_offset[i]);
			/* Clear the permission to read the GPIO non-secure settings */
			mmio_write_32(ctx->base + gpio_ctrl_offset[i], 0x0);
		} else {
			ctx->port_ctrl[i] = mmio_read_32(ctx->base + gpio_ctrl_offset[i]);
		}
	}

	/* Save the GPIO ICRs */
	for (uint32_t i = 0U; i < ctx->pin_num; i++) {
		ctx->gpio_icr[i] = mmio_read_32(ctx->base + 0x80 + i * 4U);
		/* Mark if any GPIO pin is used as a wakeup IRQ */
		if (ctx->gpio_icr[i] != 0U) {
			gpio_wakeup = true;
		}
	}

	/* Restore the GPIO permission */
	for (uint32_t i = 0U; i < 4U; i++) {
		mmio_write_32(ctx->base + gpio_ctrl_offset[i], ctx->port_ctrl[i]);
	}
}

static void gpio_restore(struct gpio_ctx *ctx)
{
	/* Clear the GPIO permission */
	for (uint32_t i = 0U; i < 4U; i++) {
		mmio_write_32(ctx->base + gpio_ctrl_offset[i], 0x0);
	}

	for (uint32_t i = 0U; i < ctx->pin_num; i++) {
		mmio_write_32(ctx->base + 0x80 + i * 4U, ctx->gpio_icr[i]);
	}

	for (uint32_t i = 4U; i < GPIO_CTRL_REG_NUM; i++) {
		mmio_write_32(ctx->base + gpio_ctrl_offset[i], ctx->port_ctrl[i]);
	}

	/* Restore the permission config last */
	for (uint32_t i = 0U; i < 4U; i++) {
		mmio_write_32(ctx->base + gpio_ctrl_offset[i], ctx->port_ctrl[i]);
	}

	gpio_wakeup = false;
}

static void wdog_save(struct wdog_ctx *wdog)
{
	wdog->regs[0] = mmio_read_32(wdog->base);
	wdog->regs[1] = mmio_read_32(wdog->base + 0x8);
}

static void wdog_restore(struct wdog_ctx *wdog)
{
	uint32_t cs, toval;

	cs = mmio_read_32(wdog->base);
	toval = mmio_read_32(wdog->base + 0x8);

	/* The wdog did not lose context, no need to restore */
	if (cs == wdog->regs[0] && toval == wdog->regs[1]) {
		return;
	}

	/* Reconfigure the CS */
	mmio_write_32(wdog->base, wdog->regs[0]);
	/* Set the timeout value */
	mmio_write_32(wdog->base + 0x8, wdog->regs[1]);

	/* Wait for the lock status */
	while ((mmio_read_32(wdog->base) & BIT(11))) {
		;
	}

	/* Wait for the config done */
	while (!(mmio_read_32(wdog->base) & BIT(10))) {
		;
	}
}

static inline bool active_wakeup_irq(uint32_t irq)
{
	return !(IRQ_MASK(irq) & IRQ_SHIFT(irq));
}

/*
 * For peripherals like CANs, GPIOs & UARTs that need to support
 * async wakeup when the clock is gated, the LPCGs of these IPs need
 * to be changed to CPU LPM controlled, and for CANs & UARTs, we also
 * need to make sure their ROOT clock slice is enabled.
 */
static void peripheral_qchannel_hsk(bool en)
{
	uint32_t num_hsks = 0U;

	for (uint32_t i = 0U; i < ARRAY_SIZE(per_hsk_cfg); i++) {
		if (active_wakeup_irq(per_hsk_cfg[i].wakeup_irq)) {
			per_lpm[num_hsks].perId = per_hsk_cfg[i].per_idx;
			per_lpm[num_hsks].lpmSetting = en ? SCMI_CPU_PD_LPM_ON_RUN_WAIT_STOP :
							    SCMI_CPU_PD_LPM_ON_ALWAYS;
			num_hsks++;
		}
	}

	scmi_per_lpm_mode_set(imx9_scmi_handle, IMX9_SCMI_CPU_A55P,
			      num_hsks, per_lpm);
}

void imx_set_sys_wakeup(uint32_t last_core, bool pdn)
{
	uintptr_t gicd_base = PLAT_GICD_BASE;

	/* Set the GPC IMRs based on the GIC IRQ mask setting */
	for (uint32_t i = 0U; i < IMR_NUM; i++) {
		if (pdn) {
			/* Set the wakeup irq based on the GIC */
			irq_mask[i] =
				~gicd_read_isenabler(gicd_base, 32 * (i + 1));
		} else {
			irq_mask[i] = 0xFFFFFFFF;
		}

		if (~irq_mask[i] & wakeup_irq_mask[i]) {
			/* NETC wakeup is tracked separately from other wakeup sources */
			if (i == (NETC_IREC_PCI_INT_X0 / 32U) &&
			    (wakeup_irq_mask[i] & IRQ_SHIFT(NETC_IREC_PCI_INT_X0))) {
				has_netc_irq = true;
			} else {
				has_wakeup_irq = true;
			}
		}
	}

	/* Set the IRQ wakeup mask for the last core & the cluster */
	scmi_core_Irq_wake_set(imx9_scmi_handle, IMX9_SCMI_CPU_A55P,
			       0, IMR_NUM, irq_mask);

	scmi_core_Irq_wake_set(imx9_scmi_handle, SCMI_CPU_A55_ID(last_core),
			       0, IMR_NUM, irq_mask);

	/* Configure the low power wakeup source interface */
	peripheral_qchannel_hsk(pdn);
}

void imx9_sys_sleep_prepare(uint32_t core_id)
{
	/* Save the GIC context */
	gic_save();

	/* Save the context of the GPIOs in wakeupmix */
	for (uint32_t i = 0U; i < GPIO_NUM; i++) {
		gpio_save(&gpios[i]);
	}

	/* Save the wdog3/4 context */
	for (uint32_t i = 0U; i < WDOG_NUM; i++) {
		wdog_save(&wdogs[i]);
	}

#if HAS_XSPI_SUPPORT
	xspi_save();
#endif
	imx_set_sys_wakeup(core_id, true);

	keep_wakeupmix_on = gpio_wakeup || has_wakeup_irq;
}

void imx9_sys_sleep_unprepare(uint32_t core_id)
{
	/* Restore the GIC context */
	gic_resume();

#if HAS_XSPI_SUPPORT
	xspi_restore();
#endif
	/* Restore the context of the GPIOs in wakeupmix */
	for (uint32_t i = 0U; i < GPIO_NUM; i++) {
		gpio_restore(&gpios[i]);
	}

	/* Restore the wdog3/4 context */
	for (uint32_t i = 0U; i < WDOG_NUM; i++) {
		wdog_restore(&wdogs[i]);
	}

	imx_set_sys_wakeup(core_id, false);

	has_wakeup_irq = false;
}