/*
 * Copyright (c) 2022, Xilinx, Inc. All rights reserved.
 * Copyright (c) 2022-2023, Advanced Micro Devices, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * APU specific definition of processors in the subsystem as well as functions
 * for getting information about and changing state of the APU.
 */

#include <assert.h>

#include <drivers/arm/gic_common.h>
#include <drivers/arm/gicv3.h>
#include <lib/bakery_lock.h>
#include <lib/mmio.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <plat/common/platform.h>

#include <plat_ipi.h>
#include <platform_def.h>
#include "pm_api_sys.h"
#include "pm_client.h"
#include <versal_net_def.h>

#define UNDEFINED_CPUID		(~0)

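/*
 * The macro below generates read_cpu_pwrctrl_val()/write_cpu_pwrctrl_val()
 * accessors for the implementation-defined core power control register
 * CPUPWRCTLR_EL1 (encoded as S3_0_C15_C2_7). Its CORE_PWRDN_EN bit signals
 * the power controller that the core may be powered down on its next WFI.
 */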
DEFINE_RENAME_SYSREG_RW_FUNCS(cpu_pwrctrl_val, S3_0_C15_C2_7)

/*
 * On Armv8.2 the caches are turned off automatically when a CPU powers down,
 * so it is safe to use a spinlock here.
 */
#if !HW_ASSISTED_COHERENCY
DEFINE_BAKERY_LOCK(pm_client_secure_lock);
static inline void pm_client_lock_get(void)
{
        bakery_lock_get(&pm_client_secure_lock);
}

static inline void pm_client_lock_release(void)
{
        bakery_lock_release(&pm_client_secure_lock);
}
#else
spinlock_t pm_client_secure_lock;
static inline void pm_client_lock_get(void)
{
        spin_lock(&pm_client_secure_lock);
}

static inline void pm_client_lock_release(void)
{
        spin_unlock(&pm_client_secure_lock);
}
#endif

static const struct pm_ipi apu_ipi = {
        .local_ipi_id = IPI_LOCAL_ID,
        .remote_ipi_id = IPI_REMOTE_ID,
        .buffer_base = IPI_BUFFER_LOCAL_BASE,
};

/* Order in pm_procs_all array must match cpu ids */
static const struct pm_proc pm_procs_all[] = {
        {
                .node_id = PM_DEV_CLUSTER0_ACPU_0,
                .ipi = &apu_ipi,
                .pwrdn_mask = 0,
        },
        {
                .node_id = PM_DEV_CLUSTER0_ACPU_1,
                .ipi = &apu_ipi,
                .pwrdn_mask = 0,
        },
        {
                .node_id = PM_DEV_CLUSTER0_ACPU_2,
                .ipi = &apu_ipi,
                .pwrdn_mask = 0,
        },
        {
                .node_id = PM_DEV_CLUSTER0_ACPU_3,
                .ipi = &apu_ipi,
                .pwrdn_mask = 0,
        },
        {
                .node_id = PM_DEV_CLUSTER1_ACPU_0,
                .ipi = &apu_ipi,
                .pwrdn_mask = 0,
        },
        {
                .node_id = PM_DEV_CLUSTER1_ACPU_1,
                .ipi = &apu_ipi,
                .pwrdn_mask = 0,
        },
        {
                .node_id = PM_DEV_CLUSTER1_ACPU_2,
                .ipi = &apu_ipi,
                .pwrdn_mask = 0,
        },
        {
                .node_id = PM_DEV_CLUSTER1_ACPU_3,
                .ipi = &apu_ipi,
                .pwrdn_mask = 0,
        },
        {
                .node_id = PM_DEV_CLUSTER2_ACPU_0,
                .ipi = &apu_ipi,
                .pwrdn_mask = 0,
        },
        {
                .node_id = PM_DEV_CLUSTER2_ACPU_1,
                .ipi = &apu_ipi,
                .pwrdn_mask = 0,
        },
        {
                .node_id = PM_DEV_CLUSTER2_ACPU_2,
                .ipi = &apu_ipi,
                .pwrdn_mask = 0,
        },
        {
                .node_id = PM_DEV_CLUSTER2_ACPU_3,
                .ipi = &apu_ipi,
                .pwrdn_mask = 0,
        },
        {
                .node_id = PM_DEV_CLUSTER3_ACPU_0,
                .ipi = &apu_ipi,
                .pwrdn_mask = 0,
        },
        {
                .node_id = PM_DEV_CLUSTER3_ACPU_1,
                .ipi = &apu_ipi,
                .pwrdn_mask = 0,
        },
        {
                .node_id = PM_DEV_CLUSTER3_ACPU_2,
                .ipi = &apu_ipi,
                .pwrdn_mask = 0,
        },
        {
                .node_id = PM_DEV_CLUSTER3_ACPU_3,
                .ipi = &apu_ipi,
                .pwrdn_mask = 0,
        }
};

const struct pm_proc *primary_proc = &pm_procs_all[0];

/**
 * pm_get_proc() - returns pointer to the proc structure
 * @param cpuid	id of the cpu whose proc struct pointer should be returned
 *
 * @return	pointer to a proc structure if proc is found, otherwise NULL
 */
const struct pm_proc *pm_get_proc(uint32_t cpuid)
{
        if (cpuid < ARRAY_SIZE(pm_procs_all)) {
                return &pm_procs_all[cpuid];
        }

        NOTICE("ERROR: cpuid: %u proc NULL\n", cpuid);
        return NULL;
}

/**
 * irq_to_pm_node_idx - Get PM node index corresponding to the interrupt number
 * @irq:	Interrupt number
 *
 * Return: PM node index corresponding to the specified interrupt
 */
enum pm_device_node_idx irq_to_pm_node_idx(uint32_t irq)
{
        enum pm_device_node_idx dev_idx = XPM_NODEIDX_DEV_MIN;

        assert(irq <= IRQ_MAX);

        switch (irq) {
        case 20:
                dev_idx = XPM_NODEIDX_DEV_GPIO;
                break;
        case 21:
                dev_idx = XPM_NODEIDX_DEV_I2C_0;
                break;
        case 22:
                dev_idx = XPM_NODEIDX_DEV_I2C_1;
                break;
        case 23:
                dev_idx = XPM_NODEIDX_DEV_SPI_0;
                break;
        case 24:
                dev_idx = XPM_NODEIDX_DEV_SPI_1;
                break;
        case 25:
                dev_idx = XPM_NODEIDX_DEV_UART_0;
                break;
        case 26:
                dev_idx = XPM_NODEIDX_DEV_UART_1;
                break;
        case 27:
                dev_idx = XPM_NODEIDX_DEV_CAN_FD_0;
                break;
        case 28:
                dev_idx = XPM_NODEIDX_DEV_CAN_FD_1;
                break;
        case 29:
        case 30:
        case 31:
        case 32:
        case 33:
        case 98:
                dev_idx = XPM_NODEIDX_DEV_USB_0;
                break;
        case 34:
        case 35:
        case 36:
        case 37:
        case 38:
        case 99:
                dev_idx = XPM_NODEIDX_DEV_USB_1;
                break;
        case 39:
        case 40:
                dev_idx = XPM_NODEIDX_DEV_GEM_0;
                break;
        case 41:
        case 42:
                dev_idx = XPM_NODEIDX_DEV_GEM_1;
                break;
        case 43:
        case 44:
        case 45:
                dev_idx = XPM_NODEIDX_DEV_TTC_0;
                break;
        case 46:
        case 47:
        case 48:
                dev_idx = XPM_NODEIDX_DEV_TTC_1;
                break;
        case 49:
        case 50:
        case 51:
                dev_idx = XPM_NODEIDX_DEV_TTC_2;
                break;
        case 52:
        case 53:
        case 54:
                dev_idx = XPM_NODEIDX_DEV_TTC_3;
                break;
        case 72:
                dev_idx = XPM_NODEIDX_DEV_ADMA_0;
                break;
        case 73:
                dev_idx = XPM_NODEIDX_DEV_ADMA_1;
                break;
        case 74:
                dev_idx = XPM_NODEIDX_DEV_ADMA_2;
                break;
        case 75:
                dev_idx = XPM_NODEIDX_DEV_ADMA_3;
                break;
        case 76:
                dev_idx = XPM_NODEIDX_DEV_ADMA_4;
                break;
        case 77:
                dev_idx = XPM_NODEIDX_DEV_ADMA_5;
                break;
        case 78:
                dev_idx = XPM_NODEIDX_DEV_ADMA_6;
                break;
        case 79:
                dev_idx = XPM_NODEIDX_DEV_ADMA_7;
                break;
        case 184:
        case 185:
                dev_idx = XPM_NODEIDX_DEV_SDIO_0;
                break;
        case 186:
        case 187:
                dev_idx = XPM_NODEIDX_DEV_SDIO_1;
                break;
        case 200:
                dev_idx = XPM_NODEIDX_DEV_RTC;
                break;
        default:
                dev_idx = XPM_NODEIDX_DEV_MIN;
                break;
        }

        return dev_idx;
}

/**
 * pm_client_suspend() - Client-specific suspend actions
 *
 * This function should contain any PU-specific actions
 * required prior to sending the suspend request to the PMU.
 * Actions taken depend on the state the system is suspending to.
 *
 * @param proc	processor which needs to suspend
 * @param state	desired suspend state
 */
void pm_client_suspend(const struct pm_proc *proc, uint32_t state)
{
        uint32_t cpu_id = plat_my_core_pos();
        uintptr_t val;

        pm_client_lock_get();

        if (state == PM_STATE_SUSPEND_TO_RAM) {
                pm_client_set_wakeup_sources((uint32_t)proc->node_id);
        }

        /* Request core power down */
        val = read_cpu_pwrctrl_val();
        val |= CORE_PWRDN_EN_BIT_MASK;
        write_cpu_pwrctrl_val(val);

        isb();

        /* Clear power down interrupt status before enabling */
        mmio_write_32(APU_PCIL_CORE_X_ISR_POWER_REG(cpu_id),
                      APU_PCIL_CORE_X_ISR_POWER_MASK);
        /* Enable power down interrupt */
        mmio_write_32(APU_PCIL_CORE_X_IEN_POWER_REG(cpu_id),
                      APU_PCIL_CORE_X_IEN_POWER_MASK);
        /* Clear wakeup interrupt status before enabling */
        mmio_write_32(APU_PCIL_CORE_X_ISR_WAKE_REG(cpu_id),
                      APU_PCIL_CORE_X_ISR_WAKE_MASK);
        /* Enable wake interrupt */
        mmio_write_32(APU_PCIL_CORE_X_IEN_WAKE_REG(cpu_id),
                      APU_PCIL_CORE_X_IEN_WAKE_MASK);

        pm_client_lock_release();
}

/**
 * pm_get_cpuid() - get the local cpu ID for a global node ID
 * @param nid	node id of the processor
 *
 * @return	the cpu ID (starting from 0) for the subsystem
 */
static uint32_t pm_get_cpuid(uint32_t nid)
{
        for (size_t i = 0; i < ARRAY_SIZE(pm_procs_all); i++) {
                if (pm_procs_all[i].node_id == nid) {
                        return i;
                }
        }

        return UNDEFINED_CPUID;
}

/**
 * pm_client_wakeup() - Client-specific wakeup actions
 *
 * This function should contain any PU-specific actions
 * required for waking up another APU core.
 *
 * @param proc	processor which needs to wake up
 */
void pm_client_wakeup(const struct pm_proc *proc)
{
        uint32_t cpuid = pm_get_cpuid(proc->node_id);
        uintptr_t val;

        if (cpuid == UNDEFINED_CPUID) {
                return;
        }

        pm_client_lock_get();

        /* Clear powerdown request */
        val = read_cpu_pwrctrl_val();
        val &= ~CORE_PWRDN_EN_BIT_MASK;
        write_cpu_pwrctrl_val(val);

        isb();

        /* Disable power down interrupt */
        mmio_write_32(APU_PCIL_CORE_X_IDS_POWER_REG(cpuid),
                      APU_PCIL_CORE_X_IDS_POWER_MASK);
        /* Clear wakeup interrupt status before disabling */
        mmio_write_32(APU_PCIL_CORE_X_ISR_WAKE_REG(cpuid),
                      APU_PCIL_CORE_X_ISR_WAKE_MASK);
        /* Disable wake interrupt */
        mmio_write_32(APU_PCIL_CORE_X_IDS_WAKE_REG(cpuid),
                      APU_PCIL_CORE_X_IDS_WAKE_MASK);

        pm_client_lock_release();
}

/**
 * pm_client_abort_suspend() - Client-specific abort-suspend actions
 *
 * This function should contain any PU-specific actions
 * required for aborting a prior suspend request.
 */
void pm_client_abort_suspend(void)
{
        uint32_t cpu_id = plat_my_core_pos();
        uintptr_t val;

        /* Enable interrupts at processor level (for current cpu) */
        gicv3_cpuif_enable(plat_my_core_pos());

        pm_client_lock_get();

        /* Clear powerdown request */
        val = read_cpu_pwrctrl_val();
        val &= ~CORE_PWRDN_EN_BIT_MASK;
        write_cpu_pwrctrl_val(val);

        isb();

        /* Disable power down interrupt */
        mmio_write_32(APU_PCIL_CORE_X_IDS_POWER_REG(cpu_id),
                      APU_PCIL_CORE_X_IDS_POWER_MASK);

        pm_client_lock_release();
}