/*
 * Copyright (c) 2019-2022, Xilinx, Inc. All rights reserved.
 * Copyright (c) 2022-2025, Advanced Micro Devices, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * Top-level SMC handler for Versal Gen 2 power management calls and
 * IPI setup functions for communication with PMC.
 */

#include <errno.h>
#include <stdbool.h>

#include "../drivers/arm/gic/v3/gicv3_private.h"

#include <common/ep_info.h>
#include <common/runtime_svc.h>
#include <drivers/arm/gicv3.h>
#include <lib/psci/psci.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>

#include <plat_private.h>
#include "pm_api_sys.h"
#include "pm_client.h"
#include "pm_ipi.h"
#include "pm_svc_main.h"

/* GICD_IROUTER Interrupt_Routing_Mode bit: route the IRQ to any participating PE (1-of-N) */
#define MODE			0x80000000U

#define INVALID_SGI		0xFFU
#define PM_INIT_SUSPEND_CB	(30U)
#define PM_NOTIFY_CB		(32U)
#define EVENT_CPU_PWRDWN	(4U)
#define MBOX_SGI_SHARED_IPI	(7U)

/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 */
#define upper_32_bits(n)	((uint32_t)((n) >> 32U))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n)	((uint32_t)((n) & 0xffffffffU))

/**
 * EXTRACT_ARGS - extracts 32-bit payloads from 64-bit SMC arguments
 * @pm_arg: array of 32-bit payloads
 * @x: array of 64-bit SMC arguments
 */
#define EXTRACT_ARGS(pm_arg, x)						\
	for (uint32_t i = 0U; i < (PAYLOAD_ARG_CNT - 1U); i++) {	\
		if ((i % 2U) != 0U) {					\
			pm_arg[i] = lower_32_bits(x[(i / 2U) + 1U]);	\
		} else {						\
			pm_arg[i] = upper_32_bits(x[i / 2U]);		\
		}							\
	}

/* 1 sec (1000 ms) of wait timeout for secondary core down */
#define PWRDWN_WAIT_TIMEOUT	(1000U)

DEFINE_RENAME_SYSREG_RW_FUNCS(icc_asgi1r_el1, S3_0_C12_C11_6)

/* pm_up = true - UP, pm_up = false - DOWN */
static bool pm_up;
static uint32_t sgi = (uint32_t)INVALID_SGI;
static bool pwrdwn_req_received;

bool pm_pwrdwn_req_status(void)
{
	return pwrdwn_req_received;
}

static void notify_os(void)
{
	plat_ic_raise_ns_sgi((int)sgi, read_mpidr_el1());
}

static uint64_t cpu_pwrdwn_req_handler(uint32_t id, uint32_t flags,
				       void *handle, void *cookie)
{
	uint32_t cpu_id = plat_my_core_pos();

	VERBOSE("Powering down CPU %u\n", cpu_id);

	/* Deactivate CPU power down SGI */
	plat_ic_end_of_interrupt(CPU_PWR_DOWN_REQ_INTR);

	return (uint64_t)psci_cpu_off();
}
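/*
 * CPU power-down flow, as wired up below: when the PMC reports an
 * EVENT_CPU_PWRDWN notify callback, request_cpu_pwrdwn() calls
 * psci_stop_other_cores() with raise_pwr_down_interrupt() as the callback to
 * send the CPU_PWR_DOWN_REQ_INTR EL3 SGI to every other online core; each of
 * those cores handles the SGI in cpu_pwrdwn_req_handler() above and powers
 * itself off via psci_cpu_off().
 */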
/**
 * raise_pwr_down_interrupt() - Callback function to raise SGI.
 * @mpidr: MPIDR for the target CPU.
 *
 * Raise SGI interrupt to trigger the CPU power down sequence on all the
 * online secondary cores.
 */
static void raise_pwr_down_interrupt(u_register_t mpidr)
{
	plat_ic_raise_el3_sgi((int)CPU_PWR_DOWN_REQ_INTR, mpidr);
}

void request_cpu_pwrdwn(void)
{
	int ret;

	VERBOSE("CPU power down request received\n");

	/* Send powerdown request to online secondary core(s) */
	ret = psci_stop_other_cores(plat_my_core_pos(),
				    (unsigned int)PWRDWN_WAIT_TIMEOUT,
				    raise_pwr_down_interrupt);
	if (ret != (int)PSCI_E_SUCCESS) {
		ERROR("Failed to powerdown secondary core(s)\n");
	}

	/* Clear IPI IRQ */
	pm_ipi_irq_clear(primary_proc);

	/* Deactivate IPI IRQ */
	plat_ic_end_of_interrupt(PLAT_VERSAL_IPI_IRQ);
}

static uint64_t ipi_fiq_handler(uint32_t id, uint32_t flags, void *handle,
				void *cookie)
{
	uint32_t payload[4] = {0};
	enum pm_ret_status ret;
	uint32_t ipi_status, i;

	VERBOSE("Received IPI FIQ from firmware\n");

	console_flush();
	(void)plat_ic_acknowledge_interrupt();

	/* Check status register for each IPI except PMC */
	for (i = IPI_ID_APU; i <= IPI_ID_5; i++) {
		ipi_status = ipi_mb_enquire_status(IPI_ID_APU, i);

		/*
		 * If any agent other than the PMC has generated the IPI FIQ,
		 * send an SGI to the mailbox driver.
		 */
		if ((ipi_status & (uint32_t)IPI_MB_STATUS_RECV_PENDING) > 0U) {
			plat_ic_raise_ns_sgi((int)MBOX_SGI_SHARED_IPI,
					     read_mpidr_el1());
			break;
		}
	}

	/* If the PMC has not generated an interrupt, end the ISR */
	ipi_status = ipi_mb_enquire_status(IPI_ID_APU, IPI_ID_PMC);
	if ((ipi_status & IPI_MB_STATUS_RECV_PENDING) == 0U) {
		plat_ic_end_of_interrupt(id);
		goto end;
	}

	/* Handle the PMC case */
	ret = pm_get_callbackdata(payload, ARRAY_SIZE(payload), 0, 0);
	if (ret != PM_RET_SUCCESS) {
		payload[0] = (uint32_t)ret;
	}

	switch (payload[0]) {
	case PM_INIT_SUSPEND_CB:
		if (sgi != INVALID_SGI) {
			notify_os();
		}
		break;
	case PM_NOTIFY_CB:
		if (sgi != INVALID_SGI) {
			if ((payload[2] == EVENT_CPU_PWRDWN) &&
			    (NODECLASS(payload[1]) == (uint32_t)XPM_NODECLASS_DEVICE)) {
				if (pwrdwn_req_received) {
					pwrdwn_req_received = false;
					request_cpu_pwrdwn();
					(void)psci_cpu_off();
					break;
				} else {
					/*
					 * No action needed, added for MISRA
					 * compliance.
					 */
				}
				pwrdwn_req_received = true;
			} else {
				/*
				 * No action needed, added for MISRA
				 * compliance.
				 */
			}
			notify_os();
		} else if ((payload[2] == EVENT_CPU_PWRDWN) &&
			   (NODECLASS(payload[1]) == (uint32_t)XPM_NODECLASS_DEVICE)) {
			request_cpu_pwrdwn();
			(void)psci_cpu_off();
		} else {
			/*
			 * No action needed, added for MISRA
			 * compliance.
			 */
		}
		break;
	case (uint32_t)PM_RET_ERROR_INVALID_CRC:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid CRC in the payload\n");
		break;
	default:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid IPI payload\n");
		break;
	}

	/* Clear FIQ */
	plat_ic_end_of_interrupt(id);

end:
	return 0;
}
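/*
 * The SGI registered below is the one notify_os() raises towards the rich OS
 * whenever ipi_fiq_handler() above receives a PM_INIT_SUSPEND_CB or
 * PM_NOTIFY_CB callback from the PMC; until the OS registers an SGI, those
 * callbacks are either ignored or handled entirely within EL3.
 */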
/**
 * pm_register_sgi() - PM register the IPI interrupt.
 * @sgi_num: SGI number to be used for communication.
 * @reset: Reset to invalid SGI when reset=1.
 *
 * Return: 0 on success or on reset, -EBUSY if an SGI is already registered,
 *         -EINVAL if the SGI number is out of range.
 *
 * Update the SGI number to be used.
 */
int32_t pm_register_sgi(uint32_t sgi_num, uint32_t reset)
{
	int32_t ret;

	if (reset == 1U) {
		sgi = INVALID_SGI;
		ret = 0;
		goto end;
	}

	if (sgi != INVALID_SGI) {
		ret = -EBUSY;
		goto end;
	}

	if (sgi_num >= GICV3_MAX_SGI_TARGETS) {
		ret = -EINVAL;
		goto end;
	}

	sgi = (uint32_t)sgi_num;
	ret = 0;
end:
	return ret;
}

/**
 * pm_setup() - PM service setup.
 *
 * Return: On success, the initialization function must return 0.
 *         Any other return value will cause the framework to ignore
 *         the service.
 *
 * Initialization functions for Versal power management for
 * communication with PMC.
 *
 * Called from sip_svc_setup initialization function with the
 * rt_svc_init signature.
 */
int32_t pm_setup(void)
{
	int32_t ret = 0;

	pm_ipi_init(primary_proc);
	pm_up = true;
	pwrdwn_req_received = false;

	/* Register SGI handler for CPU power down request */
	ret = request_intr_type_el3(CPU_PWR_DOWN_REQ_INTR, cpu_pwrdwn_req_handler);
	if (ret != 0) {
		WARN("BL31: registering SGI interrupt failed\n");
	}

	/*
	 * Enable IPI IRQ
	 * assume the rich OS is OK to handle callback IRQs now.
	 * Even if we were wrong, it would not enable the IRQ in
	 * the GIC.
	 */
	pm_ipi_irq_enable(primary_proc);

	ret = request_intr_type_el3(PLAT_VERSAL_IPI_IRQ, ipi_fiq_handler);
	if (ret != 0) {
		WARN("BL31: registering IPI interrupt failed\n");
	}

	/* Route the IPI IRQ in 1-of-N mode (any participating PE) */
	gicd_write_irouter(gicv3_driver_data->gicd_base, PLAT_VERSAL_IPI_IRQ, MODE);

	/* Register for idle callback during force power down/restart */
	ret = (int32_t)pm_register_notifier(primary_proc->node_id, EVENT_CPU_PWRDWN,
					    0x0U, 0x1U, SECURE);
	if (ret != 0) {
		WARN("BL31: registering idle callback for restart/force power down failed\n");
	}

	return ret;
}
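/*
 * The three handlers below are dispatched from pm_smc_handler() at the bottom
 * of this file: pass-through EEMI SMCs are offered to
 * eemi_psci_debugfs_handler() first and fall through to eemi_api_handler(),
 * which forwards them to the PMC over IPI, while every other PM SIP function
 * ID is routed to TF_A_specific_handler().
 */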
/**
 * eemi_psci_debugfs_handler() - EEMI API invoked from PSCI.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE or NON_SECURE.
 *
 * These EEMI APIs perform CPU-specific power management tasks.
 * They are invoked either from PSCI or from debugfs in the kernel.
 * These calls require CPU-specific processing before sending an IPI request
 * to the Platform Management Controller, for example enabling/disabling
 * CPU-specific interrupts. This requires a separate handler for these calls,
 * which cannot be handled by the common EEMI handler.
 *
 * Return: If the EEMI API is found, a uintptr_t type address, else 0.
 */
static uintptr_t eemi_psci_debugfs_handler(uint32_t api_id, uint32_t *pm_arg,
					   void *handle, uint32_t security_flag)
{
	enum pm_ret_status ret;
	uint32_t pm_api_id = api_id & API_ID_MASK;

	switch (pm_api_id) {
	case (uint32_t)PM_SELF_SUSPEND:
		ret = pm_self_suspend(pm_arg[0], pm_arg[1], pm_arg[2],
				      pm_arg[3], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	case (uint32_t)PM_FORCE_POWERDOWN:
		ret = pm_force_powerdown(pm_arg[0], (uint8_t)pm_arg[1], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	case (uint32_t)PM_ABORT_SUSPEND:
		ret = pm_abort_suspend(pm_arg[0], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	case (uint32_t)PM_SYSTEM_SHUTDOWN:
		ret = pm_system_shutdown(pm_arg[0], pm_arg[1], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	default:
		return (uintptr_t)0;
	}
}

/**
 * TF_A_specific_handler() - SMC handler for TF-A specific functionality.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE or NON_SECURE.
 *
 * These EEMI calls perform functionality that does not require an
 * IPI transaction. The handler ends in TF-A and returns the requested data to
 * the kernel from TF-A.
 *
 * Return: If a TF-A specific API is found, a uintptr_t type address, else 0.
 */
static uintptr_t TF_A_specific_handler(uint32_t api_id, uint32_t *pm_arg,
				       void *handle, uint32_t security_flag)
{
	switch (api_id) {
	case TF_A_FEATURE_CHECK:
	{
		enum pm_ret_status ret;
		uint32_t result[PAYLOAD_ARG_CNT] = {0U};

		ret = tfa_api_feature_check(pm_arg[0], result);
		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)result[0] << 32U));
	}

	case TF_A_PM_REGISTER_SGI:
	{
		int32_t ret;

		ret = pm_register_sgi(pm_arg[0], pm_arg[1]);
		if (ret != 0) {
			SMC_RET1(handle, (uint32_t)PM_RET_ERROR_ARGS);
		}

		SMC_RET1(handle, (uint32_t)PM_RET_SUCCESS);
	}

	case PM_GET_CALLBACK_DATA:
	{
		uint32_t result[4] = {0};
		enum pm_ret_status ret;

		ret = pm_get_callbackdata(result, ARRAY_SIZE(result), security_flag, 1U);
		if (ret != PM_RET_SUCCESS) {
			result[0] = (uint32_t)ret;
		}

		SMC_RET2(handle,
			 (uint64_t)result[0] | ((uint64_t)result[1] << 32U),
			 (uint64_t)result[2] | ((uint64_t)result[3] << 32U));
	}

	case PM_GET_TRUSTZONE_VERSION:
		SMC_RET1(handle, ((uint64_t)PM_RET_SUCCESS) |
			 (((uint64_t)TZ_VERSION) << 32U));

	default:
		return (uintptr_t)0U;
	}
}
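/*
 * Responses from the PMC for pass-through calls come back as up to
 * RET_PAYLOAD_ARG_CNT 32-bit words; eemi_api_handler() below folds the EEMI
 * status and those words into the four SMC return registers, two 32-bit
 * values per 64-bit register (x0 = status | buf[0] << 32,
 * x1 = buf[1] | buf[2] << 32, and so on).
 */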
/**
 * eemi_api_handler() - Prepare EEMI payload and perform IPI transaction.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE or NON_SECURE.
 *
 * EEMI - Embedded Energy Management Interface is an AMD-Xilinx proprietary
 * protocol to allow communication between the power management controller and
 * different processing clusters.
 *
 * This handler prepares the EEMI protocol payload received from the kernel
 * and performs the IPI transaction.
 *
 * Return: If the EEMI API is found, a uintptr_t type address, else 0.
 */
static uintptr_t eemi_api_handler(uint32_t api_id, const uint32_t *pm_arg,
				  void *handle, uint32_t security_flag)
{
	enum pm_ret_status ret;
	uint32_t buf[RET_PAYLOAD_ARG_CNT] = {0U};
	uint32_t payload[PAYLOAD_ARG_CNT] = {0U};
	uint32_t module_id;

	module_id = (api_id & MODULE_ID_MASK) >> 8U;

	PM_PACK_PAYLOAD7(payload, module_id, security_flag, api_id,
			 pm_arg[0], pm_arg[1], pm_arg[2], pm_arg[3],
			 pm_arg[4], pm_arg[5]);

	ret = pm_ipi_send_sync(primary_proc, payload, (uint32_t *)buf,
			       RET_PAYLOAD_ARG_CNT);

	SMC_RET4(handle, (uint64_t)ret | ((uint64_t)buf[0] << 32U),
		 (uint64_t)buf[1] | ((uint64_t)buf[2] << 32U),
		 (uint64_t)buf[3] | ((uint64_t)buf[4] << 32U),
		 (uint64_t)buf[5]);
}

/**
 * pm_smc_handler() - SMC handler for PM-API calls coming from EL1/EL2.
 * @smc_fid: Function Identifier.
 * @x1: SMC64 Arguments from kernel.
 * @x2: SMC64 Arguments from kernel.
 * @x3: SMC64 Arguments from kernel (upper 32-bits).
 * @x4: Unused.
 * @cookie: Unused.
 * @handle: Pointer to caller's context structure.
 * @flags: SECURE or NON_SECURE.
 *
 * Return: Unused.
 *
 * Determines whether smc_fid is a valid and supported PM SMC Function ID from
 * the list of pm_api_ids; otherwise completes the request with the unknown
 * SMC Function ID.
 *
 * The SMC calls for the PM service are forwarded from the SIP Service SMC
 * handler function with the rt_svc_handle signature.
 */
uint64_t pm_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
			uint64_t x4, const void *cookie, void *handle, uint64_t flags)
{
	uintptr_t ret;
	uint32_t pm_arg[PAYLOAD_ARG_CNT] = {0};
	uint32_t security_flag = NON_SECURE;
	uint32_t api_id;
	bool status = false, status_tmp = false;
	uint64_t x[4] = {x1, x2, x3, x4};

	/* Handle the case where PM wasn't initialized properly */
	if (pm_up == false) {
		SMC_RET1(handle, SMC_UNK);
	}

	/*
	 * Mark BIT24 payload (i.e. the 1st bit of pm_arg[3]) as secure (0)
	 * if the SMC caller is secure.
	 *
	 * Add a redundant macro call to make the code immune to glitches.
	 */
	SECURE_REDUNDANT_CALL(status, status_tmp, is_caller_secure, flags);
	if ((status != false) && (status_tmp != false)) {
		security_flag = SECURE;
	}

	if ((smc_fid & FUNCID_NUM_MASK) == PASS_THROUGH_FW_CMD_ID) {
		api_id = lower_32_bits(x[0]);

		EXTRACT_ARGS(pm_arg, x);

		ret = eemi_psci_debugfs_handler(api_id, pm_arg, handle, (uint32_t)flags);
		if (ret != (uintptr_t)0) {
			return ret;
		}

		return eemi_api_handler(api_id, pm_arg, handle, security_flag);
	}

	pm_arg[0] = (uint32_t)x1;
	pm_arg[1] = (uint32_t)(x1 >> 32U);
	pm_arg[2] = (uint32_t)x2;
	pm_arg[3] = (uint32_t)(x2 >> 32U);
	pm_arg[4] = (uint32_t)x3;
	(void)(x4);
	api_id = smc_fid & FUNCID_NUM_MASK;

	ret = TF_A_specific_handler(api_id, pm_arg, handle, security_flag);

	return ret;
}
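/*
 * Illustrative register layout of a pass-through EEMI SMC as unpacked by
 * EXTRACT_ARGS() in the pass-through branch above. This sketch assumes
 * PAYLOAD_ARG_CNT is 7 (one header word plus six argument words); adjust if
 * the payload definitions differ:
 *
 *   x1[31:0]  - EEMI API ID        x1[63:32] - pm_arg[0]
 *   x2[31:0]  - pm_arg[1]          x2[63:32] - pm_arg[2]
 *   x3[31:0]  - pm_arg[3]          x3[63:32] - pm_arg[4]
 *   x4[31:0]  - pm_arg[5]
 */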