/*
 * Copyright (c) 2019-2022, Xilinx, Inc. All rights reserved.
 * Copyright (c) 2022-2024, Advanced Micro Devices, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * Top-level SMC handler for Versal power management calls and
 * IPI setup functions for communication with PMC.
 */

#include <errno.h>
#include <stdbool.h>

#include "../drivers/arm/gic/v3/gicv3_private.h"

#include <common/runtime_svc.h>
#include <drivers/arm/gicv3.h>
#include <lib/psci/psci.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>

#include <plat_private.h>
#include "pm_api_sys.h"
#include "pm_client.h"
#include "pm_ipi.h"
#include "pm_svc_main.h"

#define MODE				0x80000000U

#define XSCUGIC_SGIR_EL1_INITID_SHIFT	24U
#define INVALID_SGI			0xFFU
#define PM_INIT_SUSPEND_CB		(30U)
#define PM_NOTIFY_CB			(32U)
#define EVENT_CPU_PWRDWN		(4U)
#define MBOX_SGI_SHARED_IPI		(7U)

/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 */
#define upper_32_bits(n)	((uint32_t)((n) >> 32U))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n)	((uint32_t)((n) & 0xffffffffU))

/**
 * EXTRACT_ARGS - extracts 32-bit payloads from 64-bit SMC arguments
 * @pm_arg: array of 32-bit payloads
 * @x: array of 64-bit SMC arguments
 */
#define EXTRACT_ARGS(pm_arg, x)						\
	for (uint32_t i = 0U; i < (PAYLOAD_ARG_CNT - 1U); i++) {	\
		if ((i % 2U) != 0U) {					\
			pm_arg[i] = lower_32_bits(x[(i / 2U) + 1U]);	\
		} else {						\
			pm_arg[i] = upper_32_bits(x[i / 2U]);		\
		}							\
	}

/* 1 sec of wait timeout for secondary core down */
#define PWRDWN_WAIT_TIMEOUT	(1000U)

DEFINE_RENAME_SYSREG_RW_FUNCS(icc_asgi1r_el1, S3_0_C12_C11_6)

/* pm_up = true - UP, pm_up = false - DOWN */
static bool pm_up;
static uint32_t sgi = (uint32_t)INVALID_SGI;
bool pwrdwn_req_received;

static void notify_os(void)
{
	plat_ic_raise_ns_sgi(sgi, read_mpidr_el1());
}

static uint64_t cpu_pwrdwn_req_handler(uint32_t id, uint32_t flags,
				       void *handle, void *cookie)
{
	(void)id;
	(void)flags;
	(void)handle;
	(void)cookie;
	uint32_t cpu_id = plat_my_core_pos();

	VERBOSE("Powering down CPU %u\n", cpu_id);

	/* Deactivate CPU power down SGI */
	plat_ic_end_of_interrupt(CPU_PWR_DOWN_REQ_INTR);

	return psci_cpu_off();
}

/**
 * raise_pwr_down_interrupt() - Callback function to raise SGI.
 * @mpidr: MPIDR for the target CPU.
 *
 * Raise an SGI to trigger the CPU power down sequence on all the
 * online secondary cores.
 */
static void raise_pwr_down_interrupt(u_register_t mpidr)
{
	plat_ic_raise_el3_sgi(CPU_PWR_DOWN_REQ_INTR, mpidr);
}
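
/**
 * request_cpu_pwrdwn() - Handle a CPU power down request from the PMC.
 *
 * Called from the IPI FIQ handler when the PMC firmware signals
 * EVENT_CPU_PWRDWN. Stops the other online cores by raising the EL3 power
 * down SGI to them, then clears and deactivates the IPI IRQ so that it does
 * not fire again once this core is powered off.
 */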
void request_cpu_pwrdwn(void)
{
	enum pm_ret_status ret;

	VERBOSE("CPU power down request received\n");

	/* Send powerdown request to online secondary core(s) */
	ret = psci_stop_other_cores(plat_my_core_pos(), PWRDWN_WAIT_TIMEOUT,
				    raise_pwr_down_interrupt);
	if (ret != (uint32_t)PSCI_E_SUCCESS) {
		ERROR("Failed to powerdown secondary core(s)\n");
	}

	/* Clear IPI IRQ */
	pm_ipi_irq_clear(primary_proc);

	/* Deactivate IPI IRQ */
	plat_ic_end_of_interrupt(PLAT_VERSAL_IPI_IRQ);
}

static uint64_t ipi_fiq_handler(uint32_t id, uint32_t flags, void *handle,
				void *cookie)
{
	(void)flags;
	(void)handle;
	(void)cookie;
	uint32_t payload[4] = {0};
	enum pm_ret_status ret;
	int ipi_status, i;

	VERBOSE("Received IPI FIQ from firmware\n");

	console_flush();
	(void)plat_ic_acknowledge_interrupt();

	/* Check status register for each IPI except PMC */
	for (i = (int32_t)IPI_ID_APU; i <= IPI_ID_5; i++) {
		ipi_status = ipi_mb_enquire_status(IPI_ID_APU, i);

		/*
		 * If any agent other than the PMC has generated an IPI FIQ,
		 * send an SGI to the mailbox driver.
		 */
		if ((uint32_t)ipi_status & IPI_MB_STATUS_RECV_PENDING) {
			plat_ic_raise_ns_sgi(MBOX_SGI_SHARED_IPI, read_mpidr_el1());
			break;
		}
	}

	/* If the PMC has not generated an interrupt, end the ISR */
	ipi_status = ipi_mb_enquire_status(IPI_ID_APU, IPI_ID_PMC);
	if (((uint32_t)ipi_status & IPI_MB_STATUS_RECV_PENDING) == 0U) {
		plat_ic_end_of_interrupt(id);
		return 0;
	}

	/* Handle PMC case */
	ret = pm_get_callbackdata(payload, ARRAY_SIZE(payload), 0, 0);
	if (ret != PM_RET_SUCCESS) {
		payload[0] = (uint32_t)ret;
	}

	switch (payload[0]) {
	case PM_INIT_SUSPEND_CB:
		if (sgi != INVALID_SGI) {
			notify_os();
		}
		break;
	case PM_NOTIFY_CB:
		if (sgi != INVALID_SGI) {
			if (payload[2] == EVENT_CPU_PWRDWN) {
				if (pwrdwn_req_received) {
					pwrdwn_req_received = false;
					request_cpu_pwrdwn();
					(void)psci_cpu_off();
					break;
				} else {
					pwrdwn_req_received = true;
				}
			}
			notify_os();
		} else if (payload[2] == EVENT_CPU_PWRDWN) {
			request_cpu_pwrdwn();
			(void)psci_cpu_off();
		}
		break;
	case PM_RET_ERROR_INVALID_CRC:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid CRC in the payload\n");
		break;
	default:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid IPI payload\n");
		break;
	}

	/* Clear FIQ */
	plat_ic_end_of_interrupt(id);

	return 0;
}
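
/*
 * The non-secure SGI raised by notify_os() is not fixed at build time: the
 * rich OS selects it at runtime through the TF_A_PM_REGISTER_SGI call, which
 * is dispatched from TF_A_specific_handler() to pm_register_sgi() below.
 */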

/**
 * pm_register_sgi() - Register the SGI used to notify the rich OS.
 * @sgi_num: SGI number to be used for communication.
 * @reset: Reset to invalid SGI when reset=1.
 *
 * Return: 0 on success, -EBUSY if an SGI is already registered, -EINVAL if
 *         sgi_num is not a valid SGI number.
 *
 * Update the SGI number to be used.
 *
 */
int32_t pm_register_sgi(uint32_t sgi_num, uint32_t reset)
{
	if (reset == 1U) {
		sgi = INVALID_SGI;
		return 0;
	}

	if (sgi != INVALID_SGI) {
		return -EBUSY;
	}

	if (sgi_num >= GICV3_MAX_SGI_TARGETS) {
		return -EINVAL;
	}

	sgi = (uint32_t)sgi_num;
	return 0;
}

/**
 * pm_setup() - PM service setup.
 *
 * Return: On success, the initialization function must return 0.
 *         Any other return value will cause the framework to ignore
 *         the service.
 *
 * Initialization functions for Versal power management for
 * communication with PMC.
 *
 * Called from the sip_svc_setup initialization function with the
 * rt_svc_init signature.
 *
 */
int32_t pm_setup(void)
{
	int32_t ret = 0;

	pm_ipi_init(primary_proc);
	pm_up = true;

	/* Register SGI handler for CPU power down request */
	ret = request_intr_type_el3(CPU_PWR_DOWN_REQ_INTR, cpu_pwrdwn_req_handler);
	if (ret != 0) {
		WARN("BL31: registering SGI interrupt failed\n");
	}

	/*
	 * Enable the IPI IRQ and assume the rich OS is ready to handle
	 * callback IRQs now. Even if we were wrong, this would not enable
	 * the IRQ in the GIC.
	 */
	pm_ipi_irq_enable(primary_proc);

	ret = request_intr_type_el3(PLAT_VERSAL_IPI_IRQ, ipi_fiq_handler);
	if (ret != 0) {
		WARN("BL31: registering IPI interrupt failed\n");
	}

	gicd_write_irouter(gicv3_driver_data->gicd_base, PLAT_VERSAL_IPI_IRQ, MODE);

	/* Register for idle callback during force power down/restart */
	ret = (int32_t)pm_register_notifier(primary_proc->node_id, EVENT_CPU_PWRDWN,
					    0x0U, 0x1U, SECURE_FLAG);
	if (ret != 0) {
		WARN("BL31: registering idle callback for restart/force power down failed\n");
	}

	return ret;
}
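
/*
 * Return convention shared by most of the EEMI handlers below: the PM
 * firmware status is placed in the lower 32 bits of the first return
 * register and any additional response words are packed two per 64-bit
 * register by the SMC_RETx macros. The PM_QUERY_DATA name queries in
 * eemi_handler() are the exception and return the name words directly.
 */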

/**
 * eemi_for_compatibility() - EEMI calls handler for deprecated calls.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * Return: If the EEMI API is found then a uintptr_t type address, else 0.
 *
 * Some EEMI APIs' use cases need to be changed in the Linux driver so they
 * can take advantage of the common EEMI handler in TF-A. For now, the old
 * implementation of these APIs is kept to maintain backward compatibility
 * until their use in the Linux driver changes.
 *
 */
static uintptr_t eemi_for_compatibility(uint32_t api_id, uint32_t *pm_arg,
					void *handle, uint32_t security_flag)
{
	enum pm_ret_status ret;

	switch (api_id) {

	case (uint32_t)PM_FEATURE_CHECK:
	{
		uint32_t result[RET_PAYLOAD_ARG_CNT] = {0U};

		ret = pm_feature_check(pm_arg[0], result, security_flag);
		SMC_RET2(handle, (uint64_t)ret | ((uint64_t)result[0] << 32U),
			 (uint64_t)result[1] | ((uint64_t)result[2] << 32U));
	}

	case PM_LOAD_PDI:
	{
		ret = pm_load_pdi(pm_arg[0], pm_arg[1], pm_arg[2],
				  security_flag);
		SMC_RET1(handle, (uint64_t)ret);
	}

	default:
		return (uintptr_t)0;
	}
}

/**
 * eemi_psci_debugfs_handler() - EEMI API invoked from PSCI.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * These EEMI APIs perform CPU-specific power management tasks and are
 * invoked either from PSCI or from debugfs in the kernel. They require
 * CPU-specific processing before sending an IPI request to the Platform
 * Management Controller, for example enabling/disabling CPU-specific
 * interrupts. They therefore need a separate handler and cannot be
 * handled by the common EEMI handler.
 *
 * Return: If the EEMI API is found then a uintptr_t type address, else 0.
 *
 */
static uintptr_t eemi_psci_debugfs_handler(uint32_t api_id, uint32_t *pm_arg,
					   void *handle, uint32_t security_flag)
{
	enum pm_ret_status ret;

	switch (api_id) {

	case (uint32_t)PM_SELF_SUSPEND:
		ret = pm_self_suspend(pm_arg[0], pm_arg[1], pm_arg[2],
				      pm_arg[3], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	case (uint32_t)PM_FORCE_POWERDOWN:
		ret = pm_force_powerdown(pm_arg[0], pm_arg[1], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	case (uint32_t)PM_REQ_SUSPEND:
		ret = pm_req_suspend(pm_arg[0], pm_arg[1], pm_arg[2],
				     pm_arg[3], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	case (uint32_t)PM_ABORT_SUSPEND:
		ret = pm_abort_suspend(pm_arg[0], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	case (uint32_t)PM_SYSTEM_SHUTDOWN:
		ret = pm_system_shutdown(pm_arg[0], pm_arg[1], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	default:
		return (uintptr_t)0;
	}
}

/**
 * TF_A_specific_handler() - SMC handler for TF-A specific functionality.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * These EEMI calls perform functionality that does not require an IPI
 * transaction. The handler ends in TF-A and returns the requested data to
 * the kernel from TF-A.
 *
 * Return: If a TF-A specific API is found then a uintptr_t type address, else 0.
 *
 */
static uintptr_t TF_A_specific_handler(uint32_t api_id, uint32_t *pm_arg,
				       void *handle, uint32_t security_flag)
{
	switch (api_id) {

	case TF_A_FEATURE_CHECK:
	{
		enum pm_ret_status ret;
		uint32_t result[PAYLOAD_ARG_CNT] = {0U};

		ret = eemi_feature_check(pm_arg[0], result);
		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)result[0] << 32U));
	}

	case TF_A_PM_REGISTER_SGI:
	{
		int32_t ret;

		ret = pm_register_sgi(pm_arg[0], pm_arg[1]);
		if (ret != 0) {
			SMC_RET1(handle, (uint32_t)PM_RET_ERROR_ARGS);
		}

		SMC_RET1(handle, (uint32_t)PM_RET_SUCCESS);
	}

	case PM_GET_CALLBACK_DATA:
	{
		uint32_t result[4] = {0};
		enum pm_ret_status ret;

		ret = pm_get_callbackdata(result, ARRAY_SIZE(result), security_flag, 1U);
		if (ret != 0) {
			result[0] = (uint32_t)ret;
		}

		SMC_RET2(handle,
			 (uint64_t)result[0] | ((uint64_t)result[1] << 32U),
			 (uint64_t)result[2] | ((uint64_t)result[3] << 32U));
	}

	case PM_GET_TRUSTZONE_VERSION:
		SMC_RET1(handle, (uint64_t)PM_RET_SUCCESS |
			 ((uint64_t)TZ_VERSION << 32U));

	default:
		return (uintptr_t)0;
	}
}
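
/*
 * Two SMC ABIs reach the generic EEMI paths below: with the legacy ABI the
 * EEMI API ID is encoded in the SMC function ID and the call is served by
 * eemi_handler(), while the pass-through ABI (PASS_THROUGH_FW_CMD_ID)
 * carries the full command word, including the module ID, in the first
 * argument register and is served by eemi_api_handler().
 */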

/**
 * eemi_handler() - Prepare EEMI payload and perform IPI transaction.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * EEMI - Embedded Energy Management Interface is a Xilinx proprietary
 * protocol that allows communication between the power management
 * controller and different processing clusters.
 *
 * This handler prepares the EEMI protocol payload received from the kernel
 * and performs the IPI transaction.
 *
 * Return: If the EEMI API is found then a uintptr_t type address, else 0.
 *
 */
static uintptr_t eemi_handler(uint32_t api_id, uint32_t *pm_arg,
			      void *handle, uint32_t security_flag)
{
	enum pm_ret_status ret;
	uint32_t buf[RET_PAYLOAD_ARG_CNT] = {0};

	ret = pm_handle_eemi_call(security_flag, api_id, pm_arg[0], pm_arg[1],
				  pm_arg[2], pm_arg[3], pm_arg[4],
				  (uint64_t *)buf);
	/*
	 * Two IOCTLs of the pm_query_data API, to get the clock name and the
	 * pinctrl function name, receive 5 words of response from the
	 * firmware. Currently the Linux driver can receive only 4 words from
	 * TF-A, so these need to be handled separately from other EEMI calls.
	 */
	if (api_id == (uint32_t)PM_QUERY_DATA) {
		if (((pm_arg[0] == (uint32_t)XPM_QID_CLOCK_GET_NAME) ||
		     (pm_arg[0] == (uint32_t)XPM_QID_PINCTRL_GET_FUNCTION_NAME)) &&
		    (ret == PM_RET_SUCCESS)) {
			SMC_RET2(handle, (uint64_t)buf[0] | ((uint64_t)buf[1] << 32U),
				 (uint64_t)buf[2] | ((uint64_t)buf[3] << 32U));
		}
	}

	SMC_RET2(handle, (uint64_t)ret | ((uint64_t)buf[0] << 32U),
		 (uint64_t)buf[1] | ((uint64_t)buf[2] << 32U));
}

/**
 * eemi_api_handler() - Prepare EEMI payload and perform IPI transaction.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * EEMI - Embedded Energy Management Interface is an AMD-Xilinx proprietary
 * protocol that allows communication between the power management
 * controller and different processing clusters.
 *
 * This handler prepares the EEMI protocol payload received from the kernel
 * and performs the IPI transaction.
 *
 * Return: If the EEMI API is found then a uintptr_t type address, else 0.
 */
static uintptr_t eemi_api_handler(uint32_t api_id, const uint32_t *pm_arg,
				  void *handle, uint32_t security_flag)
{
	enum pm_ret_status ret;
	uint32_t buf[RET_PAYLOAD_ARG_CNT] = {0U};
	uint32_t payload[PAYLOAD_ARG_CNT] = {0U};
	uint32_t module_id;

	module_id = (api_id & MODULE_ID_MASK) >> 8U;

	PM_PACK_PAYLOAD7(payload, module_id, security_flag, api_id,
			 pm_arg[0], pm_arg[1], pm_arg[2], pm_arg[3],
			 pm_arg[4], pm_arg[5]);

	ret = pm_ipi_send_sync(primary_proc, payload, (uint32_t *)buf,
			       RET_PAYLOAD_ARG_CNT);

	SMC_RET4(handle, (uint64_t)ret | ((uint64_t)buf[0] << 32U),
		 (uint64_t)buf[1] | ((uint64_t)buf[2] << 32U),
		 (uint64_t)buf[3] | ((uint64_t)buf[4] << 32U),
		 (uint64_t)buf[5]);
}

/**
 * pm_smc_handler() - SMC handler for PM-API calls coming from EL1/EL2.
 * @smc_fid: Function Identifier.
 * @x1: SMC64 Arguments from kernel.
 * @x2: SMC64 Arguments from kernel.
 * @x3: SMC64 Arguments from kernel (upper 32-bits).
 * @x4: SMC64 Argument from kernel (used by the pass-through ABI only).
 * @cookie: Unused.
 * @handle: Pointer to caller's context structure.
 * @flags: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * Return: Unused.
 *
 * Determines that smc_fid is a valid and supported PM SMC Function ID from
 * the list of pm_api_ids, otherwise completes the request with the unknown
 * SMC Function ID.
 *
 * The SMC calls for the PM service are forwarded from the SIP Service SMC
 * handler function with the rt_svc_handle signature.
 *
 */
uint64_t pm_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
			uint64_t x4, const void *cookie, void *handle, uint64_t flags)
{
	(void)cookie;
	uintptr_t ret;
	uint32_t pm_arg[PAYLOAD_ARG_CNT] = {0};
	uint32_t security_flag = NON_SECURE_FLAG;
	uint32_t api_id;
	bool status = false, status_tmp = false;
	const uint64_t x[4] = {x1, x2, x3, x4};

	/* Handle case where PM wasn't initialized properly */
	if (pm_up == false) {
		SMC_RET1(handle, SMC_UNK);
	}

	/*
	 * Mark the BIT24 payload (i.e. the 1st bit of pm_arg[3]) as secure (0)
	 * if the SMC call is secure.
	 *
	 * Add a redundant macro call to make the code immune to glitches.
	 */
	SECURE_REDUNDANT_CALL(status, status_tmp, is_caller_secure, flags);
	if ((status != false) && (status_tmp != false)) {
		security_flag = SECURE_FLAG;
	}

	if ((smc_fid & FUNCID_NUM_MASK) == PASS_THROUGH_FW_CMD_ID) {
		api_id = lower_32_bits(x[0]);

		EXTRACT_ARGS(pm_arg, x);

		return eemi_api_handler(api_id, pm_arg, handle, security_flag);
	}
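
	/*
	 * Legacy EEMI ABI: the API ID comes from the SMC function ID and
	 * x1..x3 carry up to five 32-bit argument words, packed two per
	 * 64-bit register.
	 */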
	pm_arg[0] = (uint32_t)x1;
	pm_arg[1] = (uint32_t)(x1 >> 32U);
	pm_arg[2] = (uint32_t)x2;
	pm_arg[3] = (uint32_t)(x2 >> 32U);
	pm_arg[4] = (uint32_t)x3;
	(void)(x4);
	api_id = smc_fid & FUNCID_NUM_MASK;

	ret = eemi_for_compatibility(api_id, pm_arg, handle, security_flag);
	if (ret != (uintptr_t)0) {
		return ret;
	}

	ret = eemi_psci_debugfs_handler(api_id, pm_arg, handle, flags);
	if (ret != (uintptr_t)0) {
		return ret;
	}

	ret = TF_A_specific_handler(api_id, pm_arg, handle, security_flag);
	if (ret != (uintptr_t)0) {
		return ret;
	}

	ret = eemi_handler(api_id, pm_arg, handle, security_flag);

	return ret;
}