1 /* 2 * Copyright (c) 2019-2022, Xilinx, Inc. All rights reserved. 3 * Copyright (c) 2022-2024, Advanced Micro Devices, Inc. All rights reserved. 4 * 5 * SPDX-License-Identifier: BSD-3-Clause 6 */ 7 8 /* 9 * Top-level SMC handler for Versal power management calls and 10 * IPI setup functions for communication with PMC. 11 */ 12 13 #include <errno.h> 14 #include <stdbool.h> 15 16 #include "../drivers/arm/gic/v3/gicv3_private.h" 17 18 #include <common/runtime_svc.h> 19 #include <drivers/arm/gicv3.h> 20 #include <lib/psci/psci.h> 21 #include <plat/arm/common/plat_arm.h> 22 #include <plat/common/platform.h> 23 24 #include <plat_private.h> 25 #include "pm_api_sys.h" 26 #include "pm_client.h" 27 #include "pm_ipi.h" 28 #include "pm_svc_main.h" 29 30 #define MODE 0x80000000U 31 32 #define XSCUGIC_SGIR_EL1_INITID_SHIFT 24U 33 #define INVALID_SGI 0xFFU 34 #define PM_INIT_SUSPEND_CB (30U) 35 #define PM_NOTIFY_CB (32U) 36 #define EVENT_CPU_PWRDWN (4U) 37 #define MBOX_SGI_SHARED_IPI (7U) 38 39 /** 40 * upper_32_bits - return bits 32-63 of a number 41 * @n: the number we're accessing 42 */ 43 #define upper_32_bits(n) ((uint32_t)((n) >> 32U)) 44 45 /** 46 * lower_32_bits - return bits 0-31 of a number 47 * @n: the number we're accessing 48 */ 49 #define lower_32_bits(n) ((uint32_t)((n) & 0xffffffffU)) 50 51 /** 52 * EXTRACT_SMC_ARGS - extracts 32-bit payloads from 64-bit SMC arguments 53 * @pm_arg: array of 32-bit payloads 54 * @x: array of 64-bit SMC arguments 55 */ 56 #define EXTRACT_ARGS(pm_arg, x) \ 57 for (uint32_t i = 0U; i < (PAYLOAD_ARG_CNT - 1U); i++) { \ 58 if ((i % 2U) != 0U) { \ 59 pm_arg[i] = lower_32_bits(x[(i / 2U) + 1U]); \ 60 } else { \ 61 pm_arg[i] = upper_32_bits(x[i / 2U]); \ 62 } \ 63 } 64 65 /* 1 sec of wait timeout for secondary core down */ 66 #define PWRDWN_WAIT_TIMEOUT (1000U) 67 DEFINE_RENAME_SYSREG_RW_FUNCS(icc_asgi1r_el1, S3_0_C12_C11_6) 68 69 /* pm_up = true - UP, pm_up = false - DOWN */ 70 static bool pm_up; 71 static uint32_t sgi = 
(uint32_t)INVALID_SGI; 72 bool pwrdwn_req_received; 73 74 static void notify_os(void) 75 { 76 plat_ic_raise_ns_sgi(sgi, read_mpidr_el1()); 77 } 78 79 static uint64_t cpu_pwrdwn_req_handler(uint32_t id, uint32_t flags, 80 void *handle, void *cookie) 81 { 82 uint32_t cpu_id = plat_my_core_pos(); 83 84 VERBOSE("Powering down CPU %d\n", cpu_id); 85 86 /* Deactivate CPU power down SGI */ 87 plat_ic_end_of_interrupt(CPU_PWR_DOWN_REQ_INTR); 88 89 return psci_cpu_off(); 90 } 91 92 /** 93 * raise_pwr_down_interrupt() - Callback function to raise SGI. 94 * @mpidr: MPIDR for the target CPU. 95 * 96 * Raise SGI interrupt to trigger the CPU power down sequence on all the 97 * online secondary cores. 98 */ 99 static void raise_pwr_down_interrupt(u_register_t mpidr) 100 { 101 plat_ic_raise_el3_sgi(CPU_PWR_DOWN_REQ_INTR, mpidr); 102 } 103 104 void request_cpu_pwrdwn(void) 105 { 106 enum pm_ret_status ret; 107 108 VERBOSE("CPU power down request received\n"); 109 110 /* Send powerdown request to online secondary core(s) */ 111 ret = psci_stop_other_cores(PWRDWN_WAIT_TIMEOUT, raise_pwr_down_interrupt); 112 if (ret != PSCI_E_SUCCESS) { 113 ERROR("Failed to powerdown secondary core(s)\n"); 114 } 115 116 /* Clear IPI IRQ */ 117 pm_ipi_irq_clear(primary_proc); 118 119 /* Deactivate IPI IRQ */ 120 plat_ic_end_of_interrupt(PLAT_VERSAL_IPI_IRQ); 121 } 122 123 static uint64_t ipi_fiq_handler(uint32_t id, uint32_t flags, void *handle, 124 void *cookie) 125 { 126 uint32_t payload[4] = {0}; 127 enum pm_ret_status ret; 128 int ipi_status, i; 129 130 VERBOSE("Received IPI FIQ from firmware\n"); 131 132 console_flush(); 133 (void)plat_ic_acknowledge_interrupt(); 134 135 /* Check status register for each IPI except PMC */ 136 for (i = IPI_ID_APU; i <= IPI_ID_5; i++) { 137 ipi_status = ipi_mb_enquire_status(IPI_ID_APU, i); 138 139 /* If any agent other than PMC has generated IPI FIQ then send SGI to mbox driver */ 140 if (ipi_status & IPI_MB_STATUS_RECV_PENDING) { 141 
plat_ic_raise_ns_sgi(MBOX_SGI_SHARED_IPI, read_mpidr_el1()); 142 break; 143 } 144 } 145 146 /* If PMC has not generated interrupt then end ISR */ 147 ipi_status = ipi_mb_enquire_status(IPI_ID_APU, IPI_ID_PMC); 148 if ((ipi_status & IPI_MB_STATUS_RECV_PENDING) == 0) { 149 plat_ic_end_of_interrupt(id); 150 return 0; 151 } 152 153 /* Handle PMC case */ 154 ret = pm_get_callbackdata(payload, ARRAY_SIZE(payload), 0, 0); 155 if (ret != PM_RET_SUCCESS) { 156 payload[0] = ret; 157 } 158 159 switch (payload[0]) { 160 case PM_INIT_SUSPEND_CB: 161 if (sgi != INVALID_SGI) { 162 notify_os(); 163 } 164 break; 165 case PM_NOTIFY_CB: 166 if (sgi != INVALID_SGI) { 167 if (payload[2] == EVENT_CPU_PWRDWN) { 168 if (pwrdwn_req_received) { 169 pwrdwn_req_received = false; 170 request_cpu_pwrdwn(); 171 (void)psci_cpu_off(); 172 break; 173 } else { 174 pwrdwn_req_received = true; 175 } 176 } 177 notify_os(); 178 } else if (payload[2] == EVENT_CPU_PWRDWN) { 179 request_cpu_pwrdwn(); 180 (void)psci_cpu_off(); 181 } 182 break; 183 case PM_RET_ERROR_INVALID_CRC: 184 pm_ipi_irq_clear(primary_proc); 185 WARN("Invalid CRC in the payload\n"); 186 break; 187 188 default: 189 pm_ipi_irq_clear(primary_proc); 190 WARN("Invalid IPI payload\n"); 191 break; 192 } 193 194 /* Clear FIQ */ 195 plat_ic_end_of_interrupt(id); 196 197 return 0; 198 } 199 200 /** 201 * pm_register_sgi() - PM register the IPI interrupt. 202 * @sgi_num: SGI number to be used for communication. 203 * @reset: Reset to invalid SGI when reset=1. 204 * 205 * Return: On success, the initialization function must return 0. 206 * Any other return value will cause the framework to ignore 207 * the service. 208 * 209 * Update the SGI number to be used. 
210 * 211 */ 212 int32_t pm_register_sgi(uint32_t sgi_num, uint32_t reset) 213 { 214 if (reset == 1U) { 215 sgi = INVALID_SGI; 216 return 0; 217 } 218 219 if (sgi != INVALID_SGI) { 220 return -EBUSY; 221 } 222 223 if (sgi_num >= GICV3_MAX_SGI_TARGETS) { 224 return -EINVAL; 225 } 226 227 sgi = (uint32_t)sgi_num; 228 return 0; 229 } 230 231 /** 232 * pm_setup() - PM service setup. 233 * 234 * Return: On success, the initialization function must return 0. 235 * Any other return value will cause the framework to ignore 236 * the service. 237 * 238 * Initialization functions for Versal power management for 239 * communicaton with PMC. 240 * 241 * Called from sip_svc_setup initialization function with the 242 * rt_svc_init signature. 243 * 244 */ 245 int32_t pm_setup(void) 246 { 247 int32_t ret = 0; 248 249 pm_ipi_init(primary_proc); 250 pm_up = true; 251 252 /* register SGI handler for CPU power down request */ 253 ret = request_intr_type_el3(CPU_PWR_DOWN_REQ_INTR, cpu_pwrdwn_req_handler); 254 if (ret != 0) { 255 WARN("BL31: registering SGI interrupt failed\n"); 256 } 257 258 /* 259 * Enable IPI IRQ 260 * assume the rich OS is OK to handle callback IRQs now. 261 * Even if we were wrong, it would not enable the IRQ in 262 * the GIC. 263 */ 264 pm_ipi_irq_enable(primary_proc); 265 266 ret = request_intr_type_el3(PLAT_VERSAL_IPI_IRQ, ipi_fiq_handler); 267 if (ret != 0) { 268 WARN("BL31: registering IPI interrupt failed\n"); 269 } 270 271 gicd_write_irouter(gicv3_driver_data->gicd_base, PLAT_VERSAL_IPI_IRQ, MODE); 272 273 /* Register for idle callback during force power down/restart */ 274 ret = pm_register_notifier(primary_proc->node_id, EVENT_CPU_PWRDWN, 275 0x0U, 0x1U, SECURE_FLAG); 276 if (ret != 0) { 277 WARN("BL31: registering idle callback for restart/force power down failed\n"); 278 } 279 280 return ret; 281 } 282 283 /** 284 * eemi_for_compatibility() - EEMI calls handler for deprecated calls. 285 * @api_id: identifier for the API being called. 
286 * @pm_arg: pointer to the argument data for the API call. 287 * @handle: Pointer to caller's context structure. 288 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG. 289 * 290 * Return: If EEMI API found then, uintptr_t type address, else 0. 291 * 292 * Some EEMI API's use case needs to be changed in Linux driver, so they 293 * can take advantage of common EEMI handler in TF-A. As of now the old 294 * implementation of these APIs are required to maintain backward compatibility 295 * until their use case in linux driver changes. 296 * 297 */ 298 static uintptr_t eemi_for_compatibility(uint32_t api_id, uint32_t *pm_arg, 299 void *handle, uint32_t security_flag) 300 { 301 enum pm_ret_status ret; 302 303 switch (api_id) { 304 305 case (uint32_t)PM_FEATURE_CHECK: 306 { 307 uint32_t result[RET_PAYLOAD_ARG_CNT] = {0U}; 308 309 ret = pm_feature_check(pm_arg[0], result, security_flag); 310 SMC_RET2(handle, (uint64_t)ret | ((uint64_t)result[0] << 32U), 311 (uint64_t)result[1] | ((uint64_t)result[2] << 32U)); 312 } 313 314 case PM_LOAD_PDI: 315 { 316 ret = pm_load_pdi(pm_arg[0], pm_arg[1], pm_arg[2], 317 security_flag); 318 SMC_RET1(handle, (uint64_t)ret); 319 } 320 321 default: 322 return (uintptr_t)0; 323 } 324 } 325 326 /** 327 * eemi_psci_debugfs_handler() - EEMI API invoked from PSCI. 328 * @api_id: identifier for the API being called. 329 * @pm_arg: pointer to the argument data for the API call. 330 * @handle: Pointer to caller's context structure. 331 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG. 332 * 333 * These EEMI APIs performs CPU specific power management tasks. 334 * These EEMI APIs are invoked either from PSCI or from debugfs in kernel. 335 * These calls require CPU specific processing before sending IPI request to 336 * Platform Management Controller. For example enable/disable CPU specific 337 * interrupts. This requires separate handler for these calls and may not be 338 * handled using common eemi handler. 
339 * 340 * Return: If EEMI API found then, uintptr_t type address, else 0. 341 * 342 */ 343 static uintptr_t eemi_psci_debugfs_handler(uint32_t api_id, uint32_t *pm_arg, 344 void *handle, uint32_t security_flag) 345 { 346 enum pm_ret_status ret; 347 348 switch (api_id) { 349 350 case (uint32_t)PM_SELF_SUSPEND: 351 ret = pm_self_suspend(pm_arg[0], pm_arg[1], pm_arg[2], 352 pm_arg[3], security_flag); 353 SMC_RET1(handle, (u_register_t)ret); 354 355 case (uint32_t)PM_FORCE_POWERDOWN: 356 ret = pm_force_powerdown(pm_arg[0], pm_arg[1], security_flag); 357 SMC_RET1(handle, (u_register_t)ret); 358 359 case (uint32_t)PM_REQ_SUSPEND: 360 ret = pm_req_suspend(pm_arg[0], pm_arg[1], pm_arg[2], 361 pm_arg[3], security_flag); 362 SMC_RET1(handle, (u_register_t)ret); 363 364 case (uint32_t)PM_ABORT_SUSPEND: 365 ret = pm_abort_suspend(pm_arg[0], security_flag); 366 SMC_RET1(handle, (u_register_t)ret); 367 368 case (uint32_t)PM_SYSTEM_SHUTDOWN: 369 ret = pm_system_shutdown(pm_arg[0], pm_arg[1], security_flag); 370 SMC_RET1(handle, (u_register_t)ret); 371 372 default: 373 return (uintptr_t)0; 374 } 375 } 376 377 /** 378 * TF_A_specific_handler() - SMC handler for TF-A specific functionality. 379 * @api_id: identifier for the API being called. 380 * @pm_arg: pointer to the argument data for the API call. 381 * @handle: Pointer to caller's context structure. 382 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG. 383 * 384 * These EEMI calls performs functionality that does not require 385 * IPI transaction. The handler ends in TF-A and returns requested data to 386 * kernel from TF-A. 
387 * 388 * Return: If TF-A specific API found then, uintptr_t type address, else 0 389 * 390 */ 391 static uintptr_t TF_A_specific_handler(uint32_t api_id, uint32_t *pm_arg, 392 void *handle, uint32_t security_flag) 393 { 394 switch (api_id) { 395 396 case TF_A_FEATURE_CHECK: 397 { 398 enum pm_ret_status ret; 399 uint32_t result[PAYLOAD_ARG_CNT] = {0U}; 400 401 ret = eemi_feature_check(pm_arg[0], result); 402 SMC_RET1(handle, (uint64_t)ret | ((uint64_t)result[0] << 32U)); 403 } 404 405 case TF_A_PM_REGISTER_SGI: 406 { 407 int32_t ret; 408 409 ret = pm_register_sgi(pm_arg[0], pm_arg[1]); 410 if (ret != 0) { 411 SMC_RET1(handle, (uint32_t)PM_RET_ERROR_ARGS); 412 } 413 414 SMC_RET1(handle, (uint32_t)PM_RET_SUCCESS); 415 } 416 417 case PM_GET_CALLBACK_DATA: 418 { 419 uint32_t result[4] = {0}; 420 enum pm_ret_status ret; 421 422 ret = pm_get_callbackdata(result, ARRAY_SIZE(result), security_flag, 1U); 423 if (ret != 0) { 424 result[0] = ret; 425 } 426 427 SMC_RET2(handle, 428 (uint64_t)result[0] | ((uint64_t)result[1] << 32U), 429 (uint64_t)result[2] | ((uint64_t)result[3] << 32U)); 430 } 431 432 case PM_GET_TRUSTZONE_VERSION: 433 SMC_RET1(handle, (uint64_t)PM_RET_SUCCESS | 434 ((uint64_t)TZ_VERSION << 32U)); 435 436 default: 437 return (uintptr_t)0; 438 } 439 } 440 441 /** 442 * eemi_handler() - Prepare EEMI payload and perform IPI transaction. 443 * @api_id: identifier for the API being called. 444 * @pm_arg: pointer to the argument data for the API call. 445 * @handle: Pointer to caller's context structure. 446 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG. 447 * 448 * EEMI - Embedded Energy Management Interface is Xilinx proprietary protocol 449 * to allow communication between power management controller and different 450 * processing clusters. 451 * 452 * This handler prepares EEMI protocol payload received from kernel and performs 453 * IPI transaction. 
454 * 455 * Return: If EEMI API found then, uintptr_t type address, else 0 456 * 457 */ 458 static uintptr_t eemi_handler(uint32_t api_id, uint32_t *pm_arg, 459 void *handle, uint32_t security_flag) 460 { 461 enum pm_ret_status ret; 462 uint32_t buf[RET_PAYLOAD_ARG_CNT] = {0}; 463 464 ret = pm_handle_eemi_call(security_flag, api_id, pm_arg[0], pm_arg[1], 465 pm_arg[2], pm_arg[3], pm_arg[4], 466 (uint64_t *)buf); 467 /* 468 * Two IOCTLs, to get clock name and pinctrl name of pm_query_data API 469 * receives 5 words of respoonse from firmware. Currently linux driver can 470 * receive only 4 words from TF-A. So, this needs to be handled separately 471 * than other eemi calls. 472 */ 473 if (api_id == (uint32_t)PM_QUERY_DATA) { 474 if ((pm_arg[0] == XPM_QID_CLOCK_GET_NAME || 475 pm_arg[0] == XPM_QID_PINCTRL_GET_FUNCTION_NAME) && 476 ret == PM_RET_SUCCESS) { 477 SMC_RET2(handle, (uint64_t)buf[0] | ((uint64_t)buf[1] << 32U), 478 (uint64_t)buf[2] | ((uint64_t)buf[3] << 32U)); 479 } 480 } 481 482 SMC_RET2(handle, (uint64_t)ret | ((uint64_t)buf[0] << 32U), 483 (uint64_t)buf[1] | ((uint64_t)buf[2] << 32U)); 484 } 485 486 /** 487 * eemi_api_handler() - Prepare EEMI payload and perform IPI transaction. 488 * @api_id: identifier for the API being called. 489 * @pm_arg: pointer to the argument data for the API call. 490 * @handle: Pointer to caller's context structure. 491 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG. 492 * 493 * EEMI - Embedded Energy Management Interface is AMD-Xilinx proprietary 494 * protocol to allow communication between power management controller and 495 * different processing clusters. 496 * 497 * This handler prepares EEMI protocol payload received from kernel and performs 498 * IPI transaction. 
499 * 500 * Return: If EEMI API found then, uintptr_t type address, else 0 501 */ 502 static uintptr_t eemi_api_handler(uint32_t api_id, const uint32_t *pm_arg, 503 void *handle, uint32_t security_flag) 504 { 505 enum pm_ret_status ret; 506 uint32_t buf[RET_PAYLOAD_ARG_CNT] = {0U}; 507 uint32_t payload[PAYLOAD_ARG_CNT] = {0U}; 508 uint32_t module_id; 509 510 module_id = (api_id & MODULE_ID_MASK) >> 8U; 511 512 PM_PACK_PAYLOAD7(payload, module_id, security_flag, api_id, 513 pm_arg[0], pm_arg[1], pm_arg[2], pm_arg[3], 514 pm_arg[4], pm_arg[5]); 515 516 ret = pm_ipi_send_sync(primary_proc, payload, (uint32_t *)buf, 517 RET_PAYLOAD_ARG_CNT); 518 519 SMC_RET4(handle, (uint64_t)ret | ((uint64_t)buf[0] << 32U), 520 (uint64_t)buf[1] | ((uint64_t)buf[2] << 32U), 521 (uint64_t)buf[3] | ((uint64_t)buf[4] << 32U), 522 (uint64_t)buf[5]); 523 } 524 525 /** 526 * pm_smc_handler() - SMC handler for PM-API calls coming from EL1/EL2. 527 * @smc_fid: Function Identifier. 528 * @x1: SMC64 Arguments from kernel. 529 * @x2: SMC64 Arguments from kernel. 530 * @x3: SMC64 Arguments from kernel (upper 32-bits). 531 * @x4: Unused. 532 * @cookie: Unused. 533 * @handle: Pointer to caller's context structure. 534 * @flags: SECURE_FLAG or NON_SECURE_FLAG. 535 * 536 * Return: Unused. 537 * 538 * Determines that smc_fid is valid and supported PM SMC Function ID from the 539 * list of pm_api_ids, otherwise completes the request with 540 * the unknown SMC Function ID. 541 * 542 * The SMC calls for PM service are forwarded from SIP Service SMC handler 543 * function with rt_svc_handle signature. 
 *
 */
uint64_t pm_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
			uint64_t x4, const void *cookie, void *handle, uint64_t flags)
{
	uintptr_t ret;
	uint32_t pm_arg[PAYLOAD_ARG_CNT] = {0};
	uint32_t security_flag = NON_SECURE_FLAG;
	uint32_t api_id;
	bool status = false, status_tmp = false;
	/* Gather the SMC64 arguments so EXTRACT_ARGS can index them */
	uint64_t x[4] = {x1, x2, x3, x4};

	/* Handle case where PM wasn't initialized properly */
	if (pm_up == false) {
		SMC_RET1(handle, SMC_UNK);
	}

	/*
	 * Mark BIT24 payload (i.e 1st bit of pm_arg[3] ) as secure (0)
	 * if smc called is secure
	 *
	 * Add redundant macro call to immune the code from glitches
	 */
	SECURE_REDUNDANT_CALL(status, status_tmp, is_caller_secure, flags);
	if ((status != false) && (status_tmp != false)) {
		security_flag = SECURE_FLAG;
	}

	/* Pass-through firmware commands carry the API id in the low half of x1 */
	if ((smc_fid & FUNCID_NUM_MASK) == PASS_THROUGH_FW_CMD_ID) {
		api_id = lower_32_bits(x[0]);

		EXTRACT_ARGS(pm_arg, x);

		return eemi_api_handler(api_id, pm_arg, handle, security_flag);
	}

	/* Legacy layout: five 32-bit payload words packed into x1..x3 */
	pm_arg[0] = (uint32_t)x1;
	pm_arg[1] = (uint32_t)(x1 >> 32U);
	pm_arg[2] = (uint32_t)x2;
	pm_arg[3] = (uint32_t)(x2 >> 32U);
	pm_arg[4] = (uint32_t)x3;
	(void)(x4);
	api_id = smc_fid & FUNCID_NUM_MASK;

	/* Try each specialized handler in turn; 0 means "not mine" */
	ret = eemi_for_compatibility(api_id, pm_arg, handle, security_flag);
	if (ret != (uintptr_t)0) {
		return ret;
	}

	/*
	 * NOTE(review): unlike the sibling handler calls here, this one passes
	 * the raw SMC 'flags' (uint64_t, implicitly truncated to 32 bits)
	 * rather than the derived 'security_flag' -- confirm this is
	 * intentional and not a mix-up.
	 */
	ret = eemi_psci_debugfs_handler(api_id, pm_arg, handle, flags);
	if (ret != (uintptr_t)0) {
		return ret;
	}

	ret = TF_A_specific_handler(api_id, pm_arg, handle, security_flag);
	if (ret != (uintptr_t)0) {
		return ret;
	}

	/* Fall back to the common EEMI IPI path */
	ret = eemi_handler(api_id, pm_arg, handle, security_flag);

	return ret;
}