/*
 * Copyright (c) 2013-2022, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/pmf/pmf.h>
#include <lib/runtime_instr.h>
#include <lib/smccc.h>
#include <plat/common/platform.h>
#include <services/arm_arch_svc.h>

#include "psci_private.h"

/*******************************************************************************
 * PSCI frontend api for servicing SMCs. Described in the PSCI spec.
 ******************************************************************************/

/*
 * Power on the CPU identified by 'target_cpu' (an MPIDR value) and have it
 * enter the normal world at 'entrypoint' with 'context_id' in its context
 * register. Returns PSCI_E_SUCCESS or a PSCI error code.
 */
int psci_cpu_on(u_register_t target_cpu,
		uintptr_t entrypoint,
		u_register_t context_id)

{
	int rc;
	entry_point_info_t ep;

	/* Determine if the cpu exists or not */
	rc = psci_validate_mpidr(target_cpu);
	if (rc != PSCI_E_SUCCESS)
		return PSCI_E_INVALID_PARAMS;

	/* Validate the entry point and get the entry_point_info */
	rc = psci_validate_entry_point(&ep, entrypoint, context_id);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/*
	 * To turn this cpu on, specify which power
	 * levels need to be turned on
	 */
	return psci_cpu_on_start(target_cpu, &ep);
}

/*
 * Return the PSCI version implemented, with the major and minor version
 * fields packed as defined by the PSCI specification.
 */
unsigned int psci_version(void)
{
	return PSCI_MAJOR_VER | PSCI_MINOR_VER;
}

/*
 * Suspend the calling CPU to the state encoded in 'power_state'. For a
 * standby (retention) request a fast path is taken that never programs an
 * entry point; for a power-down request 'entrypoint'/'context_id' describe
 * where the CPU resumes in the normal world. May return PSCI_E_SUCCESS
 * after wake-up from standby, an error code, or not return at all if the
 * CPU is powered down.
 */
int psci_cpu_suspend(unsigned int power_state,
		     uintptr_t entrypoint,
		     u_register_t context_id)
{
	int rc;
	unsigned int target_pwrlvl, is_power_down_state;
	entry_point_info_t ep;
	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
	plat_local_state_t cpu_pd_state;
#if PSCI_OS_INIT_MODE
	unsigned int cpu_idx = plat_my_core_pos();
	/* Snapshot of previously requested local states, for OS-init mode */
	plat_local_state_t prev[PLAT_MAX_PWR_LVL];
#endif

	/* Validate the power_state parameter */
	rc = psci_validate_power_state(power_state, &state_info);
	if (rc != PSCI_E_SUCCESS) {
		/* Validation can only fail with INVALID_PARAMS */
		assert(rc == PSCI_E_INVALID_PARAMS);
		return rc;
	}

	/*
	 * Get the value of the state type bit from the power state parameter.
	 */
	is_power_down_state = psci_get_pstate_type(power_state);

	/* Sanity check the requested suspend levels */
	assert(psci_validate_suspend_req(&state_info, is_power_down_state)
			== PSCI_E_SUCCESS);

	target_pwrlvl = psci_find_target_suspend_lvl(&state_info);
	if (target_pwrlvl == PSCI_INVALID_PWR_LVL) {
		ERROR("Invalid target power level for suspend operation\n");
		panic();
	}

	/* Fast path for CPU standby. */
	if (is_cpu_standby_req(is_power_down_state, target_pwrlvl)) {
		/* Standby is optional; reject if the platform lacks it */
		if (psci_plat_pm_ops->cpu_standby == NULL)
			return PSCI_E_INVALID_PARAMS;

		/*
		 * Set the state of the CPU power domain to the platform
		 * specific retention state and enter the standby state.
		 */
		cpu_pd_state = state_info.pwr_domain_state[PSCI_CPU_PWR_LVL];
		psci_set_cpu_local_state(cpu_pd_state);

#if PSCI_OS_INIT_MODE
		/*
		 * If in OS-initiated mode, save a copy of the previous
		 * requested local power states and update the new requested
		 * local power states for this CPU.
		 */
		if (psci_suspend_mode == OS_INIT) {
			psci_update_req_local_pwr_states(target_pwrlvl, cpu_idx,
							 &state_info, prev);
		}
#endif

#if ENABLE_PSCI_STAT
		plat_psci_stat_accounting_start(&state_info);
#endif

#if ENABLE_RUNTIME_INSTRUMENTATION
		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		    RT_INSTR_ENTER_HW_LOW_PWR,
		    PMF_NO_CACHE_MAINT);
#endif

		/* Enter the platform's retention state; returns on wake-up */
		psci_plat_pm_ops->cpu_standby(cpu_pd_state);

		/* Upon exit from standby, set the state back to RUN. */
		psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);

#if PSCI_OS_INIT_MODE
		/*
		 * If in OS-initiated mode, restore the previous requested
		 * local power states for this CPU.
		 */
		if (psci_suspend_mode == OS_INIT) {
			psci_restore_req_local_pwr_states(cpu_idx, prev);
		}
#endif

#if ENABLE_RUNTIME_INSTRUMENTATION
		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		    RT_INSTR_EXIT_HW_LOW_PWR,
		    PMF_NO_CACHE_MAINT);
#endif

#if ENABLE_PSCI_STAT
		plat_psci_stat_accounting_stop(&state_info);

		/* Update PSCI stats */
		psci_stats_update_pwr_up(PSCI_CPU_PWR_LVL, &state_info);
#endif

		return PSCI_E_SUCCESS;
	}

	/*
	 * If a power down state has been requested, we need to verify entry
	 * point and program entry information.
	 */
	if (is_power_down_state != 0U) {
		rc = psci_validate_entry_point(&ep, entrypoint, context_id);
		if (rc != PSCI_E_SUCCESS)
			return rc;
	}

	/*
	 * Do what is needed to enter the power down state. Upon success,
	 * enter the final wfi which will power down this CPU. This function
	 * might return if the power down was abandoned for any reason, e.g.
	 * arrival of an interrupt
	 */
	rc = psci_cpu_suspend_start(&ep,
				    target_pwrlvl,
				    &state_info,
				    is_power_down_state);

	return rc;
}


/*
 * Suspend the whole system to its deepest (system-level) power state.
 * Only permitted when the calling CPU is the last CPU still ON. On wake-up
 * the CPU resumes at 'entrypoint' with 'context_id'.
 */
int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id)
{
	int rc;
	psci_power_state_t state_info;
	entry_point_info_t ep;

	/* Check if the current CPU is the last ON CPU in the system */
	if (!psci_is_last_on_cpu())
		return PSCI_E_DENIED;

	/* Validate the entry point and get the entry_point_info */
	rc = psci_validate_entry_point(&ep, entrypoint, context_id);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/* Query the psci_power_state for system suspend */
	psci_query_sys_suspend_pwrstate(&state_info);

	/*
	 * Check if platform allows suspend to Highest power level
	 * (System level)
	 */
	if (psci_find_target_suspend_lvl(&state_info) < PLAT_MAX_PWR_LVL)
		return PSCI_E_DENIED;

	/* Ensure that the psci_power_state makes sense */
	assert(psci_validate_suspend_req(&state_info, PSTATE_TYPE_POWERDOWN)
						== PSCI_E_SUCCESS);
	assert(is_local_state_off(
			state_info.pwr_domain_state[PLAT_MAX_PWR_LVL]) != 0);

	/*
	 * Do what is needed to enter the system suspend state. This function
	 * might return if the power down was abandoned for any reason, e.g.
	 * arrival of an interrupt
	 */
	rc = psci_cpu_suspend_start(&ep,
				    PLAT_MAX_PWR_LVL,
				    &state_info,
				    PSTATE_TYPE_POWERDOWN);

	return rc;
}

/*
 * Power off the calling CPU and as many of its ancestor power domains as
 * possible. On success this does not return (the CPU enters its final wfi);
 * the only value that can be returned is PSCI_E_DENIED.
 */
int psci_cpu_off(void)
{
	int rc;
	unsigned int target_pwrlvl = PLAT_MAX_PWR_LVL;

	/*
	 * Do what is needed to power off this CPU and possible higher power
	 * levels if it is able to do so. Upon success, enter the final wfi
	 * which will power down this CPU.
	 */
	rc = psci_do_cpu_off(target_pwrlvl);

	/*
	 * The only error cpu_off can return is E_DENIED. So check if that's
	 * indeed the case.
	 */
	assert(rc == PSCI_E_DENIED);

	return rc;
}

/*
 * Report the power state (ON/OFF/ON_PENDING) of the CPU identified by
 * 'target_affinity'. Only the CPU power level is supported for
 * 'lowest_affinity_level'.
 */
int psci_affinity_info(u_register_t target_affinity,
		       unsigned int lowest_affinity_level)
{
	int ret;
	unsigned int target_idx;

	/* We don't support levels higher than PSCI_CPU_PWR_LVL */
	if (lowest_affinity_level > PSCI_CPU_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* Calculate the cpu index of the target */
	ret = plat_core_pos_by_mpidr(target_affinity);
	if (ret == -1) {
		return PSCI_E_INVALID_PARAMS;
	}
	target_idx = (unsigned int)ret;

	/*
	 * Generic management:
	 * Perform cache maintenance ahead of reading the target CPU state to
	 * ensure that the data is not stale.
	 * There is a theoretical edge case where the cache may contain stale
	 * data for the target CPU data - this can occur under the following
	 * conditions:
	 * - the target CPU is in another cluster from the current
	 * - the target CPU was the last CPU to shutdown on its cluster
	 * - the cluster was removed from coherency as part of the CPU shutdown
	 *
	 * In this case the cache maintenance that was performed as part of the
	 * target CPUs shutdown was not seen by the current CPU's cluster. And
	 * so the cache may contain stale data for the target CPU.
	 */
	flush_cpu_data_by_index(target_idx,
				psci_svc_cpu_data.aff_info_state);

	return psci_get_aff_info_state_by_idx(target_idx);
}

/*
 * Migrate the Trusted OS (if it is a migratable uniprocessor one) to
 * 'target_cpu'. Must be invoked on the CPU where the Secure OS is currently
 * resident.
 */
int psci_migrate(u_register_t target_cpu)
{
	int rc;
	u_register_t resident_cpu_mpidr;

	rc = psci_spd_migrate_info(&resident_cpu_mpidr);
	if (rc != PSCI_TOS_UP_MIG_CAP)
		return (rc == PSCI_TOS_NOT_UP_MIG_CAP) ?
			  PSCI_E_DENIED : PSCI_E_NOT_SUPPORTED;

	/*
	 * Migrate should only be invoked on the CPU where
	 * the Secure OS is resident.
	 */
	if (resident_cpu_mpidr != read_mpidr_el1())
		return PSCI_E_NOT_PRESENT;

	/* Check the validity of the specified target cpu */
	rc = psci_validate_mpidr(target_cpu);
	if (rc != PSCI_E_SUCCESS)
		return PSCI_E_INVALID_PARAMS;

	/* The SPD must have registered a migrate handler to get this far */
	assert((psci_spd_pm != NULL) && (psci_spd_pm->svc_migrate != NULL));

	rc = psci_spd_pm->svc_migrate(read_mpidr_el1(), target_cpu);
	assert((rc == PSCI_E_SUCCESS) || (rc == PSCI_E_INTERN_FAIL));

	return rc;
}

/*
 * Return the migration capability of the Trusted OS as reported by the
 * Secure Payload Dispatcher (the resident MPIDR output is discarded).
 */
int psci_migrate_info_type(void)
{
	u_register_t resident_cpu_mpidr;

	return psci_spd_migrate_info(&resident_cpu_mpidr);
}

/*
 * Return the MPIDR of the CPU where a uniprocessor Trusted OS is resident,
 * or PSCI_E_INVALID_PARAMS if there is no such uniprocessor Trusted OS.
 */
u_register_t psci_migrate_info_up_cpu(void)
{
	u_register_t resident_cpu_mpidr;
	int rc;

	/*
	 * Return value of this depends upon what
	 * psci_spd_migrate_info() returns.
	 */
	rc = psci_spd_migrate_info(&resident_cpu_mpidr);
	if ((rc != PSCI_TOS_NOT_UP_MIG_CAP) && (rc != PSCI_TOS_UP_MIG_CAP))
		return (u_register_t)(register_t) PSCI_E_INVALID_PARAMS;

	return resident_cpu_mpidr;
}

/*
 * Query the platform power controller for the hardware state (ON/OFF/
 * STANDBY) of the power domain at 'power_level' containing 'target_cpu'.
 */
int psci_node_hw_state(u_register_t target_cpu,
		       unsigned int power_level)
{
	int rc;

	/* Validate target_cpu */
	rc = psci_validate_mpidr(target_cpu);
	if (rc != PSCI_E_SUCCESS)
		return PSCI_E_INVALID_PARAMS;

	/* Validate power_level against PLAT_MAX_PWR_LVL */
	if (power_level > PLAT_MAX_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/*
	 * Dispatch this call to platform to query power controller, and pass on
	 * to the caller what it returns
	 */
	assert(psci_plat_pm_ops->get_node_hw_state != NULL);
	rc = psci_plat_pm_ops->get_node_hw_state(target_cpu, power_level);
	assert(((rc >= HW_ON) && (rc <= HW_STANDBY))
		|| (rc == PSCI_E_NOT_SUPPORTED)
		|| (rc == PSCI_E_INVALID_PARAMS));
	return rc;
}

/*
 * Implement PSCI_FEATURES: report whether 'psci_fid' is supported and, for
 * CPU_SUSPEND, the feature flags describing the power_state format and
 * OS-initiated mode support.
 */
int psci_features(unsigned int psci_fid)
{
	unsigned int local_caps = psci_caps;

	if (psci_fid == SMCCC_VERSION)
		return PSCI_E_SUCCESS;

	/* Check if it is a 64 bit function */
	if (((psci_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64)
		local_caps &= PSCI_CAP_64BIT_MASK;

	/* Check for invalid fid */
	if (!(is_std_svc_call(psci_fid) && is_valid_fast_smc(psci_fid)
			&& is_psci_fid(psci_fid)))
		return PSCI_E_NOT_SUPPORTED;


	/* Check if the psci fid is supported or not */
	if ((local_caps & define_psci_cap(psci_fid)) == 0U)
		return PSCI_E_NOT_SUPPORTED;

	/* Format the feature flags */
	if ((psci_fid == PSCI_CPU_SUSPEND_AARCH32) ||
	    (psci_fid == PSCI_CPU_SUSPEND_AARCH64)) {
		unsigned int ret = ((FF_PSTATE << FF_PSTATE_SHIFT) |
			(FF_SUPPORTS_OS_INIT_MODE << FF_MODE_SUPPORT_SHIFT));
		return (int)ret;
	}

	/* Return 0 for all other fid's */
	return PSCI_E_SUCCESS;
}

#if PSCI_OS_INIT_MODE
/*
 * Switch the PSCI suspend coordination mode between platform-coordinated
 * (PLAT_COORD) and OS-initiated (OS_INIT). The transition is only permitted
 * under the CPU-topology conditions checked below; on success the new mode
 * is flushed to memory so other CPUs observe it.
 */
int psci_set_suspend_mode(unsigned int mode)
{
	/* Already in the requested mode - nothing to do */
	if (psci_suspend_mode == mode) {
		return PSCI_E_SUCCESS;
	}

	if (mode == PLAT_COORD) {
		/* Check if the current CPU is the last ON CPU in the system */
		if (!psci_is_last_on_cpu_safe()) {
			return PSCI_E_DENIED;
		}
	}

	if (mode == OS_INIT) {
		/*
		 * Check if all CPUs in the system are ON or if the current
		 * CPU is the last ON CPU in the system.
		 */
		if (!(psci_are_all_cpus_on_safe() ||
		      psci_is_last_on_cpu_safe())) {
			return PSCI_E_DENIED;
		}
	}

	psci_suspend_mode = mode;
	/* Flush so CPUs with caches/coherency disabled see the new mode */
	psci_flush_dcache_range((uintptr_t)&psci_suspend_mode,
				sizeof(psci_suspend_mode));

	return PSCI_E_SUCCESS;
}
#endif

/*******************************************************************************
 * PSCI top level handler for servicing SMCs.
 ******************************************************************************/
/*
 * Top-level PSCI SMC dispatcher. Decodes 'smc_fid' and routes to the
 * corresponding frontend API above, truncating arguments to 32 bits for
 * SMC32 calls. Secure-world callers are rejected with SMC_UNK, as are FIDs
 * outside the advertised capability set.
 */
u_register_t psci_smc_handler(uint32_t smc_fid,
			      u_register_t x1,
			      u_register_t x2,
			      u_register_t x3,
			      u_register_t x4,
			      void *cookie,
			      void *handle,
			      u_register_t flags)
{
	u_register_t ret;

	/* PSCI is only exposed to the non-secure world */
	if (is_caller_secure(flags))
		return (u_register_t)SMC_UNK;

	/* Check the fid against the capabilities */
	if ((psci_caps & define_psci_cap(smc_fid)) == 0U)
		return (u_register_t)SMC_UNK;

	if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
		/* 32-bit PSCI function, clear top parameter bits */

		uint32_t r1 = (uint32_t)x1;
		uint32_t r2 = (uint32_t)x2;
		uint32_t r3 = (uint32_t)x3;

		switch (smc_fid) {
		case PSCI_VERSION:
			ret = (u_register_t)psci_version();
			break;

		case PSCI_CPU_OFF:
			ret = (u_register_t)psci_cpu_off();
			break;

		case PSCI_CPU_SUSPEND_AARCH32:
			ret = (u_register_t)psci_cpu_suspend(r1, r2, r3);
			break;

		case PSCI_CPU_ON_AARCH32:
			ret = (u_register_t)psci_cpu_on(r1, r2, r3);
			break;

		case PSCI_AFFINITY_INFO_AARCH32:
			ret = (u_register_t)psci_affinity_info(r1, r2);
			break;

		case PSCI_MIG_AARCH32:
			ret = (u_register_t)psci_migrate(r1);
			break;

		case PSCI_MIG_INFO_TYPE:
			ret = (u_register_t)psci_migrate_info_type();
			break;

		case PSCI_MIG_INFO_UP_CPU_AARCH32:
			ret = psci_migrate_info_up_cpu();
			break;

		case PSCI_NODE_HW_STATE_AARCH32:
			ret = (u_register_t)psci_node_hw_state(r1, r2);
			break;

		case PSCI_SYSTEM_SUSPEND_AARCH32:
			ret = (u_register_t)psci_system_suspend(r1, r2);
			break;

		case PSCI_SYSTEM_OFF:
			psci_system_off();
			/* We should never return from psci_system_off() */
			break;

		case PSCI_SYSTEM_RESET:
			psci_system_reset();
			/* We should never return from psci_system_reset() */
			break;

		case PSCI_FEATURES:
			ret = (u_register_t)psci_features(r1);
			break;

#if PSCI_OS_INIT_MODE
		case PSCI_SET_SUSPEND_MODE:
			ret = (u_register_t)psci_set_suspend_mode(r1);
			break;
#endif

#if ENABLE_PSCI_STAT
		case PSCI_STAT_RESIDENCY_AARCH32:
			ret = psci_stat_residency(r1, r2);
			break;

		case PSCI_STAT_COUNT_AARCH32:
			ret = psci_stat_count(r1, r2);
			break;
#endif
		case PSCI_MEM_PROTECT:
			ret = psci_mem_protect(r1);
			break;

		case PSCI_MEM_CHK_RANGE_AARCH32:
			ret = psci_mem_chk_range(r1, r2);
			break;

		case PSCI_SYSTEM_RESET2_AARCH32:
			/* We should never return from psci_system_reset2() */
			ret = psci_system_reset2(r1, r2);
			break;

		default:
			WARN("Unimplemented PSCI Call: 0x%x\n", smc_fid);
			ret = (u_register_t)SMC_UNK;
			break;
		}
	} else {
		/* 64-bit PSCI function */

		switch (smc_fid) {
		case PSCI_CPU_SUSPEND_AARCH64:
			ret = (u_register_t)
				psci_cpu_suspend((unsigned int)x1, x2, x3);
			break;

		case PSCI_CPU_ON_AARCH64:
			ret = (u_register_t)psci_cpu_on(x1, x2, x3);
			break;

		case PSCI_AFFINITY_INFO_AARCH64:
			ret = (u_register_t)
				psci_affinity_info(x1, (unsigned int)x2);
			break;

		case PSCI_MIG_AARCH64:
			ret = (u_register_t)psci_migrate(x1);
			break;

		case PSCI_MIG_INFO_UP_CPU_AARCH64:
			ret = psci_migrate_info_up_cpu();
			break;

		case PSCI_NODE_HW_STATE_AARCH64:
			ret = (u_register_t)psci_node_hw_state(
					x1, (unsigned int) x2);
			break;

		case PSCI_SYSTEM_SUSPEND_AARCH64:
			ret = (u_register_t)psci_system_suspend(x1, x2);
			break;

#if ENABLE_PSCI_STAT
		case PSCI_STAT_RESIDENCY_AARCH64:
			ret = psci_stat_residency(x1, (unsigned int) x2);
			break;

		case PSCI_STAT_COUNT_AARCH64:
			ret = psci_stat_count(x1, (unsigned int) x2);
			break;
#endif

		case PSCI_MEM_CHK_RANGE_AARCH64:
			ret = psci_mem_chk_range(x1, x2);
			break;

		case PSCI_SYSTEM_RESET2_AARCH64:
			/* We should never return from psci_system_reset2() */
			ret = psci_system_reset2((uint32_t) x1, x2);
			break;

		default:
			WARN("Unimplemented PSCI Call: 0x%x\n", smc_fid);
			ret = (u_register_t)SMC_UNK;
			break;
		}
	}

	return ret;
}