/*
 * Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <drivers/delay_timer.h>
#include <lib/cpus/cpu_ops.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/extensions/spe.h>
#include <lib/per_cpu/per_cpu.h>
#include <lib/pmf/pmf.h>
#include <lib/runtime_instr.h>
#include <lib/utils.h>
#include <plat/common/platform.h>

#include "psci_private.h"

/*
 * SPD power management operations, expected to be supplied by the registered
 * SPD on successful SP initialization
 */
const spd_pm_ops_t *psci_spd_pm;

/*
 * PSCI requested local power state map. This array is used to store the local
 * power states requested by a CPU for power levels from level 1 to
 * PLAT_MAX_PWR_LVL. It does not store the requested local power state for
 * power level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state
 * for a CPU are the same.
 *
 * During state coordination, the platform is passed an array containing the
 * local states requested for a particular non-CPU power domain by each CPU
 * within the domain.
 *
 * TODO: Dense packing of the requested states will cause cache thrashing
 * when multiple power domains write to it. If we allocate the requested
 * states at each power level in cache-line aligned per-domain memory,
 * the cache thrashing can be avoided.
 */
static plat_local_state_t
        psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT];

unsigned int psci_plat_core_count;
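/*
 * Illustration (not part of the build): the requested-state map is indexed by
 * (pwrlvl - 1) because level 0 is not stored. On a hypothetical platform with
 * PLAT_MAX_PWR_LVL = 2 and four cores, the state CPU 3 requests for its
 * level-1 domain (e.g. its cluster) would be read as:
 *
 *	plat_local_state_t s = psci_req_local_pwr_states[1U - 1U][3U];
 *
 * and the row slice psci_req_local_pwr_states[lvl - 1U][cpu_start_idx] ..
 * [cpu_start_idx + ncpus - 1U] is what gets handed to the platform during
 * state coordination for the domain at 'lvl'.
 */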
/*******************************************************************************
 * Arrays that hold the platform's power domain tree information for state
 * management of power domains.
 * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain
 * which is an ancestor of a CPU power domain.
 * Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain
 ******************************************************************************/
non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]
#if USE_COHERENT_MEM
__section(".tzfw_coherent_mem")
#endif
;

/* Lock for PSCI state coordination */
DEFINE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);

PER_CPU_DEFINE(cpu_pd_node_t, psci_cpu_pd_nodes);

/*******************************************************************************
 * Pointer to functions exported by the platform to complete power mgmt. ops
 ******************************************************************************/
const plat_psci_ops_t *psci_plat_pm_ops;

/******************************************************************************
 * Check that the maximum power level supported by the platform makes sense
 *****************************************************************************/
CASSERT((PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL) &&
        (PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL),
        assert_platform_max_pwrlvl_check);

#if PSCI_OS_INIT_MODE
/*******************************************************************************
 * The power state coordination mode used in CPU_SUSPEND.
 * Defaults to platform-coordinated mode.
 ******************************************************************************/
suspend_mode_t psci_suspend_mode = PLAT_COORD;
#endif

/*
 * The plat_local_state used by the platform is one of these types: RUN,
 * RETENTION and OFF. The platform can define further sub-states for each type
 * apart from RUN. This categorization is done to verify the sanity of the
 * psci_power_state passed by the platform and to print debug information. The
 * categorization is done on the basis of the following conditions:
 *
 * 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN.
 *
 * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is
 *    STATE_TYPE_RETN.
 *
 * 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is
 *    STATE_TYPE_OFF.
 */
typedef enum plat_local_state_type {
        STATE_TYPE_RUN = 0,
        STATE_TYPE_RETN,
        STATE_TYPE_OFF
} plat_local_state_type_t;

/* Function used to categorize plat_local_state. */
static plat_local_state_type_t find_local_state_type(plat_local_state_t state)
{
        if (state != 0U) {
                if (state > PLAT_MAX_RET_STATE) {
                        return STATE_TYPE_OFF;
                } else {
                        return STATE_TYPE_RETN;
                }
        } else {
                return STATE_TYPE_RUN;
        }
}
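/*
 * Example (assuming hypothetical platform values PLAT_MAX_RET_STATE = 1 and
 * PLAT_MAX_OFF_STATE = 2):
 *
 *	find_local_state_type(0U);	// returns STATE_TYPE_RUN
 *	find_local_state_type(1U);	// returns STATE_TYPE_RETN
 *	find_local_state_type(2U);	// returns STATE_TYPE_OFF
 */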
/******************************************************************************
 * Check that the maximum retention level supported by the platform is less
 * than the maximum off level.
 *****************************************************************************/
CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE,
        assert_platform_max_off_and_retn_state_check);

/******************************************************************************
 * This function ensures that the power state parameter in a CPU_SUSPEND
 * request is valid. If so, it returns the requested states for each power
 * level.
 *****************************************************************************/
int psci_validate_power_state(unsigned int power_state,
                              psci_power_state_t *state_info)
{
        /* Check SBZ bits in power state are zero */
        if (psci_check_power_state(power_state) != 0U) {
                return PSCI_E_INVALID_PARAMS;
        }
        assert(psci_plat_pm_ops->validate_power_state != NULL);

        /* Validate the power_state using platform pm_ops */
        return psci_plat_pm_ops->validate_power_state(power_state, state_info);
}

/******************************************************************************
 * This function retrieves the `psci_power_state_t` for system suspend from
 * the platform.
 *****************************************************************************/
void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
{
        /*
         * Assert that the required pm_ops hook is implemented to ensure that
         * the capability detected during psci_setup() is valid.
         */
        assert(psci_plat_pm_ops->get_sys_suspend_power_state != NULL);

        /*
         * Query the platform for the power_state required for system suspend
         */
        psci_plat_pm_ops->get_sys_suspend_power_state(state_info);
}

#if PSCI_OS_INIT_MODE
/*******************************************************************************
 * This function verifies that all the other cores at the 'end_pwrlvl' have
 * been idled and that the current CPU is the last running CPU at that level.
 * Returns true if the current CPU is the last ON CPU, false otherwise.
 ******************************************************************************/
static bool psci_is_last_cpu_to_idle_at_pwrlvl(unsigned int my_idx,
                                               unsigned int end_pwrlvl)
{
        unsigned int lvl;
        unsigned int parent_idx = 0;
        unsigned int cpu_start_idx, ncpus, cpu_idx;
        plat_local_state_t local_state;

        if (end_pwrlvl == PSCI_CPU_PWR_LVL) {
                return true;
        }

        parent_idx = PER_CPU_BY_INDEX(psci_cpu_pd_nodes, my_idx)->parent_node;
        for (lvl = PSCI_CPU_PWR_LVL + U(1); lvl < end_pwrlvl; lvl++) {
                parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
        }

        cpu_start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
        ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;

        for (cpu_idx = cpu_start_idx; cpu_idx < cpu_start_idx + ncpus;
             cpu_idx++) {
                local_state = psci_get_cpu_local_state_by_idx(cpu_idx);
                if (cpu_idx == my_idx) {
                        assert(is_local_state_run(local_state) != 0);
                        continue;
                }

                if (is_local_state_run(local_state) != 0) {
                        return false;
                }
        }

        return true;
}
#endif

/*******************************************************************************
 * This function verifies that all the other cores in the system have been
 * turned OFF and that the current CPU is the last running CPU in the system.
 * Returns true if the current CPU is the last ON CPU, false otherwise.
 ******************************************************************************/
bool psci_is_last_on_cpu(unsigned int my_idx)
{
        for (unsigned int cpu_idx = 0U; cpu_idx < psci_plat_core_count;
             cpu_idx++) {
                if (cpu_idx == my_idx) {
                        assert(psci_get_aff_info_state() == AFF_STATE_ON);
                        continue;
                }

                if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF) {
                        VERBOSE("core=%u other than current core=%u running in the system\n",
                                cpu_idx, my_idx);
                        return false;
                }
        }

        return true;
}

/*******************************************************************************
 * This function verifies that all cores in the system have been turned ON.
 * Returns true if all CPUs are ON, false otherwise.
 ******************************************************************************/
static bool psci_are_all_cpus_on(void)
{
        unsigned int cpu_idx;

        for (cpu_idx = 0U; cpu_idx < psci_plat_core_count; cpu_idx++) {
                if (psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_OFF) {
                        return false;
                }
        }

        return true;
}
/*******************************************************************************
 * Counts the number of CPUs in the system that are currently in the ON or
 * ON_PENDING state.
 *
 * @note This function does not acquire any power domain locks. It must only be
 *       called in contexts where it is guaranteed that PSCI state transitions
 *       are not concurrently happening, or where locks are already held.
 *
 * @return The number of CPUs currently in AFF_STATE_ON or AFF_STATE_ON_PENDING.
 ******************************************************************************/
static unsigned int psci_num_cpus_running(void)
{
        unsigned int cpu_idx;
        unsigned int no_of_cpus = 0U;
        aff_info_state_t aff_state;

        for (cpu_idx = 0U; cpu_idx < psci_plat_core_count; cpu_idx++) {
                aff_state = psci_get_aff_info_state_by_idx(cpu_idx);
                if (aff_state == AFF_STATE_ON ||
                    aff_state == AFF_STATE_ON_PENDING) {
                        no_of_cpus++;
                }
        }

        return no_of_cpus;
}

/*******************************************************************************
 * Routine to return the maximum power level to traverse to after a cpu has
 * been physically powered up. It is expected to be called immediately after
 * reset from assembler code.
 ******************************************************************************/
static unsigned int get_power_on_target_pwrlvl(void)
{
        unsigned int pwrlvl;

        /*
         * Assume that this cpu was suspended and retrieve its target power
         * level. If it wasn't, the cpu is off so this will be
         * PLAT_MAX_PWR_LVL.
         */
        pwrlvl = psci_get_suspend_pwrlvl();
        assert(pwrlvl < PSCI_INVALID_PWR_LVL);
        return pwrlvl;
}

/******************************************************************************
 * Helper function to update the requested local power state array. This array
 * does not store the requested state for the CPU power level. Hence an
 * assertion is added to prevent us from accessing the CPU power level.
 *****************************************************************************/
static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
                                         unsigned int cpu_idx,
                                         plat_local_state_t req_pwr_state)
{
        assert(pwrlvl > PSCI_CPU_PWR_LVL);
        if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) &&
            (cpu_idx < psci_plat_core_count)) {
                psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx] = req_pwr_state;
        }
}

/******************************************************************************
 * This function initializes the psci_req_local_pwr_states.
 *****************************************************************************/
void __init psci_init_req_local_pwr_states(void)
{
        /* Initialize the requested state of all non CPU power domains as OFF */
        unsigned int pwrlvl;
        unsigned int core;

        for (pwrlvl = 0U; pwrlvl < PLAT_MAX_PWR_LVL; pwrlvl++) {
                for (core = 0U; core < psci_plat_core_count; core++) {
                        psci_req_local_pwr_states[pwrlvl][core] =
                                PLAT_MAX_OFF_STATE;
                }
        }
}
/******************************************************************************
 * Helper function to return a reference to an array containing the local power
 * states requested by each cpu for a power domain at 'pwrlvl'. The size of the
 * array will be the number of cpu power domains of which this power domain is
 * an ancestor. These requested states will be used to determine a suitable
 * target state for this power domain during psci state coordination. An
 * assertion is added to prevent us from accessing the CPU power level.
 *****************************************************************************/
static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
                                                         unsigned int cpu_idx)
{
        assert(pwrlvl > PSCI_CPU_PWR_LVL);

        if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) &&
            (cpu_idx < psci_plat_core_count)) {
                return &psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx];
        } else {
                return NULL;
        }
}

#if PSCI_OS_INIT_MODE
/******************************************************************************
 * Helper function to save a copy of the psci_req_local_pwr_states (prev) for a
 * CPU (cpu_idx), and update psci_req_local_pwr_states with the new requested
 * local power states (state_info).
 *****************************************************************************/
void psci_update_req_local_pwr_states(unsigned int end_pwrlvl,
                                      unsigned int cpu_idx,
                                      psci_power_state_t *state_info,
                                      plat_local_state_t *prev)
{
        unsigned int lvl;
#ifdef PLAT_MAX_CPU_SUSPEND_PWR_LVL
        unsigned int max_pwrlvl = PLAT_MAX_CPU_SUSPEND_PWR_LVL;
#else
        unsigned int max_pwrlvl = PLAT_MAX_PWR_LVL;
#endif
        plat_local_state_t req_state;

        for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= max_pwrlvl; lvl++) {
                /* Save the previous requested local power state */
                prev[lvl - 1U] = *psci_get_req_local_pwr_states(lvl, cpu_idx);

                /* Update the new requested local power state */
                if (lvl <= end_pwrlvl) {
                        req_state = state_info->pwr_domain_state[lvl];
                } else {
                        req_state = state_info->pwr_domain_state[end_pwrlvl];
                }
                psci_set_req_local_pwr_state(lvl, cpu_idx, req_state);
        }
}

/******************************************************************************
 * Helper function to restore the previously saved requested local power states
 * (prev) for a CPU (cpu_idx) to psci_req_local_pwr_states.
 *****************************************************************************/
void psci_restore_req_local_pwr_states(unsigned int cpu_idx,
                                       plat_local_state_t *prev)
{
        unsigned int lvl;
#ifdef PLAT_MAX_CPU_SUSPEND_PWR_LVL
        unsigned int max_pwrlvl = PLAT_MAX_CPU_SUSPEND_PWR_LVL;
#else
        unsigned int max_pwrlvl = PLAT_MAX_PWR_LVL;
#endif

        for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= max_pwrlvl; lvl++) {
                /* Restore the previous requested local power state */
                psci_set_req_local_pwr_state(lvl, cpu_idx, prev[lvl - 1U]);
        }
}
#endif
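/*
 * Sketch of the intended save/update/restore round-trip in OS-initiated mode
 * (illustrative only; 'rc' stands in for the result of coordination, and the
 * 'prev' sizing follows psci_validate_state_coordination() below):
 *
 *	plat_local_state_t prev[PLAT_MAX_PWR_LVL];
 *
 *	psci_update_req_local_pwr_states(end_pwrlvl, cpu_idx, state_info, prev);
 *	// ... coordinate and validate against the platform ...
 *	if (rc != PSCI_E_SUCCESS) {
 *		// Request denied: undo this CPU's requested states
 *		psci_restore_req_local_pwr_states(cpu_idx, prev);
 *	}
 */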
/*
 * psci_non_cpu_pd_nodes can be placed either in normal memory or coherent
 * memory.
 *
 * With !USE_COHERENT_MEM, psci_non_cpu_pd_nodes is placed in normal memory,
 * where it is accessed by both cached and non-cached participants. To serve
 * the common minimum, perform a cache flush before read and after write so
 * that non-cached participants operate on the latest data in main memory.
 *
 * When USE_COHERENT_MEM is used, psci_non_cpu_pd_nodes is placed in coherent
 * memory. With HW_ASSISTED_COHERENCY, all PSCI participants are
 * cache-coherent. In both cases, no cache operations are required.
 */

/*
 * Retrieve the local state of a non-CPU power domain node, preceded by any
 * cache maintenance required for a non-cached reader to see the latest value.
 */
static plat_local_state_t get_non_cpu_pd_node_local_state(
                unsigned int parent_idx)
{
#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
        flush_dcache_range(
                        (uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
                        sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
        return psci_non_cpu_pd_nodes[parent_idx].local_state;
}

/*
 * Update the local state of a non-CPU power domain node from a cached CPU;
 * perform any required cache maintenance operation afterwards.
 */
static void set_non_cpu_pd_node_local_state(unsigned int parent_idx,
                                            plat_local_state_t state)
{
        psci_non_cpu_pd_nodes[parent_idx].local_state = state;
#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
        flush_dcache_range(
                        (uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
                        sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
}

/******************************************************************************
 * Helper function to return the current local power state of each power domain
 * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
 * function will be called after a cpu is powered on to find the local state
 * each power domain has emerged from.
 *****************************************************************************/
void psci_get_target_local_pwr_states(unsigned int cpu_idx,
                                      unsigned int end_pwrlvl,
                                      psci_power_state_t *target_state)
{
        unsigned int parent_idx, lvl;
        plat_local_state_t *pd_state = target_state->pwr_domain_state;

        pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
        parent_idx = PER_CPU_BY_INDEX(psci_cpu_pd_nodes, cpu_idx)->parent_node;

        /* Copy the local power state from node to state_info */
        for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
                pd_state[lvl] = get_non_cpu_pd_node_local_state(parent_idx);
                parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
        }

        /* Set the higher levels to RUN */
        for (; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
                target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
        }
}
/******************************************************************************
 * Helper function to set the target local power state that each power domain
 * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
 * enter. This function will be called after coordination of requested power
 * states has been done for each power level.
 *****************************************************************************/
void psci_set_target_local_pwr_states(unsigned int cpu_idx,
                                      unsigned int end_pwrlvl,
                                      const psci_power_state_t *target_state)
{
        unsigned int parent_idx, lvl;
        const plat_local_state_t *pd_state = target_state->pwr_domain_state;

        psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);

        /*
         * Need to flush as local_state might be accessed with Data Cache
         * disabled during power on
         */
        psci_flush_cpu_data(psci_svc_cpu_data.local_state);

        parent_idx = PER_CPU_BY_INDEX(psci_cpu_pd_nodes, cpu_idx)->parent_node;

        /* Copy the local_state from state_info */
        for (lvl = 1U; lvl <= end_pwrlvl; lvl++) {
                set_non_cpu_pd_node_local_state(parent_idx, pd_state[lvl]);
                parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
        }
}

/*******************************************************************************
 * PSCI helper function to get the parent nodes corresponding to a cpu_index.
 ******************************************************************************/
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
                                      unsigned int end_lvl,
                                      unsigned int *node_index)
{
        unsigned int parent_node =
                PER_CPU_BY_INDEX(psci_cpu_pd_nodes, cpu_idx)->parent_node;
        unsigned int i;
        unsigned int *node = node_index;

        for (i = PSCI_CPU_PWR_LVL + 1U; i <= end_lvl; i++) {
                *node = parent_node;
                node++;
                parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
        }
}
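/*
 * Illustration (hypothetical two-cluster, four-core topology): for cpu_idx 3
 * with end_lvl = 2, node_index is filled with the indices of cpu 3's ancestor
 * nodes in increasing level order:
 *
 *	node_index[0]: index of cpu 3's cluster node (level 1)
 *	node_index[1]: index of the system node (level 2)
 *
 * Callers in this file size the array with PLAT_MAX_PWR_LVL entries.
 */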
/******************************************************************************
 * This function is invoked post CPU power up and initialization. It sets the
 * affinity info state, target power state and requested power state for the
 * current CPU and all its ancestor power domains to RUN.
 *****************************************************************************/
void psci_set_pwr_domains_to_run(unsigned int cpu_idx, unsigned int end_pwrlvl)
{
        unsigned int parent_idx, lvl;

        parent_idx = PER_CPU_BY_INDEX(psci_cpu_pd_nodes, cpu_idx)->parent_node;

        /* Reset the local_state to RUN for the non cpu power domains. */
        for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
                set_non_cpu_pd_node_local_state(parent_idx,
                                                PSCI_LOCAL_STATE_RUN);
                psci_set_req_local_pwr_state(lvl,
                                             cpu_idx,
                                             PSCI_LOCAL_STATE_RUN);
                parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
        }

        /* Set the affinity info state to ON */
        psci_set_aff_info_state(AFF_STATE_ON);

        psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
        psci_flush_cpu_data(psci_svc_cpu_data);
}

/******************************************************************************
 * This function is used in platform-coordinated mode.
 *
 * This function is passed the local power states requested for each power
 * domain (state_info) between the current CPU domain and its ancestors until
 * the target power level (end_pwrlvl). It updates the array of requested power
 * states with this information.
 *
 * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
 * retrieves the states requested by all the cpus of which the power domain at
 * that level is an ancestor. It passes this information to the platform to
 * coordinate and return the target power state. If the target state for a
 * level is RUN then subsequent levels are not considered. At the CPU level,
 * state coordination is not required. Hence, the requested and the target
 * states are the same.
 *
 * The 'state_info' is updated with the target state for each level between the
 * CPU and the 'end_pwrlvl' and returned to the caller.
 *
 * This function will only be invoked with data cache enabled and while
 * powering down a core.
 *****************************************************************************/
void psci_do_state_coordination(unsigned int cpu_idx, unsigned int end_pwrlvl,
                                psci_power_state_t *state_info)
{
        unsigned int lvl, parent_idx;
        unsigned int start_idx;
        unsigned int ncpus;
        plat_local_state_t target_state;

        assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
        parent_idx = PER_CPU_BY_INDEX(psci_cpu_pd_nodes, cpu_idx)->parent_node;

        /*
         * For level 0, the requested state is equivalent to the target state,
         * so coordination starts from level 1.
         */
        for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
                /* First update the requested power state */
                psci_set_req_local_pwr_state(lvl, cpu_idx,
                                             state_info->pwr_domain_state[lvl]);

                /* Get the requested power states for this power level */
                start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
                plat_local_state_t const *req_states =
                        psci_get_req_local_pwr_states(lvl, start_idx);

                /*
                 * Let the platform coordinate amongst the requested states at
                 * this power level and return the target local power state.
                 */
                ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
                target_state = plat_get_target_pwr_state(lvl,
                                                         req_states,
                                                         ncpus);

                state_info->pwr_domain_state[lvl] = target_state;

                /* Break early if the negotiated target power state is RUN */
                if (is_local_state_run(state_info->pwr_domain_state[lvl]) != 0) {
                        break;
                }

                parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
        }

        /*
         * This is for cases when we break out of the above loop early because
         * the target power state is RUN at a power level < end_pwrlvl.
         * We update the requested power state from state_info and then
         * set the target state as RUN.
         */
        for (lvl = lvl + 1U; lvl <= end_pwrlvl; lvl++) {
                psci_set_req_local_pwr_state(lvl, cpu_idx,
                                             state_info->pwr_domain_state[lvl]);
                state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
        }
}
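/*
 * Worked example (hypothetical values): two cores share a level-1 cluster
 * domain, with PLAT_MAX_RET_STATE = 1 and PLAT_MAX_OFF_STATE = 2. If core 0
 * suspends requesting OFF (2) for the cluster while core 1 is still RUN (0),
 * plat_get_target_pwr_state() is handed the slice {2, 0}; a typical
 * minimum-policy implementation returns RUN, so the cluster's target state in
 * 'state_info' becomes RUN and higher levels are not consulted. Once core 1
 * also requests OFF, the slice becomes {2, 2} and the cluster target can be
 * OFF.
 */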
#if PSCI_OS_INIT_MODE
/******************************************************************************
 * This function is used in OS-initiated mode.
 *
 * This function is passed the local power states requested for each power
 * domain (state_info) between the current CPU domain and its ancestors until
 * the target power level (end_pwrlvl), and ensures the requested power states
 * are valid. It updates the array of requested power states with this
 * information.
 *
 * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
 * retrieves the states requested by all the cpus of which the power domain at
 * that level is an ancestor. It passes this information to the platform to
 * coordinate and return the target power state. If the requested state does
 * not match the target state, the request is denied.
 *
 * The 'state_info' is not modified.
 *
 * This function will only be invoked with data cache enabled and while
 * powering down a core.
 *****************************************************************************/
int psci_validate_state_coordination(unsigned int cpu_idx,
                                     unsigned int end_pwrlvl,
                                     psci_power_state_t *state_info)
{
        int rc = PSCI_E_SUCCESS;
        unsigned int lvl, parent_idx;
        unsigned int start_idx;
        unsigned int ncpus;
        plat_local_state_t target_state, *req_states;
        plat_local_state_t prev[PLAT_MAX_PWR_LVL];

        assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
        parent_idx = PER_CPU_BY_INDEX(psci_cpu_pd_nodes, cpu_idx)->parent_node;

        /*
         * Save a copy of the previous requested local power states and update
         * the new requested local power states.
         */
        psci_update_req_local_pwr_states(end_pwrlvl, cpu_idx, state_info, prev);

        for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
                /* Get the requested power states for this power level */
                start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
                req_states = psci_get_req_local_pwr_states(lvl, start_idx);

                /*
                 * Let the platform coordinate amongst the requested states at
                 * this power level and return the target local power state.
                 */
                ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
                target_state = plat_get_target_pwr_state(lvl,
                                                         req_states,
                                                         ncpus);

                /*
                 * Verify that the requested power state matches the target
                 * local power state.
                 */
                if (state_info->pwr_domain_state[lvl] != target_state) {
                        if (target_state == PSCI_LOCAL_STATE_RUN) {
                                rc = PSCI_E_DENIED;
                        } else {
                                rc = PSCI_E_INVALID_PARAMS;
                        }
                        goto exit;
                }

                parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
        }

        /*
         * Verify that the current core is the last running core at the
         * specified power level.
         */
        lvl = state_info->last_at_pwrlvl;
        if (!psci_is_last_cpu_to_idle_at_pwrlvl(cpu_idx, lvl)) {
                rc = PSCI_E_DENIED;
        }

exit:
        if (rc != PSCI_E_SUCCESS) {
                /* Restore the previous requested local power states. */
                psci_restore_req_local_pwr_states(cpu_idx, prev);
        }

        return rc;
}
#endif
/******************************************************************************
 * This function validates a suspend request by making sure that if a standby
 * state is requested then no power level is turned off and the highest power
 * level is placed in a standby/retention state.
 *
 * It also ensures that the state that level X will enter is not shallower than
 * the state that level X + 1 will enter.
 *
 * This validation is enabled only for DEBUG builds as the platform is expected
 * to perform these validations as well.
 *****************************************************************************/
int psci_validate_suspend_req(const psci_power_state_t *state_info,
                              unsigned int is_power_down_state)
{
        unsigned int max_off_lvl, target_lvl, max_retn_lvl;
        plat_local_state_t state;
        plat_local_state_type_t req_state_type, deepest_state_type;
        int i;

        /* Find the target suspend power level */
        target_lvl = psci_find_target_suspend_lvl(state_info);
        if (target_lvl == PSCI_INVALID_PWR_LVL) {
                return PSCI_E_INVALID_PARAMS;
        }

        /* All power domain levels are in a RUN state to begin with */
        deepest_state_type = STATE_TYPE_RUN;

        for (i = (int) target_lvl; i >= (int) PSCI_CPU_PWR_LVL; i--) {
                state = state_info->pwr_domain_state[i];
                req_state_type = find_local_state_type(state);

                /*
                 * While traversing from the highest power level to the lowest,
                 * the state requested for lower levels has to be the same or
                 * deeper i.e. equal to or greater than the state at the higher
                 * levels. If this condition is true, then the requested state
                 * becomes the deepest state encountered so far.
                 */
                if (req_state_type < deepest_state_type) {
                        return PSCI_E_INVALID_PARAMS;
                }
                deepest_state_type = req_state_type;
        }

        /* Find the highest off power level */
        max_off_lvl = psci_find_max_off_lvl(state_info);

        /* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
        max_retn_lvl = PSCI_INVALID_PWR_LVL;
        if (target_lvl != max_off_lvl) {
                max_retn_lvl = target_lvl;
        }

        /*
         * If this is not a request for a power down state then the max off
         * level has to be invalid and the max retention level has to be a
         * valid power level.
         */
        if ((is_power_down_state == 0U) &&
            ((max_off_lvl != PSCI_INVALID_PWR_LVL) ||
             (max_retn_lvl == PSCI_INVALID_PWR_LVL))) {
                return PSCI_E_INVALID_PARAMS;
        }

        return PSCI_E_SUCCESS;
}
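/*
 * Example outcomes (hypothetical PLAT_MAX_PWR_LVL = 1, PLAT_MAX_RET_STATE = 1,
 * PLAT_MAX_OFF_STATE = 2), with pwr_domain_state written as {level0, level1}:
 *
 *	{1, 1}, is_power_down_state == 0U -> PSCI_E_SUCCESS (pure retention)
 *	{2, 2}, is_power_down_state == 1U -> PSCI_E_SUCCESS (power down)
 *	{1, 2} -> PSCI_E_INVALID_PARAMS: level 0 requests retention, which is
 *		  shallower than the OFF state requested at level 1
 *	{2, 2}, is_power_down_state == 0U -> PSCI_E_INVALID_PARAMS: a standby
 *		  request must not turn any power level off
 */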
/******************************************************************************
 * This function finds the highest power level which will be powered down
 * amongst all the power levels specified in the 'state_info' structure.
 *****************************************************************************/
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
{
        int i;

        for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
                if (is_local_state_off(state_info->pwr_domain_state[i]) != 0) {
                        return (unsigned int) i;
                }
        }

        return PSCI_INVALID_PWR_LVL;
}

/******************************************************************************
 * This function finds the level of the highest power domain which will be
 * placed in a low power state during a suspend operation.
 *****************************************************************************/
unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
{
        int i;

        for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
                if (is_local_state_run(state_info->pwr_domain_state[i]) == 0) {
                        return (unsigned int) i;
                }
        }

        return PSCI_INVALID_PWR_LVL;
}

/*******************************************************************************
 * This function is passed the highest level in the topology tree that the
 * operation should be applied to and a list of node indexes. It picks up locks
 * from the node index list in order of increasing power domain level in the
 * range specified.
 ******************************************************************************/
void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
                                   const unsigned int *parent_nodes)
{
        unsigned int parent_idx;
        unsigned int level;

        /* No locking required for level 0. Hence start locking from level 1 */
        for (level = PSCI_CPU_PWR_LVL + 1U; level <= end_pwrlvl; level++) {
                parent_idx = parent_nodes[level - 1U];
                psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
        }
}

/*******************************************************************************
 * This function is passed the highest level in the topology tree that the
 * operation should be applied to and a list of node indexes. It releases the
 * locks in order of decreasing power domain level in the range specified.
 ******************************************************************************/
void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
                                   const unsigned int *parent_nodes)
{
        unsigned int parent_idx;
        unsigned int level;

        /* Unlock top down. No unlocking required for level 0. */
        for (level = end_pwrlvl; level >= (PSCI_CPU_PWR_LVL + 1U); level--) {
                parent_idx = parent_nodes[level - 1U];
                psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
        }
}
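/*
 * Typical usage pattern (see psci_warmboot_entrypoint() and the *_safe
 * helpers below): the caller collects its ancestor node indices first, then
 * brackets any state inspection or update with the acquire/release pair:
 *
 *	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
 *
 *	psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);
 *	psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);
 *	// ... inspect/update power domain state ...
 *	psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);
 */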
/*******************************************************************************
 * This function determines the full entrypoint information for the requested
 * PSCI entrypoint on power on/resume and returns it.
 ******************************************************************************/
#ifdef __aarch64__
static int psci_get_ns_ep_info(entry_point_info_t *ep,
                               uintptr_t entrypoint,
                               u_register_t context_id)
{
        u_register_t ep_attr, sctlr;
        unsigned int daif, ee, mode;
        u_register_t ns_scr_el3 = read_scr_el3();
        u_register_t ns_sctlr_el1 = read_sctlr_el1();

        sctlr = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
                read_sctlr_el2() : ns_sctlr_el1;
        ee = 0;

        ep_attr = NON_SECURE | EP_ST_DISABLE;
        if ((sctlr & SCTLR_EE_BIT) != 0U) {
                ep_attr |= EP_EE_BIG;
                ee = 1;
        }
        SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

        ep->pc = entrypoint;
        zeromem(&ep->args, sizeof(ep->args));
        ep->args.arg0 = context_id;

        /*
         * Figure out whether the cpu enters the non-secure address space
         * in aarch32 or aarch64
         */
        if ((ns_scr_el3 & SCR_RW_BIT) != 0U) {
                /*
                 * Check whether a Thumb entry point has been provided for an
                 * aarch64 EL
                 */
                if ((entrypoint & 0x1UL) != 0UL) {
                        return PSCI_E_INVALID_ADDRESS;
                }

                mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
                        MODE_EL2 : MODE_EL1;

                ep->spsr = SPSR_64((uint64_t)mode, MODE_SP_ELX,
                                   DISABLE_ALL_EXCEPTIONS);
        } else {
                mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
                        MODE32_hyp : MODE32_svc;

                /*
                 * TODO: Choose async. exception bits if HYP mode is not
                 * implemented according to the values of SCR.{AW, FW} bits
                 */
                daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;

                ep->spsr = SPSR_MODE32((uint64_t)mode, entrypoint & 0x1, ee,
                                       daif);
        }

        return PSCI_E_SUCCESS;
}
#else /* !__aarch64__ */
static int psci_get_ns_ep_info(entry_point_info_t *ep,
                               uintptr_t entrypoint,
                               u_register_t context_id)
{
        u_register_t ep_attr;
        unsigned int aif, ee, mode;
        u_register_t scr = read_scr();
        u_register_t ns_sctlr, sctlr;

        /* Switch to non secure state */
        write_scr(scr | SCR_NS_BIT);
        isb();
        ns_sctlr = read_sctlr();

        sctlr = ((scr & SCR_HCE_BIT) != 0U) ? read_hsctlr() : ns_sctlr;

        /* Return to original state */
        write_scr(scr);
        isb();
        ee = 0;

        ep_attr = NON_SECURE | EP_ST_DISABLE;
        if ((sctlr & SCTLR_EE_BIT) != 0U) {
                ep_attr |= EP_EE_BIG;
                ee = 1;
        }
        SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

        ep->pc = entrypoint;
        zeromem(&ep->args, sizeof(ep->args));
        ep->args.arg0 = context_id;

        mode = ((scr & SCR_HCE_BIT) != 0U) ? MODE32_hyp : MODE32_svc;

        /*
         * TODO: Choose async. exception bits if HYP mode is not
         * implemented according to the values of SCR.{AW, FW} bits
         */
        aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;

        ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);

        return PSCI_E_SUCCESS;
}
#endif /* __aarch64__ */

/*******************************************************************************
 * This function validates the entrypoint with the platform layer if the
 * appropriate pm_ops hook is exported by the platform and returns the
 * 'entry_point_info'.
 ******************************************************************************/
int psci_validate_entry_point(entry_point_info_t *ep,
                              uintptr_t entrypoint,
                              u_register_t context_id)
{
        int rc;

        /* Validate the entrypoint using platform psci_ops */
        if (psci_plat_pm_ops->validate_ns_entrypoint != NULL) {
                rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
                if (rc != PSCI_E_SUCCESS) {
                        return PSCI_E_INVALID_ADDRESS;
                }
        }

        /*
         * Verify and derive the re-entry information for the non-secure world
         * from the non-secure state from where this call originated.
         */
        rc = psci_get_ns_ep_info(ep, entrypoint, context_id);
        return rc;
}
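/*
 * Usage sketch (mirrors the CPU_ON and CPU_SUSPEND call paths): a caller
 * validates the caller-supplied entrypoint before committing to any state
 * change, and propagates the error code back to the normal world on failure:
 *
 *	entry_point_info_t ep;
 *	int rc = psci_validate_entry_point(&ep, entrypoint, context_id);
 *
 *	if (rc != PSCI_E_SUCCESS) {
 *		return rc;
 *	}
 *	// 'ep' now carries the PC, SPSR and arg0 for the warm boot path.
 */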
/*******************************************************************************
 * Generic handler which is called when a cpu is physically powered on. It
 * traverses the node information and finds the highest power level powered
 * off and performs generic, architectural, platform setup and state management
 * to power on that power level and power levels below it.
 * e.g. For a cpu that's been powered on, it will call the platform specific
 * code to enable the gic cpu interface and for a cluster it will enable
 * coherency at the interconnect level in addition to gic cpu interface.
 ******************************************************************************/
void psci_warmboot_entrypoint(unsigned int cpu_idx)
{
        unsigned int end_pwrlvl;
        unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
        psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };

        /*
         * Verify that we have been explicitly turned ON or resumed from
         * suspend.
         */
        if (psci_get_aff_info_state() == AFF_STATE_OFF) {
                ERROR("Unexpected affinity info state.\n");
                panic();
        }

        /*
         * Get the maximum power domain level to traverse to after this cpu
         * has been physically powered up.
         */
        end_pwrlvl = get_power_on_target_pwrlvl();

        /* Get the parent nodes */
        psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);

        /*
         * This function acquires the lock corresponding to each power level so
         * that by the time all locks are taken, a consistent snapshot of the
         * system topology is held and state management can be done safely.
         */
        psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);

        psci_get_target_local_pwr_states(cpu_idx, end_pwrlvl, &state_info);

#if ENABLE_PSCI_STAT
        plat_psci_stat_accounting_stop(&state_info);
#endif

        /*
         * This CPU could be resuming from suspend or it could have just been
         * turned on. To distinguish between these 2 cases, we examine the
         * affinity state of the CPU:
         *  - If the affinity state is ON_PENDING then it has just been
         *    turned on.
         *  - Else it is resuming from suspend.
         *
         * Depending on the type of warm reset identified, choose the right
         * set of power management handlers and perform the generic,
         * architecture and platform specific handling.
         */
        if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING) {
                psci_cpu_on_finish(cpu_idx, &state_info);
        } else {
                unsigned int max_off_lvl = psci_find_max_off_lvl(&state_info);

                assert(max_off_lvl != PSCI_INVALID_PWR_LVL);
                psci_cpu_suspend_to_powerdown_finish(cpu_idx, max_off_lvl,
                                                     &state_info, false);
        }

        /*
         * Caches and (importantly) coherency are on so we can rely on seeing
         * whatever the primary gave us without explicit cache maintenance.
         */
        entry_point_info_t *ep = get_cpu_data(warmboot_ep_info);
        cm_init_my_context(ep);

        /*
         * Generic management: Now we just need to retrieve the
         * information that we had stashed away during the cpu_on
         * call to set this cpu on its way.
         */
        cm_prepare_el3_exit_ns();

        /*
         * Set the requested and target state of this CPU and all the higher
         * power domains which are ancestors of this CPU to run.
         */
        psci_set_pwr_domains_to_run(cpu_idx, end_pwrlvl);

#if ENABLE_PSCI_STAT
        psci_stats_update_pwr_up(cpu_idx, end_pwrlvl, &state_info);
#endif

        /*
         * This loop releases the lock corresponding to each power level
         * in the reverse order to which they were acquired.
         */
        psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);
}
/*******************************************************************************
 * This function initializes the set of hooks that PSCI invokes as part of
 * power management operations. The power management hooks are expected to be
 * provided by the SPD, after it finishes all its initialization.
 ******************************************************************************/
void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
{
        assert(pm != NULL);
        psci_spd_pm = pm;

        if (pm->svc_migrate != NULL) {
                psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);
        }

        if (pm->svc_migrate_info != NULL) {
                psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
                        | define_psci_cap(PSCI_MIG_INFO_TYPE);
        }
}

/*******************************************************************************
 * This function invokes the migrate info hook in the spd_pm_ops. It performs
 * the necessary return value validation. If the Secure Payload is UP and
 * migrate capable, it returns the mpidr of the CPU on which the Secure Payload
 * is resident through the mpidr parameter. Else the value of the parameter on
 * return is undefined.
 ******************************************************************************/
int psci_spd_migrate_info(u_register_t *mpidr)
{
        int rc;

        if ((psci_spd_pm == NULL) || (psci_spd_pm->svc_migrate_info == NULL)) {
                return PSCI_E_NOT_SUPPORTED;
        }

        rc = psci_spd_pm->svc_migrate_info(mpidr);

        assert((rc == PSCI_TOS_UP_MIG_CAP) || (rc == PSCI_TOS_NOT_UP_MIG_CAP) ||
               (rc == PSCI_TOS_NOT_PRESENT_MP) || (rc == PSCI_E_NOT_SUPPORTED));

        return rc;
}
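/*
 * Registration sketch (hypothetical SPD, hypothetical handler names): an SPD
 * typically registers its hooks once its Secure Payload has initialised, e.g.
 * from its setup/init code:
 *
 *	static const spd_pm_ops_t spd_pm = {
 *		.svc_on = spd_cpu_on_handler,
 *		.svc_off = spd_cpu_off_handler,
 *		// ... remaining hooks as supported ...
 *	};
 *
 *	psci_register_spd_pm_hook(&spd_pm);
 *
 * Note that the migration-related PSCI capabilities are only advertised when
 * the corresponding hooks are non-NULL.
 */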
/*******************************************************************************
 * This function prints the state of all power domains present in the
 * system.
 ******************************************************************************/
void psci_print_power_domain_map(void)
{
#if LOG_LEVEL >= LOG_LEVEL_INFO
        unsigned int idx;
        plat_local_state_t state;
        plat_local_state_type_t state_type;

        /* This array maps to the plat_local_state_type_t enum defined above */
        static const char * const psci_state_type_str[] = {
                "ON",
                "RETENTION",
                "OFF",
        };

        INFO("PSCI Power Domain Map:\n");
        for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - psci_plat_core_count);
             idx++) {
                state_type = find_local_state_type(
                                psci_non_cpu_pd_nodes[idx].local_state);
                INFO("  Domain Node : Level %u, parent_node %u, State %s (0x%x)\n",
                     psci_non_cpu_pd_nodes[idx].level,
                     psci_non_cpu_pd_nodes[idx].parent_node,
                     psci_state_type_str[state_type],
                     psci_non_cpu_pd_nodes[idx].local_state);
        }

        for (idx = 0; idx < psci_plat_core_count; idx++) {
                state = psci_get_cpu_local_state_by_idx(idx);
                state_type = find_local_state_type(state);
                INFO("  CPU Node : MPID 0x%llx, parent_node %u, State %s (0x%x)\n",
                     (unsigned long long)
                        PER_CPU_BY_INDEX(psci_cpu_pd_nodes, idx)->mpidr,
                     PER_CPU_BY_INDEX(psci_cpu_pd_nodes, idx)->parent_node,
                     psci_state_type_str[state_type],
                     state);
        }
#endif
}

/******************************************************************************
 * Return whether any secondaries were powered up with a CPU_ON call. A CPU
 * that has ever been powered up would have set its MPIDR value to something
 * other than PSCI_INVALID_MPIDR. Note that the MPIDR isn't reset back to
 * PSCI_INVALID_MPIDR when a CPU is powered down later, so the return value is
 * meaningful only when called on the primary CPU during early boot.
 *****************************************************************************/
int psci_secondaries_brought_up(void)
{
        unsigned int idx, n_valid = 0U;

        for (idx = 0U; idx < PLATFORM_CORE_COUNT; idx++) {
                if (PER_CPU_BY_INDEX(psci_cpu_pd_nodes, idx)->mpidr !=
                    PSCI_INVALID_MPIDR) {
                        n_valid++;
                }
        }

        assert(n_valid > 0U);

        return (n_valid > 1U) ? 1 : 0;
}

static u_register_t call_cpu_pwr_dwn(unsigned int power_level)
{
        struct cpu_ops *ops = get_cpu_data(cpu_ops_ptr);

        /* Call the last available power down handler */
        if (power_level > CPU_MAX_PWR_DWN_OPS - 1) {
                power_level = CPU_MAX_PWR_DWN_OPS - 1;
        }

        assert(ops != NULL);
        assert(ops->pwr_dwn_ops[power_level] != NULL);

        return ops->pwr_dwn_ops[power_level]();
}

static void prepare_cpu_pwr_dwn(unsigned int power_level)
{
        /* Ignore the return value; all cpus should behave the same. */
        (void)call_cpu_pwr_dwn(power_level);
}

static void prepare_cpu_pwr_up(unsigned int power_level)
{
        /*
         * Call the pwr_dwn cpu hook again, indicating that an abandon
         * happened. The cpu driver is expected to clean up. We ask it to
         * return PABANDON_ACK to indicate that it has handled this. This is a
         * heuristic: the value has been chosen such that an unported CPU is
         * extremely unlikely to return this value.
         */
        u_register_t ret = call_cpu_pwr_dwn(power_level);

        /* Unreachable on AArch32, so cast down to calm the compiler. */
        if (ret != (u_register_t) PABANDON_ACK) {
                panic();
        }
}

/*******************************************************************************
 * Initiate the power down sequence by calling the power down operations
 * registered for this CPU.
 ******************************************************************************/
void psci_pwrdown_cpu_start(unsigned int power_level)
{
#if ENABLE_RUNTIME_INSTRUMENTATION
        /*
         * Flush the cache line so that the timestamp update is reflected in
         * memory even if the CPU powers down.
         */
        PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
                              RT_INSTR_ENTER_CFLUSH,
                              PMF_CACHE_MAINT);
#endif

#if !HW_ASSISTED_COHERENCY
        /*
         * Disable data caching and handle the stack's cache maintenance.
         *
         * If the core can't automatically exit coherency, the cpu driver
         * needs to flush caches and exit coherency. We can't do this with
         * data caches enabled. The cpu driver will decide which caches to
         * flush based on the power level.
         *
         * If automatic coherency management is possible, we can keep data
         * caches on until the very end and let hardware do cache maintenance.
         */
        psci_do_pwrdown_cache_maintenance();
#endif

        /* Initiate the power down sequence by calling into the cpu driver. */
        prepare_cpu_pwr_dwn(power_level);

#if ENABLE_RUNTIME_INSTRUMENTATION
        PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
                              RT_INSTR_EXIT_CFLUSH,
                              PMF_NO_CACHE_MAINT);
#endif
}
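/*
 * Sequence sketch: a suspend/off path is expected to pair
 * psci_pwrdown_cpu_start() with exactly one of the _end variants defined
 * below. Illustrative only; 'wakeup_possible' is a stand-in for the caller's
 * knowledge of whether the sleep may be abandoned:
 *
 *	psci_pwrdown_cpu_start(power_level);
 *	if (wakeup_possible) {
 *		psci_pwrdown_cpu_end_wakeup(power_level);	// may return
 *	} else {
 *		psci_pwrdown_cpu_end_terminal();		// never returns
 *	}
 */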
/*******************************************************************************
 * Finish a terminal power down sequence, ending with a wfi. If the CPU wakes
 * up instead, retry the sleep a number of times and panic if the wakeups
 * persist.
 ******************************************************************************/
void __dead2 psci_pwrdown_cpu_end_terminal(void)
{
#if ERRATA_SME_POWER_DOWN
        /*
         * Force SME off so that the power down request is not rejected.
         * Getting here is terminal so we don't care if we lose context
         * because of another wakeup.
         */
        if (is_feat_sme_supported()) {
                write_svcr(0);
                isb();
        }
#endif /* ERRATA_SME_POWER_DOWN */

        /* Ensure the write buffer is empty. */
        dsbsy();

        /*
         * Execute a wfi which, in most cases, will allow the power controller
         * to physically power down this cpu. Under some circumstances that
         * may be denied. Hopefully this is transient; retrying a few times
         * should power down.
         */
        for (int i = 0; i < 32; i++) {
                wfi();
        }

        /* Wake up wasn't transient. System is probably in a bad state. */
        ERROR("Could not power off CPU.\n");
        panic();
}

/*******************************************************************************
 * Finish a non-terminal power down sequence, ending with a wfi. In case of
 * wakeup, unwind any CPU specific actions and return.
 ******************************************************************************/
void psci_pwrdown_cpu_end_wakeup(unsigned int power_level)
{
        /* Ensure the write buffer is empty. */
        dsbsy();

        /*
         * Turn the core off. Usually, this will be terminal. In some
         * circumstances the powerdown will be denied and we'll need to
         * unwind.
         */
        wfi();

        /*
         * Waking up does not require hardware-assisted coherency, but every
         * core that can legitimately wake up here (because of errata or
         * pabandon) has it.
         */
#if !defined(__aarch64__) || !HW_ASSISTED_COHERENCY
        ERROR("AArch32 systems shouldn't wake up.\n");
        panic();
#endif
        /*
         * Begin unwinding. Everything can be shared with CPU_ON and co later,
         * except the CPU specific bit. Cores that have hardware-assisted
         * coherency should be able to handle this.
         */
        prepare_cpu_pwr_up(power_level);
}
/*******************************************************************************
 * This function invokes the callback 'stop_func()' with the 'mpidr' of each
 * online PE. The caller can pass a suitable method to stop a remote core.
 *
 * 'wait_ms' is the timeout value in milliseconds for the other cores to
 * transition to the power down state. Passing '0' makes it non-blocking.
 *
 * The function returns 'PSCI_E_DENIED' if some cores failed to stop within the
 * given timeout.
 ******************************************************************************/
int psci_stop_other_cores(unsigned int this_cpu_idx, unsigned int wait_ms,
                          void (*stop_func)(u_register_t mpidr))
{
        /* Invoke stop_func for each core */
        for (unsigned int idx = 0U; idx < psci_plat_core_count; idx++) {
                /* Skip the current CPU */
                if (idx == this_cpu_idx) {
                        continue;
                }

                /* Check if the CPU is ON */
                if (psci_get_aff_info_state_by_idx(idx) == AFF_STATE_ON) {
                        (*stop_func)(PER_CPU_BY_INDEX(psci_cpu_pd_nodes,
                                                      idx)->mpidr);
                }
        }

        /* Need to wait for the other cores to shut down */
        if (wait_ms != 0U) {
                for (uint32_t delay_ms = wait_ms; ((delay_ms != 0U) &&
                     (!psci_is_last_on_cpu(this_cpu_idx))); delay_ms--) {
                        mdelay(1U);
                }

                if (!psci_is_last_on_cpu(this_cpu_idx)) {
                        WARN("Failed to stop all cores!\n");
                        psci_print_power_domain_map();
                        return PSCI_E_DENIED;
                }
        }

        return PSCI_E_SUCCESS;
}
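/*
 * Usage sketch (hypothetical 'platform_stop_core' callback, e.g. one that
 * raises a platform-specific SGI or a power controller request against the
 * target MPIDR), waiting up to ~1s for the other cores to stop:
 *
 *	int rc = psci_stop_other_cores(plat_my_core_pos(), 1000U,
 *				       platform_stop_core);
 *	if (rc != PSCI_E_SUCCESS) {
 *		// some cores were still running when the timeout expired
 *	}
 */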
/*******************************************************************************
 * This function verifies that all the other cores in the system have been
 * turned OFF and that the current CPU is the last running CPU in the system.
 * Returns true if the current CPU is the last ON CPU, false otherwise.
 *
 * This API differs from psci_is_last_on_cpu as follows:
 * 1. PSCI states are locked.
 ******************************************************************************/
bool psci_is_last_on_cpu_safe(unsigned int this_core)
{
        unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
        bool is_last_on_cpu;

        psci_get_parent_pwr_domain_nodes(this_core, PLAT_MAX_PWR_LVL,
                                         parent_nodes);

        psci_acquire_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);

        is_last_on_cpu = psci_is_last_on_cpu(this_core);

        psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);

        return is_last_on_cpu;
}

/*******************************************************************************
 * This function verifies that all cores in the system have been turned ON.
 * Returns true if all CPUs are ON, false otherwise.
 *
 * This API differs from psci_are_all_cpus_on as follows:
 * 1. PSCI states are locked.
 ******************************************************************************/
bool psci_are_all_cpus_on_safe(unsigned int this_core)
{
        unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
        bool all_cpus_on;

        psci_get_parent_pwr_domain_nodes(this_core, PLAT_MAX_PWR_LVL,
                                         parent_nodes);

        psci_acquire_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);

        all_cpus_on = psci_are_all_cpus_on();

        psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);

        return all_cpus_on;
}

/*******************************************************************************
 * Safely counts the number of CPUs in the system that are currently in the ON
 * or ON_PENDING state.
 *
 * This function acquires and releases the necessary power domain locks to
 * ensure consistency of the CPU state information.
 *
 * @param this_core The index of the current core making the query.
 *
 * @return The number of CPUs currently in AFF_STATE_ON or AFF_STATE_ON_PENDING.
 ******************************************************************************/
unsigned int psci_num_cpus_running_on_safe(unsigned int this_core)
{
        unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
        unsigned int no_of_cpus;

        psci_get_parent_pwr_domain_nodes(this_core, PLAT_MAX_PWR_LVL,
                                         parent_nodes);

        psci_acquire_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);

        no_of_cpus = psci_num_cpus_running();

        psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);

        return no_of_cpus;
}