/*
 * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/utils.h>
#include <plat/common/platform.h>

#include "psci_private.h"

/*
 * SPD power management operations, expected to be supplied by the registered
 * SPD on successful SP initialization
 */
const spd_pm_ops_t *psci_spd_pm;

/*
 * PSCI requested local power state map. This array is used to store the local
 * power states requested by a CPU for power levels from level 1 to
 * PLAT_MAX_PWR_LVL. It does not store the requested local power state for power
 * level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a
 * CPU are the same.
 *
 * During state coordination, the platform is passed an array containing the
 * local states requested for a particular non cpu power domain by each cpu
 * within the domain.
 *
 * TODO: Dense packing of the requested states will cause cache thrashing
 * when multiple power domains write to it. If we allocate the requested
 * states at each power level in a cache-line aligned per-domain memory,
 * the cache thrashing can be avoided.
 */
static plat_local_state_t
	psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT];

unsigned int psci_plat_core_count;
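/*
 * Illustrative note: since level 0 is not stored, the state requested by cpu
 * 'c' for power level 'lvl' lives at psci_req_local_pwr_states[lvl - 1U][c].
 * For example, on a hypothetical platform with PLAT_MAX_PWR_LVL == 2, the
 * cluster-level (level 1) state requested by cpu 3 would be:
 *
 *	plat_local_state_t s = psci_req_local_pwr_states[0][3];
 *
 * The accessors psci_set_req_local_pwr_state() and
 * psci_get_req_local_pwr_states() further below encapsulate this mapping.
 */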
/*******************************************************************************
 * Arrays that hold the platform's power domain tree information for state
 * management of power domains.
 * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain
 * which is an ancestor of a CPU power domain.
 * Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain
 ******************************************************************************/
non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]
#if USE_COHERENT_MEM
__section("tzfw_coherent_mem")
#endif
;

/* Lock for PSCI state coordination */
DEFINE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);

cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * Pointer to functions exported by the platform to complete power mgmt. ops
 ******************************************************************************/
const plat_psci_ops_t *psci_plat_pm_ops;

/******************************************************************************
 * Check that the maximum power level supported by the platform makes sense
 *****************************************************************************/
CASSERT((PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL) &&
	(PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL),
	assert_platform_max_pwrlvl_check);

/*
 * The plat_local_state used by the platform is one of these types: RUN,
 * RETENTION and OFF. The platform can define further sub-states for each type
 * apart from RUN. This categorization is done to verify the sanity of the
 * psci_power_state passed by the platform and to print debug information. The
 * categorization is done on the basis of the following conditions:
 *
 * 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN.
 *
 * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is
 *    STATE_TYPE_RETN.
 *
 * 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is
 *    STATE_TYPE_OFF.
 */
typedef enum plat_local_state_type {
	STATE_TYPE_RUN = 0,
	STATE_TYPE_RETN,
	STATE_TYPE_OFF
} plat_local_state_type_t;

/* Function used to categorize plat_local_state. */
static plat_local_state_type_t find_local_state_type(plat_local_state_t state)
{
	if (state != 0U) {
		if (state > PLAT_MAX_RET_STATE) {
			return STATE_TYPE_OFF;
		} else {
			return STATE_TYPE_RETN;
		}
	} else {
		return STATE_TYPE_RUN;
	}
}
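/*
 * Worked example (hypothetical limits): on a platform that defines
 * PLAT_MAX_RET_STATE == 1 and PLAT_MAX_OFF_STATE == 2, the categorization
 * above yields:
 *
 *	find_local_state_type(0U) == STATE_TYPE_RUN
 *	find_local_state_type(1U) == STATE_TYPE_RETN
 *	find_local_state_type(2U) == STATE_TYPE_OFF
 *
 * A platform with retention sub-states, e.g. PLAT_MAX_RET_STATE == 2, would
 * map both 1 and 2 to STATE_TYPE_RETN and anything deeper to STATE_TYPE_OFF.
 */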
/******************************************************************************
 * Check that the maximum retention level supported by the platform is less
 * than the maximum off level.
 *****************************************************************************/
CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE,
	assert_platform_max_off_and_retn_state_check);

/******************************************************************************
 * This function ensures that the power state parameter in a CPU_SUSPEND request
 * is valid. If so, it returns the requested states for each power level.
 *****************************************************************************/
int psci_validate_power_state(unsigned int power_state,
			      psci_power_state_t *state_info)
{
	/* Check SBZ bits in power state are zero */
	if (psci_check_power_state(power_state) != 0U)
		return PSCI_E_INVALID_PARAMS;

	assert(psci_plat_pm_ops->validate_power_state != NULL);

	/* Validate the power_state using platform pm_ops */
	return psci_plat_pm_ops->validate_power_state(power_state, state_info);
}

/******************************************************************************
 * This function retrieves the `psci_power_state_t` for system suspend from
 * the platform.
 *****************************************************************************/
void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
{
	/*
	 * Assert that the required pm_ops hook is implemented to ensure that
	 * the capability detected during psci_setup() is valid.
	 */
	assert(psci_plat_pm_ops->get_sys_suspend_power_state != NULL);

	/*
	 * Query the platform for the power_state required for system suspend
	 */
	psci_plat_pm_ops->get_sys_suspend_power_state(state_info);
}
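/*
 * Illustrative sketch (not part of this file): a minimal platform
 * 'validate_power_state' hook, assuming the generic pstate helpers from
 * psci.h and a platform with a single retention and a single off state,
 * might look like:
 *
 *	static int plat_validate_power_state(unsigned int power_state,
 *					     psci_power_state_t *req_state)
 *	{
 *		if (psci_get_pstate_id(power_state) != 0U)
 *			return PSCI_E_INVALID_PARAMS;
 *
 *		if (psci_get_pstate_type(power_state) == PSTATE_TYPE_STANDBY)
 *			req_state->pwr_domain_state[PSCI_CPU_PWR_LVL] =
 *							PLAT_MAX_RET_STATE;
 *		else
 *			req_state->pwr_domain_state[PSCI_CPU_PWR_LVL] =
 *							PLAT_MAX_OFF_STATE;
 *
 *		return PSCI_E_SUCCESS;
 *	}
 *
 * Real platforms decode their own power_state encoding and may populate
 * several levels of req_state.
 */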
/*******************************************************************************
 * This function verifies that all the other cores in the system have been
 * turned OFF and the current CPU is the last running CPU in the system.
 * Returns 1 (true) if the current CPU is the last ON CPU or 0 (false)
 * otherwise.
 ******************************************************************************/
unsigned int psci_is_last_on_cpu(void)
{
	unsigned int cpu_idx, my_idx = plat_my_core_pos();

	for (cpu_idx = 0; cpu_idx < psci_plat_core_count; cpu_idx++) {
		if (cpu_idx == my_idx) {
			assert(psci_get_aff_info_state() == AFF_STATE_ON);
			continue;
		}

		if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF)
			return 0;
	}

	return 1;
}

/*******************************************************************************
 * Routine to return the maximum power level to traverse to after a cpu has
 * been physically powered up. It is expected to be called immediately after
 * reset from assembler code.
 ******************************************************************************/
static unsigned int get_power_on_target_pwrlvl(void)
{
	unsigned int pwrlvl;

	/*
	 * Assume that this cpu was suspended and retrieve its target power
	 * level. If it is invalid then it could only have been turned off
	 * earlier. PLAT_MAX_PWR_LVL will be the highest power level a
	 * cpu can be turned off to.
	 */
	pwrlvl = psci_get_suspend_pwrlvl();
	if (pwrlvl == PSCI_INVALID_PWR_LVL)
		pwrlvl = PLAT_MAX_PWR_LVL;
	assert(pwrlvl < PSCI_INVALID_PWR_LVL);
	return pwrlvl;
}

/******************************************************************************
 * Helper function to update the requested local power state array. This array
 * does not store the requested state for the CPU power level. Hence an
 * assertion is added to prevent us from accessing the CPU power level.
 *****************************************************************************/
static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
					 unsigned int cpu_idx,
					 plat_local_state_t req_pwr_state)
{
	assert(pwrlvl > PSCI_CPU_PWR_LVL);
	if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) &&
			(cpu_idx < psci_plat_core_count)) {
		psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx] = req_pwr_state;
	}
}

/******************************************************************************
 * This function initializes the psci_req_local_pwr_states.
 *****************************************************************************/
void __init psci_init_req_local_pwr_states(void)
{
	/* Initialize the requested state of all non CPU power domains as OFF */
	unsigned int pwrlvl;
	unsigned int core;

	for (pwrlvl = 0U; pwrlvl < PLAT_MAX_PWR_LVL; pwrlvl++) {
		for (core = 0; core < psci_plat_core_count; core++) {
			psci_req_local_pwr_states[pwrlvl][core] =
				PLAT_MAX_OFF_STATE;
		}
	}
}

/******************************************************************************
 * Helper function to return a reference to an array containing the local power
 * states requested by each cpu for a power domain at 'pwrlvl'. The size of the
 * array will be the number of cpu power domains of which this power domain is
 * an ancestor. These requested states will be used to determine a suitable
 * target state for this power domain during psci state coordination. An
 * assertion is added to prevent us from accessing the CPU power level.
 *****************************************************************************/
static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
							 unsigned int cpu_idx)
{
	assert(pwrlvl > PSCI_CPU_PWR_LVL);

	if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) &&
			(cpu_idx < psci_plat_core_count)) {
		return &psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx];
	} else {
		return NULL;
	}
}
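/*
 * Illustrative note: psci_do_state_coordination() below hands the platform a
 * contiguous slice of this per-level array. For a power domain at 'lvl' whose
 * first descendant cpu has index 'cpu_start_idx' and which spans 'ncpus'
 * cpus, the slice is obtained as:
 *
 *	plat_local_state_t *req =
 *		psci_get_req_local_pwr_states(lvl, cpu_start_idx);
 *	// req[0] .. req[ncpus - 1U] hold the states requested by those cpus
 *
 * This works because the cpus under a given power domain occupy consecutive
 * indexes in the per-level array.
 */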
/*
 * psci_non_cpu_pd_nodes can be placed either in normal memory or coherent
 * memory.
 *
 * With !USE_COHERENT_MEM, psci_non_cpu_pd_nodes is placed in normal memory,
 * where it is accessed by both cached and non-cached participants. To serve
 * the common minimum, perform a cache flush before read and after write so
 * that non-cached participants operate on the latest data in main memory.
 *
 * When USE_COHERENT_MEM is used, psci_non_cpu_pd_nodes is placed in coherent
 * memory. With HW_ASSISTED_COHERENCY, all PSCI participants are cache-coherent.
 * In both cases, no cache operations are required.
 */

/*
 * Retrieve the local state of a non-CPU power domain node from a non-cached
 * CPU, after any required cache maintenance operation.
 */
static plat_local_state_t get_non_cpu_pd_node_local_state(
		unsigned int parent_idx)
{
#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	flush_dcache_range(
			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
	return psci_non_cpu_pd_nodes[parent_idx].local_state;
}

/*
 * Update the local state of a non-CPU power domain node from a cached CPU;
 * perform any required cache maintenance operation afterwards.
 */
static void set_non_cpu_pd_node_local_state(unsigned int parent_idx,
					    plat_local_state_t state)
{
	psci_non_cpu_pd_nodes[parent_idx].local_state = state;
#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	flush_dcache_range(
			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
}

/******************************************************************************
 * Helper function to return the current local power state of each power domain
 * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
 * function will be called after a cpu is powered on to find the local state
 * each power domain has emerged from.
 *****************************************************************************/
void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
				      psci_power_state_t *target_state)
{
	unsigned int parent_idx, lvl;
	plat_local_state_t *pd_state = target_state->pwr_domain_state;

	pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;

	/* Copy the local power state from node to state_info */
	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
		pd_state[lvl] = get_non_cpu_pd_node_local_state(parent_idx);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/* Set the higher power levels to RUN */
	for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
		target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
}

/******************************************************************************
 * Helper function to set the target local power state that each power domain
 * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
 * enter. This function will be called after coordination of requested power
 * states has been done for each power level.
 *****************************************************************************/
static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
					const psci_power_state_t *target_state)
{
	unsigned int parent_idx, lvl;
	const plat_local_state_t *pd_state = target_state->pwr_domain_state;

	psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);

	/*
	 * Need to flush as local_state might be accessed with Data Cache
	 * disabled during power on
	 */
	psci_flush_cpu_data(psci_svc_cpu_data.local_state);

	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;

	/* Copy the local_state from state_info */
	for (lvl = 1U; lvl <= end_pwrlvl; lvl++) {
		set_non_cpu_pd_node_local_state(parent_idx, pd_state[lvl]);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}
}
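/*
 * Worked example (hypothetical values): on a platform with
 * PLAT_MAX_PWR_LVL == 1 and PLAT_MAX_OFF_STATE == 2, a cpu waking from a
 * suspend that powered down both itself and its cluster would observe, after
 * psci_get_target_local_pwr_states(1U, &state_info):
 *
 *	state_info.pwr_domain_state[0] == 2	(cpu emerged from OFF)
 *	state_info.pwr_domain_state[1] == 2	(cluster emerged from OFF)
 *
 * Any levels above 'end_pwrlvl' are reported as PSCI_LOCAL_STATE_RUN.
 */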
/*******************************************************************************
 * PSCI helper function to get the parent nodes corresponding to a cpu_index.
 ******************************************************************************/
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
				      unsigned int end_lvl,
				      unsigned int *node_index)
{
	unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
	unsigned int i;
	unsigned int *node = node_index;

	for (i = PSCI_CPU_PWR_LVL + 1U; i <= end_lvl; i++) {
		*node = parent_node;
		node++;
		parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
	}
}
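/*
 * Illustrative note (hypothetical topology): on a two-cluster system with
 * PLAT_MAX_PWR_LVL == 2, calling
 *
 *	unsigned int nodes[PLAT_MAX_PWR_LVL] = {0};
 *	psci_get_parent_pwr_domain_nodes(cpu_idx, 2U, nodes);
 *
 * fills nodes[0] with the psci_non_cpu_pd_nodes index of the cpu's cluster
 * (level 1) and nodes[1] with that of the system domain (level 2). These are
 * the indexes later consumed by psci_acquire_pwr_domain_locks() and
 * psci_release_pwr_domain_locks() further below.
 */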
/******************************************************************************
 * This function is invoked post CPU power up and initialization. It sets the
 * affinity info state, target power state and requested power state for the
 * current CPU and all its ancestor power domains to RUN.
 *****************************************************************************/
void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl)
{
	unsigned int parent_idx, cpu_idx = plat_my_core_pos(), lvl;
	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

	/* Reset the local_state to RUN for the non cpu power domains. */
	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
		set_non_cpu_pd_node_local_state(parent_idx,
				PSCI_LOCAL_STATE_RUN);
		psci_set_req_local_pwr_state(lvl,
					     cpu_idx,
					     PSCI_LOCAL_STATE_RUN);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/* Set the affinity info state to ON */
	psci_set_aff_info_state(AFF_STATE_ON);

	psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
	psci_flush_cpu_data(psci_svc_cpu_data);
}

/******************************************************************************
 * This function is passed the local power states requested for each power
 * domain (state_info) between the current CPU domain and its ancestors until
 * the target power level (end_pwrlvl). It updates the array of requested power
 * states with this information.
 *
 * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
 * retrieves the states requested by all the cpus of which the power domain at
 * that level is an ancestor. It passes this information to the platform to
 * coordinate and return the target power state. If the target state for a level
 * is RUN then subsequent levels are not considered. At the CPU level, state
 * coordination is not required. Hence, the requested and the target states are
 * the same.
 *
 * The 'state_info' is updated with the target state for each level between the
 * CPU and the 'end_pwrlvl' and returned to the caller.
 *
 * This function will only be invoked with data cache enabled and while
 * powering down a core.
 *****************************************************************************/
void psci_do_state_coordination(unsigned int end_pwrlvl,
				psci_power_state_t *state_info)
{
	unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
	unsigned int start_idx;
	unsigned int ncpus;
	plat_local_state_t target_state, *req_states;

	assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

	/*
	 * For level 0, the requested state will be equivalent to the target
	 * state.
	 */
	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {

		/* First update the requested power state */
		psci_set_req_local_pwr_state(lvl, cpu_idx,
					     state_info->pwr_domain_state[lvl]);

		/* Get the requested power states for this power level */
		start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
		req_states = psci_get_req_local_pwr_states(lvl, start_idx);

		/*
		 * Let the platform coordinate amongst the requested states at
		 * this power level and return the target local power state.
		 */
		ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
		target_state = plat_get_target_pwr_state(lvl,
							 req_states,
							 ncpus);

		state_info->pwr_domain_state[lvl] = target_state;

		/* Break early if the negotiated target power state is RUN */
		if (is_local_state_run(state_info->pwr_domain_state[lvl]) != 0)
			break;

		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/*
	 * This is for cases when we break out of the above loop early because
	 * the target power state is RUN at a power level < end_pwrlvl.
	 * We update the requested power state from state_info and then
	 * set the target state as RUN.
	 */
	for (lvl = lvl + 1U; lvl <= end_pwrlvl; lvl++) {
		psci_set_req_local_pwr_state(lvl, cpu_idx,
					     state_info->pwr_domain_state[lvl]);
		state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
	}

	/* Update the target state in the power domain nodes */
	psci_set_target_local_pwr_states(end_pwrlvl, state_info);
}
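/*
 * Worked example (hypothetical): consider a cluster (level 1) with two cpus,
 * PLAT_MAX_RET_STATE == 1 and PLAT_MAX_OFF_STATE == 2. If cpu 0 suspends
 * requesting cluster OFF while cpu 1 is still running, the requested states
 * passed to the platform for level 1 are {2, 0}. The default
 * plat_get_target_pwr_state() implementation coordinates by picking the
 * shallowest requested state, so the negotiated cluster target is RUN and the
 * first loop above breaks early; only cpu 0's own level 0 state takes effect.
 * Once cpu 1 also requests cluster OFF, the requested states become {2, 2}
 * and the coordinated cluster target deepens to OFF.
 */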
/******************************************************************************
 * This function validates a suspend request by making sure that if a standby
 * state is requested then no power level is turned off and the highest power
 * level is placed in a standby/retention state.
 *
 * It also ensures that the state that level X will enter is not shallower than
 * the state that level X + 1 will enter.
 *
 * This validation will be enabled only for DEBUG builds as the platform is
 * expected to perform these validations as well.
 *****************************************************************************/
int psci_validate_suspend_req(const psci_power_state_t *state_info,
			      unsigned int is_power_down_state)
{
	unsigned int max_off_lvl, target_lvl, max_retn_lvl;
	plat_local_state_t state;
	plat_local_state_type_t req_state_type, deepest_state_type;
	int i;

	/* Find the target suspend power level */
	target_lvl = psci_find_target_suspend_lvl(state_info);
	if (target_lvl == PSCI_INVALID_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* All power domain levels are in a RUN state to begin with */
	deepest_state_type = STATE_TYPE_RUN;

	for (i = (int) target_lvl; i >= (int) PSCI_CPU_PWR_LVL; i--) {
		state = state_info->pwr_domain_state[i];
		req_state_type = find_local_state_type(state);

		/*
		 * While traversing from the highest power level to the lowest,
		 * the state requested for lower levels has to be the same or
		 * deeper i.e. equal to or greater than the state at the higher
		 * levels. If this condition is true, then the requested state
		 * becomes the deepest state encountered so far.
		 */
		if (req_state_type < deepest_state_type)
			return PSCI_E_INVALID_PARAMS;
		deepest_state_type = req_state_type;
	}

	/* Find the highest off power level */
	max_off_lvl = psci_find_max_off_lvl(state_info);

	/* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
	max_retn_lvl = PSCI_INVALID_PWR_LVL;
	if (target_lvl != max_off_lvl)
		max_retn_lvl = target_lvl;

	/*
	 * If this is not a request for a power down state then max off level
	 * has to be invalid and max retention level has to be a valid power
	 * level.
	 */
	if ((is_power_down_state == 0U) &&
			((max_off_lvl != PSCI_INVALID_PWR_LVL) ||
			 (max_retn_lvl == PSCI_INVALID_PWR_LVL)))
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}
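/*
 * Illustrative examples (hypothetical, two power levels,
 * PLAT_MAX_RET_STATE == 1, PLAT_MAX_OFF_STATE == 2):
 *
 *	pwr_domain_state = {2, 2}	valid power-down request: cpu
 *					(level 0) and cluster (level 1) OFF.
 *	pwr_domain_state = {2, 1}	valid: cpu OFF, cluster in retention;
 *					the lower level is deeper.
 *	pwr_domain_state = {1, 2}	invalid: level 0 would be shallower
 *					than level 1.
 *	pwr_domain_state = {1, 1}	valid standby request: no level is
 *					OFF and the target level is in
 *					retention.
 */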
/******************************************************************************
 * This function finds the highest power level which will be powered down
 * amongst all the power levels specified in the 'state_info' structure
 *****************************************************************************/
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
{
	int i;

	for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
		if (is_local_state_off(state_info->pwr_domain_state[i]) != 0)
			return (unsigned int) i;
	}

	return PSCI_INVALID_PWR_LVL;
}

/******************************************************************************
 * This function finds the level of the highest power domain which will be
 * placed in a low power state during a suspend operation.
 *****************************************************************************/
unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
{
	int i;

	for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
		if (is_local_state_run(state_info->pwr_domain_state[i]) == 0)
			return (unsigned int) i;
	}

	return PSCI_INVALID_PWR_LVL;
}

/*******************************************************************************
 * This function is passed the highest level in the topology tree that the
 * operation should be applied to and a list of node indexes. It picks up locks
 * from the node index list in order of increasing power domain level in the
 * range specified.
 ******************************************************************************/
void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
				   const unsigned int *parent_nodes)
{
	unsigned int parent_idx;
	unsigned int level;

	/* No locking required for level 0. Hence start locking from level 1 */
	for (level = PSCI_CPU_PWR_LVL + 1U; level <= end_pwrlvl; level++) {
		parent_idx = parent_nodes[level - 1U];
		psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
	}
}

/*******************************************************************************
 * This function is passed the highest level in the topology tree that the
 * operation should be applied to and a list of node indexes. It releases the
 * locks in order of decreasing power domain level in the range specified.
 ******************************************************************************/
void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
				   const unsigned int *parent_nodes)
{
	unsigned int parent_idx;
	unsigned int level;

	/* Unlock top down. No unlocking required for level 0. */
	for (level = end_pwrlvl; level >= PSCI_CPU_PWR_LVL + 1U; level--) {
		parent_idx = parent_nodes[level - 1U];
		psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
	}
}

/*******************************************************************************
 * Simple routine to determine whether an mpidr is valid or not.
 ******************************************************************************/
int psci_validate_mpidr(u_register_t mpidr)
{
	if (plat_core_pos_by_mpidr(mpidr) < 0)
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}
/*******************************************************************************
 * This function determines the full entrypoint information for the requested
 * PSCI entrypoint on power on/resume and returns it.
 ******************************************************************************/
#ifdef __aarch64__
static int psci_get_ns_ep_info(entry_point_info_t *ep,
			       uintptr_t entrypoint,
			       u_register_t context_id)
{
	u_register_t ep_attr, sctlr;
	unsigned int daif, ee, mode;
	u_register_t ns_scr_el3 = read_scr_el3();
	u_register_t ns_sctlr_el1 = read_sctlr_el1();

	sctlr = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
		read_sctlr_el2() : ns_sctlr_el1;
	ee = 0;

	ep_attr = NON_SECURE | EP_ST_DISABLE;
	if ((sctlr & SCTLR_EE_BIT) != 0U) {
		ep_attr |= EP_EE_BIG;
		ee = 1;
	}
	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

	ep->pc = entrypoint;
	zeromem(&ep->args, sizeof(ep->args));
	ep->args.arg0 = context_id;

	/*
	 * Figure out whether the cpu enters the non-secure address space
	 * in aarch32 or aarch64
	 */
	if ((ns_scr_el3 & SCR_RW_BIT) != 0U) {

		/*
		 * Check whether a Thumb entry point has been provided for an
		 * aarch64 EL
		 */
		if ((entrypoint & 0x1UL) != 0UL)
			return PSCI_E_INVALID_ADDRESS;

		mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ? MODE_EL2 : MODE_EL1;

		ep->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
	} else {

		mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
			MODE32_hyp : MODE32_svc;

		/*
		 * TODO: Choose async. exception bits if HYP mode is not
		 * implemented according to the values of SCR.{AW, FW} bits
		 */
		daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;

		ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif);
	}

	return PSCI_E_SUCCESS;
}
#else /* !__aarch64__ */
static int psci_get_ns_ep_info(entry_point_info_t *ep,
			       uintptr_t entrypoint,
			       u_register_t context_id)
{
	u_register_t ep_attr;
	unsigned int aif, ee, mode;
	u_register_t scr = read_scr();
	u_register_t ns_sctlr, sctlr;

	/* Switch to non secure state */
	write_scr(scr | SCR_NS_BIT);
	isb();
	ns_sctlr = read_sctlr();

	sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;

	/* Return to original state */
	write_scr(scr);
	isb();
	ee = 0;

	ep_attr = NON_SECURE | EP_ST_DISABLE;
	if (sctlr & SCTLR_EE_BIT) {
		ep_attr |= EP_EE_BIG;
		ee = 1;
	}
	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

	ep->pc = entrypoint;
	zeromem(&ep->args, sizeof(ep->args));
	ep->args.arg0 = context_id;

	mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;

	/*
	 * TODO: Choose async. exception bits if HYP mode is not
	 * implemented according to the values of SCR.{AW, FW} bits
	 */
	aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;

	ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);

	return PSCI_E_SUCCESS;
}

#endif /* __aarch64__ */
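/*
 * Illustrative note (not normative): for an AArch64 caller with EL2 present
 * (SCR_EL3.HCE set) and SCR_EL3.RW set, the code above produces
 *
 *	ep->spsr = SPSR_64(MODE_EL2, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
 *
 * i.e. the powered-on cpu enters the non-secure world at EL2 with all
 * asynchronous exceptions masked, while an odd (Thumb) entrypoint address is
 * rejected with PSCI_E_INVALID_ADDRESS because AArch64 has no Thumb state.
 */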
/*******************************************************************************
 * This function validates the entrypoint with the platform layer if the
 * appropriate pm_ops hook is exported by the platform and returns the
 * 'entry_point_info'.
 ******************************************************************************/
int psci_validate_entry_point(entry_point_info_t *ep,
			      uintptr_t entrypoint,
			      u_register_t context_id)
{
	int rc;

	/* Validate the entrypoint using platform psci_ops */
	if (psci_plat_pm_ops->validate_ns_entrypoint != NULL) {
		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
		if (rc != PSCI_E_SUCCESS)
			return PSCI_E_INVALID_ADDRESS;
	}

	/*
	 * Verify and derive the re-entry information for
	 * the non-secure world from the non-secure state from
	 * where this call originated.
	 */
	rc = psci_get_ns_ep_info(ep, entrypoint, context_id);
	return rc;
}

/*******************************************************************************
 * Generic handler which is called when a cpu is physically powered on. It
 * traverses the node information and finds the highest power level powered
 * off and performs generic, architectural, platform setup and state management
 * to power on that power level and power levels below it.
 * e.g. For a cpu that's been powered on, it will call the platform specific
 * code to enable the gic cpu interface and for a cluster it will enable
 * coherency at the interconnect level in addition to gic cpu interface.
 ******************************************************************************/
void psci_warmboot_entrypoint(void)
{
	unsigned int end_pwrlvl;
	unsigned int cpu_idx = plat_my_core_pos();
	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };

	/*
	 * Verify that we have been explicitly turned ON or resumed from
	 * suspend.
	 */
	if (psci_get_aff_info_state() == AFF_STATE_OFF) {
		ERROR("Unexpected affinity info state.\n");
		panic();
	}

	/*
	 * Get the maximum power domain level to traverse to after this cpu
	 * has been physically powered up.
	 */
	end_pwrlvl = get_power_on_target_pwrlvl();

	/* Get the parent nodes */
	psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);

	/*
	 * This function acquires the lock corresponding to each power level so
	 * that by the time all locks are taken, the system topology has been
	 * snapshotted and state management can be done safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);

	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_stop(&state_info);
#endif

	/*
	 * This CPU could be resuming from suspend or it could have just been
	 * turned on. To distinguish between these 2 cases, we examine the
	 * affinity state of the CPU:
	 *  - If the affinity state is ON_PENDING then it has just been
	 *    turned on.
	 *  - Else it is resuming from suspend.
	 *
	 * Depending on the type of warm reset identified, choose the right set
	 * of power management handlers and perform the generic, architecture
	 * and platform specific handling.
	 */
	if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING)
		psci_cpu_on_finish(cpu_idx, &state_info);
	else
		psci_cpu_suspend_finish(cpu_idx, &state_info);

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domains which are ancestors of this CPU to run.
	 */
	psci_set_pwr_domains_to_run(end_pwrlvl);

#if ENABLE_PSCI_STAT
	/*
	 * Update PSCI stats.
	 * Caches are off when writing stats data on the power down path.
	 * Since caches are now enabled, it's necessary to do cache
	 * maintenance before reading that same data.
	 */
	psci_stats_update_pwr_up(end_pwrlvl, &state_info);
#endif

	/*
	 * This loop releases the lock corresponding to each power level
	 * in the reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);
}
/*******************************************************************************
 * This function initializes the set of hooks that PSCI invokes as part of power
 * management operation. The power management hooks are expected to be provided
 * by the SPD, after it finishes all its initialization
 ******************************************************************************/
void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
{
	assert(pm != NULL);
	psci_spd_pm = pm;

	if (pm->svc_migrate != NULL)
		psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);

	if (pm->svc_migrate_info != NULL)
		psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
				| define_psci_cap(PSCI_MIG_INFO_TYPE);
}

/*******************************************************************************
 * This function invokes the migrate info hook in the spd_pm_ops. It performs
 * the necessary return value validation. If the Secure Payload is UP and
 * migrate capable, it returns the mpidr of the CPU on which the Secure payload
 * is resident through the mpidr parameter. Else the value of the parameter on
 * return is undefined.
 ******************************************************************************/
int psci_spd_migrate_info(u_register_t *mpidr)
{
	int rc;

	if ((psci_spd_pm == NULL) || (psci_spd_pm->svc_migrate_info == NULL))
		return PSCI_E_NOT_SUPPORTED;

	rc = psci_spd_pm->svc_migrate_info(mpidr);

	assert((rc == PSCI_TOS_UP_MIG_CAP) || (rc == PSCI_TOS_NOT_UP_MIG_CAP) ||
	       (rc == PSCI_TOS_NOT_PRESENT_MP) || (rc == PSCI_E_NOT_SUPPORTED));

	return rc;
}
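/*
 * Usage sketch (illustrative): a MIGRATE_INFO_TYPE handler might consume the
 * validated return value like this:
 *
 *	u_register_t resident_mpidr;
 *	int type = psci_spd_migrate_info(&resident_mpidr);
 *
 *	if (type == PSCI_TOS_UP_MIG_CAP) {
 *		// UP and migrate capable: 'resident_mpidr' identifies the
 *		// cpu hosting the Trusted OS
 *	} else {
 *		// 'resident_mpidr' must not be relied upon
 *	}
 */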
/*******************************************************************************
 * This function prints the state of all power domains present in the
 * system
 ******************************************************************************/
void psci_print_power_domain_map(void)
{
#if LOG_LEVEL >= LOG_LEVEL_INFO
	unsigned int idx;
	plat_local_state_t state;
	plat_local_state_type_t state_type;

	/* This array maps to the PSCI_STATE_X definitions in psci.h */
	static const char * const psci_state_type_str[] = {
		"ON",
		"RETENTION",
		"OFF",
	};

	INFO("PSCI Power Domain Map:\n");
	for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - psci_plat_core_count);
			idx++) {
		state_type = find_local_state_type(
				psci_non_cpu_pd_nodes[idx].local_state);
		INFO("  Domain Node : Level %u, parent_node %d,"
				" State %s (0x%x)\n",
				psci_non_cpu_pd_nodes[idx].level,
				psci_non_cpu_pd_nodes[idx].parent_node,
				psci_state_type_str[state_type],
				psci_non_cpu_pd_nodes[idx].local_state);
	}

	for (idx = 0; idx < psci_plat_core_count; idx++) {
		state = psci_get_cpu_local_state_by_idx(idx);
		state_type = find_local_state_type(state);
		INFO("  CPU Node : MPID 0x%llx, parent_node %d,"
				" State %s (0x%x)\n",
				(unsigned long long)psci_cpu_pd_nodes[idx].mpidr,
				psci_cpu_pd_nodes[idx].parent_node,
				psci_state_type_str[state_type],
				psci_get_cpu_local_state_by_idx(idx));
	}
#endif
}

/******************************************************************************
 * Return whether any secondaries were powered up with CPU_ON call. A CPU that
 * has ever been powered up will have set its MPIDR value to something other
 * than PSCI_INVALID_MPIDR. Note that the MPIDR isn't reset back to
 * PSCI_INVALID_MPIDR when a CPU is powered down later, so the return value is
 * meaningful only when called on the primary CPU during early boot.
 *****************************************************************************/
int psci_secondaries_brought_up(void)
{
	unsigned int idx, n_valid = 0U;

	for (idx = 0U; idx < ARRAY_SIZE(psci_cpu_pd_nodes); idx++) {
		if (psci_cpu_pd_nodes[idx].mpidr != PSCI_INVALID_MPIDR)
			n_valid++;
	}

	assert(n_valid > 0U);

	return (n_valid > 1U) ? 1 : 0;
}

/*******************************************************************************
 * Initiate the power down sequence by calling the power down operations
 * registered for this CPU.
 ******************************************************************************/
void psci_do_pwrdown_sequence(unsigned int power_level)
{
#if HW_ASSISTED_COHERENCY
	/*
	 * With hardware-assisted coherency, the CPU drivers only initiate the
	 * power down sequence, without performing cache-maintenance operations
	 * in software. Data caches remain enabled both before and after this
	 * call.
	 */
	prepare_cpu_pwr_dwn(power_level);
#else
	/*
	 * Without hardware-assisted coherency, the CPU drivers disable data
	 * caches, then perform cache-maintenance operations in software.
	 *
	 * This also calls prepare_cpu_pwr_dwn() to initiate the power down
	 * sequence, but that function will return with data caches disabled.
	 * We must ensure that the stack memory is flushed out to memory before
	 * we start popping from it again.
	 */
	psci_do_pwrdown_cache_maintenance(power_level);
#endif
}