/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <debug.h>
#include <platform.h>
#include <string.h>
#include <utils.h>
#include "psci_private.h"

/*
 * SPD power management operations, expected to be supplied by the registered
 * SPD on successful SP initialization
 */
const spd_pm_ops_t *psci_spd_pm;

/*
 * PSCI requested local power state map. This array is used to store the local
 * power states requested by a CPU for power levels from level 1 to
 * PLAT_MAX_PWR_LVL. It does not store the requested local power state for power
 * level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a
 * CPU are the same.
 *
 * During state coordination, the platform is passed an array containing the
 * local states requested for a particular non-CPU power domain by each CPU
 * within the domain.
 *
 * TODO: Dense packing of the requested states will cause cache thrashing
 * when multiple power domains write to it. If we allocate the requested
 * states at each power level in cache-line aligned per-domain memory,
 * the cache thrashing can be avoided.
 */
static plat_local_state_t
        psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT];


/*******************************************************************************
 * Arrays that hold the platform's power domain tree information for state
 * management of power domains.
 * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain
 * which is an ancestor of a CPU power domain.
 * Each node in the array 'psci_cpu_pd_nodes' corresponds to a CPU power domain.
 ******************************************************************************/
non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]
#if USE_COHERENT_MEM
__section("tzfw_coherent_mem")
#endif
;

/* Lock for PSCI state coordination */
DEFINE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);

cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
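/*
 * Illustration of the power domain tree layout, for a hypothetical topology
 * with PLAT_MAX_PWR_LVL == 2, two clusters and two CPUs per cluster (the
 * indices below are examples only; the real layout is derived from the
 * platform's power domain tree description):
 *
 *        psci_non_cpu_pd_nodes[0]        system power domain, level 2
 *        psci_non_cpu_pd_nodes[1..2]     cluster power domains, level 1
 *        psci_cpu_pd_nodes[0..3]         CPU power domains, level 0
 *
 * Each node records the index of its parent in 'parent_node', so walking the
 * 'parent_node' links from a CPU node visits exactly one ancestor per power
 * level. The traversal loops throughout this file rely on that property.
 */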
/*******************************************************************************
 * Pointer to functions exported by the platform to complete power mgmt. ops
 ******************************************************************************/
const plat_psci_ops_t *psci_plat_pm_ops;

/******************************************************************************
 * Check that the maximum power level supported by the platform makes sense
 *****************************************************************************/
CASSERT(PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL && \
        PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL, \
        assert_platform_max_pwrlvl_check);

/*
 * The plat_local_state used by the platform is one of these types: RUN,
 * RETENTION and OFF. The platform can define further sub-states for each type
 * apart from RUN. This categorization is done to verify the sanity of the
 * psci_power_state passed by the platform and to print debug information. The
 * categorization is done on the basis of the following conditions:
 *
 * 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN.
 *
 * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is
 *    STATE_TYPE_RETN.
 *
 * 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is
 *    STATE_TYPE_OFF.
 */
typedef enum plat_local_state_type {
        STATE_TYPE_RUN = 0,
        STATE_TYPE_RETN,
        STATE_TYPE_OFF
} plat_local_state_type_t;

/* Function used to categorize plat_local_state. */
static plat_local_state_type_t find_local_state_type(plat_local_state_t state)
{
        if (state != 0U) {
                if (state > PLAT_MAX_RET_STATE) {
                        return STATE_TYPE_OFF;
                } else {
                        return STATE_TYPE_RETN;
                }
        } else {
                return STATE_TYPE_RUN;
        }
}
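/*
 * Worked example, assuming a platform that defines PLAT_MAX_RET_STATE == 2
 * and PLAT_MAX_OFF_STATE == 4 (these values are platform choices, used here
 * only for illustration):
 *
 *        find_local_state_type(0) == STATE_TYPE_RUN
 *        find_local_state_type(1) == STATE_TYPE_RETN  (a retention sub-state)
 *        find_local_state_type(2) == STATE_TYPE_RETN  (deepest retention)
 *        find_local_state_type(3) == STATE_TYPE_OFF   (a power-down sub-state)
 *        find_local_state_type(4) == STATE_TYPE_OFF   (deepest off state)
 */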
/******************************************************************************
 * Check that the maximum retention level supported by the platform is less
 * than the maximum off level.
 *****************************************************************************/
CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE, \
                assert_platform_max_off_and_retn_state_check);

/******************************************************************************
 * This function ensures that the power state parameter in a CPU_SUSPEND request
 * is valid. If so, it returns the requested states for each power level.
 *****************************************************************************/
int psci_validate_power_state(unsigned int power_state,
                              psci_power_state_t *state_info)
{
        /* Check that the SBZ (should-be-zero) bits in the power state are zero */
        if (psci_check_power_state(power_state))
                return PSCI_E_INVALID_PARAMS;

        assert(psci_plat_pm_ops->validate_power_state);

        /* Validate the power_state using platform pm_ops */
        return psci_plat_pm_ops->validate_power_state(power_state, state_info);
}

/******************************************************************************
 * This function retrieves the `psci_power_state_t` for system suspend from
 * the platform.
 *****************************************************************************/
void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
{
        /*
         * Assert that the required pm_ops hook is implemented to ensure that
         * the capability detected during psci_setup() is valid.
         */
        assert(psci_plat_pm_ops->get_sys_suspend_power_state);

        /*
         * Query the platform for the power_state required for system suspend
         */
        psci_plat_pm_ops->get_sys_suspend_power_state(state_info);
}

/*******************************************************************************
 * This function verifies that all the other cores in the system have been
 * turned OFF and that the current CPU is the last running CPU in the system.
 * Returns 1 (true) if the current CPU is the last ON CPU or 0 (false)
 * otherwise.
 ******************************************************************************/
unsigned int psci_is_last_on_cpu(void)
{
        unsigned int cpu_idx, my_idx = plat_my_core_pos();

        for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
                if (cpu_idx == my_idx) {
                        assert(psci_get_aff_info_state() == AFF_STATE_ON);
                        continue;
                }

                if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF)
                        return 0;
        }

        return 1;
}

/*******************************************************************************
 * Routine to return the maximum power level to traverse to after a cpu has
 * been physically powered up. It is expected to be called immediately after
 * reset from assembler code.
 ******************************************************************************/
static unsigned int get_power_on_target_pwrlvl(void)
{
        unsigned int pwrlvl;

        /*
         * Assume that this cpu was suspended and retrieve its target power
         * level. If it is invalid then it could only have been turned off
         * earlier. PLAT_MAX_PWR_LVL will be the highest power level a
         * cpu can be turned off to.
         */
        pwrlvl = psci_get_suspend_pwrlvl();
        if (pwrlvl == PSCI_INVALID_PWR_LVL)
                pwrlvl = PLAT_MAX_PWR_LVL;
        return pwrlvl;
}

/******************************************************************************
 * Helper function to update the requested local power state array. This array
 * does not store the requested state for the CPU power level. Hence an
 * assertion is added to prevent us from accessing the wrong index.
 *****************************************************************************/
static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
                                         unsigned int cpu_idx,
                                         plat_local_state_t req_pwr_state)
{
        /*
         * This should never happen, we have this here to avoid
         * "array subscript is above array bounds" errors in GCC.
         */
        assert(pwrlvl > PSCI_CPU_PWR_LVL);
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
        psci_req_local_pwr_states[pwrlvl - 1][cpu_idx] = req_pwr_state;
#pragma GCC diagnostic pop
}

/******************************************************************************
 * This function initializes the psci_req_local_pwr_states.
 *****************************************************************************/
void psci_init_req_local_pwr_states(void)
{
        /* Initialize the requested state of all non CPU power domains as OFF */
        memset(&psci_req_local_pwr_states, PLAT_MAX_OFF_STATE,
               sizeof(psci_req_local_pwr_states));
}

/******************************************************************************
 * Helper function to return a reference to an array containing the local power
 * states requested by each cpu for a power domain at 'pwrlvl'. The size of the
 * array will be the number of cpu power domains of which this power domain is
 * an ancestor. These requested states will be used to determine a suitable
 * target state for this power domain during psci state coordination. An
 * assertion is added to prevent us from accessing the CPU power level.
 *****************************************************************************/
static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
                                                         unsigned int cpu_idx)
{
        assert(pwrlvl > PSCI_CPU_PWR_LVL);

        return &psci_req_local_pwr_states[pwrlvl - 1][cpu_idx];
}
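/*
 * Indexing illustration for the two helpers above: the state requested by
 * CPU 'c' for power level 'l' (l >= 1) lives at
 * psci_req_local_pwr_states[l - 1][c]. For example, on a hypothetical
 * platform with PLAT_MAX_PWR_LVL == 2, the level-1 (cluster) state requested
 * by CPU 3 is psci_req_local_pwr_states[0][3], and
 * psci_get_req_local_pwr_states(2, 0) returns the row of level-2 (system)
 * requests starting at CPU 0. There is no row for level 0 because, at the
 * CPU level, the requested state is also the target state.
 */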
/*
 * psci_non_cpu_pd_nodes can be placed either in normal memory or coherent
 * memory.
 *
 * With !USE_COHERENT_MEM, psci_non_cpu_pd_nodes is placed in normal memory,
 * where it is accessed by both cached and non-cached participants. To serve
 * the common minimum, perform a cache flush before reads and after writes so
 * that non-cached participants operate on the latest data in main memory.
 *
 * When USE_COHERENT_MEM is used, psci_non_cpu_pd_nodes is placed in coherent
 * memory. With HW_ASSISTED_COHERENCY, all PSCI participants are
 * cache-coherent. In both cases, no cache operations are required.
 */

/*
 * Retrieve local state of non-CPU power domain node from a non-cached CPU,
 * after any required cache maintenance operation.
 */
static plat_local_state_t get_non_cpu_pd_node_local_state(
                unsigned int parent_idx)
{
#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY)
        flush_dcache_range(
                        (uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
                        sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
        return psci_non_cpu_pd_nodes[parent_idx].local_state;
}

/*
 * Update local state of non-CPU power domain node from a cached CPU; perform
 * any required cache maintenance operation afterwards.
 */
static void set_non_cpu_pd_node_local_state(unsigned int parent_idx,
                                            plat_local_state_t state)
{
        psci_non_cpu_pd_nodes[parent_idx].local_state = state;
#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY)
        flush_dcache_range(
                        (uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
                        sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
}

/******************************************************************************
 * Helper function to return the current local power state of each power domain
 * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
 * function will be called after a cpu is powered on to find the local state
 * each power domain has emerged from.
 *****************************************************************************/
void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
                                      psci_power_state_t *target_state)
{
        unsigned int parent_idx, lvl;
        plat_local_state_t *pd_state = target_state->pwr_domain_state;

        pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
        parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;

        /* Copy the local power state from node to state_info */
        for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
                pd_state[lvl] = get_non_cpu_pd_node_local_state(parent_idx);
                parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
        }

        /* Set the higher levels to RUN */
        for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
                target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
}

/******************************************************************************
 * Helper function to set the target local power state that each power domain
 * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
 * enter. This function will be called after coordination of requested power
 * states has been done for each power level.
 *****************************************************************************/
static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
                                        const psci_power_state_t *target_state)
{
        unsigned int parent_idx, lvl;
        const plat_local_state_t *pd_state = target_state->pwr_domain_state;

        psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);

        /*
         * Need to flush as local_state might be accessed with Data Cache
         * disabled during power on
         */
        psci_flush_cpu_data(psci_svc_cpu_data.local_state);

        parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;

        /* Copy the local_state from state_info */
        for (lvl = 1; lvl <= end_pwrlvl; lvl++) {
                set_non_cpu_pd_node_local_state(parent_idx, pd_state[lvl]);
                parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
        }
}
/*******************************************************************************
 * PSCI helper function to get the parent nodes corresponding to a cpu_index.
 ******************************************************************************/
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
                                      unsigned int end_lvl,
                                      unsigned int node_index[])
{
        unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
        unsigned int i;

        for (i = PSCI_CPU_PWR_LVL + 1; i <= end_lvl; i++) {
                *node_index++ = parent_node;
                parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
        }
}
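/*
 * Example (hypothetical indices, matching the two-cluster topology sketched
 * near the top of this file): for a CPU in the second cluster,
 * psci_get_parent_pwr_domain_nodes(cpu_idx, 2, nodes) fills nodes[0] with
 * the index of its cluster node and nodes[1] with the index of the system
 * node, i.e. one ancestor per power level from 1 to end_lvl.
 */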
/******************************************************************************
 * This function is invoked post CPU power up and initialization. It sets the
 * affinity info state, target power state and requested power state for the
 * current CPU and all its ancestor power domains to RUN.
 *****************************************************************************/
void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl)
{
        unsigned int parent_idx, cpu_idx = plat_my_core_pos(), lvl;
        parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

        /* Reset the local_state to RUN for the non cpu power domains. */
        for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
                set_non_cpu_pd_node_local_state(parent_idx,
                                PSCI_LOCAL_STATE_RUN);
                psci_set_req_local_pwr_state(lvl,
                                             cpu_idx,
                                             PSCI_LOCAL_STATE_RUN);
                parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
        }

        /* Set the affinity info state to ON */
        psci_set_aff_info_state(AFF_STATE_ON);

        psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
        psci_flush_cpu_data(psci_svc_cpu_data);
}

/******************************************************************************
 * This function is passed the local power states requested for each power
 * domain (state_info) between the current CPU domain and its ancestors up to
 * the target power level (end_pwrlvl). It updates the array of requested power
 * states with this information.
 *
 * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
 * retrieves the states requested by all the cpus of which the power domain at
 * that level is an ancestor. It passes this information to the platform to
 * coordinate and return the target power state. If the target state for a level
 * is RUN then subsequent levels are not considered. At the CPU level, state
 * coordination is not required. Hence, the requested and the target states are
 * the same.
 *
 * The 'state_info' is updated with the target state for each level between the
 * CPU and the 'end_pwrlvl' and returned to the caller.
 *
 * This function will only be invoked with data cache enabled and while
 * powering down a core.
 *****************************************************************************/
void psci_do_state_coordination(unsigned int end_pwrlvl,
                                psci_power_state_t *state_info)
{
        unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
        unsigned int start_idx, ncpus;
        plat_local_state_t target_state, *req_states;

        assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
        parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

        /*
         * For level 0, the requested state will be equivalent to the target
         * state.
         */
        for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {

                /* First update the requested power state */
                psci_set_req_local_pwr_state(lvl, cpu_idx,
                                             state_info->pwr_domain_state[lvl]);

                /* Get the requested power states for this power level */
                start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
                req_states = psci_get_req_local_pwr_states(lvl, start_idx);

                /*
                 * Let the platform coordinate amongst the requested states at
                 * this power level and return the target local power state.
                 */
                ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
                target_state = plat_get_target_pwr_state(lvl,
                                                         req_states,
                                                         ncpus);

                state_info->pwr_domain_state[lvl] = target_state;

                /* Break early if the negotiated target power state is RUN */
                if (is_local_state_run(state_info->pwr_domain_state[lvl]))
                        break;

                parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
        }

        /*
         * This is for cases when we break out of the above loop early because
         * the target power state is RUN at a power level < end_pwrlvl.
         * We update the requested power state from state_info and then
         * set the target state as RUN.
         */
        for (lvl = lvl + 1; lvl <= end_pwrlvl; lvl++) {
                psci_set_req_local_pwr_state(lvl, cpu_idx,
                                             state_info->pwr_domain_state[lvl]);
                state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
        }

        /* Update the target state in the power domain nodes */
        psci_set_target_local_pwr_states(end_pwrlvl, state_info);
}
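/*
 * Coordination example (hypothetical, assuming the default
 * plat_get_target_pwr_state() from the platform common code, which returns
 * the shallowest of the requested states): in a two-CPU cluster where CPU 0
 * requests OFF at level 1 while CPU 1 is still in RUN, the requested states
 * {OFF, RUN} coordinate to a target of RUN for the cluster. The loop above
 * then breaks early, so the cluster stays on even though CPU 0 itself powers
 * down.
 */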
/******************************************************************************
 * This function validates a suspend request by making sure that if a standby
 * state is requested then no power level is turned off and the highest power
 * level is placed in a standby/retention state.
 *
 * It also ensures that the state that level X will enter is not shallower than
 * the state that level X + 1 will enter.
 *
 * This validation will be enabled only for DEBUG builds as the platform is
 * expected to perform these validations as well.
 *****************************************************************************/
int psci_validate_suspend_req(const psci_power_state_t *state_info,
                              unsigned int is_power_down_state)
{
        unsigned int max_off_lvl, target_lvl, max_retn_lvl;
        plat_local_state_t state;
        plat_local_state_type_t req_state_type, deepest_state_type;
        int i;

        /* Find the target suspend power level */
        target_lvl = psci_find_target_suspend_lvl(state_info);
        if (target_lvl == PSCI_INVALID_PWR_LVL)
                return PSCI_E_INVALID_PARAMS;

        /* All power domain levels are in a RUN state to begin with */
        deepest_state_type = STATE_TYPE_RUN;

        for (i = target_lvl; i >= PSCI_CPU_PWR_LVL; i--) {
                state = state_info->pwr_domain_state[i];
                req_state_type = find_local_state_type(state);

                /*
                 * While traversing from the highest power level to the lowest,
                 * the state requested for lower levels has to be the same or
                 * deeper i.e. equal to or greater than the state at the higher
                 * levels. If this condition is true, then the requested state
                 * becomes the deepest state encountered so far.
                 */
                if (req_state_type < deepest_state_type)
                        return PSCI_E_INVALID_PARAMS;
                deepest_state_type = req_state_type;
        }

        /* Find the highest off power level */
        max_off_lvl = psci_find_max_off_lvl(state_info);

        /* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
        max_retn_lvl = PSCI_INVALID_PWR_LVL;
        if (target_lvl != max_off_lvl)
                max_retn_lvl = target_lvl;

        /*
         * If this is not a request for a power down state then the max off
         * level has to be invalid and the max retention level has to be a
         * valid power level.
         */
        if (!is_power_down_state && (max_off_lvl != PSCI_INVALID_PWR_LVL ||
                                     max_retn_lvl == PSCI_INVALID_PWR_LVL))
                return PSCI_E_INVALID_PARAMS;

        return PSCI_E_SUCCESS;
}
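/*
 * Example of the depth rule enforced above (hypothetical state values, with
 * larger numbers meaning deeper states): a request of
 * { CPU: OFF, cluster: RETENTION } is valid because the lower level is at
 * least as deep as its ancestor, whereas { CPU: RETENTION, cluster: OFF } is
 * rejected with PSCI_E_INVALID_PARAMS since a cluster cannot be powered off
 * while one of its CPUs only enters retention.
 */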
/******************************************************************************
 * This function finds the highest power level which will be powered down
 * amongst all the power levels specified in the 'state_info' structure.
 *****************************************************************************/
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
{
        int i;

        for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
                if (is_local_state_off(state_info->pwr_domain_state[i]))
                        return i;
        }

        return PSCI_INVALID_PWR_LVL;
}

/******************************************************************************
 * This function finds the level of the highest power domain which will be
 * placed in a low power state during a suspend operation.
 *****************************************************************************/
unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
{
        int i;

        for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
                if (!is_local_state_run(state_info->pwr_domain_state[i]))
                        return i;
        }

        return PSCI_INVALID_PWR_LVL;
}

/*******************************************************************************
 * This function is passed a cpu_index and the highest level in the topology
 * tree that the operation should be applied to. It picks up locks in order of
 * increasing power domain level in the range specified.
 ******************************************************************************/
void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
                                   unsigned int cpu_idx)
{
        unsigned int parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
        unsigned int level;

        /* No locking required for level 0. Hence start locking from level 1 */
        for (level = PSCI_CPU_PWR_LVL + 1; level <= end_pwrlvl; level++) {
                psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
                parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
        }
}

/*******************************************************************************
 * This function is passed a cpu_index and the highest level in the topology
 * tree that the operation should be applied to. It releases the locks in order
 * of decreasing power domain level in the range specified.
 ******************************************************************************/
void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
                                   unsigned int cpu_idx)
{
        unsigned int parent_idx, parent_nodes[PLAT_MAX_PWR_LVL] = {0};
        int level;

        /* Get the parent nodes */
        psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);

        /* Unlock top down. No unlocking required for level 0. */
        for (level = end_pwrlvl; level >= PSCI_CPU_PWR_LVL + 1; level--) {
                parent_idx = parent_nodes[level - 1];
                psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
        }
}
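/*
 * Note on ordering: locks are always acquired from the lowest non-CPU level
 * upwards (psci_acquire_pwr_domain_locks) and released from the highest
 * level downwards (psci_release_pwr_domain_locks). Because every CPU follows
 * the same global acquisition order, two CPUs contending for overlapping
 * ancestor domains can never each hold one lock while waiting on the other,
 * which rules out deadlock during concurrent power management operations.
 */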
/*******************************************************************************
 * Simple routine to determine whether a mpidr is valid or not.
 ******************************************************************************/
int psci_validate_mpidr(u_register_t mpidr)
{
        if (plat_core_pos_by_mpidr(mpidr) < 0)
                return PSCI_E_INVALID_PARAMS;

        return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * This function determines the full entrypoint information for the requested
 * PSCI entrypoint on power on/resume and returns it.
 ******************************************************************************/
#ifdef AARCH32
static int psci_get_ns_ep_info(entry_point_info_t *ep,
                               uintptr_t entrypoint,
                               u_register_t context_id)
{
        u_register_t ep_attr;
        unsigned int aif, ee, mode;
        u_register_t scr = read_scr();
        u_register_t ns_sctlr, sctlr;

        /* Switch to non secure state */
        write_scr(scr | SCR_NS_BIT);
        isb();
        ns_sctlr = read_sctlr();

        sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;

        /* Return to original state */
        write_scr(scr);
        isb();
        ee = 0;

        ep_attr = NON_SECURE | EP_ST_DISABLE;
        if (sctlr & SCTLR_EE_BIT) {
                ep_attr |= EP_EE_BIG;
                ee = 1;
        }
        SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

        ep->pc = entrypoint;
        zeromem(&ep->args, sizeof(ep->args));
        ep->args.arg0 = context_id;

        mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;

        /*
         * TODO: Choose async. exception bits if HYP mode is not
         * implemented according to the values of SCR.{AW, FW} bits
         */
        aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;

        ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);

        return PSCI_E_SUCCESS;
}

#else
static int psci_get_ns_ep_info(entry_point_info_t *ep,
                               uintptr_t entrypoint,
                               u_register_t context_id)
{
        u_register_t ep_attr, sctlr;
        unsigned int daif, ee, mode;
        u_register_t ns_scr_el3 = read_scr_el3();
        u_register_t ns_sctlr_el1 = read_sctlr_el1();

        sctlr = ns_scr_el3 & SCR_HCE_BIT ? read_sctlr_el2() : ns_sctlr_el1;
        ee = 0;

        ep_attr = NON_SECURE | EP_ST_DISABLE;
        if (sctlr & SCTLR_EE_BIT) {
                ep_attr |= EP_EE_BIG;
                ee = 1;
        }
        SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

        ep->pc = entrypoint;
        zeromem(&ep->args, sizeof(ep->args));
        ep->args.arg0 = context_id;

        /*
         * Figure out whether the cpu enters the non-secure address space
         * in aarch32 or aarch64
         */
        if (ns_scr_el3 & SCR_RW_BIT) {

                /*
                 * Check whether a Thumb entry point has been provided for an
                 * aarch64 EL
                 */
                if (entrypoint & 0x1)
                        return PSCI_E_INVALID_ADDRESS;

                mode = ns_scr_el3 & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1;

                ep->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
        } else {

                mode = ns_scr_el3 & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;

                /*
                 * TODO: Choose async. exception bits if HYP mode is not
                 * implemented according to the values of SCR.{AW, FW} bits
                 */
                daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;

                ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif);
        }

        return PSCI_E_SUCCESS;
}
#endif

/*******************************************************************************
 * This function validates the entrypoint with the platform layer if the
 * appropriate pm_ops hook is exported by the platform and returns the
 * 'entry_point_info'.
 ******************************************************************************/
int psci_validate_entry_point(entry_point_info_t *ep,
                              uintptr_t entrypoint,
                              u_register_t context_id)
{
        int rc;

        /* Validate the entrypoint using platform psci_ops */
        if (psci_plat_pm_ops->validate_ns_entrypoint) {
                rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
                if (rc != PSCI_E_SUCCESS)
                        return PSCI_E_INVALID_ADDRESS;
        }

        /*
         * Verify and derive the re-entry information for the non-secure
         * world from the non-secure state from where this call originated.
         */
        rc = psci_get_ns_ep_info(ep, entrypoint, context_id);
        return rc;
}
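/*
 * Simplified sketch of how a PSCI call handler uses the function above
 * (modelled on the CPU_ON path; error handling trimmed for illustration):
 *
 *        entry_point_info_t ep;
 *
 *        rc = psci_validate_entry_point(&ep, entrypoint, context_id);
 *        if (rc != PSCI_E_SUCCESS)
 *                return rc;
 *
 * On success 'ep' carries the validated PC, SPSR and context_id, ready to be
 * programmed into the target CPU's context before it is released from reset.
 */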
/*******************************************************************************
 * Generic handler which is called when a cpu is physically powered on. It
 * traverses the node information and finds the highest power level powered
 * off and performs generic, architectural, platform setup and state management
 * to power on that power level and power levels below it.
 * e.g. For a cpu that's been powered on, it will call the platform specific
 * code to enable the gic cpu interface and for a cluster it will enable
 * coherency at the interconnect level in addition to gic cpu interface.
 ******************************************************************************/
void psci_warmboot_entrypoint(void)
{
        unsigned int end_pwrlvl, cpu_idx = plat_my_core_pos();
        psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };

        /*
         * Verify that we have been explicitly turned ON or resumed from
         * suspend.
         */
        if (psci_get_aff_info_state() == AFF_STATE_OFF) {
                ERROR("Unexpected affinity info state");
                panic();
        }

        /*
         * Get the maximum power domain level to traverse to after this cpu
         * has been physically powered up.
         */
        end_pwrlvl = get_power_on_target_pwrlvl();

        /*
         * This function acquires the lock corresponding to each power level so
         * that by the time all locks are taken, a consistent snapshot of the
         * system topology has been captured and state management can be done
         * safely.
         */
        psci_acquire_pwr_domain_locks(end_pwrlvl,
                                      cpu_idx);

        psci_get_target_local_pwr_states(end_pwrlvl, &state_info);

#if ENABLE_PSCI_STAT
        plat_psci_stat_accounting_stop(&state_info);
#endif

        /*
         * This CPU could be resuming from suspend or it could have just been
         * turned on. To distinguish between these 2 cases, we examine the
         * affinity state of the CPU:
         *  - If the affinity state is ON_PENDING then it has just been
         *    turned on.
         *  - Else it is resuming from suspend.
         *
         * Depending on the type of warm reset identified, choose the right set
         * of power management handlers and perform the generic, architecture
         * and platform specific handling.
         */
        if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING)
                psci_cpu_on_finish(cpu_idx, &state_info);
        else
                psci_cpu_suspend_finish(cpu_idx, &state_info);

        /*
         * Set the requested and target state of this CPU and all the higher
         * power domains which are ancestors of this CPU to run.
         */
        psci_set_pwr_domains_to_run(end_pwrlvl);

#if ENABLE_PSCI_STAT
        /*
         * Update PSCI stats.
         * Caches are off when writing stats data on the power down path.
         * Since caches are now enabled, it's necessary to do cache
         * maintenance before reading that same data.
         */
        psci_stats_update_pwr_up(end_pwrlvl, &state_info);
#endif

        /*
         * This loop releases the lock corresponding to each power level
         * in the reverse order to which they were acquired.
         */
        psci_release_pwr_domain_locks(end_pwrlvl,
                                      cpu_idx);
}

/*******************************************************************************
 * This function initializes the set of hooks that PSCI invokes as part of power
 * management operation. The power management hooks are expected to be provided
 * by the SPD, after it finishes all its initialization
 ******************************************************************************/
void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
{
        assert(pm);
        psci_spd_pm = pm;

        if (pm->svc_migrate)
                psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);

        if (pm->svc_migrate_info)
                psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
                                | define_psci_cap(PSCI_MIG_INFO_TYPE);
}
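/*
 * Illustrative registration from an SPD's setup code (a minimal sketch; the
 * handler names are hypothetical, only the spd_pm_ops_t layout and the
 * function above are real):
 *
 *        static const spd_pm_ops_t spd_pm = {
 *                .svc_on = spd_cpu_on_handler,
 *                .svc_off = spd_cpu_off_handler,
 *                .svc_suspend = spd_cpu_suspend_handler,
 *                .svc_migrate_info = spd_migrate_info,
 *        };
 *
 *        psci_register_spd_pm_hook(&spd_pm);
 *
 * Providing svc_migrate/svc_migrate_info is what advertises the MIGRATE
 * capabilities, as implemented above.
 */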
/*******************************************************************************
 * This function invokes the migrate info hook in the spd_pm_ops. It performs
 * the necessary return value validation. If the Secure Payload is UP and
 * migrate capable, it returns the mpidr of the CPU on which the Secure Payload
 * is resident through the mpidr parameter. Else the value of the parameter on
 * return is undefined.
 ******************************************************************************/
int psci_spd_migrate_info(u_register_t *mpidr)
{
        int rc;

        if (!psci_spd_pm || !psci_spd_pm->svc_migrate_info)
                return PSCI_E_NOT_SUPPORTED;

        rc = psci_spd_pm->svc_migrate_info(mpidr);

        assert(rc == PSCI_TOS_UP_MIG_CAP || rc == PSCI_TOS_NOT_UP_MIG_CAP \
                || rc == PSCI_TOS_NOT_PRESENT_MP || rc == PSCI_E_NOT_SUPPORTED);

        return rc;
}


/*******************************************************************************
 * This function prints the state of all power domains present in the
 * system
 ******************************************************************************/
void psci_print_power_domain_map(void)
{
#if LOG_LEVEL >= LOG_LEVEL_INFO
        unsigned int idx;
        plat_local_state_t state;
        plat_local_state_type_t state_type;

        /* This array maps to the PSCI_STATE_X definitions in psci.h */
        static const char * const psci_state_type_str[] = {
                "ON",
                "RETENTION",
                "OFF",
        };

        INFO("PSCI Power Domain Map:\n");
        for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - PLATFORM_CORE_COUNT);
                                                        idx++) {
                state_type = find_local_state_type(
                                psci_non_cpu_pd_nodes[idx].local_state);
                INFO("  Domain Node : Level %u, parent_node %d,"
                                " State %s (0x%x)\n",
                                psci_non_cpu_pd_nodes[idx].level,
                                psci_non_cpu_pd_nodes[idx].parent_node,
                                psci_state_type_str[state_type],
                                psci_non_cpu_pd_nodes[idx].local_state);
        }

        for (idx = 0; idx < PLATFORM_CORE_COUNT; idx++) {
                state = psci_get_cpu_local_state_by_idx(idx);
                state_type = find_local_state_type(state);
                INFO("  CPU Node : MPID 0x%llx, parent_node %d,"
                                " State %s (0x%x)\n",
                                (unsigned long long)psci_cpu_pd_nodes[idx].mpidr,
                                psci_cpu_pd_nodes[idx].parent_node,
                                psci_state_type_str[state_type],
                                psci_get_cpu_local_state_by_idx(idx));
        }
#endif
}
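/*
 * Illustrative output on a hypothetical platform (all values invented for
 * the example; the line layout follows the INFO() format strings above):
 *
 *        PSCI Power Domain Map:
 *          Domain Node : Level 1, parent_node 0, State RETENTION (0x1)
 *          CPU Node : MPID 0x0, parent_node 1, State ON (0x0)
 */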
/******************************************************************************
 * Return whether any secondaries were powered up with CPU_ON call. A CPU that
 * has ever been powered up would have set its MPIDR value to something other
 * than PSCI_INVALID_MPIDR. Note that the MPIDR value isn't reset back to
 * PSCI_INVALID_MPIDR when a CPU is powered down later, so the return value is
 * meaningful only when called on the primary CPU during early boot.
 *****************************************************************************/
int psci_secondaries_brought_up(void)
{
        unsigned int idx, n_valid = 0;

        for (idx = 0; idx < ARRAY_SIZE(psci_cpu_pd_nodes); idx++) {
                if (psci_cpu_pd_nodes[idx].mpidr != PSCI_INVALID_MPIDR)
                        n_valid++;
        }

        assert(n_valid);

        return (n_valid > 1);
}

#if ENABLE_PLAT_COMPAT
/*******************************************************************************
 * PSCI Compatibility helper function to return the 'power_state' parameter of
 * the PSCI CPU SUSPEND request for the current CPU. Returns PSCI_INVALID_DATA
 * if not invoked within CPU_SUSPEND for the current CPU.
 ******************************************************************************/
int psci_get_suspend_powerstate(void)
{
        /* Sanity check to verify that CPU is within CPU_SUSPEND */
        if (psci_get_aff_info_state() == AFF_STATE_ON &&
                !is_local_state_run(psci_get_cpu_local_state()))
                return psci_power_state_compat[plat_my_core_pos()];

        return PSCI_INVALID_DATA;
}

/*******************************************************************************
 * PSCI Compatibility helper function to return the state id of the current
 * cpu encoded in the 'power_state' parameter. Returns PSCI_INVALID_DATA
 * if not invoked within CPU_SUSPEND for the current CPU.
 ******************************************************************************/
int psci_get_suspend_stateid(void)
{
        unsigned int power_state;
        power_state = psci_get_suspend_powerstate();
        if (power_state != PSCI_INVALID_DATA)
                return psci_get_pstate_id(power_state);

        return PSCI_INVALID_DATA;
}

/*******************************************************************************
 * PSCI Compatibility helper function to return the state id encoded in the
 * 'power_state' parameter of the CPU specified by 'mpidr'. Returns
 * PSCI_INVALID_DATA if the CPU is not in CPU_SUSPEND.
 ******************************************************************************/
int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr)
{
        int cpu_idx = plat_core_pos_by_mpidr(mpidr);

        if (cpu_idx == -1)
                return PSCI_INVALID_DATA;

        /* Sanity check to verify that the CPU is in CPU_SUSPEND */
        if (psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_ON &&
                !is_local_state_run(psci_get_cpu_local_state_by_idx(cpu_idx)))
                return psci_get_pstate_id(psci_power_state_compat[cpu_idx]);

        return PSCI_INVALID_DATA;
}

/*******************************************************************************
 * This function returns the highest affinity level which is in OFF
 * state. The affinity instance with which the level is associated is
 * determined by the caller.
 ******************************************************************************/
unsigned int psci_get_max_phys_off_afflvl(void)
{
        psci_power_state_t state_info;

        zeromem(&state_info, sizeof(state_info));
        psci_get_target_local_pwr_states(PLAT_MAX_PWR_LVL, &state_info);

        return psci_find_target_suspend_lvl(&state_info);
}

/*******************************************************************************
 * PSCI Compatibility helper function to return the target affinity level
 * requested for the CPU_SUSPEND. This function assumes affinity levels
 * correspond to power domain levels on the platform.
 ******************************************************************************/
int psci_get_suspend_afflvl(void)
{
        return psci_get_suspend_pwrlvl();
}

#endif
/*******************************************************************************
 * Initiate the power down sequence, by calling the power down operations
 * registered for this CPU.
 ******************************************************************************/
void psci_do_pwrdown_sequence(unsigned int power_level)
{
#if HW_ASSISTED_COHERENCY
        /*
         * With hardware-assisted coherency, the CPU drivers only initiate the
         * power down sequence, without performing cache-maintenance operations
         * in software. Data caches and MMU remain enabled both before and
         * after this call.
         */
        prepare_cpu_pwr_dwn(power_level);
#else
        /*
         * Without hardware-assisted coherency, the CPU drivers disable data
         * caches and MMU, then perform cache-maintenance operations in
         * software.
         *
         * We ought to call prepare_cpu_pwr_dwn() to initiate the power down
         * sequence. We currently have data caches and MMU enabled, but the
         * function will return with data caches and MMU disabled. We must
         * ensure that the stack memory is flushed out to memory before we
         * start popping from it again.
         */
        psci_do_pwrdown_cache_maintenance(power_level);
#endif
}