/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <debug.h>
#include <platform.h>
#include <string.h>
#include <utils.h>
#include "psci_private.h"

/*
 * SPD power management operations, expected to be supplied by the registered
 * SPD on successful SP initialization
 */
const spd_pm_ops_t *psci_spd_pm;

/*
 * PSCI requested local power state map. This array is used to store the local
 * power states requested by a CPU for power levels from level 1 to
 * PLAT_MAX_PWR_LVL. It does not store the requested local power state for power
 * level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a
 * CPU are the same.
 *
 * During state coordination, the platform is passed an array containing the
 * local states requested for a particular non cpu power domain by each cpu
 * within the domain.
 *
 * TODO: Dense packing of the requested states will cause cache thrashing
 * when multiple power domains write to it. If we allocate the requested
 * states at each power level in a cache-line aligned per-domain memory,
 * the cache thrashing can be avoided.
 */
static plat_local_state_t
	psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT];
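
/*
 * For example, the state requested by CPU 'cpu_idx' for power level 'lvl'
 * (lvl >= 1) lives in psci_req_local_pwr_states[lvl - 1][cpu_idx]; the
 * '- 1' accounts for power level 0 not being stored in this array.
 */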
/*******************************************************************************
 * Arrays that hold the platform's power domain tree information for state
 * management of power domains.
 * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain
 * which is an ancestor of a CPU power domain.
 * Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain.
 ******************************************************************************/
non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]
#if USE_COHERENT_MEM
__section("tzfw_coherent_mem")
#endif
;

/* Lock for PSCI state coordination */
DEFINE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);

cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * Pointer to functions exported by the platform to complete power mgmt. ops
 ******************************************************************************/
const plat_psci_ops_t *psci_plat_pm_ops;

/******************************************************************************
 * Check that the maximum power level supported by the platform makes sense
 *****************************************************************************/
CASSERT(PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL && \
	PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL, \
	assert_platform_max_pwrlvl_check);

/*
 * The plat_local_state used by the platform is one of these types: RUN,
 * RETENTION and OFF. The platform can define further sub-states for each type
 * apart from RUN. This categorization is done to verify the sanity of the
 * psci_power_state passed by the platform and to print debug information. The
 * categorization is done on the basis of the following conditions:
 *
 * 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN.
 *
 * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is
 *    STATE_TYPE_RETN.
 *
 * 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is
 *    STATE_TYPE_OFF.
 */
typedef enum plat_local_state_type {
	STATE_TYPE_RUN = 0,
	STATE_TYPE_RETN,
	STATE_TYPE_OFF
} plat_local_state_type_t;

/* The macro used to categorize plat_local_state. */
#define find_local_state_type(plat_local_state)			\
	((plat_local_state) ? (((plat_local_state) > PLAT_MAX_RET_STATE) \
		? STATE_TYPE_OFF : STATE_TYPE_RETN)			\
		: STATE_TYPE_RUN)
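
/*
 * As an illustration, on a hypothetical platform where PLAT_MAX_RET_STATE is
 * 1 and PLAT_MAX_OFF_STATE is 2:
 *   find_local_state_type(0) evaluates to STATE_TYPE_RUN,
 *   find_local_state_type(1) evaluates to STATE_TYPE_RETN,
 *   find_local_state_type(2) evaluates to STATE_TYPE_OFF.
 */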
/******************************************************************************
 * Check that the maximum retention level supported by the platform is less
 * than the maximum off level.
 *****************************************************************************/
CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE, \
	assert_platform_max_off_and_retn_state_check);

/******************************************************************************
 * This function ensures that the power state parameter in a CPU_SUSPEND request
 * is valid. If so, it returns the requested states for each power level.
 *****************************************************************************/
int psci_validate_power_state(unsigned int power_state,
			      psci_power_state_t *state_info)
{
	/* Check SBZ bits in power state are zero */
	if (psci_check_power_state(power_state))
		return PSCI_E_INVALID_PARAMS;

	assert(psci_plat_pm_ops->validate_power_state);

	/* Validate the power_state using platform pm_ops */
	return psci_plat_pm_ops->validate_power_state(power_state, state_info);
}
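
/*
 * A caller on the CPU_SUSPEND path would typically use the function above as
 * follows (sketch; 'power_state' is the parameter received from the PSCI
 * client):
 *
 *	psci_power_state_t state_info;
 *	int rc;
 *
 *	rc = psci_validate_power_state(power_state, &state_info);
 *	if (rc != PSCI_E_SUCCESS)
 *		return rc;
 */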
/******************************************************************************
 * This function retrieves the `psci_power_state_t` for system suspend from
 * the platform.
 *****************************************************************************/
void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
{
	/*
	 * Assert that the required pm_ops hook is implemented to ensure that
	 * the capability detected during psci_setup() is valid.
	 */
	assert(psci_plat_pm_ops->get_sys_suspend_power_state);

	/*
	 * Query the platform for the power_state required for system suspend
	 */
	psci_plat_pm_ops->get_sys_suspend_power_state(state_info);
}

/*******************************************************************************
 * This function verifies that all the other cores in the system have been
 * turned OFF and the current CPU is the last running CPU in the system.
 * Returns 1 (true) if the current CPU is the last ON CPU or 0 (false)
 * otherwise.
 ******************************************************************************/
unsigned int psci_is_last_on_cpu(void)
{
	unsigned int cpu_idx, my_idx = plat_my_core_pos();

	for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
		if (cpu_idx == my_idx) {
			assert(psci_get_aff_info_state() == AFF_STATE_ON);
			continue;
		}

		if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF)
			return 0;
	}

	return 1;
}

/*******************************************************************************
 * Routine to return the maximum power level to traverse to after a cpu has
 * been physically powered up. It is expected to be called immediately after
 * reset from assembler code.
 ******************************************************************************/
static unsigned int get_power_on_target_pwrlvl(void)
{
	unsigned int pwrlvl;

	/*
	 * Assume that this cpu was suspended and retrieve its target power
	 * level. If it is invalid then it could only have been turned off
	 * earlier. PLAT_MAX_PWR_LVL will be the highest power level a
	 * cpu can be turned off to.
	 */
	pwrlvl = psci_get_suspend_pwrlvl();
	if (pwrlvl == PSCI_INVALID_PWR_LVL)
		pwrlvl = PLAT_MAX_PWR_LVL;
	return pwrlvl;
}

/******************************************************************************
 * Helper function to update the requested local power state array. This array
 * does not store the requested state for the CPU power level. Hence an
 * assertion is added to prevent us from accessing the wrong index.
 *****************************************************************************/
static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
					 unsigned int cpu_idx,
					 plat_local_state_t req_pwr_state)
{
	assert(pwrlvl > PSCI_CPU_PWR_LVL);
	psci_req_local_pwr_states[pwrlvl - 1][cpu_idx] = req_pwr_state;
}

/******************************************************************************
 * This function initializes the psci_req_local_pwr_states.
 *****************************************************************************/
void psci_init_req_local_pwr_states(void)
{
	/* Initialize the requested state of all non CPU power domains as OFF */
	memset(&psci_req_local_pwr_states, PLAT_MAX_OFF_STATE,
	       sizeof(psci_req_local_pwr_states));
}

/******************************************************************************
 * Helper function to return a reference to an array containing the local power
 * states requested by each cpu for a power domain at 'pwrlvl'. The size of the
 * array will be the number of cpu power domains of which this power domain is
 * an ancestor. These requested states will be used to determine a suitable
 * target state for this power domain during psci state coordination. An
 * assertion is added to prevent us from accessing the CPU power level.
 *****************************************************************************/
static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
							 unsigned int cpu_idx)
{
	assert(pwrlvl > PSCI_CPU_PWR_LVL);

	return &psci_req_local_pwr_states[pwrlvl - 1][cpu_idx];
}
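
/*
 * For instance, for a cluster power domain at level 1 whose 'cpu_start_idx'
 * is 4 and which is the ancestor of 4 CPUs (a hypothetical layout), the
 * returned pointer covers psci_req_local_pwr_states[0][4..7], i.e. the
 * states requested for the cluster by each of those CPUs.
 */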
/*
 * psci_non_cpu_pd_nodes can be placed either in normal memory or coherent
 * memory.
 *
 * With !USE_COHERENT_MEM, psci_non_cpu_pd_nodes is placed in normal memory,
 * where it is accessed by both cached and non-cached participants. To serve
 * the common minimum, perform a cache flush before read and after write so
 * that non-cached participants operate on the latest data in main memory.
 *
 * When USE_COHERENT_MEM is used, psci_non_cpu_pd_nodes is placed in coherent
 * memory. With HW_ASSISTED_COHERENCY, all PSCI participants are cache-coherent.
 * In both cases, no cache operations are required.
 */

/*
 * Retrieve local state of non-CPU power domain node from a non-cached CPU,
 * after any required cache maintenance operation.
 */
static plat_local_state_t get_non_cpu_pd_node_local_state(
		unsigned int parent_idx)
{
#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY)
	flush_dcache_range(
			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
	return psci_non_cpu_pd_nodes[parent_idx].local_state;
}

/*
 * Update local state of non-CPU power domain node from a cached CPU; perform
 * any required cache maintenance operation afterwards.
 */
static void set_non_cpu_pd_node_local_state(unsigned int parent_idx,
					    plat_local_state_t state)
{
	psci_non_cpu_pd_nodes[parent_idx].local_state = state;
#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY)
	flush_dcache_range(
			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
}

/******************************************************************************
 * Helper function to return the current local power state of each power domain
 * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
 * function will be called after a cpu is powered on to find the local state
 * each power domain has emerged from.
 *****************************************************************************/
void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
				      psci_power_state_t *target_state)
{
	unsigned int parent_idx, lvl;
	plat_local_state_t *pd_state = target_state->pwr_domain_state;

	pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;

	/* Copy the local power state from node to state_info */
	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
		pd_state[lvl] = get_non_cpu_pd_node_local_state(parent_idx);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/* Set the higher levels to RUN */
	for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
		target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
}
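
/*
 * For example, on a hypothetical platform with PLAT_MAX_PWR_LVL == 2, a CPU
 * waking up from a cluster power down would find target_state holding
 * { OFF, OFF, RUN }: the CPU and cluster levels emerged from an off state
 * while the system level remained running.
 */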
/******************************************************************************
 * Helper function to set the target local power state that each power domain
 * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
 * enter. This function will be called after coordination of requested power
 * states has been done for each power level.
 *****************************************************************************/
static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
					const psci_power_state_t *target_state)
{
	unsigned int parent_idx, lvl;
	const plat_local_state_t *pd_state = target_state->pwr_domain_state;

	psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);

	/*
	 * Need to flush as local_state might be accessed with Data Cache
	 * disabled during power on
	 */
	psci_flush_cpu_data(psci_svc_cpu_data.local_state);

	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;

	/* Copy the local_state from state_info */
	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
		set_non_cpu_pd_node_local_state(parent_idx, pd_state[lvl]);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}
}

/*******************************************************************************
 * PSCI helper function to get the parent nodes corresponding to a cpu_index.
 ******************************************************************************/
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
				      unsigned int end_lvl,
				      unsigned int node_index[])
{
	unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
	unsigned int i;

	for (i = PSCI_CPU_PWR_LVL + 1; i <= end_lvl; i++) {
		*node_index++ = parent_node;
		parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
	}
}
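
/*
 * For instance, on a hypothetical two-level platform (level 1 == cluster,
 * level 2 == system), a call with end_lvl == 2 fills node_index[0] with the
 * index of the CPU's cluster node and node_index[1] with that of the system
 * node.
 */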
/******************************************************************************
 * This function is invoked post CPU power up and initialization. It sets the
 * affinity info state, target power state and requested power state for the
 * current CPU and all its ancestor power domains to RUN.
 *****************************************************************************/
void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl)
{
	unsigned int parent_idx, cpu_idx = plat_my_core_pos(), lvl;
	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

	/* Reset the local_state to RUN for the non cpu power domains. */
	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
		set_non_cpu_pd_node_local_state(parent_idx,
				PSCI_LOCAL_STATE_RUN);
		psci_set_req_local_pwr_state(lvl,
					     cpu_idx,
					     PSCI_LOCAL_STATE_RUN);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/* Set the affinity info state to ON */
	psci_set_aff_info_state(AFF_STATE_ON);

	psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
	psci_flush_cpu_data(psci_svc_cpu_data);
}

/******************************************************************************
 * This function is passed the local power states requested for each power
 * domain (state_info) between the current CPU domain and its ancestors until
 * the target power level (end_pwrlvl). It updates the array of requested power
 * states with this information.
 *
 * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
 * retrieves the states requested by all the cpus of which the power domain at
 * that level is an ancestor. It passes this information to the platform to
 * coordinate and return the target power state. If the target state for a level
 * is RUN then subsequent levels are not considered. At the CPU level, state
 * coordination is not required. Hence, the requested and the target states are
 * the same.
 *
 * The 'state_info' is updated with the target state for each level between the
 * CPU and the 'end_pwrlvl' and returned to the caller.
 *
 * This function will only be invoked with data cache enabled and while
 * powering down a core.
 *****************************************************************************/
void psci_do_state_coordination(unsigned int end_pwrlvl,
				psci_power_state_t *state_info)
{
	unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
	unsigned int start_idx, ncpus;
	plat_local_state_t target_state, *req_states;

	assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

	/*
	 * For level 0, the requested state will be equivalent to the target
	 * state.
	 */
	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {

		/* First update the requested power state */
		psci_set_req_local_pwr_state(lvl, cpu_idx,
					     state_info->pwr_domain_state[lvl]);

		/* Get the requested power states for this power level */
		start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
		req_states = psci_get_req_local_pwr_states(lvl, start_idx);

		/*
		 * Let the platform coordinate amongst the requested states at
		 * this power level and return the target local power state.
		 */
		ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
		target_state = plat_get_target_pwr_state(lvl,
							 req_states,
							 ncpus);

		state_info->pwr_domain_state[lvl] = target_state;

		/* Break early if the negotiated target power state is RUN */
		if (is_local_state_run(state_info->pwr_domain_state[lvl]))
			break;

		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/*
	 * This is for cases when we break out of the above loop early because
	 * the target power state is RUN at a power level < end_pwrlvl.
	 * We update the requested power state from state_info and then
	 * set the target state as RUN.
	 */
	for (lvl = lvl + 1; lvl <= end_pwrlvl; lvl++) {
		psci_set_req_local_pwr_state(lvl, cpu_idx,
					     state_info->pwr_domain_state[lvl]);
		state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
	}

	/* Update the target state in the power domain nodes */
	psci_set_target_local_pwr_states(end_pwrlvl, state_info);
}
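
/*
 * Worked example (hypothetical dual-core cluster): if CPU0 requests an OFF
 * state for its cluster at level 1 while its sibling CPU1 is still running
 * (requested state RUN), a typical plat_get_target_pwr_state()
 * implementation picks the shallowest of { OFF, RUN }, i.e. RUN, so the
 * cluster stays on and the coordination loop above breaks out at level 1.
 */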
/******************************************************************************
 * This function validates a suspend request by making sure that if a standby
 * state is requested then no power level is turned off and the highest power
 * level is placed in a standby/retention state.
 *
 * It also ensures that the state level X will enter is not shallower than the
 * state level X + 1 will enter.
 *
 * This validation will be enabled only for DEBUG builds as the platform is
 * expected to perform these validations as well.
 *****************************************************************************/
int psci_validate_suspend_req(const psci_power_state_t *state_info,
			      unsigned int is_power_down_state)
{
	unsigned int max_off_lvl, target_lvl, max_retn_lvl;
	plat_local_state_t state;
	plat_local_state_type_t req_state_type, deepest_state_type;
	int i;

	/* Find the target suspend power level */
	target_lvl = psci_find_target_suspend_lvl(state_info);
	if (target_lvl == PSCI_INVALID_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* All power domain levels are in a RUN state to begin with */
	deepest_state_type = STATE_TYPE_RUN;

	for (i = target_lvl; i >= PSCI_CPU_PWR_LVL; i--) {
		state = state_info->pwr_domain_state[i];
		req_state_type = find_local_state_type(state);

		/*
		 * While traversing from the highest power level to the lowest,
		 * the state requested for lower levels has to be the same or
		 * deeper i.e. equal to or greater than the state at the higher
		 * levels. If this condition is true, then the requested state
		 * becomes the deepest state encountered so far.
		 */
		if (req_state_type < deepest_state_type)
			return PSCI_E_INVALID_PARAMS;
		deepest_state_type = req_state_type;
	}

	/* Find the highest off power level */
	max_off_lvl = psci_find_max_off_lvl(state_info);

	/* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
	max_retn_lvl = PSCI_INVALID_PWR_LVL;
	if (target_lvl != max_off_lvl)
		max_retn_lvl = target_lvl;

	/*
	 * If this is not a request for a power down state then max off level
	 * has to be invalid and max retention level has to be a valid power
	 * level.
	 */
	if (!is_power_down_state && (max_off_lvl != PSCI_INVALID_PWR_LVL ||
				     max_retn_lvl == PSCI_INVALID_PWR_LVL))
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}

/******************************************************************************
 * This function finds the highest power level which will be powered down
 * amongst all the power levels specified in the 'state_info' structure
 *****************************************************************************/
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
{
	int i;

	for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
		if (is_local_state_off(state_info->pwr_domain_state[i]))
			return i;
	}

	return PSCI_INVALID_PWR_LVL;
}

/******************************************************************************
 * This function finds the level of the highest power domain which will be
 * placed in a low power state during a suspend operation.
 *****************************************************************************/
unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
{
	int i;

	for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
		if (!is_local_state_run(state_info->pwr_domain_state[i]))
			return i;
	}

	return PSCI_INVALID_PWR_LVL;
}
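
/*
 * Example: for a standby request on a hypothetical single-cluster platform
 * where 'state_info' holds a retention state at both the CPU and cluster
 * levels, psci_find_target_suspend_lvl() returns 1,
 * psci_find_max_off_lvl() returns PSCI_INVALID_PWR_LVL and
 * psci_validate_suspend_req() reports PSCI_E_SUCCESS.
 */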
/*******************************************************************************
 * This function is passed a cpu_index and the highest level in the topology
 * tree that the operation should be applied to. It picks up locks in order of
 * increasing power domain level in the range specified.
 ******************************************************************************/
void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
				   unsigned int cpu_idx)
{
	unsigned int parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
	unsigned int level;

	/* No locking required for level 0. Hence start locking from level 1 */
	for (level = PSCI_CPU_PWR_LVL + 1; level <= end_pwrlvl; level++) {
		psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}
}

/*******************************************************************************
 * This function is passed a cpu_index and the highest level in the topology
 * tree that the operation should be applied to. It releases the locks in order
 * of decreasing power domain level in the range specified.
 ******************************************************************************/
void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
				   unsigned int cpu_idx)
{
	unsigned int parent_idx, parent_nodes[PLAT_MAX_PWR_LVL] = {0};
	int level;

	/* Get the parent nodes */
	psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);

	/* Unlock top down. No unlocking required for level 0. */
	for (level = end_pwrlvl; level >= PSCI_CPU_PWR_LVL + 1; level--) {
		parent_idx = parent_nodes[level - 1];
		psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
	}
}
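
/*
 * Power management operations bracket their critical section with these two
 * helpers, e.g. (sketch of a power down path such as CPU_SUSPEND):
 *
 *	psci_acquire_pwr_domain_locks(end_pwrlvl, cpu_idx);
 *	psci_do_state_coordination(end_pwrlvl, &state_info);
 *	...
 *	psci_release_pwr_domain_locks(end_pwrlvl, cpu_idx);
 */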
/*******************************************************************************
 * Simple routine to determine whether an mpidr is valid or not.
 ******************************************************************************/
int psci_validate_mpidr(u_register_t mpidr)
{
	if (plat_core_pos_by_mpidr(mpidr) < 0)
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * This function determines the full entrypoint information for the requested
 * PSCI entrypoint on power on/resume and returns it.
 ******************************************************************************/
#ifdef AARCH32
static int psci_get_ns_ep_info(entry_point_info_t *ep,
			       uintptr_t entrypoint,
			       u_register_t context_id)
{
	u_register_t ep_attr;
	unsigned int aif, ee, mode;
	u_register_t scr = read_scr();
	u_register_t ns_sctlr, sctlr;

	/* Switch to non secure state */
	write_scr(scr | SCR_NS_BIT);
	isb();
	ns_sctlr = read_sctlr();

	sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;

	/* Return to original state */
	write_scr(scr);
	isb();
	ee = 0;

	ep_attr = NON_SECURE | EP_ST_DISABLE;
	if (sctlr & SCTLR_EE_BIT) {
		ep_attr |= EP_EE_BIG;
		ee = 1;
	}
	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

	ep->pc = entrypoint;
	zeromem(&ep->args, sizeof(ep->args));
	ep->args.arg0 = context_id;

	mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;

	/*
	 * TODO: Choose async. exception bits if HYP mode is not
	 * implemented according to the values of SCR.{AW, FW} bits
	 */
	aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;

	ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);

	return PSCI_E_SUCCESS;
}

#else
static int psci_get_ns_ep_info(entry_point_info_t *ep,
			       uintptr_t entrypoint,
			       u_register_t context_id)
{
	u_register_t ep_attr, sctlr;
	unsigned int daif, ee, mode;
	u_register_t ns_scr_el3 = read_scr_el3();
	u_register_t ns_sctlr_el1 = read_sctlr_el1();

	sctlr = ns_scr_el3 & SCR_HCE_BIT ? read_sctlr_el2() : ns_sctlr_el1;
	ee = 0;

	ep_attr = NON_SECURE | EP_ST_DISABLE;
	if (sctlr & SCTLR_EE_BIT) {
		ep_attr |= EP_EE_BIG;
		ee = 1;
	}
	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

	ep->pc = entrypoint;
	zeromem(&ep->args, sizeof(ep->args));
	ep->args.arg0 = context_id;

	/*
	 * Figure out whether the cpu enters the non-secure address space
	 * in aarch32 or aarch64
	 */
	if (ns_scr_el3 & SCR_RW_BIT) {

		/*
		 * Check whether a Thumb entry point has been provided for an
		 * aarch64 EL
		 */
		if (entrypoint & 0x1)
			return PSCI_E_INVALID_ADDRESS;

		mode = ns_scr_el3 & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1;

		ep->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
	} else {

		mode = ns_scr_el3 & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;

		/*
		 * TODO: Choose async. exception bits if HYP mode is not
		 * implemented according to the values of SCR.{AW, FW} bits
		 */
		daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;

		ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif);
	}

	return PSCI_E_SUCCESS;
}
#endif

/*******************************************************************************
 * This function validates the entrypoint with the platform layer if the
 * appropriate pm_ops hook is exported by the platform and returns the
 * 'entry_point_info'.
 ******************************************************************************/
int psci_validate_entry_point(entry_point_info_t *ep,
			      uintptr_t entrypoint,
			      u_register_t context_id)
{
	int rc;

	/* Validate the entrypoint using platform psci_ops */
	if (psci_plat_pm_ops->validate_ns_entrypoint) {
		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
		if (rc != PSCI_E_SUCCESS)
			return PSCI_E_INVALID_ADDRESS;
	}

	/*
	 * Verify and derive the re-entry information for the non-secure world
	 * from the non-secure state from where this call originated.
	 */
	rc = psci_get_ns_ep_info(ep, entrypoint, context_id);
	return rc;
}
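
/*
 * For example, for a 64-bit normal world with EL2 enabled (SCR_EL3.RW and
 * SCR_EL3.HCE both set), psci_get_ns_ep_info() programs ep->spsr for EL2
 * with all asynchronous exceptions masked, while a Thumb-style entrypoint
 * (bit 0 set) is rejected with PSCI_E_INVALID_ADDRESS.
 */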
/*******************************************************************************
 * Generic handler which is called when a cpu is physically powered on. It
 * traverses the node information and finds the highest power level powered
 * off and performs generic, architectural, platform setup and state management
 * to power on that power level and power levels below it.
 * e.g. For a cpu that's been powered on, it will call the platform specific
 * code to enable the gic cpu interface and for a cluster it will enable
 * coherency at the interconnect level in addition to gic cpu interface.
 ******************************************************************************/
void psci_warmboot_entrypoint(void)
{
	unsigned int end_pwrlvl, cpu_idx = plat_my_core_pos();
	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };

	/*
	 * Verify that we have been explicitly turned ON or resumed from
	 * suspend.
	 */
	if (psci_get_aff_info_state() == AFF_STATE_OFF) {
		ERROR("Unexpected affinity info state");
		panic();
	}

	/*
	 * Get the maximum power domain level to traverse to after this cpu
	 * has been physically powered up.
	 */
	end_pwrlvl = get_power_on_target_pwrlvl();

	/*
	 * This function acquires the lock corresponding to each power level so
	 * that by the time all locks are taken, the system topology is
	 * snapshot and state management can be done safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl, cpu_idx);

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_stop(&state_info);
#endif

	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);

	/*
	 * This CPU could be resuming from suspend or it could have just been
	 * turned on. To distinguish between these 2 cases, we examine the
	 * affinity state of the CPU:
	 *  - If the affinity state is ON_PENDING then it has just been
	 *    turned on.
	 *  - Else it is resuming from suspend.
	 *
	 * Depending on the type of warm reset identified, choose the right set
	 * of power management handlers and perform the generic, architecture
	 * and platform specific handling.
	 */
	if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING)
		psci_cpu_on_finish(cpu_idx, &state_info);
	else
		psci_cpu_suspend_finish(cpu_idx, &state_info);

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domains which are ancestors of this CPU to run.
	 */
	psci_set_pwr_domains_to_run(end_pwrlvl);

#if ENABLE_PSCI_STAT
	/*
	 * Update PSCI stats.
	 * Caches are off when writing stats data on the power down path.
	 * Since caches are now enabled, it's necessary to do cache
	 * maintenance before reading that same data.
	 */
	psci_stats_update_pwr_up(end_pwrlvl, &state_info);
#endif

	/*
	 * Release the locks corresponding to each power level in the reverse
	 * order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl, cpu_idx);
}

/*******************************************************************************
 * This function initializes the set of hooks that PSCI invokes as part of
 * power management operations. The power management hooks are expected to be
 * provided by the SPD, after it finishes all its initialization.
 ******************************************************************************/
void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
{
	assert(pm);
	psci_spd_pm = pm;

	if (pm->svc_migrate)
		psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);

	if (pm->svc_migrate_info)
		psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
			| define_psci_cap(PSCI_MIG_INFO_TYPE);
}
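
/*
 * An SPD typically registers its hooks once its initialization completes,
 * e.g. (sketch; 'spd_pm' stands for the SPD's own spd_pm_ops_t instance):
 *
 *	psci_register_spd_pm_hook(&spd_pm);
 */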
/*******************************************************************************
 * This function invokes the migrate info hook in the spd_pm_ops. It performs
 * the necessary return value validation. If the Secure Payload is UP and
 * migrate capable, it returns the mpidr of the CPU on which the Secure payload
 * is resident through the mpidr parameter. Else the value of the parameter on
 * return is undefined.
 ******************************************************************************/
int psci_spd_migrate_info(u_register_t *mpidr)
{
	int rc;

	if (!psci_spd_pm || !psci_spd_pm->svc_migrate_info)
		return PSCI_E_NOT_SUPPORTED;

	rc = psci_spd_pm->svc_migrate_info(mpidr);

	assert(rc == PSCI_TOS_UP_MIG_CAP || rc == PSCI_TOS_NOT_UP_MIG_CAP ||
	       rc == PSCI_TOS_NOT_PRESENT_MP || rc == PSCI_E_NOT_SUPPORTED);

	return rc;
}
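
/*
 * For instance, a migrate-capable UP Trusted OS would return
 * PSCI_TOS_UP_MIG_CAP here and report, through the mpidr parameter, the CPU
 * on which it currently resides.
 */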
/*******************************************************************************
 * This function prints the state of all power domains present in the
 * system.
 ******************************************************************************/
void psci_print_power_domain_map(void)
{
#if LOG_LEVEL >= LOG_LEVEL_INFO
	unsigned int idx;
	plat_local_state_t state;
	plat_local_state_type_t state_type;

	/* This array maps to the PSCI_STATE_X definitions in psci.h */
	static const char * const psci_state_type_str[] = {
		"ON",
		"RETENTION",
		"OFF",
	};

	INFO("PSCI Power Domain Map:\n");
	for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - PLATFORM_CORE_COUNT);
	     idx++) {
		state_type = find_local_state_type(
				psci_non_cpu_pd_nodes[idx].local_state);
		INFO("  Domain Node : Level %u, parent_node %d,"
		     " State %s (0x%x)\n",
		     psci_non_cpu_pd_nodes[idx].level,
		     psci_non_cpu_pd_nodes[idx].parent_node,
		     psci_state_type_str[state_type],
		     psci_non_cpu_pd_nodes[idx].local_state);
	}

	for (idx = 0; idx < PLATFORM_CORE_COUNT; idx++) {
		state = psci_get_cpu_local_state_by_idx(idx);
		state_type = find_local_state_type(state);
		INFO("  CPU Node : MPID 0x%llx, parent_node %d,"
		     " State %s (0x%x)\n",
		     (unsigned long long)psci_cpu_pd_nodes[idx].mpidr,
		     psci_cpu_pd_nodes[idx].parent_node,
		     psci_state_type_str[state_type],
		     psci_get_cpu_local_state_by_idx(idx));
	}
#endif
}
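
/*
 * Sample output for a hypothetical single-cluster, dual-core system with
 * everything powered on:
 *
 *	INFO:    PSCI Power Domain Map:
 *	INFO:      Domain Node : Level 1, parent_node 0, State ON (0x0)
 *	INFO:      CPU Node : MPID 0x0, parent_node 0, State ON (0x0)
 *	INFO:      CPU Node : MPID 0x1, parent_node 0, State ON (0x0)
 */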
#if ENABLE_PLAT_COMPAT
/*******************************************************************************
 * PSCI Compatibility helper function to return the 'power_state' parameter of
 * the PSCI CPU SUSPEND request for the current CPU. Returns PSCI_INVALID_DATA
 * if not invoked within CPU_SUSPEND for the current CPU.
 ******************************************************************************/
int psci_get_suspend_powerstate(void)
{
	/* Sanity check to verify that CPU is within CPU_SUSPEND */
	if (psci_get_aff_info_state() == AFF_STATE_ON &&
	    !is_local_state_run(psci_get_cpu_local_state()))
		return psci_power_state_compat[plat_my_core_pos()];

	return PSCI_INVALID_DATA;
}

/*******************************************************************************
 * PSCI Compatibility helper function to return the state id of the current
 * cpu encoded in the 'power_state' parameter. Returns PSCI_INVALID_DATA
 * if not invoked within CPU_SUSPEND for the current CPU.
 ******************************************************************************/
int psci_get_suspend_stateid(void)
{
	int power_state;
	power_state = psci_get_suspend_powerstate();
	if (power_state != PSCI_INVALID_DATA)
		return psci_get_pstate_id(power_state);

	return PSCI_INVALID_DATA;
}

/*******************************************************************************
 * PSCI Compatibility helper function to return the state id encoded in the
 * 'power_state' parameter of the CPU specified by 'mpidr'. Returns
 * PSCI_INVALID_DATA if the CPU is not in CPU_SUSPEND.
 ******************************************************************************/
int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr)
{
	int cpu_idx = plat_core_pos_by_mpidr(mpidr);

	if (cpu_idx == -1)
		return PSCI_INVALID_DATA;

	/* Sanity check to verify that the CPU is in CPU_SUSPEND */
	if (psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_ON &&
	    !is_local_state_run(psci_get_cpu_local_state_by_idx(cpu_idx)))
		return psci_get_pstate_id(psci_power_state_compat[cpu_idx]);

	return PSCI_INVALID_DATA;
}

/*******************************************************************************
 * This function returns the highest affinity level which is in an OFF
 * state. The affinity instance with which the level is associated is
 * determined by the caller.
 ******************************************************************************/
unsigned int psci_get_max_phys_off_afflvl(void)
{
	psci_power_state_t state_info;

	zeromem(&state_info, sizeof(state_info));
	psci_get_target_local_pwr_states(PLAT_MAX_PWR_LVL, &state_info);

	return psci_find_target_suspend_lvl(&state_info);
}

/*******************************************************************************
 * PSCI Compatibility helper function to return the target affinity level
 * requested for CPU_SUSPEND. This function assumes affinity levels correspond
 * to power domain levels on the platform.
 ******************************************************************************/
int psci_get_suspend_afflvl(void)
{
	return psci_get_suspend_pwrlvl();
}

#endif

/*******************************************************************************
 * Initiate power down sequence, by calling power down operations registered
 * for this CPU.
 ******************************************************************************/
void psci_do_pwrdown_sequence(unsigned int power_level)
{
#if HW_ASSISTED_COHERENCY
	/*
	 * With hardware-assisted coherency, the CPU drivers only initiate the
	 * power down sequence, without performing cache-maintenance operations
	 * in software. Data caches and MMU remain enabled both before and
	 * after this call.
	 */
	prepare_cpu_pwr_dwn(power_level);
#else
	/*
	 * Without hardware-assisted coherency, the CPU drivers disable data
	 * caches and MMU, then perform cache-maintenance operations in
	 * software.
	 *
	 * We ought to call prepare_cpu_pwr_dwn() to initiate the power down
	 * sequence. We currently have data caches and the MMU enabled, but the
	 * function below will return with data caches and the MMU disabled. We
	 * must ensure that the stack memory is flushed out to main memory
	 * before we start popping from it again.
	 */
	psci_do_pwrdown_cache_maintenance(power_level);
#endif
}