/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context_mgmt.h>
#include <debug.h>
#include <platform.h>
#include <stddef.h>
#include "psci_private.h"

/*******************************************************************************
 * This function checks whether a cpu which has been requested to be turned on
 * is OFF to begin with.
 ******************************************************************************/
static int cpu_on_validate_state(aff_info_state_t aff_state)
{
	if (aff_state == AFF_STATE_ON)
		return PSCI_E_ALREADY_ON;

	if (aff_state == AFF_STATE_ON_PENDING)
		return PSCI_E_ON_PENDING;

	assert(aff_state == AFF_STATE_OFF);
	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Generic handler which is called to physically power on a cpu identified by
 * its mpidr. It performs the generic, architectural and platform setup and
 * state management needed to power on the target cpu, e.g. it ensures that
 * enough information is stashed for it to resume execution in the non-secure
 * security state.
 *
 * The state of all the relevant power domains is changed after calling the
 * platform handler, as it can return an error.
 ******************************************************************************/
int psci_cpu_on_start(u_register_t target_cpu,
		      entry_point_info_t *ep)
{
	int rc;
	unsigned int target_idx = plat_core_pos_by_mpidr(target_cpu);
	aff_info_state_t target_aff_state;

	/* Calling function must supply valid input arguments */
	assert((int) target_idx >= 0);
	assert(ep != NULL);

	/*
	 * This function must only be called on platforms where the
	 * CPU_ON platform hooks have been implemented.
	 */
	assert(psci_plat_pm_ops->pwr_domain_on &&
	       psci_plat_pm_ops->pwr_domain_on_finish);

	/* Protect against multiple CPUs trying to turn ON the same target CPU */
	psci_spin_lock_cpu(target_idx);

	/*
	 * Generic management: Ensure that the cpu is off to be
	 * turned on.
	 */
	rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx));
	if (rc != PSCI_E_SUCCESS)
		goto exit;

	/*
	 * Call the cpu on handler registered by the Secure Payload Dispatcher
	 * to let it do any bookkeeping. If the handler encounters an error, it
	 * is expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_on)
		psci_spd_pm->svc_on(target_cpu);

	/*
	 * Set the Affinity info state of the target cpu to ON_PENDING.
	 * Flush aff_info_state as it will be accessed with caches
	 * turned OFF.
	 */
	psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
	flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);

	/*
	 * The cache line invalidation by the target CPU after setting the
	 * state to OFF (see psci_do_cpu_off()) could cause the update to
	 * aff_info_state to be invalidated. Retry the update if the target
	 * CPU aff_info_state is not ON_PENDING.
	 */
	target_aff_state = psci_get_aff_info_state_by_idx(target_idx);
	if (target_aff_state != AFF_STATE_ON_PENDING) {
		assert(target_aff_state == AFF_STATE_OFF);
		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
		flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);

		assert(psci_get_aff_info_state_by_idx(target_idx) == AFF_STATE_ON_PENDING);
	}

	/*
	 * Perform generic, architecture and platform specific handling.
	 */
	/*
	 * Plat. management: Give the platform the current state
	 * of the target cpu to allow it to perform the necessary
	 * steps to power on.
	 */
	rc = psci_plat_pm_ops->pwr_domain_on(target_cpu);
	assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);

	if (rc == PSCI_E_SUCCESS) {
		/* Store the re-entry information for the non-secure world. */
		cm_init_context_by_index(target_idx, ep);
	} else {
		/* Restore the state on error. */
		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF);
		flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
	}

exit:
	psci_spin_unlock_cpu(target_idx);
	return rc;
}

/*******************************************************************************
 * The following function finishes an earlier power on request. It is called
 * by the common finisher routine in psci_common.c. The `state_info` is the
 * psci_power_state from which this CPU has woken up.
 ******************************************************************************/
void psci_cpu_on_finish(unsigned int cpu_idx,
			psci_power_state_t *state_info)
{
	/*
	 * Plat. management: Perform the platform specific actions
	 * for this cpu e.g. enabling the gic or zeroing the mailbox
	 * register. The actual state of this cpu has already been
	 * changed.
	 */
	psci_plat_pm_ops->pwr_domain_on_finish(state_info);

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/*
	 * Arch. management: Enable data cache and manage stack memory
	 */
	psci_do_pwrup_cache_maintenance();
#endif

	/*
	 * All the platform specific actions for turning this cpu
	 * on have completed. Perform enough arch. initialization
	 * to run in the non-secure address space.
	 */
	psci_arch_setup();

	/*
	 * Lock the CPU spin lock to make sure that the context initialization
	 * is done. Since the lock is only used in this function to create
	 * a synchronization point with cpu_on_start(), it can be released
	 * immediately.
	 */
	psci_spin_lock_cpu(cpu_idx);
	psci_spin_unlock_cpu(cpu_idx);

	/* Ensure we have been explicitly woken up by another cpu */
	assert(psci_get_aff_info_state() == AFF_STATE_ON_PENDING);

	/*
	 * Call the cpu on finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it is expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_on_finish)
		psci_spd_pm->svc_on_finish(0);

	/*
	 * Populate the mpidr field within the cpu node array. This needs to
	 * be done only once.
	 */
	psci_cpu_pd_nodes[cpu_idx].mpidr = read_mpidr() & MPIDR_AFFINITY_MASK;

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the cpu_on
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);
}