/*
 * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/extensions/amu.h>
#include <lib/utils.h>

/*******************************************************************************
 * Context management library initialisation routine. This library is used by
 * runtime services to share pointers to 'cpu_context' structures for the secure
 * and non-secure states. Management of the structures and their associated
 * memory is not done by the context management library, e.g. the PSCI service
 * manages the cpu context used for entry from and exit to the non-secure state.
 * The Secure payload manages the context(s) corresponding to the secure state.
 * It also uses this library to get access to the non-secure
 * state cpu context pointers.
 ******************************************************************************/
void cm_init(void)
{
	/*
	 * The context management library has only global data to initialize, but
	 * that will be done when the BSS is zeroed out.
	 */
}

/*******************************************************************************
 * The following function initializes the cpu_context 'ctx' for
 * first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 *
 * The security state to initialize is determined by the SECURE attribute
 * of the entry_point_info.
 *
 * The EE and ST attributes are used to configure the endianness and secure
 * timer availability for the new execution context.
 *
 * To prepare the register state for entry, call cm_prepare_el3_exit() and
 * el3_exit(). For Secure-EL1, cm_prepare_el3_exit() is equivalent to
 * cm_el1_sysregs_context_restore().
 ******************************************************************************/
void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	unsigned int security_state;
	uint32_t scr, sctlr;
	regs_t *reg_ctx;

	assert(ctx != NULL);

	security_state = GET_SECURITY_STATE(ep->h.attr);

	/* Clear any residual register values from the context */
	zeromem(ctx, sizeof(*ctx));

	reg_ctx = get_regs_ctx(ctx);

	/*
	 * Base the context SCR on the current value, adjust for entry point
	 * specific requirements
	 */
	scr = read_scr();
	scr &= ~(SCR_NS_BIT | SCR_HCE_BIT);

	if (security_state != SECURE)
		scr |= SCR_NS_BIT;

	if (security_state != SECURE) {
		/*
		 * Set up SCTLR for the Non-secure context.
		 *
		 * SCTLR.EE: Endianness is taken from the entrypoint attributes.
		 *
		 * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
		 * required by the PSCI specification).
		 *
		 * Set remaining SCTLR fields to their architecturally defined
		 * values. Some fields reset to an IMPLEMENTATION DEFINED value:
		 *
		 * SCTLR.TE: Set to zero so that exceptions to an Exception
		 * Level executing at PL1 are taken to A32 state.
		 *
		 * SCTLR.V: Set to zero to select the normal exception vectors
		 * with base address held in VBAR.
		 */
		assert(((ep->spsr >> SPSR_E_SHIFT) & SPSR_E_MASK) ==
			(EP_GET_EE(ep->h.attr) >> EP_EE_SHIFT));

		sctlr = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0U;
		sctlr |= (SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_V_BIT));
		write_ctx_reg(reg_ctx, CTX_NS_SCTLR, sctlr);
	}

	/*
	 * The target exception level is based on the spsr mode requested. If
	 * execution is requested to hyp mode, HVC is enabled via SCR.HCE.
	 */
	if (GET_M32(ep->spsr) == MODE32_hyp)
		scr |= SCR_HCE_BIT;

	/*
	 * Store the initialised values for SCTLR and SCR in the cpu_context.
	 * The Hyp mode registers are not part of the saved context and are
	 * set up in cm_prepare_el3_exit().
	 */
	write_ctx_reg(reg_ctx, CTX_SCR, scr);
	write_ctx_reg(reg_ctx, CTX_LR, ep->pc);
	write_ctx_reg(reg_ctx, CTX_SPSR, ep->spsr);

	/*
	 * Store the r0-r3 values from the entrypoint into the context.
	 * Use memcpy as we are in control of the layout of the structures.
	 */
	memcpy((void *)reg_ctx, (void *)&ep->args, sizeof(aapcs32_params_t));
}

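/*
 * Illustrative sketch only (not part of the library): roughly how a caller
 * might build an entry_point_info_t for a little-endian, non-secure Hyp-mode
 * payload and hand it to cm_setup_context(). The 'ns_ctx' and 'ns_image_base'
 * names below are hypothetical; real callers (e.g. BL1/BL2 or PSCI) derive
 * these from platform data.
 *
 *	entry_point_info_t ns_ep;
 *
 *	zeromem(&ns_ep, sizeof(ns_ep));
 *	SET_PARAM_HEAD(&ns_ep, PARAM_EP, VERSION_1, EP_NON_SECURE);
 *	ns_ep.pc = ns_image_base;
 *	ns_ep.spsr = SPSR_MODE32(MODE32_hyp, SPSR_T_ARM, SPSR_E_LITTLE,
 *				 DISABLE_ALL_EXCEPTIONS);
 *	cm_setup_context(ns_ctx, &ns_ep);
 */
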
/*******************************************************************************
 * Enable architecture extensions on first entry to Non-secure world.
 * When EL2 is implemented but unused, `el2_unused` is true, otherwise it is
 * false.
 ******************************************************************************/
static void enable_extensions_nonsecure(bool el2_unused)
{
#if IMAGE_BL32
#if ENABLE_AMU
	amu_enable(el2_unused);
#endif
#endif
}

/*******************************************************************************
 * The following function initializes the cpu_context for a CPU specified by
 * its `cpu_idx` for first use, and sets the initial entrypoint state as
 * specified by the entry_point_info structure.
 ******************************************************************************/
void cm_init_context_by_index(unsigned int cpu_idx,
			      const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}

/*******************************************************************************
 * The following function initializes the cpu_context for the current CPU
 * for first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 ******************************************************************************/
void cm_init_my_context(const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}

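/*
 * Illustrative sketch only (not part of the library): a runtime service such
 * as PSCI typically populates the non-secure context for a target CPU with
 * cm_init_context_by_index() and then, once running on that CPU, calls
 * cm_prepare_el3_exit(NON_SECURE) before returning through el3_exit() to
 * enter the non-secure world. The 'target_idx' and 'ns_ep' names below are
 * hypothetical:
 *
 *	cm_init_context_by_index(target_idx, ns_ep);
 *	...
 *	cm_prepare_el3_exit(NON_SECURE);
 */
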
/*******************************************************************************
 * Prepare the CPU system registers for first entry into secure or normal world.
 *
 * If execution is requested to hyp mode, HSCTLR is initialised.
 * If execution is requested to non-secure PL1, and the CPU supports HYP mode,
 * then HYP mode is disabled by configuring all necessary HYP mode registers.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	uint32_t hsctlr, scr;
	cpu_context_t *ctx = cm_get_context(security_state);
	bool el2_unused = false;

	assert(ctx != NULL);

	if (security_state == NON_SECURE) {
		scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR);
		if ((scr & SCR_HCE_BIT) != 0U) {
			/* Use SCTLR value to initialize HSCTLR */
			hsctlr = read_ctx_reg(get_regs_ctx(ctx),
					      CTX_NS_SCTLR);
			hsctlr |= HSCTLR_RES1;
			/* Temporarily set the NS bit to access HSCTLR */
			write_scr(read_scr() | SCR_NS_BIT);
			/*
			 * Make sure the write to SCR is complete so that
			 * we can access HSCTLR
			 */
			isb();
			write_hsctlr(hsctlr);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		} else if ((read_id_pfr1() &
			(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) != 0U) {
			el2_unused = true;

			/*
			 * Set the NS bit to access NS copies of certain banked
			 * registers
			 */
			write_scr(read_scr() | SCR_NS_BIT);
			isb();

			/*
			 * Hyp / PL2 present but unused, need to disable safely.
			 * HSCTLR can be ignored in this case.
			 *
			 * Set HCR to its architectural reset value so that
			 * Non-secure operations do not trap to Hyp mode.
			 */
			write_hcr(HCR_RESET_VAL);

			/*
			 * Set HCPTR to its architectural reset value so that
			 * Non-secure access from EL1 or EL0 to trace and to
			 * Advanced SIMD and floating point functionality does
			 * not trap to Hyp mode.
			 */
			write_hcptr(HCPTR_RESET_VAL);

			/*
			 * Initialise CNTHCTL. All fields are architecturally
			 * UNKNOWN on reset and are set to zero except for the
			 * field(s) listed below.
			 *
			 * CNTHCTL.PL1PCEN: Disable traps to Hyp mode of
			 * Non-secure EL0 and EL1 accesses to the physical
			 * timer registers.
			 *
			 * CNTHCTL.PL1PCTEN: Disable traps to Hyp mode of
			 * Non-secure EL0 and EL1 accesses to the physical
			 * counter registers.
			 */
			write_cnthctl(CNTHCTL_RESET_VAL |
					PL1PCEN_BIT | PL1PCTEN_BIT);

			/*
			 * Initialise CNTVOFF to zero as it resets to an
			 * IMPLEMENTATION DEFINED value.
			 */
			write64_cntvoff(0);

			/*
			 * Set VPIDR and VMPIDR to match MIDR and MPIDR
			 * respectively.
			 */
			write_vpidr(read_midr());
			write_vmpidr(read_mpidr());

			/*
			 * Initialise VTTBR, setting all fields rather than
			 * relying on the hw. Some fields are architecturally
			 * UNKNOWN at reset.
			 *
			 * VTTBR.VMID: Set to zero which is the architecturally
			 * defined reset value. Even though EL1&0 stage 2
			 * address translation is disabled, cache maintenance
			 * operations depend on the VMID.
			 *
			 * VTTBR.BADDR: Set to zero as EL1&0 stage 2 address
			 * translation is disabled.
			 */
			write64_vttbr(VTTBR_RESET_VAL &
				~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT)
				| (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT)));

			/*
			 * Initialise HDCR, setting all the fields rather than
			 * relying on hw.
			 *
			 * HDCR.HPMN: Set to the value of PMCR.N which is the
			 * architecturally-defined reset value.
			 *
			 * HDCR.HLP: Set to one so that event counter
			 * overflow, that is recorded in PMOVSCLR[0-30],
			 * occurs on the increment that changes
			 * PMEVCNTR<n>[63] from 1 to 0, when ARMv8.5-PMU is
			 * implemented. This bit is RES0 in versions of the
			 * architecture earlier than ARMv8.5; setting it to 1
			 * doesn't have any effect on them.
			 * This bit is Reserved, UNK/SBZP in ARMv7.
			 *
			 * HDCR.HPME: Set to zero to disable EL2 Event
			 * counters.
			 */
#if (ARM_ARCH_MAJOR > 7)
			write_hdcr((HDCR_RESET_VAL | HDCR_HLP_BIT |
				   ((read_pmcr() & PMCR_N_BITS) >>
				    PMCR_N_SHIFT)) & ~HDCR_HPME_BIT);
#else
			write_hdcr((HDCR_RESET_VAL |
				   ((read_pmcr() & PMCR_N_BITS) >>
				    PMCR_N_SHIFT)) & ~HDCR_HPME_BIT);
#endif
			/*
			 * Set HSTR to its architectural reset value so that
			 * accesses to system registers in the coproc=1111
			 * encoding space do not trap to Hyp mode.
			 */
			write_hstr(HSTR_RESET_VAL);
			/*
			 * Set CNTHP_CTL to its architectural reset value to
			 * disable the EL2 physical timer and prevent timer
			 * interrupts. Some fields are architecturally UNKNOWN
			 * on reset and are set to zero.
			 */
			write_cnthp_ctl(CNTHP_CTL_RESET_VAL);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		}
		enable_extensions_nonsecure(el2_unused);
	}
}