/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <context.h>
#include <cpu_data.h>
#include <interrupt_mgmt.h>
#include <platform_def.h>
#include <runtime_svc.h>
#include <smccc.h>

	.globl	runtime_exceptions

	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32

	/* ---------------------------------------------------------------------
	 * This macro handles Synchronous exceptions.
	 * Only SMC exceptions are supported.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_sync_exception
	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	mrs	x30, cntpct_el0
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	mrs	x29, tpidr_el3
	str	x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Handle SMC exceptions separately from other synchronous exceptions */
	cmp	x30, #EC_AARCH32_SMC
	b.eq	smc_handler32

	cmp	x30, #EC_AARCH64_SMC
	b.eq	smc_handler64

	/* Other kinds of synchronous exceptions are not handled */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	b	report_unhandled_exception
	.endm
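
	/*
	 * For reference only: the EC-based dispatch in handle_sync_exception
	 * above corresponds roughly to the C sketch below. This is an
	 * illustration, not part of the build; ESR_* and EC_* come from
	 * arch.h, and read_esr_el3() is assumed to be the usual sysreg
	 * accessor from arch_helpers.h.
	 *
	 *	unsigned int ec = (read_esr_el3() >> ESR_EC_SHIFT) &
	 *			  ((1U << ESR_EC_LENGTH) - 1U);
	 *	if (ec == EC_AARCH32_SMC)
	 *		smc_handler32();
	 *	else if (ec == EC_AARCH64_SMC)
	 *		smc_handler64();
	 *	else
	 *		report_unhandled_exception();
	 */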

	/* ---------------------------------------------------------------------
	 * This macro handles FIQ or IRQ interrupts i.e. EL3, S-EL1 and NS
	 * interrupts.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_interrupt_exception label
	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	bl	save_gp_registers

	/* Save the EL3 system registers needed to return from this exception */
	mrs	x0, spsr_el3
	mrs	x1, elr_el3
	stp	x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Switch to the runtime stack i.e. SP_EL0 */
	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	mov	x20, sp
	msr	spsel, #0
	mov	sp, x2

	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	bl	plat_ic_get_pending_interrupt_type
	cmp	x0, #INTR_TYPE_INVAL
	b.eq	interrupt_exit_\label

	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could be caused by any of the following
	 * conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	bl	get_interrupt_type_handler
	cbz	x0, interrupt_exit_\label
	mov	x21, x0

	mov	x0, #INTR_ID_UNAVAILABLE

	/* Set the current security state in the 'flags' parameter */
	mrs	x2, scr_el3
	ubfx	x1, x2, #0, #1

	/* Restore the reference to the 'handle' i.e. SP_EL3 */
	mov	x2, x20

	/* x3 will point to a cookie (not used now) */
	mov	x3, xzr

	/* Call the interrupt type handler */
	blr	x21

interrupt_exit_\label:
	/* Return from exception, possibly in a different security state */
	b	el3_exit

	.endm
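
	/*
	 * For reference only: the handler invoked through 'blr x21' above is
	 * expected to follow the interrupt_type_handler_t convention from
	 * interrupt_mgmt.h, i.e. roughly:
	 *
	 *	uint64_t handler(uint32_t id, uint32_t flags, void *handle,
	 *			 void *cookie);
	 *
	 * x0 carries the interrupt id, bit 0 of 'flags' in x1 carries the
	 * security state of the interrupted context (SCR_EL3.NS), x2 carries
	 * the 'handle' (the SP_EL3 context pointer saved in x20) and x3 the
	 * currently unused cookie.
	 */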

	.macro save_x4_to_x29_sp_el0
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	mrs	x18, sp_el0
	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
	.endm


vector_base runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_el0
	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
	check_vector_size sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
	check_vector_size irq_sp_el0

vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
	check_vector_size fiq_sp_el0

vector_entry serror_sp_el0
	b	report_unhandled_exception
	check_vector_size serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
	check_vector_size sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
	check_vector_size irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
	check_vector_size fiq_sp_elx

vector_entry serror_sp_elx
	b	report_unhandled_exception
	check_vector_size serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * Most commonly, this exception vector will be the entry point for
	 * SMCs and for traps that are not handled at lower ELs. SP_EL3 should
	 * point to a valid cpu context where the general purpose and system
	 * register state can be saved.
	 */
	handle_sync_exception
	check_vector_size sync_exception_aarch64

vector_entry irq_aarch64
	handle_interrupt_exception irq_aarch64
	check_vector_size irq_aarch64

vector_entry fiq_aarch64
	handle_interrupt_exception fiq_aarch64
	check_vector_size fiq_aarch64

vector_entry serror_aarch64
	/*
	 * SError exceptions from lower ELs are not currently supported.
	 * Report their occurrence.
	 */
	b	report_unhandled_exception
	check_vector_size serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * Most commonly, this exception vector will be the entry point for
	 * SMCs and for traps that are not handled at lower ELs. SP_EL3 should
	 * point to a valid cpu context where the general purpose and system
	 * register state can be saved.
	 */
	handle_sync_exception
	check_vector_size sync_exception_aarch32

vector_entry irq_aarch32
	handle_interrupt_exception irq_aarch32
	check_vector_size irq_aarch32

vector_entry fiq_aarch32
	handle_interrupt_exception fiq_aarch32
	check_vector_size fiq_aarch32

vector_entry serror_aarch32
	/*
	 * SError exceptions from lower ELs are not currently supported.
	 * Report their occurrence.
	 */
	b	report_unhandled_exception
	check_vector_size serror_aarch32


	/* ---------------------------------------------------------------------
	 * This macro takes an argument in x16 that is the index in the
	 * 'rt_svc_descs_indices' array, checks that the value in the array is
	 * valid, and loads in x15 the pointer to the handler of that service.
	 * ---------------------------------------------------------------------
	 */
	.macro	load_rt_svc_desc_pointer
	/* Load descriptor index from array of indices */
	adr	x14, rt_svc_descs_indices
	ldrb	w15, [x14, x16]

#if SMCCC_MAJOR_VERSION == 1
	/* Any index greater than 127 is invalid. Check bit 7. */
	tbnz	w15, 7, smc_unknown
#elif SMCCC_MAJOR_VERSION == 2
	/* Verify that the top 3 bits of the loaded index are 0 (w15 <= 31) */
	cmp	w15, #31
	b.hi	smc_unknown
#endif /* SMCCC_MAJOR_VERSION */

	/*
	 * Get the descriptor using the index
	 * x11 = (base + off), w15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 */
	adr	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]
	.endm
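
	/*
	 * For reference only: the address arithmetic above corresponds
	 * roughly to the C sketch below (illustrative, not part of the
	 * build). 'fid_index' is a hypothetical name for the value passed in
	 * x16; rt_svc_desc_t and its 'handle' member are assumed to be the
	 * declarations from runtime_svc.h. The version-specific range check
	 * above rejects invalid indices before this load.
	 *
	 *	uint8_t idx = rt_svc_descs_indices[fid_index];
	 *	rt_svc_desc_t *descs = (rt_svc_desc_t *)__RT_SVC_DESCS_START__;
	 *	rt_svc_handle_t handler = descs[idx].handle;
	 */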

	/* ---------------------------------------------------------------------
	 * The following code handles secure monitor calls.
	 * Depending upon the execution state from which the SMC has been
	 * invoked, it frees some general purpose registers to perform the
	 * remaining tasks. They involve finding the runtime service handler
	 * that is the target of the SMC and switching to runtime stacks
	 * (SP_EL0) before calling the handler.
	 *
	 * Note that x30 has been explicitly saved and can be used here.
	 * ---------------------------------------------------------------------
	 */
func smc_handler
smc_handler32:
	/* Check whether an AArch32 caller issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited

smc_handler64:
	/*
	 * Populate the parameters for the SMC handler.
	 * We already have x0-x4 in place. x5 will point to a cookie (not used
	 * now). x6 will point to the context structure (SP_EL3) and x7 will
	 * contain flags we need to pass to the handler.
	 *
	 * Save x4-x29 and sp_el0.
	 */
	save_x4_to_x29_sp_el0

	mov	x5, xzr
	mov	x6, sp
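
	/*
	 * For reference only: the runtime service handler dispatched below is
	 * expected to match the rt_svc_handle_t convention from
	 * runtime_svc.h, i.e. roughly:
	 *
	 *	uintptr_t handler(uint32_t smc_fid, u_register_t x1,
	 *			  u_register_t x2, u_register_t x3,
	 *			  u_register_t x4, void *cookie,
	 *			  void *handle, u_register_t flags);
	 *
	 * so x0-x4 carry the SMC arguments, x5 the (currently unused) cookie,
	 * x6 the 'handle' (SP_EL3 context pointer) and x7 the flags, whose
	 * bit 0 is populated with SCR_EL3.NS further below.
	 */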

#if SMCCC_MAJOR_VERSION == 1

	/* Get the unique owning entity number */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH

	load_rt_svc_desc_pointer

#elif SMCCC_MAJOR_VERSION == 2

	/* Bit 31 must be set */
	tbz	x0, #FUNCID_TYPE_SHIFT, smc_unknown

	/*
	 * Check MSB of namespace to decide between compatibility/vendor and
	 * SPCI/SPRT
	 */
	tbz	x0, #(FUNCID_NAMESPACE_SHIFT + 1), compat_or_vendor

	/* Namespaces SPRT and SPCI currently unimplemented */
	b	smc_unknown

compat_or_vendor:

	/* Namespace is b'00 (compatibility) or b'01 (vendor) */

	/*
	 * Add the LSB of the namespace (bit [28]) to the OEN [27:24] to create
	 * a 5-bit index into the rt_svc_descs_indices array.
	 *
	 * The low 16 entries of the rt_svc_descs_indices array correspond to
	 * OENs of the compatibility namespace and the top 16 entries of the
	 * array are assigned to the vendor namespace descriptor.
	 */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #(FUNCID_OEN_WIDTH + 1)

	load_rt_svc_desc_pointer

#endif /* SMCCC_MAJOR_VERSION */

	/*
	 * Restore the saved C runtime stack value which will become the new
	 * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
	 * structure prior to the last ERET from EL3.
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* Switch to SP_EL0 */
	msr	spsel, #0

	/*
	 * Save the SPSR_EL3, ELR_EL3 and SCR_EL3 in case there is a world
	 * switch during SMC handling.
	 * TODO: Revisit if all system registers can be saved later.
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	mrs	x18, scr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	str	x18, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

	/* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
	bfi	x7, x18, #0, #1

	mov	sp, x12

	/*
	 * Call the Secure Monitor Call handler and then drop directly into
	 * el3_exit() which will program any remaining architectural state
	 * prior to issuing the ERET to the desired lower EL.
	 */
#if DEBUG
	cbz	x15, rt_svc_fw_critical_error
#endif
	blr	x15

	b	el3_exit

smc_unknown:
	/*
	 * Here we restore x4-x18 regardless of where we came from. AArch32
	 * callers will find the register contents unchanged, but AArch64
	 * callers will find the registers modified (with stale earlier NS
	 * content). Either way, we aren't leaking any secure information
	 * through them.
	 */
	mov	x0, #SMC_UNK
	b	restore_gp_registers_callee_eret

smc_prohibited:
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x0, #SMC_UNK
	eret

rt_svc_fw_critical_error:
	/* Switch to SP_ELx */
	msr	spsel, #1
	no_ret	report_unhandled_exception
endfunc smc_handler
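
	/*
	 * Note: SMC_UNK, returned in x0 on the smc_unknown and smc_prohibited
	 * paths above, is the value -1, which the SMC Calling Convention
	 * reserves for an unknown SMC Function Identifier.
	 */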