/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <context.h>
#include <cpu_data.h>
#include <interrupt_mgmt.h>
#include <platform_def.h>
#include <runtime_svc.h>
#include <smccc.h>

	.globl	runtime_exceptions

	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32

	/* ---------------------------------------------------------------------
	 * This macro handles Synchronous exceptions.
	 * Only SMC exceptions are supported.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_sync_exception
	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	mrs	x30, cntpct_el0
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	mrs	x29, tpidr_el3
	str	x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Handle SMC exceptions separately from other synchronous exceptions */
	cmp	x30, #EC_AARCH32_SMC
	b.eq	smc_handler32

	cmp	x30, #EC_AARCH64_SMC
	b.eq	smc_handler64

	/* Other kinds of synchronous exceptions are not handled */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	b	report_unhandled_exception
	.endm
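
	/* ---------------------------------------------------------------------
	 * Worked example of the dispatch above (illustrative): the ubfx
	 * isolates the Exception Class field, ESR_EL3[31:26]. An SMC executed
	 * in AArch64 state reports EC 0x17 (EC_AARCH64_SMC); an SMC executed
	 * in AArch32 state reports EC 0x13 (EC_AARCH32_SMC). Any other class,
	 * e.g. an abort routed to EL3, falls through to
	 * report_unhandled_exception.
	 * ---------------------------------------------------------------------
	 */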

	/* ---------------------------------------------------------------------
	 * This macro handles FIQ and IRQ interrupts, i.e. the EL3, S-EL1 and
	 * NS interrupt types.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_interrupt_exception label
	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	bl	save_gp_registers

	/* Save the EL3 system registers needed to return from this exception */
	mrs	x0, spsr_el3
	mrs	x1, elr_el3
	stp	x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Switch to the runtime stack i.e. SP_EL0 */
	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	mov	x20, sp
	msr	spsel, #0
	mov	sp, x2

	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	bl	plat_ic_get_pending_interrupt_type
	cmp	x0, #INTR_TYPE_INVAL
	b.eq	interrupt_exit_\label

	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could be because of the following conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	bl	get_interrupt_type_handler
	cbz	x0, interrupt_exit_\label
	mov	x21, x0

	mov	x0, #INTR_ID_UNAVAILABLE

	/* Set the current security state in the 'flags' parameter */
	mrs	x2, scr_el3
	ubfx	x1, x2, #0, #1

	/* Restore the reference to the 'handle' i.e. SP_EL3 */
	mov	x2, x20

	/* x3 will point to a cookie (not used now) */
	mov	x3, xzr

	/* Call the interrupt type handler */
	blr	x21

interrupt_exit_\label:
	/* Return from exception, possibly in a different security state */
	b	el3_exit

	.endm
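
	/* ---------------------------------------------------------------------
	 * For reference, the arguments marshalled above correspond to the
	 * interrupt type handler prototype from interrupt_mgmt.h:
	 *
	 *	uint64_t handler(uint32_t id, uint32_t flags, void *handle,
	 *			 void *cookie);
	 *
	 * x0 carries INTR_ID_UNAVAILABLE, leaving the handler to query the
	 * interrupt controller for the id; bit[0] of x1 carries the security
	 * state of the interrupted context (SCR_EL3.NS); x2 points to the
	 * saved cpu_context; x3 is an unused cookie.
	 * ---------------------------------------------------------------------
	 */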

vector_base runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_el0
	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
	check_vector_size sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
	check_vector_size irq_sp_el0

vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
	check_vector_size fiq_sp_el0

vector_entry serror_sp_el0
	b	report_unhandled_exception
	check_vector_size serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
	check_vector_size sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
	check_vector_size irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
	check_vector_size fiq_sp_elx

vector_entry serror_sp_elx
	b	report_unhandled_exception
	check_vector_size serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * This exception vector is, most commonly, the entry point for SMCs
	 * and for traps that are unhandled at lower ELs. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	handle_sync_exception
	check_vector_size sync_exception_aarch64

vector_entry irq_aarch64
	handle_interrupt_exception irq_aarch64
	check_vector_size irq_aarch64

vector_entry fiq_aarch64
	handle_interrupt_exception fiq_aarch64
	check_vector_size fiq_aarch64

vector_entry serror_aarch64
	/*
	 * SError exceptions from lower ELs are not currently supported.
	 * Report their occurrence.
	 */
	b	report_unhandled_exception
	check_vector_size serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * This exception vector is, most commonly, the entry point for SMCs
	 * and for traps that are unhandled at lower ELs. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	handle_sync_exception
	check_vector_size sync_exception_aarch32

vector_entry irq_aarch32
	handle_interrupt_exception irq_aarch32
	check_vector_size irq_aarch32

vector_entry fiq_aarch32
	handle_interrupt_exception fiq_aarch32
	check_vector_size fiq_aarch32

vector_entry serror_aarch32
	/*
	 * SError exceptions from lower ELs are not currently supported.
	 * Report their occurrence.
	 */
	b	report_unhandled_exception
	check_vector_size serror_aarch32
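
	/* ---------------------------------------------------------------------
	 * Note on the layout above: the architecture allots each vector entry
	 * 0x80 bytes, i.e. 32 A64 instructions. Each handler therefore either
	 * branches out immediately or expands a macro that must fit within
	 * that limit; check_vector_size fails the build if an entry overflows
	 * its slot.
	 * ---------------------------------------------------------------------
	 */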

	/* ---------------------------------------------------------------------
	 * This macro takes an argument in x16 that is the index in the
	 * 'rt_svc_descs_indices' array, checks that the value in the array is
	 * valid, and loads the pointer to the handler of that service into
	 * x15.
	 * ---------------------------------------------------------------------
	 */
	.macro	load_rt_svc_desc_pointer
	/* Load descriptor index from array of indices */
	adr	x14, rt_svc_descs_indices
	ldrb	w15, [x14, x16]

#if SMCCC_MAJOR_VERSION == 1
	/* Any index greater than 127 is invalid. Check bit 7. */
	tbnz	w15, 7, smc_unknown
#elif SMCCC_MAJOR_VERSION == 2
	/* Verify that the top 3 bits of the loaded index are 0 (w15 <= 31) */
	cmp	w15, #31
	b.hi	smc_unknown
#endif /* SMCCC_MAJOR_VERSION */

	/*
	 * Get the descriptor using the index
	 * x11 = (base + off), w15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 */
	adr	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]
	.endm
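
	/* ---------------------------------------------------------------------
	 * Worked example of the lookup above, assuming RT_SVC_SIZE_LOG2 = 5
	 * (32-byte descriptors): if the array maps the caller's function id
	 * to index 3, the handler pointer is loaded from
	 *
	 *	__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE + (3 << 5)
	 *
	 * i.e. the 'handle' field of the fourth descriptor placed in the
	 * rt_svc_descs section by DECLARE_RT_SVC().
	 * ---------------------------------------------------------------------
	 */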

	/* ---------------------------------------------------------------------
	 * The following code handles secure monitor calls.
	 * Depending upon the execution state from where the SMC has been
	 * invoked, it frees some general purpose registers to perform the
	 * remaining tasks. They involve finding the runtime service handler
	 * that is the target of the SMC and switching to the runtime stack
	 * (SP_EL0) before calling the handler.
	 *
	 * Note that x30 has been explicitly saved and can be used here.
	 * ---------------------------------------------------------------------
	 */
func smc_handler
smc_handler32:
	/* Check whether an AArch32 caller issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited

smc_handler64:
	/*
	 * Populate the parameters for the SMC handler.
	 * We already have x0-x4 in place. x5 will point to a cookie (not used
	 * now). x6 will point to the context structure (SP_EL3) and x7 will
	 * contain flags we need to pass to the handler.
	 *
	 * Save x4-x29 and sp_el0.
	 */
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	mrs	x18, sp_el0
	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]

	mov	x5, xzr
	mov	x6, sp

#if SMCCC_MAJOR_VERSION == 1

	/* Get the unique owning entity number */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH

	load_rt_svc_desc_pointer

#elif SMCCC_MAJOR_VERSION == 2

	/* Bit 31 must be set */
	tbz	x0, #FUNCID_TYPE_SHIFT, smc_unknown

	/*
	 * Check MSB of namespace to decide between compatibility/vendor and
	 * SPCI/SPRT
	 */
	tbz	x0, #(FUNCID_NAMESPACE_SHIFT + 1), compat_or_vendor

	/* Namespaces SPRT and SPCI currently unimplemented */
	b	smc_unknown

compat_or_vendor:

	/* Namespace is b'00 (compatibility) or b'01 (vendor) */

	/*
	 * Add the LSB of the namespace (bit [28]) to the OEN [27:24] to create
	 * a 5-bit index into the rt_svc_descs_indices array.
	 *
	 * The low 16 entries of the rt_svc_descs_indices array correspond to
	 * OENs of the compatibility namespace and the top 16 entries of the
	 * array are assigned to the vendor namespace descriptor.
	 */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #(FUNCID_OEN_WIDTH + 1)

	load_rt_svc_desc_pointer

#endif /* SMCCC_MAJOR_VERSION */

	/*
	 * Restore the saved C runtime stack value which will become the new
	 * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
	 * structure prior to the last ERET from EL3.
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* Switch to SP_EL0 */
	msr	spsel, #0

	/*
	 * Save SPSR_EL3, ELR_EL3 and SCR_EL3 in case there is a world
	 * switch during SMC handling.
	 * TODO: Revisit if all system registers can be saved later.
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	mrs	x18, scr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	str	x18, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

	/* Copy the SCR_EL3.NS bit into bit 0 of the 'flags' parameter to
	 * indicate the caller's security state */
	bfi	x7, x18, #0, #1

	mov	sp, x12

	/*
	 * Call the Secure Monitor Call handler and then drop directly into
	 * el3_exit() which will program any remaining architectural state
	 * prior to issuing the ERET to the desired lower EL.
	 */
#if DEBUG
	cbz	x15, rt_svc_fw_critical_error
#endif
	blr	x15

	b	el3_exit

smc_unknown:
	/*
	 * Unknown SMC call. Populate return value with SMC_UNK, restore
	 * GP registers, and return to caller.
	 */
	mov	x0, #SMC_UNK
	str	x0, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	restore_gp_registers_eret

smc_prohibited:
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x0, #SMC_UNK
	eret

rt_svc_fw_critical_error:
	/* Switch to SP_ELx */
	msr	spsel, #1
	no_ret	report_unhandled_exception
endfunc smc_handler
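
	/* ---------------------------------------------------------------------
	 * Caller's view of the error paths above (illustrative sketch): per
	 * the SMC Calling Convention, an unknown function id is reported by
	 * returning SMC_UNK (-1) in x0/w0, so a hypothetical lower-EL caller
	 * could detect it as follows:
	 *
	 *	movz	w0, #0x0099		// hypothetical, unallocated
	 *	movk	w0, #0xC400, lsl #16	// SMC64 fast-call function id
	 *	smc	#0
	 *	cmn	x0, #1			// did EL3 return SMC_UNK (-1)?
	 *	b.eq	smc_not_implemented
	 * ---------------------------------------------------------------------
	 */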