/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/ehf.h>
#include <bl31/interrupt_mgmt.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/cassert.h>
#include <services/sdei.h>

#include "sdei_private.h"

/* x0-x17 GPREGS context */
#define SDEI_SAVED_GPREGS	18U

/* Maximum preemption nesting levels: Critical priority and Normal priority */
#define MAX_EVENT_NESTING	2U

/* Per-CPU SDEI state access macro */
#define sdei_get_this_pe_state()	(&cpu_state[plat_my_core_pos()])

/* Structure to store information about an outstanding dispatch */
typedef struct sdei_dispatch_context {
	sdei_ev_map_t *map;
	uint64_t x[SDEI_SAVED_GPREGS];
	jmp_buf *dispatch_jmp;

	/* Exception state registers */
	uint64_t elr_el3;
	uint64_t spsr_el3;

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	/* CVE-2018-3639 mitigation state */
	uint64_t disable_cve_2018_3639;
#endif
} sdei_dispatch_context_t;

/* Per-CPU SDEI state data */
typedef struct sdei_cpu_state {
	sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
	unsigned short stack_top; /* Empty ascending */
	bool pe_masked;
	bool pending_enables;
} sdei_cpu_state_t;

/* SDEI states for all cores in the system */
static sdei_cpu_state_t cpu_state[PLATFORM_CORE_COUNT];

int64_t sdei_pe_mask(void)
{
	int64_t ret = 0;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	/*
	 * The return value indicates whether this call had any effect on the
	 * mask status of this PE.
	 */
	if (!state->pe_masked) {
		state->pe_masked = true;
		ret = 1;
	}

	return ret;
}

void sdei_pe_unmask(void)
{
	unsigned int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	uint64_t my_mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	/*
	 * If there are pending enables, iterate through the private mappings
	 * and enable those bound maps that are in the enabled state. Also,
	 * iterate through shared mappings and enable interrupts of events that
	 * are targeted to this PE.
	 */
	if (state->pending_enables) {
		for_each_private_map(i, map) {
			se = get_event_entry(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED))
				plat_ic_enable_interrupt(map->intr);
		}

		for_each_shared_map(i, map) {
			se = get_event_entry(map);

			sdei_map_lock(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED) &&
					(se->reg_flags == SDEI_REGF_RM_PE) &&
					(se->affinity == my_mpidr)) {
				plat_ic_enable_interrupt(map->intr);
			}
			sdei_map_unlock(map);
		}
	}

	state->pending_enables = false;
	state->pe_masked = false;
}

/* Push a dispatch context to the dispatch stack */
static sdei_dispatch_context_t *push_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	sdei_dispatch_context_t *disp_ctx;

	/* Cannot have more than max events */
	assert(state->stack_top < MAX_EVENT_NESTING);

	disp_ctx = &state->dispatch_stack[state->stack_top];
	state->stack_top++;

	return disp_ctx;
}

/* Pop a dispatch context from the dispatch stack */
static sdei_dispatch_context_t *pop_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0U)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	state->stack_top--;

	return &state->dispatch_stack[state->stack_top];
}

/* Retrieve the context at the top of dispatch stack */
static sdei_dispatch_context_t *get_outstanding_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0U)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	return &state->dispatch_stack[state->stack_top - 1U];
}

static sdei_dispatch_context_t *save_event_ctx(sdei_ev_map_t *map,
		void *tgt_ctx)
{
	sdei_dispatch_context_t *disp_ctx;
	const gp_regs_t *tgt_gpregs;
	const el3_state_t *tgt_el3;

	assert(tgt_ctx != NULL);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	disp_ctx = push_dispatch();
	assert(disp_ctx != NULL);
	disp_ctx->map = map;

	/* Save general purpose and exception registers */
	memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
	disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
	disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);

	return disp_ctx;
}

static void restore_event_ctx(const sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
{
	gp_regs_t *tgt_gpregs;
	el3_state_t *tgt_el3;

	assert(tgt_ctx != NULL);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	CASSERT(sizeof(disp_ctx->x) == (SDEI_SAVED_GPREGS * sizeof(uint64_t)),
			foo);

	/* Restore general purpose and exception registers */
	memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
	write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
	write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(tgt_ctx);

	/* Restore CVE-2018-3639 mitigation state */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE,
		disp_ctx->disable_cve_2018_3639);
#endif
}

static void save_secure_context(void)
{
	cm_el1_sysregs_context_save(SECURE);
}

/* Restore Secure context and arrange to resume it at the next ERET */
static void restore_and_resume_secure_context(void)
{
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);
}

/*
 * Restore Non-secure context and arrange to resume it at the next ERET. Return
 * pointer to the Non-secure context.
 */
static cpu_context_t *restore_and_resume_ns_context(void)
{
	cpu_context_t *ns_ctx;

	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	ns_ctx = cm_get_context(NON_SECURE);
	assert(ns_ctx != NULL);

	return ns_ctx;
}

/*
 * Prepare for ERET:
 * - Set the ELR to the registered handler address
 * - Set the SPSR register as described in the SDEI documentation and
 *   the AArch64.TakeException() pseudocode function in
 *   ARM DDI 0487F.c page J1-7635
 */

static void sdei_set_elr_spsr(sdei_entry_t *se, sdei_dispatch_context_t *disp_ctx)
{
	unsigned int client_el = sdei_client_el();
	u_register_t sdei_spsr = SPSR_64(client_el, MODE_SP_ELX,
			DISABLE_ALL_EXCEPTIONS);

	u_register_t interrupted_pstate = disp_ctx->spsr_el3;

	/* Check the SPAN bit in the client EL SCTLR */
	u_register_t client_el_sctlr;

	if (client_el == MODE_EL2) {
		client_el_sctlr = read_sctlr_el2();
	} else {
		client_el_sctlr = read_sctlr_el1();
	}

	/*
	 * Check whether to force the PAN bit or use the value in the
	 * interrupted EL, according to the check described in
	 * TakeException. Since the client can only be Non-secure EL2 or
	 * EL1, some of the conditions in ElIsInHost() are known to always
	 * hold true.
	 * When client_el is EL2, SCTLR_EL2 is known to have a SPAN bit,
	 * as the conditions HCR_EL2.E2H = 1 and HCR_EL2.TGE = 1 have
	 * already been checked.
	 */
	u_register_t hcr_el2 = read_hcr();
	bool el_is_in_host = is_armv8_1_vhe_present() &&
			     (hcr_el2 & HCR_TGE_BIT) &&
			     (hcr_el2 & HCR_E2H_BIT);

	if (is_armv8_1_pan_present() &&
	    ((client_el == MODE_EL1) ||
		(client_el == MODE_EL2 && el_is_in_host)) &&
	    ((client_el_sctlr & SCTLR_SPAN_BIT) == 0U)) {
		sdei_spsr |= SPSR_PAN_BIT;
	} else {
		sdei_spsr |= (interrupted_pstate & SPSR_PAN_BIT);
	}

	/* If SSBS is implemented, take the value from the client EL SCTLR */
	u_register_t ssbs_enabled = (read_id_aa64pfr1_el1()
					>> ID_AA64PFR1_EL1_SSBS_SHIFT)
					& ID_AA64PFR1_EL1_SSBS_MASK;
	if (ssbs_enabled != SSBS_UNAVAILABLE) {
		u_register_t ssbs_bit = ((client_el_sctlr & SCTLR_DSSBS_BIT)
					>> SCTLR_DSSBS_SHIFT)
					<< SPSR_SSBS_SHIFT_AARCH64;
		sdei_spsr |= ssbs_bit;
	}

	/* If MTE is implemented in the client EL, set the TCO bit */
	if (get_armv8_5_mte_support() >= MTE_IMPLEMENTED_ELX) {
		sdei_spsr |= SPSR_TCO_BIT_AARCH64;
	}

	/* Take the DIT field from the pstate of the interrupted EL */
	sdei_spsr |= (interrupted_pstate & SPSR_DIT_BIT);

	cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t) se->ep, sdei_spsr);
}

/*
 * Populate the Non-secure context so that the next ERET will dispatch to the
 * SDEI client.
 */
static void setup_ns_dispatch(sdei_ev_map_t *map, sdei_entry_t *se,
		cpu_context_t *ctx, jmp_buf *dispatch_jmp)
{
	sdei_dispatch_context_t *disp_ctx;

	/* Push the event and context */
	disp_ctx = save_event_ctx(map, ctx);

	/*
	 * Set up handler arguments:
	 *
	 * - x0: Event number
	 * - x1: Handler argument supplied at the time of event registration
	 * - x2: Interrupted PC
	 * - x3: Interrupted SPSR
	 */
	SMC_SET_GP(ctx, CTX_GPREG_X0, (uint64_t) map->ev_num);
	SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
	SMC_SET_GP(ctx, CTX_GPREG_X2, disp_ctx->elr_el3);
	SMC_SET_GP(ctx, CTX_GPREG_X3, disp_ctx->spsr_el3);

	/* Set up the ELR and SPSR registers to prepare for ERET */
	sdei_set_elr_spsr(se, disp_ctx);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(ctx);

	/* Save CVE-2018-3639 mitigation state */
	disp_ctx->disable_cve_2018_3639 = read_ctx_reg(tgt_cve_2018_3639,
		CTX_CVE_2018_3639_DISABLE);

	/* Force SDEI handler to execute with mitigation enabled by default */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE, 0);
#endif

	disp_ctx->dispatch_jmp = dispatch_jmp;
}

/* Handle a triggered SDEI interrupt while events were masked on this PE */
static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
		sdei_cpu_state_t *state, unsigned int intr_raw)
{
	uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
	bool disable = false;

	/* Nothing to do for event 0 */
	if (map->ev_num == SDEI_EVENT_0)
		return;

	/*
	 * For a private event, or for a shared event specifically routed to
	 * this CPU, we disable the interrupt, leave it pending, and do
	 * EOI.
	 */
	if (is_event_private(map) || (se->reg_flags == SDEI_REGF_RM_PE))
		disable = true;

	if (se->reg_flags == SDEI_REGF_RM_PE)
		assert(se->affinity == my_mpidr);

	if (disable) {
		plat_ic_disable_interrupt(map->intr);
		plat_ic_set_interrupt_pending(map->intr);
		plat_ic_end_of_interrupt(intr_raw);
		state->pending_enables = true;

		return;
	}

	/*
	 * We just received a shared event with routing set to ANY PE. The
	 * interrupt can't be delegated on this PE as SDEI events are masked.
	 * However, because its routing mode is ANY, it is possible that the
	 * event can be delegated on any other PE that hasn't masked events.
	 * Therefore, we set the interrupt back pending so as to give other
	 * suitable PEs a chance of handling it.
	 */
	assert(plat_ic_is_spi(map->intr) != 0);
	plat_ic_set_interrupt_pending(map->intr);

	/*
	 * Leaving the same interrupt pending also means that the same interrupt
	 * can target this PE again as soon as this PE leaves EL3. Whether and
	 * how often that happens depends on the implementation of GIC.
	 *
	 * We therefore call a platform handler to resolve this situation.
	 */
	plat_sdei_handle_masked_trigger(my_mpidr, map->intr);

	/* This PE is masked. We EOI the interrupt, as it can't be delegated */
	plat_ic_end_of_interrupt(intr_raw);
}

/* SDEI main interrupt handler */
int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
		void *cookie)
{
	sdei_entry_t *se;
	cpu_context_t *ctx;
	sdei_ev_map_t *map;
	const sdei_dispatch_context_t *disp_ctx;
	unsigned int sec_state;
	sdei_cpu_state_t *state;
	uint32_t intr;
	jmp_buf dispatch_jmp;
	const uint64_t mpidr = read_mpidr_el1();

	/*
	 * To handle an event, the following conditions must be true:
	 *
	 * 1. Event must be signalled
	 * 2. Event must be enabled
	 * 3. This PE must be a target PE for the event
	 * 4. PE must be unmasked for SDEI
	 * 5. If this is a normal event, no event must be running
	 * 6. If this is a critical event, no critical event must be running
	 *
	 * (1) and (2) are true when this function is running
	 * (3) is enforced in GIC by selecting the appropriate routing option
	 * (4) is satisfied by the client calling PE_UNMASK
	 * (5) and (6) are enforced using interrupt priority, the RPR, in GIC:
	 *   - Normal SDEI events belong to Normal SDE priority class
	 *   - Critical SDEI events belong to Critical CSDE priority class
	 *
	 * The interrupt has already been acknowledged, and therefore is active,
	 * so no other PE can handle this event while we are at it.
	 *
	 * Find if this is an SDEI interrupt. There must be an event mapped to
	 * this interrupt.
	 */
	intr = plat_ic_get_interrupt_id(intr_raw);
	map = find_event_map_by_intr(intr, (plat_ic_is_spi(intr) != 0));
	if (map == NULL) {
		ERROR("No SDEI map for interrupt %u\n", intr);
		panic();
	}

	/*
	 * The received interrupt number must either correspond to event 0, or
	 * must be a bound interrupt.
	 */
	assert((map->ev_num == SDEI_EVENT_0) || is_map_bound(map));

	se = get_event_entry(map);
	state = sdei_get_this_pe_state();

	if (state->pe_masked) {
		/*
		 * Interrupts received while this PE was masked can't be
		 * dispatched.
		 */
		SDEI_LOG("interrupt %u on %llx while PE masked\n", map->intr,
				mpidr);
		if (is_event_shared(map))
			sdei_map_lock(map);

		handle_masked_trigger(map, se, state, intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	/* Insert load barrier for signalled SDEI event */
	if (map->ev_num == SDEI_EVENT_0)
		dmbld();

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Assert that a shared event routed to this PE has been configured so */
	if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
		assert(se->affinity == (mpidr & MPIDR_AFFINITY_MASK));
	}

	if (!can_sdei_state_trans(se, DO_DISPATCH)) {
		SDEI_LOG("SDEI event 0x%x can't be dispatched; state=0x%x\n",
				map->ev_num, se->state);

		/*
		 * If the event is registered, leave the interrupt pending so
		 * that it's delivered when the event is enabled.
		 */
		if (GET_EV_STATE(se, REGISTERED))
			plat_ic_set_interrupt_pending(map->intr);

		/*
		 * The interrupt was disabled or unregistered after the handler
		 * started to execute, which means now the interrupt is already
		 * disabled and we just need to EOI the interrupt.
		 */
		plat_ic_end_of_interrupt(intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	disp_ctx = get_outstanding_dispatch();
	if (is_event_critical(map)) {
		/*
		 * If this event is Critical, and if there's an outstanding
		 * dispatch, assert the latter is a Normal dispatch. Critical
		 * events can preempt an outstanding Normal event dispatch.
		 */
		if (disp_ctx != NULL)
			assert(is_event_normal(disp_ctx->map));
	} else {
		/*
		 * If this event is Normal, assert that there are no outstanding
		 * dispatches. Normal events can't preempt any outstanding event
		 * dispatches.
		 */
		assert(disp_ctx == NULL);
	}

	sec_state = get_interrupt_src_ss(flags);

	if (is_event_shared(map))
		sdei_map_unlock(map);

	SDEI_LOG("ACK %llx, ev:%d ss:%d spsr:%lx ELR:%lx\n", mpidr, map->ev_num,
			sec_state, read_spsr_el3(), read_elr_el3());

	ctx = handle;

	/*
	 * Check if we interrupted secure state. Perform a context switch so
	 * that we can delegate to NS.
	 */
	if (sec_state == SECURE) {
		save_secure_context();
		ctx = restore_and_resume_ns_context();
	}

	/* Synchronously dispatch event */
	setup_ns_dispatch(map, se, ctx, &dispatch_jmp);
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when the client completes the event.
	 *
	 * If the cause of dispatch originally interrupted the Secure world,
	 * resume Secure.
	 *
	 * No need to save the Non-secure context ahead of a world switch: the
	 * Non-secure context was fully saved before dispatch, and has been
	 * returned to its pre-dispatch state.
	 */
	if (sec_state == SECURE)
		restore_and_resume_secure_context();

	/*
	 * The event was dispatched after receiving an SDEI interrupt. With
	 * the event handling completed, EOI the corresponding
	 * interrupt.
	 */
	if ((map->ev_num != SDEI_EVENT_0) && !is_map_bound(map)) {
		ERROR("Invalid SDEI mapping: ev=%u\n", map->ev_num);
		panic();
	}
	plat_ic_end_of_interrupt(intr_raw);

	return 0;
}

/*
 * Explicitly dispatch the given SDEI event.
 *
 * When calling this API, the caller must be prepared for the SDEI dispatcher to
 * restore the Non-secure context and make it active. This call returns only
 * after the client has completed the dispatch. Then, the Non-secure context
 * will be active, and the following ERET will return to Non-secure.
 *
 * Should the caller require re-entry to Secure, it must restore the Secure
 * context and program registers for ERET.
 */
int sdei_dispatch_event(int ev_num)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ns_ctx;
	sdei_dispatch_context_t *disp_ctx;
	sdei_cpu_state_t *state;
	jmp_buf dispatch_jmp;

	/* Can't dispatch if events are masked on this PE */
	state = sdei_get_this_pe_state();
	if (state->pe_masked)
		return -1;

	/* Event 0 can't be dispatched */
	if (ev_num == SDEI_EVENT_0)
		return -1;

	/* Locate mapping corresponding to this event */
	map = find_event_map(ev_num);
	if (map == NULL)
		return -1;

	/* Only explicit events can be dispatched */
	if (!is_map_explicit(map))
		return -1;

	/* Examine state of dispatch stack */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx != NULL) {
		/*
		 * There's an outstanding dispatch. If the outstanding dispatch
		 * is critical, no more dispatches are possible.
		 */
		if (is_event_critical(disp_ctx->map))
			return -1;

		/*
		 * If the outstanding dispatch is Normal, only critical events
		 * can be dispatched.
		 */
		if (is_event_normal(map))
			return -1;
	}

	se = get_event_entry(map);
	if (!can_sdei_state_trans(se, DO_DISPATCH))
		return -1;

	/* Activate the priority corresponding to the event being dispatched */
	ehf_activate_priority(sdei_event_priority(map));

	/*
	 * Prepare for NS dispatch by restoring the Non-secure context and
	 * marking it as active.
	 */
	ns_ctx = restore_and_resume_ns_context();

	/* Dispatch event synchronously */
	setup_ns_dispatch(map, se, ns_ctx, &dispatch_jmp);
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when the client completes the event.
	 *
	 * Deactivate the priority level that was activated at the time of
	 * explicit dispatch.
	 */
	ehf_deactivate_priority(sdei_event_priority(map));

	return 0;
}

static void end_sdei_synchronous_dispatch(jmp_buf *buffer)
{
	longjmp(*buffer, 1);
}

int sdei_event_complete(bool resume, uint64_t pc)
{
	sdei_dispatch_context_t *disp_ctx;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ctx;
	sdei_action_t act;
	unsigned int client_el = sdei_client_el();

	/* Return error if called without an active event */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx == NULL)
		return SDEI_EDENY;

	/* Validate resumption point */
	if (resume && (plat_sdei_validate_entry_point(pc, client_el) != 0))
		return SDEI_EDENY;

	map = disp_ctx->map;
	assert(map != NULL);
	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
	if (!can_sdei_state_trans(se, act)) {
		if (is_event_shared(map))
			sdei_map_unlock(map);
		return SDEI_EDENY;
	}

	if (is_event_shared(map))
		sdei_map_unlock(map);

	/* Having done sanity checks, pop dispatch */
	(void) pop_dispatch();

	SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
			map->ev_num, read_spsr_el3(), read_elr_el3());

	/*
	 * Restore Non-secure to how it was originally interrupted. Once done,
	 * it's up-to-date with the saved copy.
	 */
	ctx = cm_get_context(NON_SECURE);
	restore_event_ctx(disp_ctx, ctx);

	if (resume) {
		/*
		 * Complete-and-resume call. Prepare the Non-secure context
		 * (currently active) for complete and resume.
		 */
		cm_set_elr_spsr_el3(NON_SECURE, pc, SPSR_64(client_el,
					MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));

		/*
		 * Make it look as if a synchronous exception were taken at the
		 * supplied Non-secure resumption point. Populate SPSR and
		 * ELR_ELx so that an ERET from there works as expected.
		 *
		 * The assumption is that the client, if necessary, would have
		 * saved any live content in these registers before making this
		 * call.
		 */
		if (client_el == MODE_EL2) {
			write_elr_el2(disp_ctx->elr_el3);
			write_spsr_el2(disp_ctx->spsr_el3);
		} else {
			/* EL1 */
			write_elr_el1(disp_ctx->elr_el3);
			write_spsr_el1(disp_ctx->spsr_el3);
		}
	}

	/* End the outstanding dispatch */
	end_sdei_synchronous_dispatch(disp_ctx->dispatch_jmp);

	return 0;
}

int64_t sdei_event_context(void *handle, unsigned int param)
{
	sdei_dispatch_context_t *disp_ctx;

	if (param >= SDEI_SAVED_GPREGS)
		return SDEI_EINVAL;

	/* Get outstanding dispatch on this CPU */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx == NULL)
		return SDEI_EDENY;

	assert(disp_ctx->map != NULL);

	if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT))
		return SDEI_EDENY;

	/*
	 * No locking is required for the Running status as this is the only CPU
	 * which can complete the event
	 */

	return (int64_t) disp_ctx->x[param];
}
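
/*
 * Illustrative sketch (not part of the dispatcher itself): how another BL31
 * component, such as a platform error handler, might use the explicit
 * dispatch API above. The event number and function name are hypothetical;
 * the platform is assumed to have declared the event as an explicit one
 * (e.g. via SDEI_EXPLICIT_EVENT()) and the Non-secure client to have
 * registered, enabled and unmasked it beforehand.
 *
 *	static int plat_notify_error_to_ns(void)
 *	{
 *		// Delegate to the registered Non-secure handler. The call
 *		// returns only after the client signals completion via
 *		// SDEI_EVENT_COMPLETE[_AND_RESUME], after which the
 *		// Non-secure context is active.
 *		if (sdei_dispatch_event(PLAT_SDEI_ERROR_EVENT) != 0)
 *			return -1;
 *
 *		return 0;
 *	}
 */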