/*
 * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/ehf.h>
#include <bl31/interrupt_mgmt.h>
#include <bl31/sync_handle.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/cassert.h>
#include <services/sdei.h>

#include "sdei_private.h"

/* x0-x17 GPREGS context */
#define SDEI_SAVED_GPREGS	18U

/* Maximum preemption nesting levels: Critical priority and Normal priority */
#define MAX_EVENT_NESTING	2U

/* Per-CPU SDEI state access macro */
#define sdei_get_this_pe_state()	(&cpu_state[plat_my_core_pos()])
#define sdei_get_target_pe_state(_pe)	(&cpu_state[plat_core_pos_by_mpidr(_pe)])

/*
 * Structure to store information about an outstanding dispatch: the event
 * being dispatched, the interrupted context to restore on completion, and
 * the jump buffer used to unwind the synchronous dispatch.
 */
typedef struct sdei_dispatch_context {
	sdei_ev_map_t *map;
	uint64_t x[SDEI_SAVED_GPREGS];
	jmp_buf *dispatch_jmp;

	/* Exception state registers */
	uint64_t elr_el3;
	uint64_t spsr_el3;

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	/* CVE-2018-3639 mitigation state */
	uint64_t disable_cve_2018_3639;
#endif
} sdei_dispatch_context_t;

/* Per-CPU SDEI state data */
typedef struct sdei_cpu_state {
	/* Nested dispatches: at most one Normal preempted by one Critical */
	sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
	unsigned short stack_top;	/* Empty ascending */
	bool pe_masked;
	/*
	 * Set when an interrupt was disabled while this PE was masked, so
	 * that sdei_pe_unmask() knows to re-enable bound interrupts.
	 */
	bool pending_enables;
} sdei_cpu_state_t;

/* SDEI states for all cores in the system */
static sdei_cpu_state_t cpu_state[PLATFORM_CORE_COUNT];

/*
 * Return the SDEI mask status of the PE identified by MPIDR 'target_pe'.
 * Returns true (i.e. masked) if the MPIDR does not map to a valid core
 * position, so an invalid target is never reported as dispatchable.
 */
bool sdei_is_target_pe_masked(uint64_t target_pe)
{
	int errstat = plat_core_pos_by_mpidr(target_pe);
	if (errstat >= 0) {
		const sdei_cpu_state_t *state = &cpu_state[errstat];
		return state->pe_masked;
	}
	return true;
}

/*
 * Mask SDEI event dispatch on the calling PE. Returns 1 if this call
 * changed the mask status, 0 if the PE was already masked.
 */
int64_t sdei_pe_mask(void)
{
	int64_t ret = 0;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	/*
	 * Return value indicates whether this call had any effect in the mask
	 * status of this PE.
	 */
	if (!state->pe_masked) {
		state->pe_masked = true;
		ret = 1;
	}

	return ret;
}

/*
 * Unmask SDEI event dispatch on the calling PE, re-enabling any bound
 * interrupts that were left disabled by handle_masked_trigger() while
 * the PE was masked.
 */
void sdei_pe_unmask(void)
{
	unsigned int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	uint64_t my_mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	/*
	 * If there are pending enables, iterate through the private mappings
	 * and enable those bound maps that are in enabled state. Also, iterate
	 * through shared mappings and enable interrupts of events that are
	 * targeted to this PE.
	 */
	if (state->pending_enables) {
		for_each_private_map(i, map) {
			se = get_event_entry(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED))
				plat_ic_enable_interrupt(map->intr);
		}

		for_each_shared_map(i, map) {
			se = get_event_entry(map);

			/* Shared entries may race with other PEs; take the map lock */
			sdei_map_lock(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED) &&
					(se->reg_flags == SDEI_REGF_RM_PE) &&
					(se->affinity == my_mpidr)) {
				plat_ic_enable_interrupt(map->intr);
			}
			sdei_map_unlock(map);
		}
	}

	state->pending_enables = false;
	state->pe_masked = false;
}

/* Push a dispatch context to the dispatch stack; returns the new slot */
static sdei_dispatch_context_t *push_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	sdei_dispatch_context_t *disp_ctx;

	/* Cannot have more than max events */
	assert(state->stack_top < MAX_EVENT_NESTING);

	disp_ctx = &state->dispatch_stack[state->stack_top];
	state->stack_top++;

	return disp_ctx;
}

/* Pop a dispatch context from the dispatch stack; NULL if the stack is empty */
static sdei_dispatch_context_t *pop_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0U)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	state->stack_top--;

	return &state->dispatch_stack[state->stack_top];
}

/* Retrieve the context at the top of dispatch stack; NULL if none outstanding */
static sdei_dispatch_context_t *get_outstanding_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0U)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	return &state->dispatch_stack[state->stack_top - 1U];
}

/*
 * Snapshot the interrupted context 'tgt_ctx' (x0-x17, SPSR_EL3, ELR_EL3)
 * into a freshly-pushed dispatch context for event 'map', so it can be
 * restored when the client completes the event.
 */
static sdei_dispatch_context_t *save_event_ctx(sdei_ev_map_t *map,
		void *tgt_ctx)
{
	sdei_dispatch_context_t *disp_ctx;
	const gp_regs_t *tgt_gpregs;
	const el3_state_t *tgt_el3;

	assert(tgt_ctx != NULL);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	disp_ctx = push_dispatch();
	assert(disp_ctx != NULL);
	disp_ctx->map = map;

	/* Save general purpose and exception registers */
	memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
	disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
	disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);

	return disp_ctx;
}

/*
 * Write the saved interrupted state from 'disp_ctx' back into 'tgt_ctx',
 * undoing save_event_ctx() when the client completes the event.
 */
static void restore_event_ctx(const sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
{
	gp_regs_t *tgt_gpregs;
	el3_state_t *tgt_el3;

	assert(tgt_ctx != NULL);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	/* The saved area must cover exactly the x0-x17 snapshot we memcpy */
	CASSERT(sizeof(disp_ctx->x) == (SDEI_SAVED_GPREGS * sizeof(uint64_t)),
			foo);

	/* Restore general purpose and exception registers */
	memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
	write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
	write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(tgt_ctx);

	/* Restore CVE-2018-3639 mitigation state */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE,
			disp_ctx->disable_cve_2018_3639);
#endif
}

/* Save the Secure EL1 system register context before switching worlds */
static void save_secure_context(void)
{
	cm_el1_sysregs_context_save(SECURE);
}

/* Restore Secure context and arrange to resume it at the next ERET */
static void restore_and_resume_secure_context(void)
{
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);
}
235b7cb133eSJeenu Viswambharan */ 236b7cb133eSJeenu Viswambharan static cpu_context_t *restore_and_resume_ns_context(void) 237b7cb133eSJeenu Viswambharan { 238b7cb133eSJeenu Viswambharan cpu_context_t *ns_ctx; 239b7cb133eSJeenu Viswambharan 240b7cb133eSJeenu Viswambharan cm_el1_sysregs_context_restore(NON_SECURE); 241b7cb133eSJeenu Viswambharan cm_set_next_eret_context(NON_SECURE); 242b7cb133eSJeenu Viswambharan 243b7cb133eSJeenu Viswambharan ns_ctx = cm_get_context(NON_SECURE); 244ba6e5ca6SJeenu Viswambharan assert(ns_ctx != NULL); 245b7cb133eSJeenu Viswambharan 246b7cb133eSJeenu Viswambharan return ns_ctx; 247b7cb133eSJeenu Viswambharan } 248b7cb133eSJeenu Viswambharan 249b7cb133eSJeenu Viswambharan /* 25037596fcbSDaniel Boulby * Prepare for ERET: 25137596fcbSDaniel Boulby * - Set the ELR to the registered handler address 25203fafc0bSArvind Ram Prakash * - Set the SPSR register by calling the common create_spsr() function 25337596fcbSDaniel Boulby */ 25437596fcbSDaniel Boulby 25537596fcbSDaniel Boulby static void sdei_set_elr_spsr(sdei_entry_t *se, sdei_dispatch_context_t *disp_ctx) 25637596fcbSDaniel Boulby { 25737596fcbSDaniel Boulby unsigned int client_el = sdei_client_el(); 25837596fcbSDaniel Boulby u_register_t sdei_spsr = SPSR_64(client_el, MODE_SP_ELX, 25937596fcbSDaniel Boulby DISABLE_ALL_EXCEPTIONS); 26037596fcbSDaniel Boulby 26137596fcbSDaniel Boulby u_register_t interrupted_pstate = disp_ctx->spsr_el3; 26237596fcbSDaniel Boulby 26303fafc0bSArvind Ram Prakash sdei_spsr = create_spsr(interrupted_pstate, client_el); 26437596fcbSDaniel Boulby 26537596fcbSDaniel Boulby cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t) se->ep, sdei_spsr); 26637596fcbSDaniel Boulby } 26737596fcbSDaniel Boulby 26837596fcbSDaniel Boulby /* 269b7cb133eSJeenu Viswambharan * Populate the Non-secure context so that the next ERET will dispatch to the 270b7cb133eSJeenu Viswambharan * SDEI client. 
/*
 * Populate the Non-secure context so that the next ERET will dispatch to the
 * SDEI client: save the interrupted context, load handler arguments into
 * x0-x3, and program ELR/SPSR for entry to the registered handler.
 */
static void setup_ns_dispatch(sdei_ev_map_t *map, sdei_entry_t *se,
		cpu_context_t *ctx, jmp_buf *dispatch_jmp)
{
	sdei_dispatch_context_t *disp_ctx;

	/* Push the event and context */
	disp_ctx = save_event_ctx(map, ctx);

	/*
	 * Setup handler arguments:
	 *
	 * - x0: Event number
	 * - x1: Handler argument supplied at the time of event registration
	 * - x2: Interrupted PC
	 * - x3: Interrupted SPSR
	 */
	SMC_SET_GP(ctx, CTX_GPREG_X0, (uint64_t) map->ev_num);
	SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
	SMC_SET_GP(ctx, CTX_GPREG_X2, disp_ctx->elr_el3);
	SMC_SET_GP(ctx, CTX_GPREG_X3, disp_ctx->spsr_el3);

	/* Setup the elr and spsr register to prepare for ERET */
	sdei_set_elr_spsr(se, disp_ctx);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(ctx);

	/* Save CVE-2018-3639 mitigation state */
	disp_ctx->disable_cve_2018_3639 = read_ctx_reg(tgt_cve_2018_3639,
			CTX_CVE_2018_3639_DISABLE);

	/* Force SDEI handler to execute with mitigation enabled by default */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE, 0);
#endif

	/* Remember where to longjmp back to when the client completes */
	disp_ctx->dispatch_jmp = dispatch_jmp;
}

/*
 * Handle a triggered SDEI interrupt while events were masked on this PE.
 * The interrupt cannot be delegated now; depending on routing, it is either
 * parked (disabled + left pending) for sdei_pe_unmask() to re-enable, or
 * re-pended so another unmasked PE may pick it up.
 */
static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
		sdei_cpu_state_t *state, unsigned int intr_raw)
{
	/* __unused: only referenced by the assert below in debug builds */
	uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
	bool disable = false;

	/* Nothing to do for event 0 */
	if (map->ev_num == SDEI_EVENT_0)
		return;

	/*
	 * For a private event, or for a shared event specifically routed to
	 * this CPU, we disable interrupt, leave the interrupt pending, and do
	 * EOI.
	 */
	if (is_event_private(map) || (se->reg_flags == SDEI_REGF_RM_PE))
		disable = true;

	if (se->reg_flags == SDEI_REGF_RM_PE)
		assert(se->affinity == my_mpidr);

	if (disable) {
		plat_ic_disable_interrupt(map->intr);
		plat_ic_set_interrupt_pending(map->intr);
		plat_ic_end_of_interrupt(intr_raw);
		/* Tell sdei_pe_unmask() there is an interrupt to re-enable */
		state->pending_enables = true;

		return;
	}

	/*
	 * We just received a shared event with routing set to ANY PE. The
	 * interrupt can't be delegated on this PE as SDEI events are masked.
	 * However, because its routing mode is ANY, it is possible that the
	 * event can be delegated on any other PE that hasn't masked events.
	 * Therefore, we set the interrupt back pending so as to give other
	 * suitable PEs a chance of handling it.
	 */
	assert(plat_ic_is_spi(map->intr) != 0);
	plat_ic_set_interrupt_pending(map->intr);

	/*
	 * Leaving the same interrupt pending also means that the same interrupt
	 * can target this PE again as soon as this PE leaves EL3. Whether and
	 * how often that happens depends on the implementation of GIC.
	 *
	 * We therefore call a platform handler to resolve this situation.
	 */
	plat_sdei_handle_masked_trigger(my_mpidr, map->intr);

	/* This PE is masked. We EOI the interrupt, as it can't be delegated */
	plat_ic_end_of_interrupt(intr_raw);
}

/*
 * SDEI main interrupt handler. Maps the acknowledged interrupt to an SDEI
 * event, validates that it may be dispatched on this PE, then synchronously
 * dispatches it to the Non-secure client and EOIs the interrupt once the
 * client completes. Always returns 0.
 */
int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
		void *cookie)
{
	sdei_entry_t *se;
	cpu_context_t *ctx;
	sdei_ev_map_t *map;
	const sdei_dispatch_context_t *disp_ctx;
	unsigned int sec_state;
	sdei_cpu_state_t *state;
	uint32_t intr;
	jmp_buf dispatch_jmp;
	const uint64_t mpidr = read_mpidr_el1();

	/*
	 * To handle an event, the following conditions must be true:
	 *
	 * 1. Event must be signalled
	 * 2. Event must be enabled
	 * 3. This PE must be a target PE for the event
	 * 4. PE must be unmasked for SDEI
	 * 5. If this is a normal event, no event must be running
	 * 6. If this is a critical event, no critical event must be running
	 *
	 * (1) and (2) are true when this function is running
	 * (3) is enforced in GIC by selecting the appropriate routing option
	 * (4) is satisfied by client calling PE_UNMASK
	 * (5) and (6) is enforced using interrupt priority, the RPR, in GIC:
	 *   - Normal SDEI events belong to Normal SDE priority class
	 *   - Critical SDEI events belong to Critical CSDE priority class
	 *
	 * The interrupt has already been acknowledged, and therefore is active,
	 * so no other PE can handle this event while we are at it.
	 *
	 * Find if this is an SDEI interrupt. There must be an event mapped to
	 * this interrupt
	 */
	intr = plat_ic_get_interrupt_id(intr_raw);
	map = find_event_map_by_intr(intr, (plat_ic_is_spi(intr) != 0));
	if (map == NULL) {
		ERROR("No SDEI map for interrupt %u\n", intr);
		panic();
	}

	/*
	 * Received interrupt number must either correspond to event 0, or must
	 * be bound interrupt.
	 */
	assert((map->ev_num == SDEI_EVENT_0) || is_map_bound(map));

	se = get_event_entry(map);
	state = sdei_get_this_pe_state();

	if (state->pe_masked) {
		/*
		 * Interrupts received while this PE was masked can't be
		 * dispatched.
		 */
		SDEI_LOG("interrupt %u on %" PRIx64 " while PE masked\n",
				map->intr, mpidr);
		if (is_event_shared(map))
			sdei_map_lock(map);

		handle_masked_trigger(map, se, state, intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	/* Insert load barrier for signalled SDEI event */
	if (map->ev_num == SDEI_EVENT_0)
		dmbld();

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Assert shared event routed to this PE had been configured so */
	if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
		assert(se->affinity == (mpidr & MPIDR_AFFINITY_MASK));
	}

	if (!can_sdei_state_trans(se, DO_DISPATCH)) {
		SDEI_LOG("SDEI event 0x%x can't be dispatched; state=0x%x\n",
				map->ev_num, se->state);

		/*
		 * If the event is registered, leave the interrupt pending so
		 * that it's delivered when the event is enabled.
		 */
		if (GET_EV_STATE(se, REGISTERED))
			plat_ic_set_interrupt_pending(map->intr);

		/*
		 * The interrupt was disabled or unregistered after the handler
		 * started to execute, which means now the interrupt is already
		 * disabled and we just need to EOI the interrupt.
		 */
		plat_ic_end_of_interrupt(intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	disp_ctx = get_outstanding_dispatch();
	if (is_event_critical(map)) {
		/*
		 * If this event is Critical, and if there's an outstanding
		 * dispatch, assert the latter is a Normal dispatch. Critical
		 * events can preempt an outstanding Normal event dispatch.
		 */
		if (disp_ctx != NULL)
			assert(is_event_normal(disp_ctx->map));
	} else {
		/*
		 * If this event is Normal, assert that there are no outstanding
		 * dispatches. Normal events can't preempt any outstanding event
		 * dispatches.
		 */
		assert(disp_ctx == NULL);
	}

	/* Which world (Secure/Non-secure) the interrupt arrived from */
	sec_state = get_interrupt_src_ss(flags);

	if (is_event_shared(map))
		sdei_map_unlock(map);

	SDEI_LOG("ACK %" PRIx64 ", ev:0x%x ss:%d spsr:%lx ELR:%lx\n",
			mpidr, map->ev_num, sec_state, read_spsr_el3(),
			read_elr_el3());

	ctx = handle;

	/*
	 * Check if we interrupted secure state. Perform a context switch so
	 * that we can delegate to NS.
	 */
	if (sec_state == SECURE) {
		save_secure_context();
		ctx = restore_and_resume_ns_context();
	}

	/* Synchronously dispatch event */
	setup_ns_dispatch(map, se, ctx, &dispatch_jmp);
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when client completes the event.
	 *
	 * If the cause of dispatch originally interrupted the Secure world,
	 * resume Secure.
	 *
	 * No need to save the Non-secure context ahead of a world switch: the
	 * Non-secure context was fully saved before dispatch, and has been
	 * returned to its pre-dispatch state.
	 */
	if (sec_state == SECURE)
		restore_and_resume_secure_context();

	/*
	 * The event was dispatched after receiving SDEI interrupt. With
	 * the event handling completed, EOI the corresponding
	 * interrupt.
	 */
	if ((map->ev_num != SDEI_EVENT_0) && !is_map_bound(map)) {
		ERROR("Invalid SDEI mapping: ev=0x%x\n", map->ev_num);
		panic();
	}
	plat_ic_end_of_interrupt(intr_raw);

	return 0;
}

/*
 * Explicitly dispatch the given SDEI event.
 *
 * When calling this API, the caller must be prepared for the SDEI dispatcher to
 * restore and make Non-secure context as active. This call returns only after
 * the client has completed the dispatch. Then, the Non-secure context will be
 * active, and the following ERET will return to Non-secure.
 *
 * Should the caller require re-entry to Secure, it must restore the Secure
 * context and program registers for ERET.
 */
/*
 * Explicitly dispatch the given SDEI event (see block comment above).
 * Returns 0 after the client completes the event, -1 on any validation
 * failure (PE masked, event 0, unknown/non-explicit event, nesting rules,
 * or disallowed state transition).
 */
int sdei_dispatch_event(int ev_num)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ns_ctx;
	sdei_dispatch_context_t *disp_ctx;
	sdei_cpu_state_t *state;
	jmp_buf dispatch_jmp;

	/* Can't dispatch if events are masked on this PE */
	state = sdei_get_this_pe_state();
	if (state->pe_masked)
		return -1;

	/* Event 0 can't be dispatched */
	if (ev_num == SDEI_EVENT_0)
		return -1;

	/* Locate mapping corresponding to this event */
	map = find_event_map(ev_num);
	if (map == NULL)
		return -1;

	/* Only explicit events can be dispatched */
	if (!is_map_explicit(map))
		return -1;

	/* Examine state of dispatch stack */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx != NULL) {
		/*
		 * There's an outstanding dispatch. If the outstanding dispatch
		 * is critical, no more dispatches are possible.
		 */
		if (is_event_critical(disp_ctx->map))
			return -1;

		/*
		 * If the outstanding dispatch is Normal, only critical events
		 * can be dispatched.
		 */
		if (is_event_normal(map))
			return -1;
	}

	se = get_event_entry(map);
	if (!can_sdei_state_trans(se, DO_DISPATCH))
		return -1;

	/*
	 * Prepare for NS dispatch by restoring the Non-secure context and
	 * marking that as active.
	 */
	ns_ctx = restore_and_resume_ns_context();

	/* Activate the priority corresponding to the event being dispatched */
	ehf_activate_priority(sdei_event_priority(map));

	/* Dispatch event synchronously */
	setup_ns_dispatch(map, se, ns_ctx, &dispatch_jmp);
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when client completes the event.
	 *
	 * Deactivate the priority level that was activated at the time of
	 * explicit dispatch.
	 */
	ehf_deactivate_priority(sdei_event_priority(map));

	return 0;
}

/* Unwind a synchronous dispatch: longjmp back to the dispatch call site */
static void end_sdei_synchronous_dispatch(jmp_buf *buffer)
{
	longjmp(*buffer, 1);
}

/*
 * Complete the outstanding SDEI event on this PE. 'resume' selects
 * COMPLETE_AND_RESUME semantics with resumption address 'pc'.
 * NOTE(review): definition continues beyond this SOURCE chunk; the body
 * below is reproduced only up to the visible truncation point.
 */
int sdei_event_complete(bool resume, uint64_t pc)
{
	sdei_dispatch_context_t *disp_ctx;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ctx;
	sdei_action_t act;
	unsigned int client_el = sdei_client_el();

	/* Return error if called without an active event */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx == NULL)
		return SDEI_EDENY;

	/* Validate resumption point */
	if (resume && (plat_sdei_validate_entry_point(pc, client_el) != 0))
		return SDEI_EDENY;

	map = disp_ctx->map;
	assert(map != NULL);
	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	act = resume ?
DO_COMPLETE_RESUME : DO_COMPLETE; 656b7cb133eSJeenu Viswambharan if (!can_sdei_state_trans(se, act)) { 657b7cb133eSJeenu Viswambharan if (is_event_shared(map)) 658b7cb133eSJeenu Viswambharan sdei_map_unlock(map); 659b7cb133eSJeenu Viswambharan return SDEI_EDENY; 660b7cb133eSJeenu Viswambharan } 661b7cb133eSJeenu Viswambharan 662611eb9cfSJeenu Viswambharan if (is_event_shared(map)) 663611eb9cfSJeenu Viswambharan sdei_map_unlock(map); 664611eb9cfSJeenu Viswambharan 6658e3032f9SJeenu Viswambharan /* Having done sanity checks, pop dispatch */ 666ba6e5ca6SJeenu Viswambharan (void) pop_dispatch(); 6678e3032f9SJeenu Viswambharan 6684ce3e99aSScott Branden SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(), 6698e3032f9SJeenu Viswambharan map->ev_num, read_spsr_el3(), read_elr_el3()); 6708e3032f9SJeenu Viswambharan 671b7cb133eSJeenu Viswambharan /* 672b7cb133eSJeenu Viswambharan * Restore Non-secure to how it was originally interrupted. Once done, 673b7cb133eSJeenu Viswambharan * it's up-to-date with the saved copy. 674b7cb133eSJeenu Viswambharan */ 675b7cb133eSJeenu Viswambharan ctx = cm_get_context(NON_SECURE); 676b7cb133eSJeenu Viswambharan restore_event_ctx(disp_ctx, ctx); 677b7cb133eSJeenu Viswambharan 678b7cb133eSJeenu Viswambharan if (resume) { 679b7cb133eSJeenu Viswambharan /* 680b7cb133eSJeenu Viswambharan * Complete-and-resume call. Prepare the Non-secure context 681b7cb133eSJeenu Viswambharan * (currently active) for complete and resume. 682b7cb133eSJeenu Viswambharan */ 683b7cb133eSJeenu Viswambharan cm_set_elr_spsr_el3(NON_SECURE, pc, SPSR_64(client_el, 684b7cb133eSJeenu Viswambharan MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS)); 685b7cb133eSJeenu Viswambharan 686b7cb133eSJeenu Viswambharan /* 687b7cb133eSJeenu Viswambharan * Make it look as if a synchronous exception were taken at the 688b7cb133eSJeenu Viswambharan * supplied Non-secure resumption point. Populate SPSR and 689b7cb133eSJeenu Viswambharan * ELR_ELx so that an ERET from there works as expected. 
690b7cb133eSJeenu Viswambharan * 691b7cb133eSJeenu Viswambharan * The assumption is that the client, if necessary, would have 692b7cb133eSJeenu Viswambharan * saved any live content in these registers before making this 693b7cb133eSJeenu Viswambharan * call. 694b7cb133eSJeenu Viswambharan */ 695b7cb133eSJeenu Viswambharan if (client_el == MODE_EL2) { 696b7cb133eSJeenu Viswambharan write_elr_el2(disp_ctx->elr_el3); 697b7cb133eSJeenu Viswambharan write_spsr_el2(disp_ctx->spsr_el3); 698b7cb133eSJeenu Viswambharan } else { 699b7cb133eSJeenu Viswambharan /* EL1 */ 700b7cb133eSJeenu Viswambharan write_elr_el1(disp_ctx->elr_el3); 701b7cb133eSJeenu Viswambharan write_spsr_el1(disp_ctx->spsr_el3); 702b7cb133eSJeenu Viswambharan } 703b7cb133eSJeenu Viswambharan } 704b7cb133eSJeenu Viswambharan 705cdb6ac94SJeenu Viswambharan /* End the outstanding dispatch */ 7065e60c39aSJeenu Viswambharan end_sdei_synchronous_dispatch(disp_ctx->dispatch_jmp); 707b7cb133eSJeenu Viswambharan 708b7cb133eSJeenu Viswambharan return 0; 709b7cb133eSJeenu Viswambharan } 710b7cb133eSJeenu Viswambharan 711ba6e5ca6SJeenu Viswambharan int64_t sdei_event_context(void *handle, unsigned int param) 712b7cb133eSJeenu Viswambharan { 713b7cb133eSJeenu Viswambharan sdei_dispatch_context_t *disp_ctx; 714b7cb133eSJeenu Viswambharan 715b7cb133eSJeenu Viswambharan if (param >= SDEI_SAVED_GPREGS) 716b7cb133eSJeenu Viswambharan return SDEI_EINVAL; 717b7cb133eSJeenu Viswambharan 718b7cb133eSJeenu Viswambharan /* Get outstanding dispatch on this CPU */ 719b7cb133eSJeenu Viswambharan disp_ctx = get_outstanding_dispatch(); 720ba6e5ca6SJeenu Viswambharan if (disp_ctx == NULL) 721b7cb133eSJeenu Viswambharan return SDEI_EDENY; 722b7cb133eSJeenu Viswambharan 723ba6e5ca6SJeenu Viswambharan assert(disp_ctx->map != NULL); 724b7cb133eSJeenu Viswambharan 725b7cb133eSJeenu Viswambharan if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT)) 726b7cb133eSJeenu Viswambharan return SDEI_EDENY; 727b7cb133eSJeenu 
Viswambharan 728b7cb133eSJeenu Viswambharan /* 729b7cb133eSJeenu Viswambharan * No locking is required for the Running status as this is the only CPU 730b7cb133eSJeenu Viswambharan * which can complete the event 731b7cb133eSJeenu Viswambharan */ 732b7cb133eSJeenu Viswambharan 733ba6e5ca6SJeenu Viswambharan return (int64_t) disp_ctx->x[param]; 734b7cb133eSJeenu Viswambharan } 735