/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <cassert.h>
#include <context_mgmt.h>
#include <debug.h>
#include <ehf.h>
#include <interrupt_mgmt.h>
#include <runtime_svc.h>
#include <sdei.h>
#include <string.h>
#include "sdei_private.h"

/* Per-PE SDEI mask status values (see sdei_cpu_state_t.pe_masked) */
#define PE_MASKED	1
#define PE_NOT_MASKED	0

/* x0-x17 GPREGS context */
#define SDEI_SAVED_GPREGS	18

/* Maximum preemption nesting levels: Critical priority and Normal priority */
#define MAX_EVENT_NESTING	2

/* Per-CPU SDEI state access macro */
#define sdei_get_this_pe_state()	(&sdei_cpu_state[plat_my_core_pos()])

/* Structure to store information about an outstanding dispatch */
typedef struct sdei_dispatch_context {
	sdei_ev_map_t *map;
	unsigned int sec_state;
	unsigned int intr_raw;
	uint64_t x[SDEI_SAVED_GPREGS];

	/* Exception state registers */
	uint64_t elr_el3;
	uint64_t spsr_el3;
} sdei_dispatch_context_t;

/* Per-CPU SDEI state data */
typedef struct sdei_cpu_state {
	sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
	unsigned short stack_top; /* Empty ascending */
	unsigned int pe_masked:1;
	unsigned int pending_enables:1;
} sdei_cpu_state_t;

/* SDEI states for all cores in the system */
static sdei_cpu_state_t sdei_cpu_state[PLATFORM_CORE_COUNT];

/*
 * Mask SDEI event dispatch on this PE.
 *
 * Returns non-zero if this call changed the mask status of this PE (i.e. the
 * PE was previously unmasked), zero if the PE was already masked.
 */
unsigned int sdei_pe_mask(void)
{
	unsigned int ret;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	/*
	 * Return value indicates whether this call had any effect in the mask
	 * status of this PE.
	 */
	ret = (state->pe_masked ^ PE_MASKED);
	state->pe_masked = PE_MASKED;

	return ret;
}

/*
 * Unmask SDEI event dispatch on this PE, and re-enable any event interrupts
 * that were deferred (left disabled-but-pending) while the PE was masked.
 */
void sdei_pe_unmask(void)
{
	int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	uint64_t my_mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	/*
	 * If there are pending enables, iterate through the private mappings
	 * and enable those bound maps that are in enabled state. Also, iterate
	 * through shared mappings and enable interrupts of events that are
	 * targeted to this PE.
	 */
	if (state->pending_enables) {
		for_each_private_map(i, map) {
			se = get_event_entry(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED))
				plat_ic_enable_interrupt(map->intr);
		}

		for_each_shared_map(i, map) {
			se = get_event_entry(map);

			/* Shared mappings are protected by a per-map lock */
			sdei_map_lock(map);
			if (is_map_bound(map) &&
					GET_EV_STATE(se, ENABLED) &&
					(se->reg_flags == SDEI_REGF_RM_PE) &&
					(se->affinity == my_mpidr)) {
				plat_ic_enable_interrupt(map->intr);
			}
			sdei_map_unlock(map);
		}
	}

	state->pending_enables = 0;
	state->pe_masked = PE_NOT_MASKED;
}

/* Push a dispatch context to the dispatch stack; returns the slot to fill */
static sdei_dispatch_context_t *push_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	sdei_dispatch_context_t *disp_ctx;

	/* Cannot have more than max events */
	assert(state->stack_top < MAX_EVENT_NESTING);

	disp_ctx = &state->dispatch_stack[state->stack_top];
	state->stack_top++;

	return disp_ctx;
}

/* Pop a dispatch context from the dispatch stack; NULL if the stack is empty */
static sdei_dispatch_context_t *pop_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	state->stack_top--;

	return &state->dispatch_stack[state->stack_top];
}

/*
 * Retrieve the context at the top of dispatch stack without popping it; NULL
 * if there is no outstanding dispatch.
 */
static sdei_dispatch_context_t *get_outstanding_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	return &state->dispatch_stack[state->stack_top - 1];
}

/*
 * Snapshot the interrupted execution state (x0-x17, ELR_EL3, SPSR_EL3) from
 * the given CPU context into a freshly-pushed dispatch-stack entry, together
 * with the event map, the interrupted security state, and the raw interrupt
 * ID (needed for EOI at completion time).
 */
static void save_event_ctx(sdei_ev_map_t *map, void *tgt_ctx, int sec_state,
		unsigned int intr_raw)
{
	sdei_dispatch_context_t *disp_ctx;
	gp_regs_t *tgt_gpregs;
	el3_state_t *tgt_el3;

	assert(tgt_ctx);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	disp_ctx = push_dispatch();
	assert(disp_ctx);
	disp_ctx->sec_state = sec_state;
	disp_ctx->map = map;
	disp_ctx->intr_raw = intr_raw;

	/* Save general purpose and exception registers */
	memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
	disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
	disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);
}

/*
 * Restore a previously-saved dispatch context (x0-x17, ELR_EL3, SPSR_EL3)
 * back into the given CPU context, undoing save_event_ctx().
 */
static void restore_event_ctx(sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
{
	gp_regs_t *tgt_gpregs;
	el3_state_t *tgt_el3;

	assert(tgt_ctx);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	/* The memcpy below assumes the saved area covers exactly x0-x17 */
	CASSERT(sizeof(disp_ctx->x) == (SDEI_SAVED_GPREGS * sizeof(uint64_t)),
			foo);

	/* Restore general purpose and exception registers */
	memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
	write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
	write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);
}

/* Save the Secure EL1 system register context for this PE */
static void save_secure_context(void)
{
	cm_el1_sysregs_context_save(SECURE);
}

/* Restore Secure context and arrange to resume it at the next ERET */
static void restore_and_resume_secure_context(void)
{
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);
}

/*
 * Restore Non-secure context and arrange to resume it at the next ERET. Return
 * pointer to the Non-secure context.
 */
static cpu_context_t *restore_and_resume_ns_context(void)
{
	cpu_context_t *ns_ctx;

	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	ns_ctx = cm_get_context(NON_SECURE);
	assert(ns_ctx);

	return ns_ctx;
}

/*
 * Populate the Non-secure context so that the next ERET will dispatch to the
 * SDEI client: the interrupted state is pushed onto the dispatch stack, the
 * SDEI handler arguments are placed in x0-x3, and ELR/SPSR are set up to
 * enter the registered handler at the client EL with exceptions masked.
 */
static void setup_ns_dispatch(sdei_ev_map_t *map, sdei_entry_t *se,
		cpu_context_t *ctx, int sec_state_to_resume,
		unsigned int intr_raw)
{
	el3_state_t *el3_ctx = get_el3state_ctx(ctx);

	/* Push the event and context */
	save_event_ctx(map, ctx, sec_state_to_resume, intr_raw);

	/*
	 * Setup handler arguments:
	 *
	 * - x0: Event number
	 * - x1: Handler argument supplied at the time of event registration
	 * - x2: Interrupted PC
	 * - x3: Interrupted SPSR
	 */
	SMC_SET_GP(ctx, CTX_GPREG_X0, map->ev_num);
	SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
	SMC_SET_GP(ctx, CTX_GPREG_X2, read_ctx_reg(el3_ctx, CTX_ELR_EL3));
	SMC_SET_GP(ctx, CTX_GPREG_X3, read_ctx_reg(el3_ctx, CTX_SPSR_EL3));

	/*
	 * Prepare for ERET:
	 *
	 * - Set PC to the registered handler address
	 * - Set SPSR to jump to client EL with exceptions masked
	 */
	cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t) se->ep,
			SPSR_64(sdei_client_el(), MODE_SP_ELX,
				DISABLE_ALL_EXCEPTIONS));
}
/* Handle a triggered SDEI interrupt while events were masked on this PE */
static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
		sdei_cpu_state_t *state, unsigned int intr_raw)
{
	/* __unused: only read by code compiled out when asserts are disabled */
	uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
	int disable = 0;

	/* Nothing to do for event 0 */
	if (map->ev_num == SDEI_EVENT_0)
		return;

	/*
	 * For a private event, or for a shared event specifically routed to
	 * this CPU, we disable interrupt, leave the interrupt pending, and do
	 * EOI.
	 */
	if (is_event_private(map)) {
		disable = 1;
	} else if (se->reg_flags == SDEI_REGF_RM_PE) {
		assert(se->affinity == my_mpidr);
		disable = 1;
	}

	if (disable) {
		plat_ic_disable_interrupt(map->intr);
		plat_ic_set_interrupt_pending(map->intr);
		plat_ic_end_of_interrupt(intr_raw);
		/* sdei_pe_unmask() will re-enable this interrupt later */
		state->pending_enables = 1;

		return;
	}

	/*
	 * We just received a shared event with routing set to ANY PE. The
	 * interrupt can't be delegated on this PE as SDEI events are masked.
	 * However, because its routing mode is ANY, it is possible that the
	 * event can be delegated on any other PE that hasn't masked events.
	 * Therefore, we set the interrupt back pending so as to give other
	 * suitable PEs a chance of handling it.
	 */
	assert(plat_ic_is_spi(map->intr));
	plat_ic_set_interrupt_pending(map->intr);

	/*
	 * Leaving the same interrupt pending also means that the same interrupt
	 * can target this PE again as soon as this PE leaves EL3. Whether and
	 * how often that happens depends on the implementation of GIC.
	 *
	 * We therefore call a platform handler to resolve this situation.
	 */
	plat_sdei_handle_masked_trigger(my_mpidr, map->intr);

	/* This PE is masked. We EOI the interrupt, as it can't be delegated */
	plat_ic_end_of_interrupt(intr_raw);
}

/*
 * SDEI main interrupt handler, invoked by the interrupt management framework
 * when an interrupt bound to an SDEI event (or the event-0 SGI) is
 * acknowledged at EL3. Saves the interrupted context and arranges for the
 * next ERET to enter the client's registered handler in Non-secure world.
 * Returns 0 in all handled cases; panics if no event maps to the interrupt.
 */
int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
		void *cookie)
{
	sdei_entry_t *se;
	cpu_context_t *ctx;
	sdei_ev_map_t *map;
	sdei_dispatch_context_t *disp_ctx;
	unsigned int sec_state;
	sdei_cpu_state_t *state;
	uint32_t intr;

	/*
	 * To handle an event, the following conditions must be true:
	 *
	 * 1. Event must be signalled
	 * 2. Event must be enabled
	 * 3. This PE must be a target PE for the event
	 * 4. PE must be unmasked for SDEI
	 * 5. If this is a normal event, no event must be running
	 * 6. If this is a critical event, no critical event must be running
	 *
	 * (1) and (2) are true when this function is running
	 * (3) is enforced in GIC by selecting the appropriate routing option
	 * (4) is satisfied by client calling PE_UNMASK
	 * (5) and (6) is enforced using interrupt priority, the RPR, in GIC:
	 *   - Normal SDEI events belong to Normal SDE priority class
	 *   - Critical SDEI events belong to Critical CSDE priority class
	 *
	 * The interrupt has already been acknowledged, and therefore is active,
	 * so no other PE can handle this event while we are at it.
	 *
	 * Find if this is an SDEI interrupt. There must be an event mapped to
	 * this interrupt
	 */
	intr = plat_ic_get_interrupt_id(intr_raw);
	map = find_event_map_by_intr(intr, plat_ic_is_spi(intr));
	if (!map) {
		ERROR("No SDEI map for interrupt %u\n", intr);
		panic();
	}

	/*
	 * Received interrupt number must either correspond to event 0, or must
	 * be bound interrupt.
	 */
	assert((map->ev_num == SDEI_EVENT_0) || is_map_bound(map));

	se = get_event_entry(map);
	state = sdei_get_this_pe_state();

	if (state->pe_masked == PE_MASKED) {
		/*
		 * Interrupts received while this PE was masked can't be
		 * dispatched.
		 */
		SDEI_LOG("interrupt %u on %lx while PE masked\n", map->intr,
				read_mpidr_el1());
		if (is_event_shared(map))
			sdei_map_lock(map);

		handle_masked_trigger(map, se, state, intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	/* Insert load barrier for signalled SDEI event */
	if (map->ev_num == SDEI_EVENT_0)
		dmbld();

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Assert shared event routed to this PE had been configured so */
	if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
		assert(se->affinity ==
				(read_mpidr_el1() & MPIDR_AFFINITY_MASK));
	}

	if (!can_sdei_state_trans(se, DO_DISPATCH)) {
		SDEI_LOG("SDEI event 0x%x can't be dispatched; state=0x%x\n",
				map->ev_num, se->state);

		/*
		 * If the event is registered, leave the interrupt pending so
		 * that it's delivered when the event is enabled.
		 */
		if (GET_EV_STATE(se, REGISTERED))
			plat_ic_set_interrupt_pending(map->intr);

		/*
		 * The interrupt was disabled or unregistered after the handler
		 * started to execute, which means now the interrupt is already
		 * disabled and we just need to EOI the interrupt.
		 */
		plat_ic_end_of_interrupt(intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	disp_ctx = get_outstanding_dispatch();
	if (is_event_critical(map)) {
		/*
		 * If this event is Critical, and if there's an outstanding
		 * dispatch, assert the latter is a Normal dispatch. Critical
		 * events can preempt an outstanding Normal event dispatch.
		 */
		if (disp_ctx)
			assert(is_event_normal(disp_ctx->map));
	} else {
		/*
		 * If this event is Normal, assert that there are no outstanding
		 * dispatches. Normal events can't preempt any outstanding event
		 * dispatches.
		 */
		assert(disp_ctx == NULL);
	}

	sec_state = get_interrupt_src_ss(flags);

	if (is_event_shared(map))
		sdei_map_unlock(map);

	SDEI_LOG("ACK %lx, ev:%d ss:%d spsr:%lx ELR:%lx\n", read_mpidr_el1(),
			map->ev_num, sec_state, read_spsr_el3(),
			read_elr_el3());

	ctx = handle;

	/*
	 * Check if we interrupted secure state. Perform a context switch so
	 * that we can delegate to NS.
	 */
	if (sec_state == SECURE) {
		save_secure_context();
		ctx = restore_and_resume_ns_context();
	}

	setup_ns_dispatch(map, se, ctx, sec_state, intr_raw);

	/*
	 * End of interrupt is done in sdei_event_complete, when the client
	 * signals completion.
	 */
	return 0;
}

/*
 * Explicitly dispatch the given SDEI event. Intended for events with explicit
 * (non-interrupt) dispatch: the event must be private, explicitly mapped (not
 * dynamic or bound to an interrupt), and permitted by the current dispatch
 * nesting. Returns 0 on success, -1 on any validation failure.
 *
 * The caller is assumed to be executing in Secure state with its context
 * already saved; on success, execution does not return to the caller until
 * the event completes.
 */
int sdei_dispatch_event(int ev_num, unsigned int preempted_sec_state)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ctx;
	sdei_dispatch_context_t *disp_ctx;
	sdei_cpu_state_t *state;

	/* Validate preempted security state */
	if ((preempted_sec_state != SECURE) &&
			(preempted_sec_state != NON_SECURE)) {
		return -1;
	}

	/* Can't dispatch if events are masked on this PE */
	state = sdei_get_this_pe_state();
	if (state->pe_masked == PE_MASKED)
		return -1;

	/* Event 0 can't be dispatched */
	if (ev_num == SDEI_EVENT_0)
		return -1;

	/* Locate mapping corresponding to this event */
	map = find_event_map(ev_num);
	if (!map)
		return -1;

	/*
	 * Statically-bound or dynamic maps are dispatched only as a result of
	 * interrupt, and not upon explicit request.
	 */
	if (is_map_dynamic(map) || is_map_bound(map))
		return -1;

	/* The event must be private */
	if (is_event_shared(map))
		return -1;

	/* Examine state of dispatch stack */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx) {
		/*
		 * There's an outstanding dispatch. If the outstanding dispatch
		 * is critical, no more dispatches are possible.
		 */
		if (is_event_critical(disp_ctx->map))
			return -1;

		/*
		 * If the outstanding dispatch is Normal, only critical events
		 * can be dispatched.
		 */
		if (is_event_normal(map))
			return -1;
	}

	se = get_event_entry(map);
	if (!can_sdei_state_trans(se, DO_DISPATCH))
		return -1;

	/* Activate the priority corresponding to the event being dispatched */
	ehf_activate_priority(sdei_event_priority(map));

	/*
	 * We assume the current context is SECURE, and that it's already been
	 * saved.
	 */
	ctx = restore_and_resume_ns_context();

	/*
	 * The caller has effectively terminated execution. Record to resume the
	 * preempted context later when the event completes or
	 * complete-and-resumes.
	 *
	 * intr_raw is 0 here: there is no acknowledged interrupt to EOI for an
	 * explicit dispatch.
	 */
	setup_ns_dispatch(map, se, ctx, preempted_sec_state, 0);

	return 0;
}

/*
 * Handle the client's COMPLETE / COMPLETE_AND_RESUME call for the top-most
 * outstanding dispatch. Restores the interrupted Non-secure context and, for
 * complete-and-resume, fakes a synchronous exception entry at 'pc'. Returns
 * SDEI_EDENY on validation failure.
 */
int sdei_event_complete(int resume, uint64_t pc)
{
	sdei_dispatch_context_t *disp_ctx;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ctx;
	sdei_action_t act;
	unsigned int client_el = sdei_client_el();

	/* Return error if called without an active event */
	disp_ctx = get_outstanding_dispatch();
	if (!disp_ctx)
		return SDEI_EDENY;

	/* Validate resumption point */
	if (resume && (plat_sdei_validate_entry_point(pc, client_el) != 0))
		return SDEI_EDENY;

	map = disp_ctx->map;
	assert(map);
	se = get_event_entry(map);

	act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
	if (!can_sdei_state_trans(se, act)) {
		/*
		 * NOTE(review): this error path unlocks the map lock, but in
		 * this version the lock is only acquired further down (after
		 * pop_dispatch). This unlock looks unbalanced — confirm
		 * whether the lock acquisition should precede this check, as
		 * in later upstream revisions of this file.
		 */
		if (is_event_shared(map))
			sdei_map_unlock(map);
		return SDEI_EDENY;
	}

	/* Having done sanity checks, pop dispatch */
	pop_dispatch();

	SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
			map->ev_num, read_spsr_el3(), read_elr_el3());

	if (is_event_shared(map))
		sdei_map_lock(map);

	/*
	 * Restore Non-secure to how it was originally interrupted. Once done,
	 * it's up-to-date with the saved copy.
	 */
	ctx = cm_get_context(NON_SECURE);
	restore_event_ctx(disp_ctx, ctx);

	if (resume) {
		/*
		 * Complete-and-resume call. Prepare the Non-secure context
		 * (currently active) for complete and resume.
		 */
		cm_set_elr_spsr_el3(NON_SECURE, pc, SPSR_64(client_el,
					MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));

		/*
		 * Make it look as if a synchronous exception were taken at the
		 * supplied Non-secure resumption point. Populate SPSR and
		 * ELR_ELx so that an ERET from there works as expected.
		 *
		 * The assumption is that the client, if necessary, would have
		 * saved any live content in these registers before making this
		 * call.
		 */
		if (client_el == MODE_EL2) {
			write_elr_el2(disp_ctx->elr_el3);
			write_spsr_el2(disp_ctx->spsr_el3);
		} else {
			/* EL1 */
			write_elr_el1(disp_ctx->elr_el3);
			write_spsr_el1(disp_ctx->spsr_el3);
		}
	}

	/*
	 * If the cause of dispatch originally interrupted the Secure world, and
	 * if Non-secure world wasn't allowed to preempt Secure execution,
	 * resume Secure.
	 *
	 * No need to save the Non-secure context ahead of a world switch: the
	 * Non-secure context was fully saved before dispatch, and has been
	 * returned to its pre-dispatch state.
629b7cb133eSJeenu Viswambharan */ 630b7cb133eSJeenu Viswambharan if ((disp_ctx->sec_state == SECURE) && 631b7cb133eSJeenu Viswambharan (ehf_is_ns_preemption_allowed() == 0)) { 632b7cb133eSJeenu Viswambharan restore_and_resume_secure_context(); 633b7cb133eSJeenu Viswambharan } 634b7cb133eSJeenu Viswambharan 635b7cb133eSJeenu Viswambharan if ((map->ev_num == SDEI_EVENT_0) || is_map_bound(map)) { 636b7cb133eSJeenu Viswambharan /* 637b7cb133eSJeenu Viswambharan * The event was dispatched after receiving SDEI interrupt. With 638b7cb133eSJeenu Viswambharan * the event handling completed, EOI the corresponding 639b7cb133eSJeenu Viswambharan * interrupt. 640b7cb133eSJeenu Viswambharan */ 641b7cb133eSJeenu Viswambharan plat_ic_end_of_interrupt(disp_ctx->intr_raw); 64255a1266eSJeenu Viswambharan } else { 64355a1266eSJeenu Viswambharan /* 64455a1266eSJeenu Viswambharan * An unbound event must have been dispatched explicitly. 64555a1266eSJeenu Viswambharan * Deactivate the priority level that was activated at the time 64655a1266eSJeenu Viswambharan * of explicit dispatch. 
64755a1266eSJeenu Viswambharan */ 64855a1266eSJeenu Viswambharan ehf_deactivate_priority(sdei_event_priority(map)); 649b7cb133eSJeenu Viswambharan } 650b7cb133eSJeenu Viswambharan 651b7cb133eSJeenu Viswambharan if (is_event_shared(map)) 652b7cb133eSJeenu Viswambharan sdei_map_unlock(map); 653b7cb133eSJeenu Viswambharan 654b7cb133eSJeenu Viswambharan return 0; 655b7cb133eSJeenu Viswambharan } 656b7cb133eSJeenu Viswambharan 657b7cb133eSJeenu Viswambharan int sdei_event_context(void *handle, unsigned int param) 658b7cb133eSJeenu Viswambharan { 659b7cb133eSJeenu Viswambharan sdei_dispatch_context_t *disp_ctx; 660b7cb133eSJeenu Viswambharan 661b7cb133eSJeenu Viswambharan if (param >= SDEI_SAVED_GPREGS) 662b7cb133eSJeenu Viswambharan return SDEI_EINVAL; 663b7cb133eSJeenu Viswambharan 664b7cb133eSJeenu Viswambharan /* Get outstanding dispatch on this CPU */ 665b7cb133eSJeenu Viswambharan disp_ctx = get_outstanding_dispatch(); 666b7cb133eSJeenu Viswambharan if (!disp_ctx) 667b7cb133eSJeenu Viswambharan return SDEI_EDENY; 668b7cb133eSJeenu Viswambharan 669b7cb133eSJeenu Viswambharan assert(disp_ctx->map); 670b7cb133eSJeenu Viswambharan 671b7cb133eSJeenu Viswambharan if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT)) 672b7cb133eSJeenu Viswambharan return SDEI_EDENY; 673b7cb133eSJeenu Viswambharan 674b7cb133eSJeenu Viswambharan /* 675b7cb133eSJeenu Viswambharan * No locking is required for the Running status as this is the only CPU 676b7cb133eSJeenu Viswambharan * which can complete the event 677b7cb133eSJeenu Viswambharan */ 678b7cb133eSJeenu Viswambharan 679b7cb133eSJeenu Viswambharan return disp_ctx->x[param]; 680b7cb133eSJeenu Viswambharan } 681