xref: /rk3399_ARM-atf/bl31/aarch64/ea_delegate.S (revision f87e54f73cfee5042df526af6185ac6d9653a8f5)
/*
 * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2022, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */


#include <assert_macros.S>
#include <asm_macros.S>
#include <bl31/ea_handle.h>
#include <context.h>
#include <lib/extensions/ras_arch.h>
#include <cpu_macros.S>

	.globl	handle_lower_el_sync_ea
	.globl	handle_lower_el_async_ea
	.globl	handle_pending_async_ea
	.globl	reflect_pending_async_ea_to_lower_el

/*
 * This function forms the tail end of Synchronous Exception entry from a lower
 * EL, and expects to handle Synchronous External Aborts from lower ELs and CPU
 * Implementation Defined Exceptions. If any other kind of exception is
 * detected, this function reports an unhandled exception.
 *
 * It delegates the handling of the EA to the platform handler and, upon
 * successfully handling the EA, exits EL3; otherwise it panics.
 *
 * This function assumes x30 has been saved.
 */
func handle_lower_el_sync_ea
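	/* Extract the exception class from ESR_EL3 */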
	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Check for I/D aborts from lower EL */
	cmp	x30, #EC_IABORT_LOWER_EL
	b.eq	1f

	cmp	x30, #EC_DABORT_LOWER_EL
	b.eq	1f

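	/* Not an I/D abort; check for a CPU implementation defined exception handler */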
	/* Save GP registers */
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]

	/* Get the cpu_ops pointer */
	bl	get_cpu_ops_ptr

	/* Get the cpu_ops exception handler */
	ldr	x0, [x0, #CPU_E_HANDLER_FUNC]

	/*
	 * If the handler function pointer is NULL, this CPU does not provide an
	 * implementation defined exception handler.
	 */
	cbz	x0, 2f
	mrs	x1, esr_el3
	ubfx	x1, x1, #ESR_EC_SHIFT, #ESR_EC_LENGTH
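	/* Call the CPU-specific handler with the exception class in x1 */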
	blr	x0
	b	2f

1:
	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * Also save PMCR_EL0 and set the PSTATE to a known state.
	 */
	bl	prepare_el3_entry

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/* Setup exception class and syndrome arguments for platform handler */
	mov	x0, #ERROR_EA_SYNC
	mrs	x1, esr_el3
	bl	delegate_sync_ea

	/* el3_exit assumes SP_EL0 on entry */
	msr	spsel, #MODE_SP_EL0
	b	el3_exit
2:
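	/* Restore the GP registers saved before the cpu_ops lookup */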
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]

	/* Synchronous exceptions other than the above are assumed to be EA */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	no_ret	report_unhandled_exception
endfunc handle_lower_el_sync_ea


/*
 * This function handles SErrors from lower ELs.
 *
 * It delegates the handling of the EA to the platform handler and, upon
 * successfully handling the EA, exits EL3; otherwise it panics.
 *
 * This function assumes x30 has been saved.
 */
func handle_lower_el_async_ea

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * Also save PMCR_EL0 and set the PSTATE to a known state.
	 */
	bl	prepare_el3_entry

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/* Setup exception class and syndrome arguments for platform handler */
	mov	x0, #ERROR_EA_ASYNC
	mrs	x1, esr_el3
	bl	delegate_async_ea

	/* el3_exit assumes SP_EL0 on entry */
	msr	spsel, #MODE_SP_EL0
	b	el3_exit
endfunc handle_lower_el_async_ea

/*
 * NOTE 1 : Synchronized async EA handling
 *
 * The comments here apply to the following two functions:
 *   - handle_pending_async_ea
 *   - reflect_pending_async_ea_to_lower_el
 *
 * Both must be called from the exception vector directly.
 *
 * This special handling is required to cater for async EAs from a lower EL
 * that are synchronized on entry to EL3.
 *
 * This scenario may arise when there is an error (EA) in the system which has
 * not yet been signaled to the PE while executing in a lower EL. During entry
 * into EL3, the errors are synchronized either implicitly or explicitly,
 * causing an async EA to become pending at EL3.
 *
 * On detecting the pending EA (via ISR_EL1.A), and based on the routing model
 * of the EA, either handle it in EL3 using "handle_pending_async_ea" (FFH) or
 * return to the lower EL using "reflect_pending_async_ea_to_lower_el" (KFH).
 */

/*
 * Refer to NOTE 1 : Firmware First Handling (FFH)
 *  Called when FFH is enabled and the outgoing world is Non-Secure
 *  (scr_el3.ea = 1).
 *
 * This function assumes x30 has been saved.
 */
#if HANDLE_EA_EL3_FIRST_NS
func handle_pending_async_ea
	/*
	 * Prepare for nested handling of EA. Stash sysregs clobbered by nested
	 * exception and handler
	 */
	str	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_GPREG_LR]
	mrs	x30, esr_el3
	str	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ESR_EL3]
	mrs	x30, spsr_el3
	str	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_SPSR_EL3]
	mrs	x30, elr_el3
	str	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]

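	/* Record that a nested EA is about to be taken at EL3 */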
	mov	x30, #1
	str	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	/*
	 * Restore the original x30 saved as part of entering EL3. This is not
	 * required for the current function, but for the EL3 SError vector
	 * entry once the PSTATE.A bit is unmasked. We restore x30 here, and the
	 * same value is stored again by the EL3 SError vector entry.
	 */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

	/*
	 * After clearing the PSTATE.A bit, the pending SError will trigger at
	 * the current EL. An explicit synchronization event ensures the newly
	 * unmasked interrupt is taken immediately.
	 */
	unmask_async_ea

	/* Restore the original exception information and zero the storage */
	ldr	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
	msr	elr_el3, x30
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
	ldr	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_SPSR_EL3]
	msr	spsr_el3, x30
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_SPSR_EL3]
	ldr	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ESR_EL3]
	msr	esr_el3, x30
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ESR_EL3]

	/*
	 * If the original exception corresponds to an SError from a lower EL,
	 * eret back to the lower EL; otherwise return to the vector table for
	 * the original exception handling.
	 */
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	cmp	x30, #EC_SERROR
	ldr	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_GPREG_LR]
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_GPREG_LR]
	b.eq	1f
	ret
1:
	exception_return
endfunc handle_pending_async_ea
#endif /* HANDLE_EA_EL3_FIRST_NS */

/*
 * Refer to NOTE 1 : Kernel First Handling (KFH)
 *   Called in the following scenarios:
 *     - Always, if the outgoing world is either Secure or Realm
 *     - In KFH mode, if the outgoing world is Non-secure.
 *
 * This function assumes x30 has been saved.
 */

func reflect_pending_async_ea_to_lower_el
	/*
	 * As the original exception was not handled, we need to ensure that we
	 * return to the instruction which caused the exception. To achieve
	 * that, eret to "elr-4" (label "subtract_elr_el3") for an SMC, or
	 * simply eret otherwise (label "skip_smc_check").
	 *
	 * LIMITATION: It could be that the async EA is masked at the target
	 * exception level, or that the priority of the async EA with respect to
	 * the EL3/secure interrupt is lower, which causes back and forth
	 * between the lower EL and EL3. In that case, we track the loop count
	 * in "CTX_NESTED_EA_FLAG" and leverage the previous ELR in
	 * "CTX_SAVED_ELR_EL3" to detect the cycle and panic to indicate a
	 * problem here (label "check_loop_ctr").
	 * However, setting SCR_EL3.IESB = 1 should give priority to SError
	 * handling, as per the AArch64.TakeException pseudocode in the Arm ARM.
	 *
	 * TODO: In future, if EL3 gains the capability to inject a virtual
	 * SError into lower ELs, we can remove the el3_panic, handle the
	 * original exception first and inject an SError into the lower EL
	 * before ereting back.
	 */
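	/* Use x28 and x29 as scratch; both are restored before leaving EL3 */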
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x29, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
	mrs	x28, elr_el3
	cmp	x29, x28
	b.eq	check_loop_ctr
	str	x28, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
	/* Zero the loop counter */
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	b	skip_loop_ctr
check_loop_ctr:
	ldr	x29, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	add	x29, x29, #1
	str	x29, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	cmp	x29, #ASYNC_EA_REPLAY_COUNTER
	b.ge	el3_panic
skip_loop_ctr:
	/*
	 * Logic to distinguish whether we came from an SMC or any other
	 * exception. Use the offset within the vector entry to find out which
	 * exception we are handling. In each 0x200-sized vector block, the
	 * range "0x0-0x80" holds the sync exception entry and "0x80-0x200" the
	 * async exception entries. Use the vector base address (vbar_el3) and
	 * the exception offset (LR) to calculate whether the address we came
	 * from lies in any of "0x0-0x80", "0x200-0x280", "0x400-0x480" or
	 * "0x600-0x680".
	 */
	mrs	x29, vbar_el3
	sub	x30, x30, x29
	and	x30, x30, #0x1ff
	cmp	x30, #0x80
	b.ge	skip_smc_check
	/* It is a synchronous exception; now check whether it is an SMC */
	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	cmp	x30, #EC_AARCH32_SMC
	b.eq	subtract_elr_el3
	cmp	x30, #EC_AARCH64_SMC
	b.eq	subtract_elr_el3
	b	skip_smc_check
subtract_elr_el3:
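	/*
	 * Wind ELR back to the SMC instruction itself so it is re-issued once
	 * the pending SError has been handled at the lower EL.
	 */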
	sub	x28, x28, #4
skip_smc_check:
	msr	elr_el3, x28
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	exception_return
endfunc reflect_pending_async_ea_to_lower_el

/*
 * Prelude for Synchronous External Abort handling. This function assumes that
 * all GP registers have been saved by the caller.
 *
 * x0: EA reason
 * x1: EA syndrome
 */
func delegate_sync_ea
#if ENABLE_FEAT_RAS
	/*
	 * Check for Uncontainable error type. If so, route to the platform
	 * fatal error handler rather than the generic EA one.
	 */
	ubfx	x2, x1, #EABORT_SET_SHIFT, #EABORT_SET_WIDTH
	cmp	x2, #ERROR_STATUS_SET_UC
	b.ne	1f

	/* Check fault status code */
	ubfx	x3, x1, #EABORT_DFSC_SHIFT, #EABORT_DFSC_WIDTH
	cmp	x3, #SYNC_EA_FSC
	b.ne	1f

	no_ret	plat_handle_uncontainable_ea
1:
#endif

	b	ea_proceed
endfunc delegate_sync_ea


/*
 * Prelude for Asynchronous External Abort handling. This function assumes that
 * all GP registers have been saved by the caller.
 *
 * x0: EA reason
 * x1: EA syndrome
 */
func delegate_async_ea
#if ENABLE_FEAT_RAS
	/*
	 * Check the Exception Class to ensure this is an SError, as this
	 * function should only be invoked for SErrors. If that is not the
	 * case, which implies either a HW error or a programming error, panic.
	 */
	ubfx	x2, x1, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	cmp	x2, #EC_SERROR
	b.ne	el3_panic
	/*
	 * Check for Implementation Defined Syndrome. If so, skip checking
	 * Uncontainable error type from the syndrome as the format is unknown.
	 */
	tbnz	x1, #SERROR_IDS_BIT, 1f

	/* AET only valid when DFSC is 0x11 */
	ubfx	x2, x1, #EABORT_DFSC_SHIFT, #EABORT_DFSC_WIDTH
	cmp	x2, #DFSC_SERROR
	b.ne	1f

	/*
	 * Check for Uncontainable error type. If so, route to the platform
	 * fatal error handler rather than the generic EA one.
	 */
	ubfx	x3, x1, #EABORT_AET_SHIFT, #EABORT_AET_WIDTH
	cmp	x3, #ERROR_STATUS_UET_UC
	b.ne	1f

	no_ret	plat_handle_uncontainable_ea
1:
#endif

	b	ea_proceed
endfunc delegate_async_ea


/*
 * Delegate External Abort handling to platform's EA handler. This function
 * assumes that all GP registers have been saved by the caller.
 *
 * x0: EA reason
 * x1: EA syndrome
 */
func ea_proceed
	/*
	 * If the ESR loaded earlier is not zero, we were processing an EA
	 * already, and this is a double fault.
	 */
	ldr	x5, [sp, #CTX_EL3STATE_OFFSET + CTX_ESR_EL3]
	cbz	x5, 1f
	no_ret	plat_handle_double_fault

1:
	/* Save EL3 state */
	mrs	x2, spsr_el3
	mrs	x3, elr_el3
	stp	x2, x3, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/*
	 * Save ESR as handling might involve lower ELs, and returning back to
	 * EL3 from there would trample the original ESR.
	 */
	mrs	x4, scr_el3
	mrs	x5, esr_el3
	stp	x4, x5, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

	/*
	 * Setup rest of arguments, and call platform External Abort handler.
	 *
	 * x0: EA reason (already in place)
	 * x1: Exception syndrome (already in place).
	 * x2: Cookie (unused for now).
	 * x3: Context pointer.
	 * x4: Flags (security state from SCR for now).
	 */
	mov	x2, xzr
	mov	x3, sp
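	/* Extract SCR_EL3.NS (bit 0) as the security state flag */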
	ubfx	x4, x4, #0, #1

	/* Switch to runtime stack */
	ldr	x5, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	msr	spsel, #MODE_SP_EL0
	mov	sp, x5

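	/* Preserve the return address; x30 is clobbered by the bl below */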
	mov	x29, x30
#if ENABLE_ASSERTIONS
	/* Stash the stack pointer */
	mov	x28, sp
#endif
	bl	plat_ea_handler

#if ENABLE_ASSERTIONS
	/*
	 * Error handling flows might involve long jumps; so upon returning from
	 * the platform error handler, validate that we've completely unwound
	 * the stack.
	 */
	mov	x27, sp
	cmp	x28, x27
	ASM_ASSERT(eq)
#endif

	/* Make SP point to context */
	msr	spsel, #MODE_SP_ELX

	/* Restore EL3 state and ESR */
	ldp	x1, x2, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	msr	spsr_el3, x1
	msr	elr_el3, x2

	/* Restore ESR_EL3 and SCR_EL3 */
	ldp	x3, x4, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	msr	scr_el3, x3
	msr	esr_el3, x4

#if ENABLE_ASSERTIONS
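	/*
	 * The ESR stashed on entry is expected to be non-zero, since zero in
	 * the CTX_ESR_EL3 slot denotes that no EA is in progress (see the
	 * double fault check above).
	 */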
	cmp	x4, xzr
	ASM_ASSERT(ne)
#endif

	/* Clear ESR storage */
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_ESR_EL3]

	ret	x29
endfunc ea_proceed