xref: /optee_os/core/arch/arm/kernel/entry_a64.S (revision 6cfa381e534b362afbd103f526b132048e54ba47)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2015-2022, Linaro Limited
4 * Copyright (c) 2021-2023, Arm Limited
5 */
6
7#include <platform_config.h>
8
9#include <arm64_macros.S>
10#include <arm.h>
11#include <asm.S>
12#include <generated/asm-defines.h>
13#include <keep.h>
14#include <kernel/thread_private.h>
15#include <mm/core_mmu.h>
16#include <sm/optee_smc.h>
17#include <sm/teesmc_opteed.h>
18#include <sm/teesmc_opteed_macros.h>
19
20	/*
21	 * Set up SP_EL0 and SP_EL1; SP will be set to SP_EL0.
22	 * SP_EL0 is assigned:
23	 *   stack_tmp + (cpu_id + 1) * stack_tmp_stride - STACK_TMP_GUARD
24	 * SP_EL1 is assigned thread_core_local[cpu_id]
25	 */
26	.macro set_sp
27		bl	__get_core_pos
28		cmp	x0, #CFG_TEE_CORE_NB_CORE
29		/* Unsupported CPU, park it before it breaks something */
30		bge	unhandled_cpu
31		add	x0, x0, #1
32		adr_l	x1, stack_tmp_stride
33		ldr	w1, [x1]
34		mul	x1, x0, x1
35
36		/* x0 = stack_tmp - STACK_TMP_GUARD */
37		adr_l	x2, stack_tmp_rel
38		ldr	w0, [x2]
39		add	x0, x0, x2
40
41		msr	spsel, #0
42		add	sp, x1, x0
43		bl	thread_get_core_local
44		msr	spsel, #1
45		mov	sp, x0
46		msr	spsel, #0
47	.endm
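
	/*
	 * Roughly, what the macro above computes, as a C-like sketch (not
	 * an exact implementation; stack_tmp_rel resolves to
	 * stack_tmp - STACK_TMP_GUARD, see its definition near the end of
	 * this file):
	 *
	 *   size_t pos = __get_core_pos();
	 *
	 *   if (pos >= CFG_TEE_CORE_NB_CORE)
	 *       unhandled_cpu();
	 *   SP_EL0 = stack_tmp - STACK_TMP_GUARD +
	 *            (pos + 1) * stack_tmp_stride;
	 *   SP_EL1 = thread_get_core_local();
	 */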
48
49	.macro read_feat_mte reg
50		mrs	\reg, id_aa64pfr1_el1
51		ubfx	\reg, \reg, #ID_AA64PFR1_EL1_MTE_SHIFT, #4
52	.endm
53
54	.macro read_feat_pan reg
55		mrs	\reg, id_mmfr3_el1
56		ubfx	\reg, \reg, #ID_MMFR3_EL1_PAN_SHIFT, #4
57	.endm
58
59	.macro set_sctlr_el1
60		mrs	x0, sctlr_el1
61		orr	x0, x0, #SCTLR_I
62		orr	x0, x0, #SCTLR_SA
63		orr	x0, x0, #SCTLR_SPAN
64#if defined(CFG_CORE_RWDATA_NOEXEC)
65		orr	x0, x0, #SCTLR_WXN
66#endif
67#if defined(CFG_SCTLR_ALIGNMENT_CHECK)
68		orr	x0, x0, #SCTLR_A
69#else
70		bic	x0, x0, #SCTLR_A
71#endif
72#ifdef CFG_MEMTAG
73		read_feat_mte x1
74		cmp	w1, #1
75		b.ls	111f
76		orr	x0, x0, #(SCTLR_ATA | SCTLR_ATA0)
77		bic	x0, x0, #SCTLR_TCF_MASK
78		bic	x0, x0, #SCTLR_TCF0_MASK
79111:
80#endif
81#if defined(CFG_TA_PAUTH) && defined(CFG_TA_BTI)
82		orr	x0, x0, #SCTLR_BT0
83#endif
84#if defined(CFG_CORE_PAUTH) && defined(CFG_CORE_BTI)
85		orr	x0, x0, #SCTLR_BT1
86#endif
87		msr	sctlr_el1, x0
88	.endm
89
90	.macro init_memtag_per_cpu
91		read_feat_mte x0
92		cmp	w0, #1
93		b.ls	11f
94
95#ifdef CFG_TEE_CORE_DEBUG
96		/*
97		 * This together with GCR_EL1.RRND = 0 will make the tags
98		 * acquired with the irg instruction deterministic.
99		 */
100		mov_imm	x0, 0xcafe00
101		msr	rgsr_el1, x0
102		/* Avoid tag = 0x0 and 0xf */
103		mov	x0, #0
104#else
105		/*
106		 * Still avoid tag = 0x0 and 0xf as we use that tag for
107		 * everything which isn't explicitly tagged. Set
108		 * GCR_EL1.RRND = 1 to allow an implementation-specific
109		 * method of generating the tags.
110		 */
111		mov	x0, #GCR_EL1_RRND
112#endif
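		/*
		 * The two orr instructions below set bits 0 and 15 in the
		 * GCR_EL1.Exclude mask, that is, they exclude tag values 0x0
		 * and 0xf as described in the comments above.
		 */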
113		orr	x0, x0, #1
114		orr	x0, x0, #(1 << 15)
115		msr	gcr_el1, x0
116
117		/*
118		 * Enable the tag checks on the current CPU.
119		 *
120		 * Depends on boot_init_memtag() having cleared tags for
121		 * TEE core memory. Well, not really, addresses with the
122		 * tag value 0b0000 will use unchecked access due to
123		 * TCR_TCMA0.
124		 */
125		mrs	x0, tcr_el1
126		orr	x0, x0, #TCR_TBI0
127		orr	x0, x0, #TCR_TCMA0
128		msr	tcr_el1, x0
129
130		mrs	x0, sctlr_el1
131		orr	x0, x0, #SCTLR_TCF_SYNC
132		orr	x0, x0, #SCTLR_TCF0_SYNC
133		msr	sctlr_el1, x0
134
135		isb
13611:
137	.endm
138
139	.macro init_pauth_per_cpu
140		msr	spsel, #1
141		ldp	x0, x1, [sp, #THREAD_CORE_LOCAL_KEYS]
142		msr	spsel, #0
143		write_apiakeyhi x0
144		write_apiakeylo x1
145		mrs	x0, sctlr_el1
146		orr	x0, x0, #SCTLR_ENIA
147		msr	sctlr_el1, x0
148		isb
149	.endm
150
151	.macro init_pan
152		read_feat_pan x0
153		cmp	x0, #0
154		b.eq	1f
155		mrs	x0, sctlr_el1
156		bic	x0, x0, #SCTLR_SPAN
157		msr	sctlr_el1, x0
158		write_pan_enable
159	1:
160	.endm
161
162FUNC _start , :
163	/*
164	 * Register use:
165	 * x0	- CFG_CORE_FFA=y && CFG_CORE_SEL2_SPMC=n:
166	 *	  if non-NULL holds the TOS FW config [1] address
167	 *	- CFG_CORE_FFA=y && CFG_CORE_SEL2_SPMC=y:
168	 *	  address of FF-A Boot Information Blob
169	 *	- CFG_CORE_FFA=n:
170	 *	  if non-NULL holds the pagable part address
171	 * x2	- CFG_CORE_SEL2_SPMC=n:
172	 *	  if non-NULL holds the system DTB address
173	 *
174	 * x19 - saved x0
175	 * x20 - saved x2
176	 *
177	 * [1] A TF-A concept: TOS_FW_CONFIG - Trusted OS Firmware
178	 * configuration file. Used by Trusted OS (BL32), that is, OP-TEE
179	 * here.
180	 */
181	mov	x19, x0
182#if defined(CFG_DT_ADDR)
183	ldr     x20, =CFG_DT_ADDR
184#else
185	mov	x20, x2		/* Save DT address */
186#endif
187
188	adr	x0, reset_vect_table
189	msr	vbar_el1, x0
190	isb
191
192#ifdef CFG_PAN
193	init_pan
194#endif
195
196	set_sctlr_el1
197	isb
198
199#ifdef CFG_WITH_PAGER
200	/*
201	 * Move init code into correct location and move hashes to a
202	 * temporary safe location until the heap is initialized.
203	 *
204	 * The binary is built as:
205	 * [Pager code, rodata and data] : In correct location
206	 * [Init code and rodata] : Should be copied to __init_start
207	 * [struct boot_embdata + data] : Should be saved before
208	 * initializing the pager; the first uint32_t tells the data length
209	 */
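
	/*
	 * Roughly, a C-like sketch of the copy below (the loop copies
	 * backwards in 16-byte steps, so it may overshoot the start
	 * slightly if the length isn't 16-byte aligned):
	 *
	 *   size_t init_len = __init_end - __init_start;
	 *   size_t embdata_len = *(uint32_t *)(__data_end + init_len);
	 *   size_t len = init_len + embdata_len;
	 *
	 *   memmove(__init_start, __data_end, len);
	 *   cached_mem_end = __init_start + len;
	 */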
210	adr	x0, __init_start	/* dst */
211	adr	x1, __data_end		/* src */
212	adr	x2, __init_end
213	sub	x2, x2, x0		/* init len */
214	ldr	w4, [x1, x2]		/* length of hashes etc */
215	add	x2, x2, x4		/* length of init and hashes etc */
216	/* Copy backwards (as memmove) in case we're overlapping */
217	add	x0, x0, x2		/* __init_start + len */
218	add	x1, x1, x2		/* __data_end + len */
219	adr	x3, cached_mem_end
220	str	x0, [x3]
221	adr	x2, __init_start
222copy_init:
223	ldp	x3, x4, [x1, #-16]!
224	stp	x3, x4, [x0, #-16]!
225	cmp	x0, x2
226	b.gt	copy_init
227#else
228	/*
229	 * The binary is built as:
230	 * [Core, rodata and data] : In correct location
231	 * [struct boot_embdata + data] : Should be moved to __end, first
232	 * [struct boot_embdata + data] : Should be moved to __end; the
233	 * first uint32_t tells the length of the struct + data
234	adr_l	x0, __end		/* dst */
235	adr_l	x1, __data_end		/* src */
236	ldr	w2, [x1]		/* struct boot_embdata::total_len */
237	/* Copy backwards (as memmove) in case we're overlapping */
238	add	x0, x0, x2
239	add	x1, x1, x2
240	adr	x3, cached_mem_end
241	str	x0, [x3]
242	adr_l	x2, __end
243
244copy_init:
245	ldp	x3, x4, [x1, #-16]!
246	stp	x3, x4, [x0, #-16]!
247	cmp	x0, x2
248	b.gt	copy_init
249#endif
250
251	/*
252	 * Clear .bss. This code obviously depends on the linker keeping
253	 * the start/end of .bss at least 8-byte aligned.
254	 */
255	adr_l	x0, __bss_start
256	adr_l	x1, __bss_end
257clear_bss:
258	str	xzr, [x0], #8
259	cmp	x0, x1
260	b.lt	clear_bss
261
262#ifdef CFG_NS_VIRTUALIZATION
263	/*
264	 * Clear .nex_bss. This code obviously depends on the linker keeping
265	 * the start/end of .nex_bss at least 8-byte aligned.
266	 */
267	adr_l	x0, __nex_bss_start
268	adr_l	x1, __nex_bss_end
269clear_nex_bss:
270	str	xzr, [x0], #8
271	cmp	x0, x1
272	b.lt	clear_nex_bss
273#endif
274
275
276#if defined(CFG_CORE_PHYS_RELOCATABLE)
277	/*
278	 * Save the base physical address; it will not change after this
279	 * point.
280	 */
281	adr_l	x2, core_mmu_tee_load_pa
282	adr	x1, _start		/* Load address */
283	str	x1, [x2]
284
285	mov_imm	x0, TEE_LOAD_ADDR	/* Compiled load address */
286	sub	x0, x1, x0		/* Relocation offset */
287
288	cbz	x0, 1f
289	bl	relocate
2901:
291#endif
292
293	/* Set up SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
294	set_sp
295
296	bl	thread_init_thread_core_local
297
298	/* Enable aborts now that we can receive exceptions */
299	msr	daifclr, #DAIFBIT_ABT
300
301	/*
302	 * Invalidate dcache for all memory used during initialization to
303	 * avoid nasty surprises when the cache is turned on. We must not
304	 * invalidate memory not used by OP-TEE since we may invalidate
305	 * entries used by, for instance, ARM Trusted Firmware.
306	 */
307	adr_l	x0, __text_start
308	ldr	x1, cached_mem_end
309	sub	x1, x1, x0
310	bl	dcache_cleaninv_range
311
312	/* Enable Console */
313	bl	console_init
314
315#if defined(CFG_CORE_SEL2_SPMC)
316	mov	x0, x19		/* boot info */
317	bl	boot_save_boot_info
318#endif
319
320#ifdef CFG_MEMTAG
321	/*
322	 * If FEAT_MTE2 is available, initialize the memtag callbacks.
323	 * Tags for OP-TEE core memory are then cleared to make it safe to
324	 * enable MEMTAG below.
325	 */
326	bl	boot_init_memtag
327#endif
328
329#ifdef CFG_CORE_ASLR
330	mov	x0, x20		/* DT address */
331	bl	get_aslr_seed
332#else
333	mov	x0, #0
334#endif
335
336	adr	x1, boot_mmu_config
337	bl	core_init_mmu_map
338
339#ifdef CFG_CORE_ASLR
340	/*
341	 * Process relocation information again, updating for the virtual
342	 * map offset. We're doing this now, before the MMU is enabled,
343	 * since some of the memory will become write-protected.
344	 */
345	ldr	x0, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
346	cbz	x0, 1f
347	/*
348	 * Update cached_mem_end address with load offset since it was
349	 * calculated before relocation.
350	 */
351	adr	x5, cached_mem_end
352	ldr	x6, [x5]
353	add	x6, x6, x0
354	str	x6, [x5]
355	adr	x1, _start		/* Load address */
356	bl	relocate
3571:
358#endif
359
360	bl	__get_core_pos
361	bl	enable_mmu
362#ifdef CFG_CORE_ASLR
363	/*
364	 * Reinitialize console, since register_serial_console() has
365	 * previously registered a PA and with ASLR the VA is different
366	 * from the PA.
367	 */
368	bl	console_init
369#endif
370
371#ifdef CFG_NS_VIRTUALIZATION
372	/*
373	 * Initialize partition tables for each partition to
374	 * default_partition, which has now been relocated to a different VA.
375	 */
376	bl	core_mmu_set_default_prtn_tbl
377#endif
378
379#ifdef CFG_CORE_SEL1_SPMC
380	mov	x0, xzr		/* pager not used */
381#else
382	mov	x0, x19		/* pagable part address */
383#endif
384	mov	x1, #-1
385	bl	boot_init_primary_early
386
387#ifdef CFG_MEMTAG
388	init_memtag_per_cpu
389#endif
390
391#ifndef CFG_NS_VIRTUALIZATION
392	mov	x21, sp
393	adr_l	x0, threads
394	ldr	x0, [x0, #THREAD_CTX_STACK_VA_END]
395	mov	sp, x0
396	bl	thread_get_core_local
397	mov	x22, x0
398	str	wzr, [x22, #THREAD_CORE_LOCAL_FLAGS]
399#endif
400	mov	x0, x20		/* DT address also known as HW_CONFIG */
401#ifdef CFG_CORE_SEL1_SPMC
402	mov	x1, x19		/* TOS_FW_CONFIG DT address */
403#else
404	mov	x1, xzr		/* unused */
405#endif
406	bl	boot_init_primary_late
407#ifdef CFG_CORE_PAUTH
408	init_pauth_per_cpu
409#endif
410
411#ifndef CFG_NS_VIRTUALIZATION
412	mov	x0, #THREAD_CLF_TMP
413	str     w0, [x22, #THREAD_CORE_LOCAL_FLAGS]
414	mov	sp, x21
415#endif
416
417#ifdef _CFG_CORE_STACK_PROTECTOR
418	/* Update stack canary value */
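	/*
	 * The call below appears to request a single 8-byte canary into a
	 * 16-byte scratch buffer on the stack (x0 = buffer, x1 = count,
	 * x2 = size); the result is then copied to __stack_chk_guard. The
	 * argument meaning is an assumption based on how the value is used
	 * here.
	 */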
419	sub	sp, sp, #0x10
420	mov	x0, sp
421	mov	x1, #1
422	mov	x2, #0x8
423	bl	plat_get_random_stack_canaries
424	ldr	x0, [sp]
425	adr_l	x5, __stack_chk_guard
426	str	x0, [x5]
427	add	sp, sp, #0x10
428#endif
429
430	/*
431	 * In case we've touched memory that secondary CPUs will use before
432	 * they have turned on their D-cache, clean and invalidate the
433	 * D-cache before exiting to normal world.
434	 */
435	adr_l	x0, __text_start
436	ldr	x1, cached_mem_end
437	sub	x1, x1, x0
438	bl	dcache_cleaninv_range
439
440
441	/*
442	 * Clear the current thread id now to allow the thread to be reused
443	 * on the next entry. Matches the thread_init_boot_thread() call in
444	 * boot.c.
445	 */
446#ifndef CFG_NS_VIRTUALIZATION
447	bl	thread_clr_boot_thread
448#endif
449
450#ifdef CFG_CORE_FFA
451	adr	x0, cpu_on_handler
452	/*
453	 * Compensate for the virtual map offset since cpu_on_handler() is
454	 * called with MMU off.
455	 */
456	ldr	x1, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
457	sub	x0, x0, x1
458	bl	thread_spmc_register_secondary_ep
459	b	thread_ffa_msg_wait
460#else
461	/*
462	 * Pass the vector address returned from main_init. Compensate for
463	 * the virtual map offset since cpu_on_handler() is called with the
464	 * MMU off.
465	 */
466	ldr	x0, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
467	adr	x1, thread_vector_table
468	sub	x1, x1, x0
469	mov	x0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
470	smc	#0
471	/* SMC should not return */
472	panic_at_smc_return
473#endif
474END_FUNC _start
475DECLARE_KEEP_INIT _start
476
477	.section .identity_map.data
478	.balign	8
479LOCAL_DATA cached_mem_end , :
480	.skip	8
481END_DATA cached_mem_end
482
483#if defined(CFG_CORE_ASLR) || defined(CFG_CORE_PHYS_RELOCATABLE)
484LOCAL_FUNC relocate , :
485	/*
486	 * x0 holds relocate offset
487	 * x1 holds load address
488	 */
489#ifdef CFG_WITH_PAGER
490	adr_l	x6, __init_end
491#else
492	adr_l	x6, __end
493#endif
494	ldp	w2, w3, [x6, #BOOT_EMBDATA_RELOC_OFFSET]
495
496	add	x2, x2, x6	/* start of relocations */
497	add	x3, x3, x2	/* end of relocations */
498
499	/*
500	 * Relocations are not formatted as Rela64; instead they are in a
501	 * compressed format created by get_reloc_bin() in
502	 * scripts/gen_tee_bin.py.
503	 *
504	 * All the R_AARCH64_RELATIVE relocations are translated into a
505	 * list of 32-bit offsets from TEE_LOAD_ADDR. Each offset points
506	 * to a 64-bit value which is increased by the load offset.
507	 */
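
	/*
	 * A C-like sketch of the loop below (names are illustrative only;
	 * x0 = relocate offset, x1 = load address, as per the comment at
	 * the top of this function):
	 *
	 *   uint32_t *rel = start_of_relocations;
	 *
	 *   while (rel < end_of_relocations) {
	 *       uint64_t *p = (uint64_t *)(load_address + *rel++);
	 *       *p += relocate_offset;
	 *   }
	 */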
508
509#ifdef CFG_WITH_PAGER
510	/*
511	 * With pager enabled we can only relocate the pager and init
512	 * parts; the rest has to be done when a page is populated.
513	 */
514	sub	x6, x6, x1
515#endif
516
517	b	2f
518	/* Loop over the relocation addresses and process all entries */
5191:	ldr	w4, [x2], #4
520#ifdef CFG_WITH_PAGER
521	/* Skip addresses that are too large */
522	cmp	x4, x6
523	b.ge	2f
524#endif
525	add	x4, x4, x1
526	ldr	x5, [x4]
527	add	x5, x5, x0
528	str	x5, [x4]
529
5302:	cmp	x2, x3
531	b.ne	1b
532
533	ret
534END_FUNC relocate
535#endif
536
537/*
538 * void enable_mmu(unsigned long core_pos);
539 *
540 * This function depends on being mapped within the identity map, where
541 * the physical and virtual addresses are the same. After the MMU has
542 * been enabled, execution continues at the new virtual offset instead.
543 * Stack pointers and the return address are updated accordingly.
544 */
545LOCAL_FUNC enable_mmu , : , .identity_map
546	adr	x1, boot_mmu_config
547	load_xregs x1, 0, 2, 6
548	/*
549	 * x0 = core_pos
550	 * x2 = tcr_el1
551	 * x3 = mair_el1
552	 * x4 = ttbr0_el1_base
553	 * x5 = ttbr0_core_offset
554	 * x6 = load_offset
555	 */
556	msr	tcr_el1, x2
557	msr	mair_el1, x3
558
559	/*
560	 * ttbr0_el1 = ttbr0_el1_base + ttbr0_core_offset * core_pos
561	 */
562	madd	x1, x5, x0, x4
563	msr	ttbr0_el1, x1
564	msr	ttbr1_el1, xzr
565	isb
566
567	/* Invalidate TLB */
568	tlbi	vmalle1
569
570	/*
571	 * Make sure translation table writes have drained into memory and
572	 * the TLB invalidation is complete.
573	 */
574	dsb	sy
575	isb
576
577	/* Enable the MMU */
578	mrs	x1, sctlr_el1
579	orr	x1, x1, #SCTLR_M
580	msr	sctlr_el1, x1
581	isb
582
583	/* Update vbar */
584	mrs	x1, vbar_el1
585	add	x1, x1, x6
586	msr	vbar_el1, x1
587	isb
588
589	/* Invalidate instruction cache and branch predictor */
590	ic	iallu
591	isb
592
593	/* Enable I and D cache */
594	mrs	x1, sctlr_el1
595	orr	x1, x1, #SCTLR_I
596	orr	x1, x1, #SCTLR_C
597	msr	sctlr_el1, x1
598	isb
599
600	/* Adjust stack pointers and return address */
601	msr	spsel, #1
602	add	sp, sp, x6
603	msr	spsel, #0
604	add	sp, sp, x6
605	add	x30, x30, x6
606
607	ret
608END_FUNC enable_mmu
609
610	.section .identity_map.data
611	.balign	8
612DATA boot_mmu_config , : /* struct core_mmu_config */
613	.skip	CORE_MMU_CONFIG_SIZE
614END_DATA boot_mmu_config
615
616FUNC cpu_on_handler , :
617	mov	x19, x0
618	mov	x20, x1
619	mov	x21, x30
620
621	adr	x0, reset_vect_table
622	msr	vbar_el1, x0
623	isb
624
625	set_sctlr_el1
626	isb
627
628#ifdef CFG_PAN
629	init_pan
630#endif
631
632	/* Enable aborts now that we can receive exceptions */
633	msr	daifclr, #DAIFBIT_ABT
634
635	bl	__get_core_pos
636	bl	enable_mmu
637
638	/* Set up SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
639	set_sp
640
641#ifdef CFG_MEMTAG
642	init_memtag_per_cpu
643#endif
644#ifdef CFG_CORE_PAUTH
645	init_pauth_per_cpu
646#endif
647
648	mov	x0, x19
649	mov	x1, x20
650#ifdef CFG_CORE_FFA
651	bl	boot_cpu_on_handler
652	b	thread_ffa_msg_wait
653#else
654	mov	x30, x21
655	b	boot_cpu_on_handler
656#endif
657END_FUNC cpu_on_handler
658DECLARE_KEEP_PAGER cpu_on_handler
659
660LOCAL_FUNC unhandled_cpu , :
661	wfi
662	b	unhandled_cpu
663END_FUNC unhandled_cpu
664
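	/*
	 * stack_tmp_rel holds a 32-bit link-time offset: adding the
	 * address of stack_tmp_rel to the value stored here yields
	 * stack_tmp - STACK_TMP_GUARD, which is what set_sp relies on.
	 */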
665LOCAL_DATA stack_tmp_rel , :
666	.word	stack_tmp - stack_tmp_rel - STACK_TMP_GUARD
667END_DATA stack_tmp_rel
668
669	/*
670	 * This macro verifies that a given vector doesn't exceed the
671	 * architectural limit of 32 instructions. It is meant to be placed
672	 * immediately after the last instruction in the vector. It takes the
673	 * vector entry as the parameter.
674	 */
675	.macro check_vector_size since
676	  .if (. - \since) > (32 * 4)
677	    .error "Vector exceeds 32 instructions"
678	  .endif
679	.endm
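
	/*
	 * 32 instructions * 4 bytes = 128 bytes, which matches the
	 * .align 7 spacing between the vector entries below.
	 */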
680
681	.section .identity_map, "ax", %progbits
682	.align	11
683LOCAL_FUNC reset_vect_table , :, .identity_map, , nobti
684	/* -----------------------------------------------------
685	 * Current EL with SP0 : 0x0 - 0x180
686	 * -----------------------------------------------------
687	 */
688SynchronousExceptionSP0:
689	b	SynchronousExceptionSP0
690	check_vector_size SynchronousExceptionSP0
691
692	.align	7
693IrqSP0:
694	b	IrqSP0
695	check_vector_size IrqSP0
696
697	.align	7
698FiqSP0:
699	b	FiqSP0
700	check_vector_size FiqSP0
701
702	.align	7
703SErrorSP0:
704	b	SErrorSP0
705	check_vector_size SErrorSP0
706
707	/* -----------------------------------------------------
708	 * Current EL with SPx : 0x200 - 0x380
709	 * -----------------------------------------------------
710	 */
711	.align	7
712SynchronousExceptionSPx:
713	b	SynchronousExceptionSPx
714	check_vector_size SynchronousExceptionSPx
715
716	.align	7
717IrqSPx:
718	b	IrqSPx
719	check_vector_size IrqSPx
720
721	.align	7
722FiqSPx:
723	b	FiqSPx
724	check_vector_size FiqSPx
725
726	.align	7
727SErrorSPx:
728	b	SErrorSPx
729	check_vector_size SErrorSPx
730
731	/* -----------------------------------------------------
732	 * Lower EL using AArch64 : 0x400 - 0x580
733	 * -----------------------------------------------------
734	 */
735	.align	7
736SynchronousExceptionA64:
737	b	SynchronousExceptionA64
738	check_vector_size SynchronousExceptionA64
739
740	.align	7
741IrqA64:
742	b	IrqA64
743	check_vector_size IrqA64
744
745	.align	7
746FiqA64:
747	b	FiqA64
748	check_vector_size FiqA64
749
750	.align	7
751SErrorA64:
752	b	SErrorA64
753	check_vector_size SErrorA64
754
755	/* -----------------------------------------------------
756	 * Lower EL using AArch32 : 0x600 - 0x780
757	 * -----------------------------------------------------
758	 */
759	.align	7
760SynchronousExceptionA32:
761	b	SynchronousExceptionA32
762	check_vector_size SynchronousExceptionA32
763
764	.align	7
765IrqA32:
766	b	IrqA32
767	check_vector_size IrqA32
768
769	.align	7
770FiqA32:
771	b	FiqA32
772	check_vector_size FiqA32
773
774	.align	7
775SErrorA32:
776	b	SErrorA32
777	check_vector_size SErrorA32
778
779END_FUNC reset_vect_table
780
781BTI(emit_aarch64_feature_1_and     GNU_PROPERTY_AARCH64_FEATURE_1_BTI)
782