1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2015-2022, Linaro Limited
4 * Copyright (c) 2021-2023, Arm Limited
5 */
6
7#include <platform_config.h>
8
9#include <arm64_macros.S>
10#include <arm.h>
11#include <asm.S>
12#include <generated/asm-defines.h>
13#include <keep.h>
14#include <kernel/thread_private.h>
15#include <mm/core_mmu.h>
16#include <sm/optee_smc.h>
17#include <sm/teesmc_opteed.h>
18#include <sm/teesmc_opteed_macros.h>
19
20	/*
21	 * Setup SP_EL0 and SP_EL1, SP will be set to SP_EL0.
22	 * SP_EL0 is assigned:
23	 *   stack_tmp + (cpu_id + 1) * stack_tmp_stride - STACK_TMP_GUARD
24	 * SP_EL1 is assigned thread_core_local[cpu_id]
25	 */
26	.macro set_sp
27		bl	__get_core_pos
28		cmp	x0, #CFG_TEE_CORE_NB_CORE
29		/* Unsupported CPU, park it before it breaks something */
30		bge	unhandled_cpu
31		add	x0, x0, #1
32		adr_l	x1, stack_tmp_stride
33		ldr	w1, [x1]
34		mul	x1, x0, x1
35
36		/* x0 = stack_tmp - STACK_TMP_GUARD */
37		adr_l	x2, stack_tmp_rel
38		ldr	w0, [x2]
39		add	x0, x0, x2
40
41		msr	spsel, #0
42		add	sp, x1, x0
43		bl	thread_get_core_local
44		msr	spsel, #1
45		mov	sp, x0
46		msr	spsel, #0
47	.endm
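	/*
	 * Worked example of the arithmetic above (values illustrative
	 * only): for cpu_id = 1 the macro ends up with
	 * SP_EL0 = stack_tmp + 2 * stack_tmp_stride - STACK_TMP_GUARD
	 * and SP_EL1 = &thread_core_local[1].
	 */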
48
49	.macro read_feat_mte reg
50		mrs	\reg, id_aa64pfr1_el1
51		ubfx	\reg, \reg, #ID_AA64PFR1_EL1_MTE_SHIFT, #4
52	.endm
53
54	.macro read_feat_pan reg
55		mrs	\reg, id_mmfr3_el1
56		ubfx	\reg, \reg, #ID_MMFR3_EL1_PAN_SHIFT, #4
57	.endm
58
59	.macro set_sctlr_el1
60		mrs	x0, sctlr_el1
61		orr	x0, x0, #SCTLR_I
62		orr	x0, x0, #SCTLR_SA
63		orr	x0, x0, #SCTLR_SPAN
64#if defined(CFG_CORE_RWDATA_NOEXEC)
65		orr	x0, x0, #SCTLR_WXN
66#endif
67#if defined(CFG_SCTLR_ALIGNMENT_CHECK)
68		orr	x0, x0, #SCTLR_A
69#else
70		bic	x0, x0, #SCTLR_A
71#endif
72#ifdef CFG_MEMTAG
73		read_feat_mte x1
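		/*
		 * ID_AA64PFR1_EL1.MTE values 0 and 1 mean FEAT_MTE2
		 * (allocation tags in memory) isn't implemented, so the
		 * check below leaves the tag checking configuration
		 * untouched in that case.
		 */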
74		cmp	w1, #1
75		b.ls	111f
76		orr	x0, x0, #(SCTLR_ATA | SCTLR_ATA0)
77		bic	x0, x0, #SCTLR_TCF_MASK
78		bic	x0, x0, #SCTLR_TCF0_MASK
79111:
80#endif
81#if defined(CFG_TA_PAUTH) && defined(CFG_TA_BTI)
82		orr	x0, x0, #SCTLR_BT0
83#endif
84#if defined(CFG_CORE_PAUTH) && defined(CFG_CORE_BTI)
85		orr	x0, x0, #SCTLR_BT1
86#endif
87		msr	sctlr_el1, x0
88	.endm
89
90	.macro init_memtag_per_cpu
91		read_feat_mte x0
92		cmp	w0, #1
93		b.ls	11f
94
95#ifdef CFG_TEE_CORE_DEBUG
96		/*
97		 * This together with GCR_EL1.RRND = 0 will make the tags
98		 * acquired with the irg instruction deterministic.
99		 */
100		mov_imm	x0, 0xcafe00
101		msr	rgsr_el1, x0
102		/* Avoid tag = 0x0 and 0xf */
103		mov	x0, #0
104#else
105		/*
106		 * Still avoid tags 0x0 and 0xf as tag 0x0 is used for
107		 * everything which isn't explicitly tagged. Set
108		 * GCR_EL1.RRND = 1 to allow an implementation-specific
109		 * method of generating the tags.
110		 */
111		mov	x0, #GCR_EL1_RRND
112#endif
113		orr	x0, x0, #1
114		orr	x0, x0, #(1 << 15)
115		msr	gcr_el1, x0
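		/*
		 * The two bits set just above are part of the
		 * GCR_EL1.Exclude mask, so the irg instruction never
		 * generates tag 0x0 or 0xf, matching the comments about
		 * avoiding those tags above.
		 */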
116
117		/*
118		 * Enable the tag checks on the current CPU.
119		 *
120		 * Depends on boot_init_memtag() having cleared tags for
121		 * TEE core memory. Strictly speaking that isn't required,
122		 * since addresses with the tag value 0b0000 use unchecked
123		 * accesses due to TCR_TCMA0.
124		 */
125		mrs	x0, tcr_el1
126		orr	x0, x0, #TCR_TBI0
127		orr	x0, x0, #TCR_TCMA0
128		msr	tcr_el1, x0
129
130		mrs	x0, sctlr_el1
131		orr	x0, x0, #SCTLR_TCF_SYNC
132		orr	x0, x0, #SCTLR_TCF0_SYNC
133		msr	sctlr_el1, x0
134
135		isb
13611:
137	.endm
138
139	.macro init_pauth_secondary_cpu
140		msr	spsel, #1
141		ldp	x0, x1, [sp, #THREAD_CORE_LOCAL_KEYS]
142		msr	spsel, #0
143		write_apiakeyhi x0
144		write_apiakeylo x1
145		mrs	x0, sctlr_el1
146		orr	x0, x0, #SCTLR_ENIA
147		msr	sctlr_el1, x0
148		isb
149	.endm
150
151	.macro init_pan
152		read_feat_pan x0
153		cmp	x0, #0
154		b.eq	1f
155		mrs	x0, sctlr_el1
156		bic	x0, x0, #SCTLR_SPAN
157		msr	sctlr_el1, x0
158		write_pan_enable
159	1:
160	.endm
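	/*
	 * Note on init_pan above: with SCTLR_EL1.SPAN cleared,
	 * PSTATE.PAN is set automatically on exception entry to EL1,
	 * while write_pan_enable (presumably a thin wrapper around
	 * setting PSTATE.PAN) enables PAN for the code running now.
	 */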
161
162FUNC _start , :
163	/*
164	 * Temporary copy of boot argument registers, will be passed to
165	 * boot_save_args() further down.
166	 */
167	mov	x19, x0
168	mov	x20, x1
169	mov	x21, x2
170	mov	x22, x3
171
172	adr	x0, reset_vect_table
173	msr	vbar_el1, x0
174	isb
175
176#ifdef CFG_PAN
177	init_pan
178#endif
179
180	set_sctlr_el1
181	isb
182
183#ifdef CFG_WITH_PAGER
184	/*
185	 * Move init code into correct location and move hashes to a
186	 * temporary safe location until the heap is initialized.
187	 *
188	 * The binary is built as:
189	 * [Pager code, rodata and data] : In correct location
190	 * [Init code and rodata] : Should be copied to __init_start
191	 * [struct boot_embdata + data] : Should be saved before
192	 * initializing pager, first uint32_t tells the length of the data
193	 */
194	adr	x0, __init_start	/* dst */
195	adr	x1, __data_end		/* src */
196	adr	x2, __init_end
197	sub	x2, x2, x0		/* init len */
198	ldr	w4, [x1, x2]		/* length of hashes etc */
199	add	x2, x2, x4		/* length of init and hashes etc */
200	/* Copy backwards (as memmove) in case we're overlapping */
201	add	x0, x0, x2		/* __init_start + len */
202	add	x1, x1, x2		/* __data_end + len */
203	adr_l	x3, boot_cached_mem_end
204	str	x0, [x3]
205	adr	x2, __init_start
206copy_init:
207	ldp	x3, x4, [x1, #-16]!
208	stp	x3, x4, [x0, #-16]!
209	cmp	x0, x2
210	b.gt	copy_init
211#else
212	/*
213	 * The binary is built as:
214	 * [Core, rodata and data] : In correct location
215	 * [struct boot_embdata + data] : Should be moved to right before
216	 * __vcore_free_end, the first uint32_t tells the length of the
217	 * struct + data
218	 */
219	adr_l	x1, __data_end		/* src */
220	ldr	w2, [x1]		/* struct boot_embdata::total_len */
221	/* dst */
222	adr_l	x0, __vcore_free_end
223	sub	x0, x0, x2
224	/* round down to beginning of page */
225	bic	x0, x0, #(SMALL_PAGE_SIZE - 1)
226	adr_l	x3, boot_embdata_ptr
227	str	x0, [x3]
228
229	/* Copy backwards (as memmove) in case we're overlapping */
230	add	x1, x1, x2
231	add	x2, x0, x2
232	adr_l	x3, boot_cached_mem_end
233	str	x2, [x3]
234
235copy_init:
236	ldp	x3, x4, [x1, #-16]!
237	stp	x3, x4, [x2, #-16]!
238	cmp	x2, x0
239	b.gt	copy_init
240#endif
241
242	/*
243	 * Clear .bss, this code obviously depends on the linker keeping
244	 * start/end of .bss at least 8 byte aligned.
245	 */
246	adr_l	x0, __bss_start
247	adr_l	x1, __bss_end
248clear_bss:
249	str	xzr, [x0], #8
250	cmp	x0, x1
251	b.lt	clear_bss
252
253#ifdef CFG_NS_VIRTUALIZATION
254	/*
255	 * Clear .nex_bss, this code obviously depends on the linker keeping
256	 * start/end of .nex_bss at least 8 byte aligned.
257	 */
258	adr_l	x0, __nex_bss_start
259	adr_l	x1, __nex_bss_end
260clear_nex_bss:
261	str	xzr, [x0], #8
262	cmp	x0, x1
263	b.lt	clear_nex_bss
264#endif
265
266
267#if defined(CFG_CORE_PHYS_RELOCATABLE)
268	/*
269	 * Save the base physical address, it will not change after this
270	 * point.
271	 */
272	adr_l	x2, core_mmu_tee_load_pa
273	adr	x1, _start		/* Load address */
274	str	x1, [x2]
275
276	mov_imm	x0, TEE_LOAD_ADDR	/* Compiled load address */
277	sub	x0, x1, x0		/* Relocation offset */
278
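	/*
	 * A zero offset means we execute at the compiled load address
	 * and nothing needs patching, otherwise relocate() adds the
	 * offset to every absolute 64-bit pointer recorded in the
	 * relocation data.
	 */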
279	cbz	x0, 1f
280	bl	relocate
2811:
282#endif
283
284	/* Setup SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
285	set_sp
286
287	bl	thread_init_thread_core_local
288
289	/* Enable aborts now that we can receive exceptions */
290	msr	daifclr, #DAIFBIT_ABT
291
292	/*
293	 * Clean and invalidate the dcache for all memory used during
294	 * initialization to avoid nasty surprises when the cache is turned
295	 * on. We must not invalidate memory not used by OP-TEE since we may
296	 * invalidate entries used by, for instance, ARM Trusted Firmware.
297	 */
298	adr_l	x0, __text_start
299	adr_l	x1, boot_cached_mem_end
300	ldr	x1, [x1]
301	sub	x1, x1, x0
302	bl	dcache_cleaninv_range
303
304	/* Enable Console */
305	bl	console_init
306
307	mov	x0, x19
308	mov	x1, x20
309	mov	x2, x21
310	mov	x3, x22
311	mov	x4, xzr
312	bl	boot_save_args
313
314#ifdef CFG_WITH_PAGER
315	adr_l	x0, __init_end	/* pointer to boot_embdata */
316	ldr	w1, [x0]	/* struct boot_embdata::total_len */
317	add	x0, x0, x1
318	add	x0, x0, #0xfff	/* round up */
319	bic	x0, x0, #0xfff  /* to next page */
320	mov_imm x1, (TEE_RAM_PH_SIZE + TEE_RAM_START)
321	mov	x2, x1
322#else
323	adr_l	x0, __vcore_free_start
324	adr_l	x1, boot_embdata_ptr
325	ldr	x1, [x1]
326	adr_l	x2, __vcore_free_end
327#endif
328	bl	boot_mem_init
329
330#ifdef CFG_MEMTAG
331	/*
332	 * If FEAT_MTE2 is available, initialize the memtag callbacks.
333	 * Tags for OP-TEE core memory are then cleared to make it safe to
334	 * enable MEMTAG below.
335	 */
336	bl	boot_init_memtag
337#endif
338
339#ifdef CFG_CORE_ASLR
340	bl	get_aslr_seed
341#ifdef CFG_CORE_ASLR_SEED
342	mov_imm	x0, CFG_CORE_ASLR_SEED
343#endif
344#else
345	mov	x0, #0
346#endif
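	/*
	 * x0 now holds the ASLR seed: a fixed CFG_CORE_ASLR_SEED when
	 * that is configured, zero when ASLR is disabled. It is passed
	 * to core_init_mmu_map() together with boot_mmu_config below.
	 */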
347
348	adr	x1, boot_mmu_config
349	bl	core_init_mmu_map
350
351#ifdef CFG_CORE_ASLR
352	/*
353	 * Process the relocation information again, this time updating for
354	 * the virtual map offset. We do this now, before the MMU is enabled,
355	 * as some of the memory will become write protected.
356	 */
357	ldr	x0, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
358	cbz	x0, 1f
359	/*
360	 * Update boot_cached_mem_end address with load offset since it was
361	 * calculated before relocation.
362	 */
363	adr_l	x5, boot_cached_mem_end
364	ldr	x6, [x5]
365	add	x6, x6, x0
366	str	x6, [x5]
367	adr	x1, _start		/* Load address */
368	bl	relocate
3691:
370#endif
371
372	bl	__get_core_pos
373	bl	enable_mmu
374#ifdef CFG_CORE_ASLR
375	adr_l	x0, boot_mmu_config
376	ldr	x0, [x0, #CORE_MMU_CONFIG_MAP_OFFSET]
377	bl	boot_mem_relocate
378	/*
379	 * Reinitialize console, since register_serial_console() has
380	 * previously registered a PA and with ASLR the VA is different
381	 * from the PA.
382	 */
383	bl	console_init
384#endif
385
386#ifdef CFG_MEMTAG
387	bl	boot_clear_memtag
388#endif
389
390#ifdef CFG_NS_VIRTUALIZATION
391	/*
392	 * Initialize the partition table for each partition to
393	 * default_partition, which has now been relocated to a different VA
394	 */
395	bl	core_mmu_set_default_prtn_tbl
396#endif
397
398	bl	boot_init_primary_early
399
400#ifdef CFG_MEMTAG
401	init_memtag_per_cpu
402#endif
403
404#ifndef CFG_NS_VIRTUALIZATION
405	mov	x23, sp
406	adr_l	x0, threads
407	ldr	x0, [x0, #THREAD_CTX_STACK_VA_END]
408	mov	sp, x0
409	bl	thread_get_core_local
410	mov	x24, x0
411	str	wzr, [x24, #THREAD_CORE_LOCAL_FLAGS]
412#endif
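	/*
	 * Without CFG_NS_VIRTUALIZATION the temporary stack pointer is
	 * now parked in x23 and SP points into the boot thread's stack
	 * (threads[0]), presumably so boot_init_primary_late() runs on
	 * a full thread stack. The temporary stack and THREAD_CLF_TMP
	 * are restored further down.
	 */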
413	bl	boot_init_primary_late
414#ifdef CFG_CORE_PAUTH
415	adr_l	x0, threads
416	ldp	x1, x2, [x0, #THREAD_CTX_KEYS]
417	write_apiakeyhi x1
418	write_apiakeylo x2
419	mrs	x0, sctlr_el1
420	orr	x0, x0, #SCTLR_ENIA
421	msr	sctlr_el1, x0
422	isb
423#endif
424	bl	boot_init_primary_final
425
426#ifndef CFG_NS_VIRTUALIZATION
427	mov	x0, #THREAD_CLF_TMP
428	str	w0, [x24, #THREAD_CORE_LOCAL_FLAGS]
429	mov	sp, x23
430#ifdef CFG_CORE_PAUTH
431	ldp	x0, x1, [x24, #THREAD_CORE_LOCAL_KEYS]
432	write_apiakeyhi x0
433	write_apiakeylo x1
434	isb
435#endif
436#endif
437
438#ifdef _CFG_CORE_STACK_PROTECTOR
439	/* Update stack canary value */
440	sub	sp, sp, #0x10
441	mov	x0, sp
442	mov	x1, #1
443	mov	x2, #0x8
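	/*
	 * Presumably plat_get_random_stack_canaries(buf, ncan, size):
	 * x0 = a 16-byte scratch buffer on the stack, x1 = one canary,
	 * x2 = 8 bytes per canary. The generated value is then copied
	 * into __stack_chk_guard.
	 */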
444	bl	plat_get_random_stack_canaries
445	ldr	x0, [sp]
446	adr_l	x5, __stack_chk_guard
447	str	x0, [x5]
448	add	sp, sp, #0x10
449#endif
450
451	/*
452	 * In case we've touched memory that secondary CPUs will use before
453	 * they have turned on their D-cache, clean and invalidate the
454	 * D-cache before exiting to normal world.
455	 */
456	adr_l	x0, __text_start
457	adr_l	x1, boot_cached_mem_end
458	ldr	x1, [x1]
459	sub	x1, x1, x0
460	bl	dcache_cleaninv_range
461
462
463	/*
464	 * Clear current thread id now to allow the thread to be reused on
465	 * next entry. Matches the thread_init_boot_thread() call in
466	 * boot.c.
467	 */
468#ifndef CFG_NS_VIRTUALIZATION
469	bl	thread_clr_boot_thread
470#endif
471
472#ifdef CFG_CORE_FFA
473	adr	x0, cpu_on_handler
474	/*
475	 * Compensate for the virtual map offset since cpu_on_handler() is
476	 * called with MMU off.
477	 */
478	ldr	x1, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
479	sub	x0, x0, x1
480	bl	thread_spmc_register_secondary_ep
481	b	thread_ffa_msg_wait
482#else
483	/*
484	 * Pass the vector address returned from main_init. Compensate for
485	 * the virtual map offset since cpu_on_handler() is called with the
486	 * MMU off.
487	 */
488	ldr	x0, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
489	adr	x1, thread_vector_table
490	sub	x1, x1, x0
491	mov	x0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
492	smc	#0
493	/* SMC should not return */
494	panic_at_smc_return
495#endif
496END_FUNC _start
497DECLARE_KEEP_INIT _start
498
499#ifndef CFG_WITH_PAGER
500	.section .identity_map.data
501	.balign	8
502LOCAL_DATA boot_embdata_ptr , :
503	.skip	8
504END_DATA boot_embdata_ptr
505#endif
506
507#if defined(CFG_CORE_ASLR) || defined(CFG_CORE_PHYS_RELOCATABLE)
508LOCAL_FUNC relocate , :
509	/*
510	 * x0 holds the relocation offset
511	 * x1 holds the load address
512	 */
513#ifdef CFG_WITH_PAGER
514	adr_l	x6, __init_end
515#else
516	adr_l	x6, boot_embdata_ptr
517	ldr	x6, [x6]
518#endif
519	ldp	w2, w3, [x6, #BOOT_EMBDATA_RELOC_OFFSET]
520
521	add	x2, x2, x6	/* start of relocations */
522	add	x3, x3, x2	/* end of relocations */
523
524	/*
525	 * Relocations are not formatted as Rela64, instead they are in a
526	 * compressed format created by get_reloc_bin() in
527	 * scripts/gen_tee_bin.py
528	 *
529	 * All the R_AARCH64_RELATIVE relocations are translated into a
530	 * list of 32-bit offsets from TEE_LOAD_ADDR. Each offset points at
531	 * a 64-bit value which is increased with the load offset.
532	 */
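	/*
	 * In other words, with x1 holding the load address and x0 the
	 * relocation offset, the loop below does for each 32-bit
	 * entry E in the list:
	 *
	 *     *(uint64_t *)(x1 + E) += x0
	 */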
533
534#ifdef CFG_WITH_PAGER
535	/*
536	 * With pager enabled we can only relocate the pager and init
537	 * parts, the rest has to be done when a page is populated.
538	 */
539	sub	x6, x6, x1
540#endif
541
542	b	2f
543	/* Loop over the relocation addresses and process all entries */
5441:	ldr	w4, [x2], #4
545#ifdef CFG_WITH_PAGER
546	/* Skip addresses that are too large */
547	cmp	x4, x6
548	b.ge	2f
549#endif
550	add	x4, x4, x1
551	ldr	x5, [x4]
552	add	x5, x5, x0
553	str	x5, [x4]
554
5552:	cmp	x2, x3
556	b.ne	1b
557
558	ret
559END_FUNC relocate
560#endif
561
562/*
563 * void enable_mmu(unsigned long core_pos);
564 *
565 * This function depends on being mapped within the identity map, where
566 * the physical and virtual addresses are the same. Once the MMU has been
567 * enabled, the stack pointers and the return address are updated with the
568 * virtual map offset so that execution continues at the new offset.
569 */
570LOCAL_FUNC enable_mmu , : , .identity_map
571	adr	x1, boot_mmu_config
572	load_xregs x1, 0, 2, 6
573	/*
574	 * x0 = core_pos
575	 * x2 = tcr_el1
576	 * x3 = mair_el1
577	 * x4 = ttbr0_el1_base
578	 * x5 = ttbr0_core_offset
579	 * x6 = load_offset
580	 */
581	msr	tcr_el1, x2
582	msr	mair_el1, x3
583
584	/*
585	 * ttbr0_el1 = ttbr0_el1_base + ttbr0_core_offset * core_pos
586	 */
587	madd	x1, x5, x0, x4
588	msr	ttbr0_el1, x1
589	msr	ttbr1_el1, xzr
590	isb
591
592	/* Invalidate TLB */
593	tlbi	vmalle1
594
595	/*
596	 * Make sure translation table writes have drained into memory and
597	 * the TLB invalidation is complete.
598	 */
599	dsb	sy
600	isb
601
602	/* Enable the MMU */
603	mrs	x1, sctlr_el1
604	orr	x1, x1, #SCTLR_M
605	msr	sctlr_el1, x1
606	isb
607
608	/* Update vbar */
609	mrs	x1, vbar_el1
610	add	x1, x1, x6
611	msr	vbar_el1, x1
612	isb
613
614	/* Invalidate instruction cache and branch predictor */
615	ic	iallu
616	isb
617
618	/* Enable I and D cache */
619	mrs	x1, sctlr_el1
620	orr	x1, x1, #SCTLR_I
621	orr	x1, x1, #SCTLR_C
622	msr	sctlr_el1, x1
623	isb
624
625	/* Adjust stack pointers and return address */
626	msr	spsel, #1
627	add	sp, sp, x6
628	msr	spsel, #0
629	add	sp, sp, x6
630	add	x30, x30, x6
631
632	ret
633END_FUNC enable_mmu
634
635	.section .identity_map.data
636	.balign	8
637DATA boot_mmu_config , : /* struct core_mmu_config */
638	.skip	CORE_MMU_CONFIG_SIZE
639END_DATA boot_mmu_config
640
641FUNC cpu_on_handler , :
642	mov	x19, x0
643	mov	x20, x1
644	mov	x21, x30
645
646	adr	x0, reset_vect_table
647	msr	vbar_el1, x0
648	isb
649
650	set_sctlr_el1
651	isb
652
653#ifdef CFG_PAN
654	init_pan
655#endif
656
657	/* Enable aborts now that we can receive exceptions */
658	msr	daifclr, #DAIFBIT_ABT
659
660	bl	__get_core_pos
661	bl	enable_mmu
662
663	/* Setup SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
664	set_sp
665
666#ifdef CFG_MEMTAG
667	init_memtag_per_cpu
668#endif
669#ifdef CFG_CORE_PAUTH
670	init_pauth_secondary_cpu
671#endif
672
673	mov	x0, x19
674	mov	x1, x20
675#ifdef CFG_CORE_FFA
676	bl	boot_cpu_on_handler
677	b	thread_ffa_msg_wait
678#else
679	mov	x30, x21
680	b	boot_cpu_on_handler
681#endif
682END_FUNC cpu_on_handler
683DECLARE_KEEP_PAGER cpu_on_handler
684
685LOCAL_FUNC unhandled_cpu , :
686	wfi
687	b	unhandled_cpu
688END_FUNC unhandled_cpu
689
690LOCAL_DATA stack_tmp_rel , :
691	.word	stack_tmp - stack_tmp_rel - STACK_TMP_GUARD
692END_DATA stack_tmp_rel
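
	/*
	 * The word in stack_tmp_rel is the distance from stack_tmp_rel
	 * itself to stack_tmp - STACK_TMP_GUARD. set_sp adds the
	 * runtime address of stack_tmp_rel to it, which yields the
	 * correct stack base no matter where the binary was loaded.
	 */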
693
694	/*
695	 * This macro verifies that a given vector doesn't exceed the
696	 * architectural limit of 32 instructions. It is meant to be placed
697	 * immediately after the last instruction in the vector and takes the
698	 * vector entry label as its parameter.
699	 */
700	.macro check_vector_size since
701	  .if (. - \since) > (32 * 4)
702	    .error "Vector exceeds 32 instructions"
703	  .endif
704	.endm
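
	/*
	 * 32 instructions of 4 bytes each is 128 bytes, matching the
	 * .align 7 spacing between the vector entries below.
	 */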
705
706	.section .identity_map, "ax", %progbits
707	.align	11
708LOCAL_FUNC reset_vect_table , :, .identity_map, , nobti
709	/* -----------------------------------------------------
710	 * Current EL with SP0 : 0x0 - 0x180
711	 * -----------------------------------------------------
712	 */
713SynchronousExceptionSP0:
714	b	SynchronousExceptionSP0
715	check_vector_size SynchronousExceptionSP0
716
717	.align	7
718IrqSP0:
719	b	IrqSP0
720	check_vector_size IrqSP0
721
722	.align	7
723FiqSP0:
724	b	FiqSP0
725	check_vector_size FiqSP0
726
727	.align	7
728SErrorSP0:
729	b	SErrorSP0
730	check_vector_size SErrorSP0
731
732	/* -----------------------------------------------------
733	 * Current EL with SPx: 0x200 - 0x380
734	 * -----------------------------------------------------
735	 */
736	.align	7
737SynchronousExceptionSPx:
738	b	SynchronousExceptionSPx
739	check_vector_size SynchronousExceptionSPx
740
741	.align	7
742IrqSPx:
743	b	IrqSPx
744	check_vector_size IrqSPx
745
746	.align	7
747FiqSPx:
748	b	FiqSPx
749	check_vector_size FiqSPx
750
751	.align	7
752SErrorSPx:
753	b	SErrorSPx
754	check_vector_size SErrorSPx
755
756	/* -----------------------------------------------------
757	 * Lower EL using AArch64 : 0x400 - 0x580
758	 * -----------------------------------------------------
759	 */
760	.align	7
761SynchronousExceptionA64:
762	b	SynchronousExceptionA64
763	check_vector_size SynchronousExceptionA64
764
765	.align	7
766IrqA64:
767	b	IrqA64
768	check_vector_size IrqA64
769
770	.align	7
771FiqA64:
772	b	FiqA64
773	check_vector_size FiqA64
774
775	.align	7
776SErrorA64:
777	b	SErrorA64
778	check_vector_size SErrorA64
779
780	/* -----------------------------------------------------
781	 * Lower EL using AArch32 : 0x600 - 0x780
782	 * -----------------------------------------------------
783	 */
784	.align	7
785SynchronousExceptionA32:
786	b	SynchronousExceptionA32
787	check_vector_size SynchronousExceptionA32
788
789	.align	7
790IrqA32:
791	b	IrqA32
792	check_vector_size IrqA32
793
794	.align	7
795FiqA32:
796	b	FiqA32
797	check_vector_size FiqA32
798
799	.align	7
800SErrorA32:
801	b	SErrorA32
802	check_vector_size SErrorA32
803
804END_FUNC reset_vect_table
805
806BTI(emit_aarch64_feature_1_and     GNU_PROPERTY_AARCH64_FEATURE_1_BTI)
807