/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015-2022, Linaro Limited
 * Copyright (c) 2021-2023, Arm Limited
 */

#include <platform_config.h>

#include <arm64_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

	/*
	 * Set up SP_EL0 and SP_EL1, SP will be set to SP_EL0.
	 * SP_EL0 is assigned:
	 *   stack_tmp + (cpu_id + 1) * stack_tmp_stride - STACK_TMP_GUARD
	 * SP_EL1 is assigned thread_core_local[cpu_id]
	 */
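	/*
	 * Roughly, as a C sketch (for illustration only; the same symbols
	 * are used by the code below):
	 *
	 *   vaddr_t sp_el0 = (vaddr_t)stack_tmp +
	 *		      (cpu_id + 1) * stack_tmp_stride -
	 *		      STACK_TMP_GUARD;
	 *   vaddr_t sp_el1 = (vaddr_t)thread_get_core_local();
	 */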
	.macro set_sp
		bl	__get_core_pos
		cmp	x0, #CFG_TEE_CORE_NB_CORE
		/* Unsupported CPU, park it before it breaks something */
		bge	unhandled_cpu
		add	x0, x0, #1
		adr_l	x1, stack_tmp_stride
		ldr	w1, [x1]
		mul	x1, x0, x1

		/* x0 = stack_tmp - STACK_TMP_GUARD */
		adr_l	x2, stack_tmp_rel
		ldr	w0, [x2]
		add	x0, x0, x2

		msr	spsel, #0
		add	sp, x1, x0
		bl	thread_get_core_local
		msr	spsel, #1
		mov	sp, x0
		msr	spsel, #0
	.endm

	.macro read_feat_mte reg
		mrs	\reg, id_aa64pfr1_el1
		ubfx	\reg, \reg, #ID_AA64PFR1_EL1_MTE_SHIFT, #4
	.endm
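	/*
	 * ID_AA64PFR1_EL1.MTE is a 4-bit field: 0 means MTE is not
	 * implemented, 1 means instruction-only MTE and 2 or higher means
	 * full memory tagging (FEAT_MTE2). The "cmp #1; b.ls" sequences
	 * below therefore skip the MTE setup unless FEAT_MTE2 or higher is
	 * present.
	 */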

	.macro set_sctlr_el1
		mrs	x0, sctlr_el1
		orr	x0, x0, #SCTLR_I
		orr	x0, x0, #SCTLR_SA
		orr	x0, x0, #SCTLR_SPAN
#if defined(CFG_CORE_RWDATA_NOEXEC)
		orr	x0, x0, #SCTLR_WXN
#endif
#if defined(CFG_SCTLR_ALIGNMENT_CHECK)
		orr	x0, x0, #SCTLR_A
#else
		bic	x0, x0, #SCTLR_A
#endif
#ifdef CFG_MEMTAG
		read_feat_mte x1
		cmp	w1, #1
		b.ls	111f
		orr	x0, x0, #(SCTLR_ATA | SCTLR_ATA0)
		bic	x0, x0, #SCTLR_TCF_MASK
		bic	x0, x0, #SCTLR_TCF0_MASK
111:
#endif
#if defined(CFG_TA_PAUTH) && defined(CFG_TA_BTI)
		orr	x0, x0, #SCTLR_BT0
#endif
#if defined(CFG_CORE_PAUTH) && defined(CFG_CORE_BTI)
		orr	x0, x0, #SCTLR_BT1
#endif
		msr	sctlr_el1, x0
	.endm

	.macro init_memtag_per_cpu
		read_feat_mte x0
		cmp	w0, #1
		b.ls	11f

#ifdef CFG_TEE_CORE_DEBUG
		/*
		 * This together with GCR_EL1.RRND = 0 will make the tags
		 * acquired with the irg instruction deterministic.
		 */
		mov_imm	x0, 0xcafe00
		msr	rgsr_el1, x0
		/* Avoid tag = 0x0 and 0xf */
		mov	x0, #0
#else
		/*
		 * Still avoid tag = 0x0 and 0xf as we use that tag for
		 * everything which isn't explicitly tagged. Set
		 * GCR_EL1.RRND = 1 to allow an implementation specific
		 * method of generating the tags.
		 */
		mov	x0, #GCR_EL1_RRND
#endif
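		/*
		 * Exclude tags 0x0 and 0xf from generation by setting bits
		 * 0 and 15 in the GCR_EL1.Exclude field.
		 */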
		orr	x0, x0, #1
		orr	x0, x0, #(1 << 15)
		msr	gcr_el1, x0

		/*
		 * Enable the tag checks on the current CPU.
		 *
		 * Depends on boot_init_memtag() having cleared tags for
		 * TEE core memory. Well, not really, addresses with the
		 * tag value 0b0000 will use unchecked access due to
		 * TCR_TCMA0.
		 */
		mrs	x0, tcr_el1
		orr	x0, x0, #TCR_TBI0
		orr	x0, x0, #TCR_TCMA0
		msr	tcr_el1, x0

		mrs	x0, sctlr_el1
		orr	x0, x0, #SCTLR_TCF_SYNC
		orr	x0, x0, #SCTLR_TCF0_SYNC
		msr	sctlr_el1, x0

		isb
11:
	.endm

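	/*
	 * Load the per-core PAuth instruction key (APIAKey) from
	 * thread_core_local (reached via SP_EL1) and enable instruction
	 * address authentication in SCTLR_EL1.
	 */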
	.macro init_pauth_per_cpu
		msr	spsel, #1
		ldp	x0, x1, [sp, #THREAD_CORE_LOCAL_KEYS]
		msr	spsel, #0
		write_apiakeyhi x0
		write_apiakeylo x1
		mrs	x0, sctlr_el1
		orr	x0, x0, #SCTLR_ENIA
		msr	sctlr_el1, x0
		isb
	.endm

FUNC _start , :
	/*
	 * Register use:
	 * x0	- CFG_CORE_FFA=y && CFG_CORE_SEL2_SPMC=n:
	 *	  if non-NULL holds the TOS FW config [1] address
	 *	- CFG_CORE_FFA=y && CFG_CORE_SEL2_SPMC=y:
	 *	  address of FF-A Boot Information Blob
	 *	- CFG_CORE_FFA=n:
	 *	  if non-NULL holds the pagable part address
	 * x2	- CFG_CORE_SEL2_SPMC=n:
	 *	  if non-NULL holds the system DTB address
	 *
	 * x19 - saved x0
	 * x20 - saved x2
	 *
	 * [1] A TF-A concept: TOS_FW_CONFIG - Trusted OS Firmware
	 * configuration file. Used by Trusted OS (BL32), that is, OP-TEE
	 * here.
	 */
	mov	x19, x0
#if defined(CFG_DT_ADDR)
	ldr	x20, =CFG_DT_ADDR
#else
	mov	x20, x2		/* Save DT address */
#endif

	adr	x0, reset_vect_table
	msr	vbar_el1, x0
	isb

	set_sctlr_el1
	isb

#ifdef CFG_WITH_PAGER
	/*
	 * Move init code into correct location and move hashes to a
	 * temporary safe location until the heap is initialized.
	 *
	 * The binary is built as:
	 * [Pager code, rodata and data] : In correct location
	 * [Init code and rodata] : Should be copied to __init_start
	 * [struct boot_embdata + data] : Should be saved before
	 * initializing pager, first uint32_t tells the length of the data
	 */
	adr	x0, __init_start	/* dst */
	adr	x1, __data_end		/* src */
	adr	x2, __init_end
	sub	x2, x2, x0		/* init len */
	ldr	w4, [x1, x2]		/* length of hashes etc */
	add	x2, x2, x4		/* length of init and hashes etc */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	x0, x0, x2		/* __init_start + len */
	add	x1, x1, x2		/* __data_end + len */
	adr	x3, cached_mem_end
	str	x0, [x3]
	adr	x2, __init_start
copy_init:
	ldp	x3, x4, [x1, #-16]!
	stp	x3, x4, [x0, #-16]!
	cmp	x0, x2
	b.gt	copy_init
#else
	/*
	 * The binary is built as:
	 * [Core, rodata and data] : In correct location
	 * [struct boot_embdata + data] : Should be moved to __end, first
	 * uint32_t tells the length of the struct + data
	 */
	adr_l	x0, __end		/* dst */
	adr_l	x1, __data_end		/* src */
	ldr	w2, [x1]		/* struct boot_embdata::total_len */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	x0, x0, x2
	add	x1, x1, x2
	adr	x3, cached_mem_end
	str	x0, [x3]
	adr_l	x2, __end

copy_init:
	ldp	x3, x4, [x1, #-16]!
	stp	x3, x4, [x0, #-16]!
	cmp	x0, x2
	b.gt	copy_init
#endif
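	/*
	 * Both copy loops above are essentially a descending memmove(),
	 * roughly (a C sketch for illustration; dst, src and len stand for
	 * the values computed above, and the assembly moves 16 bytes per
	 * iteration with ldp/stp):
	 *
	 *   uint64_t *d = (uint64_t *)(dst + len);
	 *   uint64_t *s = (uint64_t *)(src + len);
	 *
	 *   while ((uint8_t *)d > dst)
	 *           *--d = *--s;
	 *
	 * Copying from the highest address downwards is what makes the
	 * copy safe when the regions overlap with dst > src.
	 */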

	/*
	 * Clear .bss. This code obviously depends on the linker keeping
	 * the start/end of .bss at least 8 byte aligned.
	 */
	adr_l	x0, __bss_start
	adr_l	x1, __bss_end
clear_bss:
	str	xzr, [x0], #8
	cmp	x0, x1
	b.lt	clear_bss

#ifdef CFG_NS_VIRTUALIZATION
	/*
	 * Clear .nex_bss. This code obviously depends on the linker
	 * keeping the start/end of .nex_bss at least 8 byte aligned.
	 */
	adr_l	x0, __nex_bss_start
	adr_l	x1, __nex_bss_end
clear_nex_bss:
	str	xzr, [x0], #8
	cmp	x0, x1
	b.lt	clear_nex_bss
#endif

#if defined(CFG_CORE_PHYS_RELOCATABLE)
	/*
	 * Save the base physical address, it will not change after this
	 * point.
	 */
	adr_l	x2, core_mmu_tee_load_pa
	adr	x1, _start		/* Load address */
	str	x1, [x2]

	mov_imm	x0, TEE_LOAD_ADDR	/* Compiled load address */
	sub	x0, x1, x0		/* Relocation offset */

	cbz	x0, 1f
	bl	relocate
1:
#endif

	/* Set up SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
	set_sp

	bl	thread_init_thread_core_local

	/* Enable aborts now that we can receive exceptions */
	msr	daifclr, #DAIFBIT_ABT

	/*
	 * Invalidate dcache for all memory used during initialization to
	 * avoid nasty surprises when the cache is turned on. We must not
	 * invalidate memory not used by OP-TEE since we may invalidate
	 * entries used by, for instance, ARM Trusted Firmware.
	 */
	adr_l	x0, __text_start
	ldr	x1, cached_mem_end
	sub	x1, x1, x0
	bl	dcache_cleaninv_range

	/* Enable Console */
	bl	console_init

#if defined(CFG_CORE_SEL2_SPMC) && defined(CFG_CORE_PHYS_RELOCATABLE)
	mov	x0, x19		/* boot info */
	bl	boot_save_boot_info
#endif

#ifdef CFG_MEMTAG
	/*
	 * If FEAT_MTE2 is available, initialize the memtag callbacks.
	 * Tags for OP-TEE core memory are then cleared to make it safe to
	 * enable MEMTAG below.
	 */
	bl	boot_init_memtag
#endif

#ifdef CFG_CORE_ASLR
	mov	x0, x20		/* DT address */
	bl	get_aslr_seed
#else
	mov	x0, #0
#endif

	adr	x1, boot_mmu_config
	bl	core_init_mmu_map

#ifdef CFG_CORE_ASLR
	/*
	 * Process relocation information again, updating for the virtual
	 * map offset. We're doing this now before the MMU is enabled as
	 * some of the memory will become write protected.
	 */
	ldr	x0, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
	cbz	x0, 1f
	/*
	 * Update cached_mem_end address with load offset since it was
	 * calculated before relocation.
	 */
	adr	x5, cached_mem_end
	ldr	x6, [x5]
	add	x6, x6, x0
	str	x6, [x5]
	adr	x1, _start		/* Load address */
	bl	relocate
1:
#endif

	bl	__get_core_pos
	bl	enable_mmu
#ifdef CFG_CORE_ASLR
	/*
	 * Reinitialize console, since register_serial_console() has
	 * previously registered a PA and with ASLR the VA is different
	 * from the PA.
	 */
	bl	console_init
#endif

#ifdef CFG_NS_VIRTUALIZATION
	/*
	 * Initialize partition tables for each partition to
	 * default_partition, which has now been relocated to a different
	 * VA.
	 */
	bl	core_mmu_set_default_prtn_tbl
#endif

#ifdef CFG_CORE_SEL1_SPMC
	mov	x0, xzr		/* pager not used */
#else
	mov	x0, x19		/* pagable part address */
#endif
	mov	x1, #-1
	bl	boot_init_primary_early

#ifdef CFG_MEMTAG
	init_memtag_per_cpu
#endif

#ifndef CFG_NS_VIRTUALIZATION
	mov	x21, sp
	adr_l	x0, threads
	ldr	x0, [x0, #THREAD_CTX_STACK_VA_END]
	mov	sp, x0
	bl	thread_get_core_local
	mov	x22, x0
	str	wzr, [x22, #THREAD_CORE_LOCAL_FLAGS]
#endif
	mov	x0, x20		/* DT address also known as HW_CONFIG */
#ifdef CFG_CORE_SEL1_SPMC
	mov	x1, x19		/* TOS_FW_CONFIG DT address */
#else
	mov	x1, xzr		/* unused */
#endif
	bl	boot_init_primary_late
#ifdef CFG_CORE_PAUTH
	init_pauth_per_cpu
#endif

#ifndef CFG_NS_VIRTUALIZATION
	mov	x0, #THREAD_CLF_TMP
	str	w0, [x22, #THREAD_CORE_LOCAL_FLAGS]
	mov	sp, x21
#endif

#ifdef _CFG_CORE_STACK_PROTECTOR
	/* Update stack canary value */
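	/*
	 * Ask the platform for one 8-byte canary in a temporary stack
	 * buffer: x0 = output buffer, x1 = number of canaries, x2 = size
	 * of each canary (mirroring how the arguments are set up below).
	 */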
	sub	sp, sp, #0x10
	mov	x0, sp
	mov	x1, #1
	mov	x2, #0x8
	bl	plat_get_random_stack_canaries
	ldr	x0, [sp]
	adr_l	x5, __stack_chk_guard
	str	x0, [x5]
	add	sp, sp, #0x10
#endif

	/*
	 * In case we've touched memory that secondary CPUs will use before
	 * they have turned on their D-cache, clean and invalidate the
	 * D-cache before exiting to normal world.
	 */
	adr_l	x0, __text_start
	ldr	x1, cached_mem_end
	sub	x1, x1, x0
	bl	dcache_cleaninv_range

	/*
	 * Clear current thread id now to allow the thread to be reused on
	 * next entry. Matches thread_init_boot_thread() in boot.c.
	 */
#ifndef CFG_NS_VIRTUALIZATION
	bl	thread_clr_boot_thread
#endif

#ifdef CFG_CORE_FFA
	adr	x0, cpu_on_handler
	/*
	 * Compensate for the virtual map offset since cpu_on_handler() is
	 * called with MMU off.
	 */
	ldr	x1, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
	sub	x0, x0, x1
	bl	thread_spmc_register_secondary_ep
	b	thread_ffa_msg_wait
#else
	/*
	 * Pass the vector address returned from main_init. Compensate for
	 * the virtual map offset since cpu_on_handler() is called with
	 * MMU off.
	 */
	ldr	x0, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
	adr	x1, thread_vector_table
	sub	x1, x1, x0
	mov	x0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
#endif
END_FUNC _start
DECLARE_KEEP_INIT _start

	.section .identity_map.data
	.balign	8
LOCAL_DATA cached_mem_end , :
	.skip	8
END_DATA cached_mem_end

#if defined(CFG_CORE_ASLR) || defined(CFG_CORE_PHYS_RELOCATABLE)
LOCAL_FUNC relocate , :
	/*
	 * x0 holds relocate offset
	 * x1 holds load address
	 */
#ifdef CFG_WITH_PAGER
	adr_l	x6, __init_end
#else
	adr_l	x6, __end
#endif
	ldp	w2, w3, [x6, #BOOT_EMBDATA_RELOC_OFFSET]

	add	x2, x2, x6	/* start of relocations */
	add	x3, x3, x2	/* end of relocations */

	/*
	 * Relocations are not formatted as Rela64, instead they are in a
	 * compressed format created by get_reloc_bin() in
	 * scripts/gen_tee_bin.py.
	 *
	 * All the R_AARCH64_RELATIVE relocations are translated into a
	 * list of 32-bit offsets from TEE_LOAD_ADDR. Each offset points
	 * out a 64-bit value which is increased by the load offset.
	 */
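	/*
	 * In C, the loop below is roughly (a sketch; reloc_start,
	 * reloc_end, load_addr and reloc_offset correspond to x2, x3, x1
	 * and x0):
	 *
	 *   for (uint32_t *p = reloc_start; p < reloc_end; p++) {
	 *           uint64_t *va = (uint64_t *)(load_addr + *p);
	 *
	 *           *va += reloc_offset;
	 *   }
	 */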

#ifdef CFG_WITH_PAGER
	/*
	 * With pager enabled we can only relocate the pager and init
	 * parts, the rest has to be done when a page is populated.
	 */
	sub	x6, x6, x1
#endif

	b	2f
	/* Loop over the relocation addresses and process all entries */
1:	ldr	w4, [x2], #4
#ifdef CFG_WITH_PAGER
	/* Skip too large addresses */
	cmp	x4, x6
	b.ge	2f
#endif
	add	x4, x4, x1
	ldr	x5, [x4]
	add	x5, x5, x0
	str	x5, [x4]

2:	cmp	x2, x3
	b.ne	1b

	ret
END_FUNC relocate
#endif

/*
 * void enable_mmu(unsigned long core_pos);
 *
 * This function depends on being mapped within the identity map, where
 * the physical address and the virtual address are the same. After the
 * MMU has been enabled, execution continues at the new virtual offset
 * instead. Stack pointers and the return address are updated to match.
 */
LOCAL_FUNC enable_mmu , : , .identity_map
	adr	x1, boot_mmu_config
	load_xregs x1, 0, 2, 6
	/*
	 * x0 = core_pos
	 * x2 = tcr_el1
	 * x3 = mair_el1
	 * x4 = ttbr0_el1_base
	 * x5 = ttbr0_core_offset
	 * x6 = load_offset
	 */
	msr	tcr_el1, x2
	msr	mair_el1, x3

	/*
	 * ttbr0_el1 = ttbr0_el1_base + ttbr0_core_offset * core_pos
	 */
	madd	x1, x5, x0, x4
	msr	ttbr0_el1, x1
	msr	ttbr1_el1, xzr
	isb

	/* Invalidate TLB */
	tlbi	vmalle1

	/*
	 * Make sure translation table writes have drained into memory and
	 * the TLB invalidation is complete.
	 */
	dsb	sy
	isb

	/* Enable the MMU */
	mrs	x1, sctlr_el1
	orr	x1, x1, #SCTLR_M
	msr	sctlr_el1, x1
	isb

	/* Update vbar */
	mrs	x1, vbar_el1
	add	x1, x1, x6
	msr	vbar_el1, x1
	isb

	/* Invalidate instruction cache and branch predictor */
	ic	iallu
	isb

	/* Enable I and D cache */
	mrs	x1, sctlr_el1
	orr	x1, x1, #SCTLR_I
	orr	x1, x1, #SCTLR_C
	msr	sctlr_el1, x1
	isb

	/* Adjust stack pointers and return address */
	msr	spsel, #1
	add	sp, sp, x6
	msr	spsel, #0
	add	sp, sp, x6
	add	x30, x30, x6

	ret
END_FUNC enable_mmu

	.section .identity_map.data
	.balign	8
DATA boot_mmu_config , : /* struct core_mmu_config */
	.skip	CORE_MMU_CONFIG_SIZE
END_DATA boot_mmu_config

FUNC cpu_on_handler , :
	mov	x19, x0
	mov	x20, x1
	mov	x21, x30

	adr	x0, reset_vect_table
	msr	vbar_el1, x0
	isb

	set_sctlr_el1
	isb

	/* Enable aborts now that we can receive exceptions */
	msr	daifclr, #DAIFBIT_ABT

	bl	__get_core_pos
	bl	enable_mmu

	/* Set up SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
	set_sp

#ifdef CFG_MEMTAG
	init_memtag_per_cpu
#endif
#ifdef CFG_CORE_PAUTH
	init_pauth_per_cpu
#endif

	mov	x0, x19
	mov	x1, x20
#ifdef CFG_CORE_FFA
	bl	boot_cpu_on_handler
	b	thread_ffa_msg_wait
#else
	mov	x30, x21
	b	boot_cpu_on_handler
#endif
END_FUNC cpu_on_handler
DECLARE_KEEP_PAGER cpu_on_handler

LOCAL_FUNC unhandled_cpu , :
	wfi
	b	unhandled_cpu
END_FUNC unhandled_cpu

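/*
 * stack_tmp_rel holds the link-time offset from stack_tmp_rel itself to
 * stack_tmp - STACK_TMP_GUARD. Adding the runtime address of
 * stack_tmp_rel to the loaded word (as set_sp does) produces the
 * absolute address PC-relatively, avoiding a dynamic relocation in code
 * that runs before relocations have been applied.
 */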
LOCAL_DATA stack_tmp_rel , :
	.word	stack_tmp - stack_tmp_rel - STACK_TMP_GUARD
END_DATA stack_tmp_rel

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes
	 * the vector entry label as its parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm
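	/*
	 * Each vector slot below is 128 bytes (".align 7"), i.e. exactly
	 * 32 4-byte instructions, and the table itself is 2 KiB aligned
	 * via the ".align 11" that follows.
	 */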

	.section .identity_map, "ax", %progbits
	.align	11
LOCAL_FUNC reset_vect_table , :, .identity_map, , nobti
	/* -----------------------------------------------------
	 * Current EL with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
SynchronousExceptionSP0:
	b	SynchronousExceptionSP0
	check_vector_size SynchronousExceptionSP0

	.align	7
IrqSP0:
	b	IrqSP0
	check_vector_size IrqSP0

	.align	7
FiqSP0:
	b	FiqSP0
	check_vector_size FiqSP0

	.align	7
SErrorSP0:
	b	SErrorSP0
	check_vector_size SErrorSP0

	/* -----------------------------------------------------
	 * Current EL with SPx: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionSPx:
	b	SynchronousExceptionSPx
	check_vector_size SynchronousExceptionSPx

	.align	7
IrqSPx:
	b	IrqSPx
	check_vector_size IrqSPx

	.align	7
FiqSPx:
	b	FiqSPx
	check_vector_size FiqSPx

	.align	7
SErrorSPx:
	b	SErrorSPx
	check_vector_size SErrorSPx

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionA64:
	b	SynchronousExceptionA64
	check_vector_size SynchronousExceptionA64

	.align	7
IrqA64:
	b	IrqA64
	check_vector_size IrqA64

	.align	7
FiqA64:
	b	FiqA64
	check_vector_size FiqA64

	.align	7
SErrorA64:
	b	SErrorA64
	check_vector_size SErrorA64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionA32:
	b	SynchronousExceptionA32
	check_vector_size SynchronousExceptionA32

	.align	7
IrqA32:
	b	IrqA32
	check_vector_size IrqA32

	.align	7
FiqA32:
	b	FiqA32
	check_vector_size FiqA32

	.align	7
SErrorA32:
	b	SErrorA32
	check_vector_size SErrorA32

END_FUNC reset_vect_table

BTI(emit_aarch64_feature_1_and     GNU_PROPERTY_AARCH64_FEATURE_1_BTI)