/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015-2022, Linaro Limited
 * Copyright (c) 2021-2023, Arm Limited
 */

#include <platform_config.h>

#include <arm64_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

	/*
	 * Set up SP_EL0 and SP_EL1, SP will be set to SP_EL0.
	 * SP_EL0 is assigned:
	 *   stack_tmp + (cpu_id + 1) * stack_tmp_stride - STACK_TMP_GUARD
	 * SP_EL1 is assigned thread_core_local[cpu_id]
	 */
	.macro set_sp
		bl	__get_core_pos
		cmp	x0, #CFG_TEE_CORE_NB_CORE
		/* Unsupported CPU, park it before it breaks something */
		bge	unhandled_cpu
		add	x0, x0, #1
		adr_l	x1, stack_tmp_stride
		ldr	w1, [x1]
		mul	x1, x0, x1

		/* x0 = stack_tmp - STACK_TMP_GUARD */
		adr_l	x2, stack_tmp_rel
		ldr	w0, [x2]
		add	x0, x0, x2

		msr	spsel, #0
		add	sp, x1, x0
		bl	thread_get_core_local
		msr	spsel, #1
		mov	sp, x0
		msr	spsel, #0
	.endm
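	/*
	 * For reference, the values set_sp computes, expressed as a rough
	 * C sketch (documentation only, not built; the types are
	 * assumptions):
	 *
	 *   size_t pos = __get_core_pos();
	 *   vaddr_t sp_el0 = (vaddr_t)stack_tmp +
	 *		      (pos + 1) * stack_tmp_stride - STACK_TMP_GUARD;
	 *   struct thread_core_local *sp_el1 = thread_get_core_local();
	 */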

	.macro read_feat_mte reg
		mrs	\reg, id_aa64pfr1_el1
		ubfx	\reg, \reg, #ID_AA64PFR1_EL1_MTE_SHIFT, #4
	.endm

	.macro read_feat_pan reg
		mrs	\reg, id_mmfr3_el1
		ubfx	\reg, \reg, #ID_MMFR3_EL1_PAN_SHIFT, #4
	.endm

	.macro set_sctlr_el1
		mrs	x0, sctlr_el1
		orr	x0, x0, #SCTLR_I
		orr	x0, x0, #SCTLR_SA
		orr	x0, x0, #SCTLR_SPAN
#if defined(CFG_CORE_RWDATA_NOEXEC)
		orr	x0, x0, #SCTLR_WXN
#endif
#if defined(CFG_SCTLR_ALIGNMENT_CHECK)
		orr	x0, x0, #SCTLR_A
#else
		bic	x0, x0, #SCTLR_A
#endif
#ifdef CFG_MEMTAG
		read_feat_mte x1
		cmp	w1, #1
		b.ls	111f
		orr	x0, x0, #(SCTLR_ATA | SCTLR_ATA0)
		bic	x0, x0, #SCTLR_TCF_MASK
		bic	x0, x0, #SCTLR_TCF0_MASK
111:
#endif
#if defined(CFG_TA_PAUTH) && defined(CFG_TA_BTI)
		orr	x0, x0, #SCTLR_BT0
#endif
#if defined(CFG_CORE_PAUTH) && defined(CFG_CORE_BTI)
		orr	x0, x0, #SCTLR_BT1
#endif
		msr	sctlr_el1, x0
	.endm

	.macro init_memtag_per_cpu
		read_feat_mte x0
		cmp	w0, #1
		b.ls	11f

#ifdef CFG_TEE_CORE_DEBUG
		/*
		 * This together with GCR_EL1.RRND = 0 will make the tags
		 * acquired with the irg instruction deterministic.
		 */
		mov_imm	x0, 0xcafe00
		msr	rgsr_el1, x0
		/* Avoid tag = 0x0 and 0xf */
		mov	x0, #0
#else
		/*
		 * Still avoid tags 0x0 and 0xf since they are used for
		 * everything which isn't explicitly tagged. Set
		 * GCR_EL1.RRND = 1 to allow an implementation-specific
		 * method of generating the tags.
		 */
		mov	x0, #GCR_EL1_RRND
#endif
		orr	x0, x0, #1		/* exclude tag 0x0 */
		orr	x0, x0, #(1 << 15)	/* exclude tag 0xf */
		msr	gcr_el1, x0

		/*
		 * Enable the tag checks on the current CPU.
		 *
		 * Depends on boot_init_memtag() having cleared tags for
		 * TEE core memory. Well, not really, addresses with the
		 * tag value 0b0000 will use unchecked access due to
		 * TCR_TCMA0.
		 */
		mrs	x0, tcr_el1
		orr	x0, x0, #TCR_TBI0
		orr	x0, x0, #TCR_TCMA0
		msr	tcr_el1, x0

		mrs	x0, sctlr_el1
		orr	x0, x0, #SCTLR_TCF_SYNC
		orr	x0, x0, #SCTLR_TCF0_SYNC
		msr	sctlr_el1, x0

		isb
11:
	.endm
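	/*
	 * Sketch of the GCR_EL1 value composed above, assuming the
	 * architectural layout Exclude mask in bits [15:0], RRND in bit 16:
	 *
	 *   exclude = BIT(0) | BIT(15);	// never generate tag 0x0 or 0xf
	 *   debug:   GCR_EL1 = exclude;		// RRND = 0, value 0x8001
	 *   release: GCR_EL1 = GCR_EL1_RRND | exclude;
	 */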

	.macro init_pauth_secondary_cpu
		msr	spsel, #1
		ldp	x0, x1, [sp, #THREAD_CORE_LOCAL_KEYS]
		msr	spsel, #0
		write_apiakeyhi x0
		write_apiakeylo x1
		mrs	x0, sctlr_el1
		orr	x0, x0, #SCTLR_ENIA
		msr	sctlr_el1, x0
		isb
	.endm

	.macro init_pan
		read_feat_pan x0
		cmp	x0, #0
		b.eq	1f
		mrs	x0, sctlr_el1
		bic	x0, x0, #SCTLR_SPAN
		msr	sctlr_el1, x0
		write_pan_enable
	1:
	.endm

FUNC _start , :
	/*
	 * Temporary copy of boot argument registers, will be passed to
	 * boot_save_args() further down.
	 */
	mov	x19, x0
	mov	x20, x1
	mov	x21, x2
	mov	x22, x3

	adr	x0, reset_vect_table
	msr	vbar_el1, x0
	isb

#ifdef CFG_PAN
	init_pan
#endif

	set_sctlr_el1
	isb

#ifdef CFG_WITH_PAGER
	/*
	 * Move init code into correct location and move hashes to a
	 * temporary safe location until the heap is initialized.
	 *
	 * The binary is built as:
	 * [Pager code, rodata and data] : In correct location
	 * [Init code and rodata] : Should be copied to __init_start
	 * [struct boot_embdata + data] : Should be saved before
	 * initializing pager, first uint32_t tells the length of the data
	 */
	adr	x0, __init_start	/* dst */
	adr	x1, __data_end		/* src */
	adr	x2, __init_end
	sub	x2, x2, x0		/* init len */
	ldr	w4, [x1, x2]		/* length of hashes etc */
	add	x2, x2, x4		/* length of init and hashes etc */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	x0, x0, x2		/* __init_start + len */
	add	x1, x1, x2		/* __data_end + len */
	adr_l	x3, boot_cached_mem_end
	str	x0, [x3]
	adr	x2, __init_start
copy_init:
	ldp	x3, x4, [x1, #-16]!
	stp	x3, x4, [x0, #-16]!
	cmp	x0, x2
	b.gt	copy_init
#else
	/*
	 * The binary is built as:
	 * [Core, rodata and data] : In correct location
	 * [struct boot_embdata + data] : Should be moved to right before
	 * __vcore_free_end, the first uint32_t tells the length of the
	 * struct + data
	 */
	adr_l	x1, __data_end		/* src */
	ldr	w2, [x1]		/* struct boot_embdata::total_len */
	/* dst */
	adr_l	x0, __vcore_free_end
	sub	x0, x0, x2
	/* round down to beginning of page */
	bic	x0, x0, #(SMALL_PAGE_SIZE - 1)
	adr_l	x3, boot_embdata_ptr
	str	x0, [x3]

	/* Copy backwards (as memmove) in case we're overlapping */
	add	x1, x1, x2
	add	x2, x0, x2
	adr_l	x3, boot_cached_mem_end
	str	x2, [x3]

copy_init:
	ldp	x3, x4, [x1, #-16]!
	stp	x3, x4, [x2, #-16]!
	cmp	x2, x0
	b.gt	copy_init
#endif
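	/*
	 * Both copy loops above are a minimal backwards memmove() working
	 * in 16-byte chunks, roughly this C sketch (names are
	 * illustrative only):
	 *
	 *   do {
	 *	dst_end -= 16;
	 *	src_end -= 16;
	 *	memcpy(dst_end, src_end, 16);
	 *   } while (dst_end > dst_start);
	 */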

	/*
	 * Clear .bss, this code obviously depends on the linker keeping
	 * start/end of .bss at least 8 byte aligned.
	 */
	adr_l	x0, __bss_start
	adr_l	x1, __bss_end
clear_bss:
	str	xzr, [x0], #8
	cmp	x0, x1
	b.lt	clear_bss

#ifdef CFG_NS_VIRTUALIZATION
	/*
	 * Clear .nex_bss, this code obviously depends on the linker keeping
	 * start/end of .nex_bss at least 8 byte aligned.
	 */
	adr_l	x0, __nex_bss_start
	adr_l	x1, __nex_bss_end
clear_nex_bss:
	str	xzr, [x0], #8
	cmp	x0, x1
	b.lt	clear_nex_bss
#endif


#if defined(CFG_CORE_PHYS_RELOCATABLE)
	/*
	 * Save the base physical address, it will not change after this
	 * point.
	 */
	adr_l	x2, core_mmu_tee_load_pa
	adr	x1, _start		/* Load address */
	str	x1, [x2]

	mov_imm	x0, TEE_LOAD_ADDR	/* Compiled load address */
	sub	x0, x1, x0		/* Relocation offset */

	cbz	x0, 1f
	bl	relocate
1:
#endif

	/* Set up SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
	set_sp

	/* Initialize thread_core_local[current_cpu_id] for early boot */
	bl	thread_get_abt_stack
	mov	x1, sp
	msr	spsel, #1
	str	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	str	x0, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	mov	x0, #THREAD_ID_INVALID
	str	x0, [sp, #THREAD_CORE_LOCAL_CURR_THREAD]
	mov	w0, #THREAD_CLF_TMP
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0
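	/*
	 * The stores above roughly correspond to this C sketch (field
	 * names inferred from the THREAD_CORE_LOCAL_* asm-defines, see
	 * struct thread_core_local in thread_private.h):
	 *
	 *   struct thread_core_local *l = thread_get_core_local();
	 *   l->tmp_stack_va_end = sp;	// the tmp stack set up by set_sp
	 *   l->abt_stack_va_end = thread_get_abt_stack();
	 *   l->curr_thread = THREAD_ID_INVALID;
	 *   l->flags = THREAD_CLF_TMP;
	 */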

	/* Enable aborts now that we can receive exceptions */
	msr	daifclr, #DAIFBIT_ABT

	/*
	 * Invalidate dcache for all memory used during initialization to
	 * avoid nasty surprises when the cache is turned on. We must not
	 * invalidate memory not used by OP-TEE since we may invalidate
	 * entries used by, for instance, ARM Trusted Firmware.
	 */
	adr_l	x0, __text_start
	adr_l	x1, boot_cached_mem_end
	ldr	x1, [x1]
	sub	x1, x1, x0
	bl	dcache_cleaninv_range

	/* Enable Console */
	bl	console_init

	mov	x0, x19
	mov	x1, x20
	mov	x2, x21
	mov	x3, x22
	mov	x4, xzr
	bl	boot_save_args

#ifdef CFG_WITH_PAGER
	adr_l	x0, __init_end	/* pointer to boot_embdata */
	ldr	w1, [x0]	/* struct boot_embdata::total_len */
	add	x0, x0, x1
	add	x0, x0, #0xfff	/* round up */
	bic	x0, x0, #0xfff  /* to next page */
	mov_imm x1, (TEE_RAM_PH_SIZE + TEE_RAM_START)
	mov	x2, x1
#else
	adr_l	x0, __vcore_free_start
	adr_l	x1, boot_embdata_ptr
	ldr	x1, [x1]
	adr_l	x2, __vcore_free_end
#endif
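	/*
	 * boot_mem_init() below is called with (in x0/x1/x2):
	 *  - with the pager: start = first page after the copied
	 *    boot_embdata, end = orig_end = TEE_RAM_START + TEE_RAM_PH_SIZE
	 *  - without the pager: start = __vcore_free_start, end = the
	 *    copied boot_embdata (boot_embdata_ptr),
	 *    orig_end = __vcore_free_end
	 * (The parameter names above are descriptive assumptions, not the
	 * exact C prototype.)
	 */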
	bl	boot_mem_init

#ifdef CFG_MEMTAG
	/*
	 * If FEAT_MTE2 is available, initialize the memtag callbacks.
	 * Tags for OP-TEE core memory are then cleared to make it safe to
	 * enable MEMTAG below.
	 */
	bl	boot_init_memtag
#endif

#ifdef CFG_CORE_ASLR
	bl	get_aslr_seed
#ifdef CFG_CORE_ASLR_SEED
	mov_imm	x0, CFG_CORE_ASLR_SEED
#endif
#else
	mov	x0, #0
#endif

	adr	x1, boot_mmu_config
	bl	core_init_mmu_map

#ifdef CFG_CORE_ASLR
	/*
	 * Process relocation information again, updating for the virtual
	 * map offset. We're doing this now, before the MMU is enabled, as
	 * some of the memory will become write protected.
	 */
	ldr	x0, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
	cbz	x0, 1f
	/*
	 * Update boot_cached_mem_end with the load offset since it was
	 * calculated before relocation.
	 */
	adr_l	x5, boot_cached_mem_end
	ldr	x6, [x5]
	add	x6, x6, x0
	str	x6, [x5]
	adr	x1, _start		/* Load address */
	bl	relocate
1:
#endif

	bl	__get_core_pos
	bl	enable_mmu
#ifdef CFG_CORE_ASLR
	adr_l	x0, boot_mmu_config
	ldr	x0, [x0, #CORE_MMU_CONFIG_MAP_OFFSET]
	bl	boot_mem_relocate
	/*
	 * Update the recorded stack end VAs with the map offset.
	 */
	adr_l	x0, boot_mmu_config
	ldr	x0, [x0, #CORE_MMU_CONFIG_MAP_OFFSET]
	msr	spsel, #1
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	add	x1, x1, x0
	str	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	add	x1, x1, x0
	str	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	msr	spsel, #0
	/*
	 * Reinitialize console, since register_serial_console() has
	 * previously registered a PA and with ASLR the VA is different
	 * from the PA.
	 */
	bl	console_init
#endif

#ifdef CFG_MEMTAG
	bl	boot_clear_memtag
#endif

#ifdef CFG_NS_VIRTUALIZATION
	/*
	 * Initialize partition tables for each partition to
	 * default_partition, which has now been relocated to a different VA.
	 */
	bl	core_mmu_set_default_prtn_tbl
#endif

	bl	boot_init_primary_early

#ifdef CFG_MEMTAG
	init_memtag_per_cpu
#endif
	bl	boot_init_primary_late
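	/*
	 * Without CFG_NS_VIRTUALIZATION, temporarily switch to the boot
	 * thread stack (threads[0]) so that boot_init_primary_runtime()
	 * and boot_init_primary_final() run on a full thread stack. The
	 * tmp SP is saved in x23 and restored further down, where the
	 * core local flags are also set back to THREAD_CLF_TMP.
	 */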
#ifndef CFG_NS_VIRTUALIZATION
	mov	x23, sp
	adr_l	x0, threads
	ldr	x0, [x0, #THREAD_CTX_STACK_VA_END]
	mov	sp, x0
	bl	thread_get_core_local
	mov	x24, x0
	str	wzr, [x24, #THREAD_CORE_LOCAL_FLAGS]
#endif
	bl	boot_init_primary_runtime
#ifdef CFG_CORE_PAUTH
	adr_l	x0, threads
	ldp	x1, x2, [x0, #THREAD_CTX_KEYS]
	write_apiakeyhi x1
	write_apiakeylo x2
	mrs	x0, sctlr_el1
	orr	x0, x0, #SCTLR_ENIA
	msr	sctlr_el1, x0
	isb
#endif
	bl	boot_init_primary_final

#ifndef CFG_NS_VIRTUALIZATION
	mov	x0, #THREAD_CLF_TMP
	str	w0, [x24, #THREAD_CORE_LOCAL_FLAGS]
	mov	sp, x23
#ifdef CFG_CORE_PAUTH
	ldp	x0, x1, [x24, #THREAD_CORE_LOCAL_KEYS]
	write_apiakeyhi x0
	write_apiakeylo x1
	isb
#endif
#endif

#ifdef _CFG_CORE_STACK_PROTECTOR
	/* Update stack canary value */
	sub	sp, sp, #0x10
	mov	x0, sp
	mov	x1, #1
	mov	x2, #0x8
	bl	plat_get_random_stack_canaries
	ldr	x0, [sp]
	adr_l	x5, __stack_chk_guard
	str	x0, [x5]
	add	sp, sp, #0x10
#endif
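	/*
	 * The _CFG_CORE_STACK_PROTECTOR block above, roughly in C (a
	 * sketch; see the declaration of plat_get_random_stack_canaries()
	 * for the exact prototype):
	 *
	 *   uint64_t canary[2];	// 16-byte scratch buffer on the stack
	 *   plat_get_random_stack_canaries(canary, 1, 8);
	 *   __stack_chk_guard = canary[0];
	 */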

	/*
	 * In case we've touched memory that secondary CPUs will use before
	 * they have turned on their D-cache, clean and invalidate the
	 * D-cache before exiting to normal world.
	 */
	adr_l	x0, __text_start
	adr_l	x1, boot_cached_mem_end
	ldr	x1, [x1]
	sub	x1, x1, x0
	bl	dcache_cleaninv_range


	/*
	 * Clear current thread id now to allow the thread to be reused on
	 * the next entry. Matches thread_init_boot_thread() in boot.c.
	 */
#ifndef CFG_NS_VIRTUALIZATION
	bl	thread_clr_boot_thread
#endif

#ifdef CFG_CORE_FFA
	adr	x0, cpu_on_handler
	/*
	 * Compensate for the virtual map offset since cpu_on_handler() is
	 * called with the MMU off.
	 */
	ldr	x1, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
	sub	x0, x0, x1
	bl	thread_spmc_register_secondary_ep
	b	thread_ffa_msg_wait
#else
	/*
	 * Pass the address of thread_vector_table to the monitor.
	 * Compensate for the virtual map offset since the address is used
	 * with the MMU off.
	 */
	ldr	x0, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
	adr	x1, thread_vector_table
	sub	x1, x1, x0
	mov	x0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
#endif
END_FUNC _start
DECLARE_KEEP_INIT _start

#ifndef CFG_WITH_PAGER
	.section .identity_map.data
	.balign	8
LOCAL_DATA boot_embdata_ptr , :
	.skip	8
END_DATA boot_embdata_ptr
#endif

#if defined(CFG_CORE_ASLR) || defined(CFG_CORE_PHYS_RELOCATABLE)
LOCAL_FUNC relocate , :
	/*
	 * x0 holds the relocation offset
	 * x1 holds the load address
	 */
#ifdef CFG_WITH_PAGER
	adr_l	x6, __init_end
#else
	adr_l	x6, boot_embdata_ptr
	ldr	x6, [x6]
#endif
	ldp	w2, w3, [x6, #BOOT_EMBDATA_RELOC_OFFSET]

	add	x2, x2, x6	/* start of relocations */
	add	x3, x3, x2	/* end of relocations */

	/*
	 * Relocations are not formatted as Rela64, instead they are in a
	 * compressed format created by get_reloc_bin() in
	 * scripts/gen_tee_bin.py
	 *
	 * All the R_AARCH64_RELATIVE relocations are translated into a
	 * list of 32-bit offsets from TEE_LOAD_ADDR. Each offset points at
	 * a 64-bit value which is increased by the load offset.
	 */
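	/*
	 * Roughly, the loop below does (a C sketch; variable names are
	 * illustrative only):
	 *
	 *   uint32_t *p = start_of_relocs;
	 *
	 *   while (p < end_of_relocs) {
	 *	uint64_t *va = (uint64_t *)(load_addr + *p++);
	 *	*va += reloc_offset;
	 *   }
	 */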

#ifdef CFG_WITH_PAGER
	/*
	 * With pager enabled we can only relocate the pager and init
	 * parts; the rest has to be done when a page is populated.
	 */
	sub	x6, x6, x1
#endif

	b	2f
	/* Loop over the relocation addresses and process all entries */
1:	ldr	w4, [x2], #4
#ifdef CFG_WITH_PAGER
	/* Skip addresses that are too large */
	cmp	x4, x6
	b.ge	2f
#endif
	add	x4, x4, x1
	ldr	x5, [x4]
	add	x5, x5, x0
	str	x5, [x4]

2:	cmp	x2, x3
	b.ne	1b

	ret
END_FUNC relocate
#endif

/*
 * void enable_mmu(unsigned long core_pos);
 *
 * This function depends on being mapped within the identity map, where
 * the physical and virtual addresses are the same. After the MMU has
 * been enabled, execution continues at the new (virtual) offset. Stack
 * pointers and the return address are updated accordingly.
 */
LOCAL_FUNC enable_mmu , : , .identity_map
	adr	x1, boot_mmu_config
	load_xregs x1, 0, 2, 6
	/*
	 * x0 = core_pos
	 * x2 = tcr_el1
	 * x3 = mair_el1
	 * x4 = ttbr0_el1_base
	 * x5 = ttbr0_core_offset
	 * x6 = load_offset
	 */
	msr	tcr_el1, x2
	msr	mair_el1, x3

	/*
	 * ttbr0_el1 = ttbr0_el1_base + ttbr0_core_offset * core_pos
	 */
	madd	x1, x5, x0, x4
	msr	ttbr0_el1, x1
	msr	ttbr1_el1, xzr
	isb

	/* Invalidate TLB */
	tlbi	vmalle1

	/*
	 * Make sure translation table writes have drained into memory and
	 * the TLB invalidation is complete.
	 */
	dsb	sy
	isb

	/* Enable the MMU */
	mrs	x1, sctlr_el1
	orr	x1, x1, #SCTLR_M
	msr	sctlr_el1, x1
	isb

	/* Update vbar */
	mrs	x1, vbar_el1
	add	x1, x1, x6
	msr	vbar_el1, x1
	isb

	/* Invalidate instruction cache and branch predictor */
	ic	iallu
	isb

	/* Enable I and D cache */
	mrs	x1, sctlr_el1
	orr	x1, x1, #SCTLR_I
	orr	x1, x1, #SCTLR_C
	msr	sctlr_el1, x1
	isb

	/* Adjust stack pointers and return address */
	msr	spsel, #1
	add	sp, sp, x6
	msr	spsel, #0
	add	sp, sp, x6
	add	x30, x30, x6

	ret
END_FUNC enable_mmu

	.section .identity_map.data
	.balign	8
DATA boot_mmu_config , : /* struct core_mmu_config */
	.skip	CORE_MMU_CONFIG_SIZE
END_DATA boot_mmu_config

FUNC cpu_on_handler , :
	mov	x19, x0
	mov	x20, x1
	mov	x21, x30

	adr	x0, reset_vect_table
	msr	vbar_el1, x0
	isb

	set_sctlr_el1
	isb

#ifdef CFG_PAN
	init_pan
#endif

	/* Enable aborts now that we can receive exceptions */
	msr	daifclr, #DAIFBIT_ABT

	bl	__get_core_pos
	bl	enable_mmu

	/* Set up SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
	set_sp

#ifdef CFG_MEMTAG
	init_memtag_per_cpu
#endif
#ifdef CFG_CORE_PAUTH
	init_pauth_secondary_cpu
#endif

	mov	x0, x19
	mov	x1, x20
#ifdef CFG_CORE_FFA
	bl	boot_cpu_on_handler
	b	thread_ffa_msg_wait
#else
	mov	x30, x21
	b	boot_cpu_on_handler
#endif
END_FUNC cpu_on_handler
DECLARE_KEEP_PAGER cpu_on_handler

LOCAL_FUNC unhandled_cpu , :
	wfi
	b	unhandled_cpu
END_FUNC unhandled_cpu

LOCAL_DATA stack_tmp_rel , :
	.word	stack_tmp - stack_tmp_rel - STACK_TMP_GUARD
END_DATA stack_tmp_rel
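/*
 * stack_tmp_rel holds a self-relative offset so that set_sp can locate
 * the guarded stack area without needing an absolute relocation:
 *
 *   (vaddr_t)&stack_tmp_rel + stack_tmp_rel ==
 *					(vaddr_t)stack_tmp - STACK_TMP_GUARD
 */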

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes
	 * the vector entry as its parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm
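	/*
	 * 32 instructions * 4 bytes = 0x80 bytes, which matches the
	 * .align 7 spacing between the vector entries below.
	 */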

	.section .identity_map, "ax", %progbits
	.align	11
LOCAL_FUNC reset_vect_table , :, .identity_map, , nobti
	/* -----------------------------------------------------
	 * Current EL with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
SynchronousExceptionSP0:
	b	SynchronousExceptionSP0
	check_vector_size SynchronousExceptionSP0

	.align	7
IrqSP0:
	b	IrqSP0
	check_vector_size IrqSP0

	.align	7
FiqSP0:
	b	FiqSP0
	check_vector_size FiqSP0

	.align	7
SErrorSP0:
	b	SErrorSP0
	check_vector_size SErrorSP0

	/* -----------------------------------------------------
	 * Current EL with SPx : 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionSPx:
	b	SynchronousExceptionSPx
	check_vector_size SynchronousExceptionSPx

	.align	7
IrqSPx:
	b	IrqSPx
	check_vector_size IrqSPx

	.align	7
FiqSPx:
	b	FiqSPx
	check_vector_size FiqSPx

	.align	7
SErrorSPx:
	b	SErrorSPx
	check_vector_size SErrorSPx

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionA64:
	b	SynchronousExceptionA64
	check_vector_size SynchronousExceptionA64

	.align	7
IrqA64:
	b	IrqA64
	check_vector_size IrqA64

	.align	7
FiqA64:
	b	FiqA64
	check_vector_size FiqA64

	.align	7
SErrorA64:
	b	SErrorA64
	check_vector_size SErrorA64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionA32:
	b	SynchronousExceptionA32
	check_vector_size SynchronousExceptionA32

	.align	7
IrqA32:
	b	IrqA32
	check_vector_size IrqA32

	.align	7
FiqA32:
	b	FiqA32
	check_vector_size FiqA32

	.align	7
SErrorA32:
	b	SErrorA32
	check_vector_size SErrorA32

END_FUNC reset_vect_table

BTI(emit_aarch64_feature_1_and     GNU_PROPERTY_AARCH64_FEATURE_1_BTI)