/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015-2022, Linaro Limited
 * Copyright (c) 2021-2023, Arm Limited
 */

#include <platform_config.h>

#include <arm64_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

	/*
	 * Setup SP_EL0 and SP_EL1, SP will be set to SP_EL0.
	 * SP_EL0 is assigned:
	 *   stack_tmp + (cpu_id + 1) * stack_tmp_stride - STACK_TMP_GUARD
	 * SP_EL1 is assigned thread_core_local[cpu_id]
	 */
	.macro set_sp
		bl	__get_core_pos
		cmp	x0, #CFG_TEE_CORE_NB_CORE
		/* Unsupported CPU, park it before it breaks something */
		bge	unhandled_cpu
		add	x0, x0, #1
		adr_l	x1, stack_tmp_stride
		ldr	w1, [x1]
		mul	x1, x0, x1

		/* x0 = stack_tmp - STACK_TMP_GUARD */
		adr_l	x2, stack_tmp_rel
		ldr	w0, [x2]
		add	x0, x0, x2

		msr	spsel, #0
		add	sp, x1, x0
		bl	thread_get_core_local
		msr	spsel, #1
		mov	sp, x0
		msr	spsel, #0
	.endm
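
	/*
	 * For illustration only (C-like sketch, not assembled), with
	 * stack_tmp_rel holding "stack_tmp - stack_tmp_rel -
	 * STACK_TMP_GUARD" as defined near the end of this file:
	 *
	 *   pos = __get_core_pos();
	 *   SP_EL0 = &stack_tmp_rel + stack_tmp_rel +
	 *	      (pos + 1) * stack_tmp_stride;
	 *   SP_EL1 = thread_get_core_local();
	 */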

	.macro read_feat_mte reg
		mrs	\reg, id_aa64pfr1_el1
		ubfx	\reg, \reg, #ID_AA64PFR1_EL1_MTE_SHIFT, #4
	.endm

	.macro read_feat_pan reg
		mrs	\reg, id_mmfr3_el1
		ubfx	\reg, \reg, #ID_MMFR3_EL1_PAN_SHIFT, #4
	.endm

	.macro set_sctlr_el1
		mrs	x0, sctlr_el1
		orr	x0, x0, #SCTLR_I
		orr	x0, x0, #SCTLR_SA
		orr	x0, x0, #SCTLR_SPAN
#if defined(CFG_CORE_RWDATA_NOEXEC)
		orr	x0, x0, #SCTLR_WXN
#endif
#if defined(CFG_SCTLR_ALIGNMENT_CHECK)
		orr	x0, x0, #SCTLR_A
#else
		bic	x0, x0, #SCTLR_A
#endif
#ifdef CFG_MEMTAG
		read_feat_mte x1
		cmp	w1, #1
		b.ls	111f
		orr	x0, x0, #(SCTLR_ATA | SCTLR_ATA0)
		bic	x0, x0, #SCTLR_TCF_MASK
		bic	x0, x0, #SCTLR_TCF0_MASK
111:
#endif
#if defined(CFG_TA_PAUTH) && defined(CFG_TA_BTI)
		orr	x0, x0, #SCTLR_BT0
#endif
#if defined(CFG_CORE_PAUTH) && defined(CFG_CORE_BTI)
		orr	x0, x0, #SCTLR_BT1
#endif
		msr	sctlr_el1, x0
	.endm

	.macro init_memtag_per_cpu
		read_feat_mte x0
		cmp	w0, #1
		b.ls	11f

#ifdef CFG_TEE_CORE_DEBUG
		/*
		 * This together with GCR_EL1.RRND = 0 will make the tags
		 * acquired with the irg instruction deterministic.
		 */
		mov_imm	x0, 0xcafe00
		msr	rgsr_el1, x0
		/* Avoid tag = 0x0 and 0xf */
		mov	x0, #0
#else
		/*
		 * Still avoid tags 0x0 and 0xf since tag 0x0 is used for
		 * everything which isn't explicitly tagged. Set
		 * GCR_EL1.RRND = 1 to allow an implementation specific
		 * method of generating the tags.
		 */
		mov	x0, #GCR_EL1_RRND
#endif
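		/*
		 * The two orr instructions below set bits 0 and 15 in
		 * GCR_EL1.Exclude so that the irg instruction never
		 * generates tag 0x0 or 0xf.
		 */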
		orr	x0, x0, #1
		orr	x0, x0, #(1 << 15)
		msr	gcr_el1, x0

		/*
		 * Enable the tag checks on the current CPU.
		 *
		 * Depends on boot_init_memtag() having cleared tags for
		 * TEE core memory. Well, not really, addresses with the
		 * tag value 0b0000 will use unchecked access due to
		 * TCR_TCMA0.
		 */
		mrs	x0, tcr_el1
		orr	x0, x0, #TCR_TBI0
		orr	x0, x0, #TCR_TCMA0
		msr	tcr_el1, x0

		mrs	x0, sctlr_el1
		orr	x0, x0, #SCTLR_TCF_SYNC
		orr	x0, x0, #SCTLR_TCF0_SYNC
		msr	sctlr_el1, x0

		isb
11:
	.endm

	.macro init_pauth_secondary_cpu
		msr	spsel, #1
		ldp	x0, x1, [sp, #THREAD_CORE_LOCAL_KEYS]
		msr	spsel, #0
		write_apiakeyhi x0
		write_apiakeylo x1
		mrs	x0, sctlr_el1
		orr	x0, x0, #SCTLR_ENIA
		msr	sctlr_el1, x0
		isb
	.endm

	.macro init_pan
		read_feat_pan x0
		cmp	x0, #0
		b.eq	1f
		mrs	x0, sctlr_el1
		bic	x0, x0, #SCTLR_SPAN
		msr	sctlr_el1, x0
		write_pan_enable
	1:
	.endm

FUNC _start , :
	/*
	 * Temporary copy of boot argument registers, will be passed to
	 * boot_save_args() further down.
	 */
	mov	x19, x0
	mov	x20, x1
	mov	x21, x2
	mov	x22, x3

	adr	x0, reset_vect_table
	msr	vbar_el1, x0
	isb

#ifdef CFG_PAN
	init_pan
#endif

	set_sctlr_el1
	isb

#ifdef CFG_WITH_PAGER
	/*
	 * Move init code into correct location and move hashes to a
	 * temporary safe location until the heap is initialized.
	 *
	 * The binary is built as:
	 * [Pager code, rodata and data] : In correct location
	 * [Init code and rodata] : Should be copied to __init_start
	 * [struct boot_embdata + data] : Should be saved before
	 * initializing pager, first uint32_t tells the length of the data
	 */
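	/*
	 * Roughly, for illustration only (C-like sketch, not assembled):
	 *
	 *   init_len = __init_end - __init_start;
	 *   emb_len = *(uint32_t *)(__data_end + init_len);
	 *   memmove(__init_start, __data_end, init_len + emb_len);
	 *   boot_cached_mem_end = __init_start + init_len + emb_len;
	 */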
	adr	x0, __init_start	/* dst */
	adr	x1, __data_end		/* src */
	adr	x2, __init_end
	sub	x2, x2, x0		/* init len */
	ldr	w4, [x1, x2]		/* length of hashes etc */
	add	x2, x2, x4		/* length of init and hashes etc */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	x0, x0, x2		/* __init_start + len */
	add	x1, x1, x2		/* __data_end + len */
	adr_l	x3, boot_cached_mem_end
	str	x0, [x3]
	adr	x2, __init_start
copy_init:
	ldp	x3, x4, [x1, #-16]!
	stp	x3, x4, [x0, #-16]!
	cmp	x0, x2
	b.gt	copy_init
#else
	/*
	 * The binary is built as:
	 * [Core, rodata and data] : In correct location
	 * [struct boot_embdata + data] : Should be moved to right before
	 * __vcore_free_end, the first uint32_t tells the length of the
	 * struct + data
	 */
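	/*
	 * Roughly, for illustration only (C-like sketch, not assembled):
	 *
	 *   total_len = *(uint32_t *)__data_end;
	 *   dst = ROUNDDOWN(__vcore_free_end - total_len, SMALL_PAGE_SIZE);
	 *   boot_embdata_ptr = dst;
	 *   memmove(dst, __data_end, total_len);
	 *   boot_cached_mem_end = dst + total_len;
	 */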
	adr_l	x1, __data_end		/* src */
	ldr	w2, [x1]		/* struct boot_embdata::total_len */
	/* dst */
	adr_l	x0, __vcore_free_end
	sub	x0, x0, x2
	/* round down to beginning of page */
	bic	x0, x0, #(SMALL_PAGE_SIZE - 1)
	adr_l	x3, boot_embdata_ptr
	str	x0, [x3]

	/* Copy backwards (as memmove) in case we're overlapping */
	add	x1, x1, x2
	add	x2, x0, x2
	adr_l	x3, boot_cached_mem_end
	str	x2, [x3]

copy_init:
	ldp	x3, x4, [x1, #-16]!
	stp	x3, x4, [x2, #-16]!
	cmp	x2, x0
	b.gt	copy_init
#endif

	/*
	 * Clear .bss, this code obviously depends on the linker keeping
	 * start/end of .bss at least 8 byte aligned.
	 */
	adr_l	x0, __bss_start
	adr_l	x1, __bss_end
clear_bss:
	str	xzr, [x0], #8
	cmp	x0, x1
	b.lt	clear_bss

#ifdef CFG_NS_VIRTUALIZATION
	/*
	 * Clear .nex_bss, this code obviously depends on the linker keeping
	 * start/end of .nex_bss at least 8 byte aligned.
	 */
	adr_l	x0, __nex_bss_start
	adr_l	x1, __nex_bss_end
clear_nex_bss:
	str	xzr, [x0], #8
	cmp	x0, x1
	b.lt	clear_nex_bss
#endif


#if defined(CFG_CORE_PHYS_RELOCATABLE)
	/*
	 * Save the base physical address, it will not change after this
	 * point.
	 */
	adr_l	x2, core_mmu_tee_load_pa
	adr	x1, _start		/* Load address */
	str	x1, [x2]

	mov_imm	x0, TEE_LOAD_ADDR	/* Compiled load address */
	sub	x0, x1, x0		/* Relocation offset */

	cbz	x0, 1f
	bl	relocate
1:
#endif

	/* Setup SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
	set_sp

	/* Initialize thread_core_local[current_cpu_id] for early boot */
	bl	thread_get_abt_stack
	mov	x1, sp
	msr	spsel, #1
	str	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	str	x0, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	mov	x0, #THREAD_ID_INVALID
	str	x0, [sp, #THREAD_CORE_LOCAL_CURR_THREAD]
	mov	w0, #THREAD_CLF_TMP
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/* Enable aborts now that we can receive exceptions */
	msr	daifclr, #DAIFBIT_ABT

	/*
	 * Invalidate the dcache for all memory used during initialization
	 * to avoid nasty surprises when the cache is turned on. We must
	 * not invalidate memory not used by OP-TEE since we may
	 * invalidate entries used by, for instance, ARM Trusted Firmware.
	 */
	adr_l	x0, __text_start
	adr_l	x1, boot_cached_mem_end
	ldr	x1, [x1]
	sub	x1, x1, x0
	bl	dcache_cleaninv_range

	/* Enable Console */
	bl	console_init

	mov	x0, x19
	mov	x1, x20
	mov	x2, x21
	mov	x3, x22
	mov	x4, xzr
	bl	boot_save_args

#ifdef CFG_WITH_PAGER
	adr_l	x0, __init_end	/* pointer to boot_embdata */
	ldr	w1, [x0]	/* struct boot_embdata::total_len */
	add	x0, x0, x1
	add	x0, x0, #0xfff	/* round up */
	bic	x0, x0, #0xfff	/* to next page */
	mov_imm x1, (TEE_RAM_PH_SIZE + TEE_RAM_START)
	mov	x2, x1
#else
	adr_l	x0, __vcore_free_start
	adr_l	x1, boot_embdata_ptr
	ldr	x1, [x1]
	adr_l	x2, __vcore_free_end
#endif
	bl	boot_mem_init

#ifdef CFG_MEMTAG
	/*
	 * If FEAT_MTE2 is available, initialize the memtag callbacks.
	 * Tags for OP-TEE core memory are then cleared to make it safe to
	 * enable MEMTAG below.
	 */
	bl	boot_init_memtag
#endif

#ifdef CFG_CORE_ASLR
	bl	get_aslr_seed
#ifdef CFG_CORE_ASLR_SEED
	mov_imm	x0, CFG_CORE_ASLR_SEED
#endif
#else
	mov	x0, #0
#endif

	adr	x1, boot_mmu_config
	bl	core_init_mmu_map

#ifdef CFG_CORE_ASLR
	/*
	 * Process the relocation information again, updating for the
	 * virtual map offset. We do this now, before the MMU is enabled,
	 * since some of the memory will become write-protected.
	 */
	ldr	x0, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
	cbz	x0, 1f
	/*
	 * Update the boot_cached_mem_end address with the load offset
	 * since it was calculated before relocation.
	 */
	adr_l	x5, boot_cached_mem_end
	ldr	x6, [x5]
	add	x6, x6, x0
	str	x6, [x5]
	adr	x1, _start		/* Load address */
	bl	relocate
1:
#endif

	bl	__get_core_pos
	bl	enable_mmu
#ifdef CFG_CORE_ASLR
	/*
	 * Update the recorded stack end VAs. This must be done before
	 * calling into C code to make sure that the stack pointer matches
	 * what we have in thread_core_local[].
	 */
	adr_l	x0, boot_mmu_config
	ldr	x0, [x0, #CORE_MMU_CONFIG_MAP_OFFSET]
	msr	spsel, #1
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	add	x1, x1, x0
	str	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	add	x1, x1, x0
	str	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	msr	spsel, #0

	/* Update relocations recorded with boot_mem_add_reloc() */
	adr_l	x0, boot_mmu_config
	ldr	x0, [x0, #CORE_MMU_CONFIG_MAP_OFFSET]
	bl	boot_mem_relocate
	/*
	 * Reinitialize the console, since register_serial_console() has
	 * previously registered a PA and with ASLR the VA is different
	 * from the PA.
	 */
	bl	console_init
#endif

#ifdef CFG_MEMTAG
	bl	boot_clear_memtag
#endif

#ifdef CFG_NS_VIRTUALIZATION
	/*
	 * Initialize the partition tables for each partition to
	 * default_partition, which has now been relocated to a different
	 * VA.
	 */
	bl	core_mmu_set_default_prtn_tbl
#endif

	bl	boot_init_primary_early

#ifdef CFG_MEMTAG
	init_memtag_per_cpu
#endif
	bl	boot_init_primary_late
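	/*
	 * Without CFG_NS_VIRTUALIZATION: temporarily switch to the boot
	 * thread's stack (threads[0].stack_va_end) and clear the core
	 * local flags while boot_init_primary_runtime() and
	 * boot_init_primary_final() run; the temporary stack and flags
	 * are restored further down.
	 */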
#ifndef CFG_NS_VIRTUALIZATION
	mov	x23, sp
	adr_l	x0, threads
	ldr	x0, [x0, #THREAD_CTX_STACK_VA_END]
	mov	sp, x0
	bl	thread_get_core_local
	mov	x24, x0
	str	wzr, [x24, #THREAD_CORE_LOCAL_FLAGS]
#endif
	bl	boot_init_primary_runtime
#ifdef CFG_CORE_PAUTH
	adr_l	x0, threads
	ldp	x1, x2, [x0, #THREAD_CTX_KEYS]
	write_apiakeyhi x1
	write_apiakeylo x2
	mrs	x0, sctlr_el1
	orr	x0, x0, #SCTLR_ENIA
	msr	sctlr_el1, x0
	isb
#endif
	bl	boot_init_primary_final

#ifndef CFG_NS_VIRTUALIZATION
	mov	x0, #THREAD_CLF_TMP
	str	w0, [x24, #THREAD_CORE_LOCAL_FLAGS]
	mov	sp, x23
#ifdef CFG_CORE_PAUTH
	ldp	x0, x1, [x24, #THREAD_CORE_LOCAL_KEYS]
	write_apiakeyhi x0
	write_apiakeylo x1
	isb
#endif
#endif

#ifdef _CFG_CORE_STACK_PROTECTOR
	/* Update stack canary value */
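	/*
	 * plat_get_random_stack_canaries() is called below with what
	 * appears to be (buffer, number of canaries, size of each
	 * canary): a single 8-byte canary is written to a temporary
	 * stack buffer and then stored to __stack_chk_guard.
	 */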
	sub	sp, sp, #0x10
	mov	x0, sp
	mov	x1, #1
	mov	x2, #0x8
	bl	plat_get_random_stack_canaries
	ldr	x0, [sp]
	adr_l	x5, __stack_chk_guard
	str	x0, [x5]
	add	sp, sp, #0x10
#endif

	/*
	 * In case we've touched memory that secondary CPUs will use before
	 * they have turned on their D-cache, clean and invalidate the
	 * D-cache before exiting to normal world.
	 */
	adr_l	x0, __text_start
	adr_l	x1, boot_cached_mem_end
	ldr	x1, [x1]
	sub	x1, x1, x0
	bl	dcache_cleaninv_range


	/*
	 * Clear the current thread id now to allow the thread to be
	 * reused on the next entry. Matches the thread_init_boot_thread()
	 * call in boot.c.
	 */
#ifndef CFG_NS_VIRTUALIZATION
	bl	thread_clr_boot_thread
#endif

#ifdef CFG_CORE_FFA
	adr	x0, cpu_on_handler
	/*
	 * Compensate for the virtual map offset since cpu_on_handler() is
	 * called with MMU off.
	 */
	ldr	x1, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
	sub	x0, x0, x1
	bl	thread_spmc_register_secondary_ep
	b	thread_ffa_msg_wait
#else
	/*
	 * Pass the address of thread_vector_table. Compensate for the
	 * virtual map offset since, for instance, cpu_on_handler() is
	 * called via this vector with the MMU off.
	 */
	ldr	x0, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
	adr	x1, thread_vector_table
	sub	x1, x1, x0
	mov	x0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
#endif
END_FUNC _start
DECLARE_KEEP_INIT _start

#ifndef CFG_WITH_PAGER
	.section .identity_map.data
	.balign	8
LOCAL_DATA boot_embdata_ptr , :
	.skip	8
END_DATA boot_embdata_ptr
#endif

#if defined(CFG_CORE_ASLR) || defined(CFG_CORE_PHYS_RELOCATABLE)
LOCAL_FUNC relocate , :
	/*
	 * x0 holds relocate offset
	 * x1 holds load address
	 */
#ifdef CFG_WITH_PAGER
	adr_l	x6, __init_end
#else
	adr_l	x6, boot_embdata_ptr
	ldr	x6, [x6]
#endif
	ldp	w2, w3, [x6, #BOOT_EMBDATA_RELOC_OFFSET]

	add	x2, x2, x6	/* start of relocations */
	add	x3, x3, x2	/* end of relocations */

	/*
	 * Relocations are not formatted as Rela64, instead they are in a
	 * compressed format created by get_reloc_bin() in
	 * scripts/gen_tee_bin.py
	 *
	 * All the R_AARCH64_RELATIVE relocations are translated into a
	 * list of 32-bit offsets from TEE_LOAD_ADDR. Each offset points
	 * out a 64-bit value which is increased with the load offset.
	 */
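	/*
	 * C-like sketch of the loop below, for illustration only (the
	 * CFG_WITH_PAGER early exit for too large offsets is omitted):
	 *
	 *   for (uint32_t *p = reloc_start; p != reloc_end; p++) {
	 *	uint64_t *va = (uint64_t *)(load_addr + *p);
	 *
	 *	*va += reloc_offset;
	 *   }
	 */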

#ifdef CFG_WITH_PAGER
	/*
	 * With pager enabled we can only relocate the pager and init
	 * parts, the rest has to be done when a page is populated.
	 */
	sub	x6, x6, x1
#endif

	b	2f
	/* Loop over the relocation addresses and process all entries */
1:	ldr	w4, [x2], #4
#ifdef CFG_WITH_PAGER
	/* Skip too large addresses */
	cmp	x4, x6
	b.ge	2f
#endif
	add	x4, x4, x1
	ldr	x5, [x4]
	add	x5, x5, x0
	str	x5, [x4]

2:	cmp	x2, x3
	b.ne	1b

	ret
END_FUNC relocate
#endif

/*
 * void enable_mmu(unsigned long core_pos);
 *
 * This function depends on being mapped within the identity map, where
 * the physical and virtual addresses are the same. After the MMU has
 * been enabled, execution continues at the new (virtual) offset. Stack
 * pointers and the return address are updated accordingly.
 */
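/*
 * Summary of the code below: x0 is the core position and the rest of the
 * configuration is loaded from boot_mmu_config. Besides enabling the MMU
 * and the I/D caches, the function adds the load offset to VBAR_EL1,
 * SP_EL0, SP_EL1 and the return address so that the caller continues
 * executing at its virtual address.
 */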
LOCAL_FUNC enable_mmu , : , .identity_map
	adr	x1, boot_mmu_config
	load_xregs x1, 0, 2, 6
	/*
	 * x0 = core_pos
	 * x2 = tcr_el1
	 * x3 = mair_el1
	 * x4 = ttbr0_el1_base
	 * x5 = ttbr0_core_offset
	 * x6 = load_offset
	 */
	msr	tcr_el1, x2
	msr	mair_el1, x3

	/*
	 * ttbr0_el1 = ttbr0_el1_base + ttbr0_core_offset * core_pos
	 */
	madd	x1, x5, x0, x4
	msr	ttbr0_el1, x1
	msr	ttbr1_el1, xzr
	isb

	/* Invalidate TLB */
	tlbi	vmalle1

	/*
	 * Make sure translation table writes have drained into memory and
	 * the TLB invalidation is complete.
	 */
	dsb	sy
	isb

	/* Enable the MMU */
	mrs	x1, sctlr_el1
	orr	x1, x1, #SCTLR_M
	msr	sctlr_el1, x1
	isb

	/* Update vbar */
	mrs	x1, vbar_el1
	add	x1, x1, x6
	msr	vbar_el1, x1
	isb

	/* Invalidate instruction cache and branch predictor */
	ic	iallu
	isb

	/* Enable I and D cache */
	mrs	x1, sctlr_el1
	orr	x1, x1, #SCTLR_I
	orr	x1, x1, #SCTLR_C
	msr	sctlr_el1, x1
	isb

	/* Adjust stack pointers and return address */
	msr	spsel, #1
	add	sp, sp, x6
	msr	spsel, #0
	add	sp, sp, x6
	add	x30, x30, x6

	ret
END_FUNC enable_mmu

	.section .identity_map.data
	.balign	8
DATA boot_mmu_config , : /* struct core_mmu_config */
	.skip	CORE_MMU_CONFIG_SIZE
END_DATA boot_mmu_config

FUNC cpu_on_handler , :
	mov	x19, x0
	mov	x20, x1
	mov	x21, x30

	adr	x0, reset_vect_table
	msr	vbar_el1, x0
	isb

	set_sctlr_el1
	isb

#ifdef CFG_PAN
	init_pan
#endif

	/* Enable aborts now that we can receive exceptions */
	msr	daifclr, #DAIFBIT_ABT

	bl	__get_core_pos
	bl	enable_mmu

	/* Setup SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
	set_sp

#ifdef CFG_MEMTAG
	init_memtag_per_cpu
#endif
#ifdef CFG_CORE_PAUTH
	init_pauth_secondary_cpu
#endif

	mov	x0, x19
	mov	x1, x20
#ifdef CFG_CORE_FFA
	bl	boot_cpu_on_handler
	b	thread_ffa_msg_wait
#else
	mov	x30, x21
	b	boot_cpu_on_handler
#endif
END_FUNC cpu_on_handler
DECLARE_KEEP_PAGER cpu_on_handler

LOCAL_FUNC unhandled_cpu , :
	wfi
	b	unhandled_cpu
END_FUNC unhandled_cpu

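	/*
	 * stack_tmp_rel below stores the link-time constant
	 * "stack_tmp - stack_tmp_rel - STACK_TMP_GUARD", i.e. an offset
	 * relative to stack_tmp_rel itself, which lets set_sp compute the
	 * stack address with adr_l plus an add and keeps the computation
	 * position independent.
	 */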
LOCAL_DATA stack_tmp_rel , :
	.word	stack_tmp - stack_tmp_rel - STACK_TMP_GUARD
END_DATA stack_tmp_rel

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes
	 * the vector entry as the parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm

	.section .identity_map, "ax", %progbits
	.align	11
LOCAL_FUNC reset_vect_table , :, .identity_map, , nobti
	/* -----------------------------------------------------
	 * Current EL with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
SynchronousExceptionSP0:
	b	SynchronousExceptionSP0
	check_vector_size SynchronousExceptionSP0

	.align	7
IrqSP0:
	b	IrqSP0
	check_vector_size IrqSP0

	.align	7
FiqSP0:
	b	FiqSP0
	check_vector_size FiqSP0

	.align	7
SErrorSP0:
	b	SErrorSP0
	check_vector_size SErrorSP0

	/* -----------------------------------------------------
	 * Current EL with SPx: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionSPx:
	b	SynchronousExceptionSPx
	check_vector_size SynchronousExceptionSPx

	.align	7
IrqSPx:
	b	IrqSPx
	check_vector_size IrqSPx

	.align	7
FiqSPx:
	b	FiqSPx
	check_vector_size FiqSPx

	.align	7
SErrorSPx:
	b	SErrorSPx
	check_vector_size SErrorSPx

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionA64:
	b	SynchronousExceptionA64
	check_vector_size SynchronousExceptionA64

	.align	7
IrqA64:
	b	IrqA64
	check_vector_size IrqA64

	.align	7
FiqA64:
	b	FiqA64
	check_vector_size FiqA64

	.align	7
SErrorA64:
	b	SErrorA64
	check_vector_size SErrorA64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionA32:
	b	SynchronousExceptionA32
	check_vector_size SynchronousExceptionA32

	.align	7
IrqA32:
	b	IrqA32
	check_vector_size IrqA32

	.align	7
FiqA32:
	b	FiqA32
	check_vector_size FiqA32

	.align	7
SErrorA32:
	b	SErrorA32
	check_vector_size SErrorA32

END_FUNC reset_vect_table

BTI(emit_aarch64_feature_1_and     GNU_PROPERTY_AARCH64_FEATURE_1_BTI)
