/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015-2022, Linaro Limited
 * Copyright (c) 2021-2023, Arm Limited
 */

#include <platform_config.h>

#include <arm64_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

	/*
	 * Setup SP_EL0 and SP_EL1, SP will be set to SP_EL0.
	 * SP_EL0 is assigned:
	 *   stack_tmp + (cpu_id + 1) * stack_tmp_stride - STACK_TMP_GUARD
	 * SP_EL1 is assigned thread_core_local[cpu_id]
	 */
	.macro set_sp
		bl	__get_core_pos
		cmp	x0, #CFG_TEE_CORE_NB_CORE
		/* Unsupported CPU, park it before it breaks something */
		bge	unhandled_cpu
		add	x0, x0, #1
		adr_l	x1, stack_tmp_stride
		ldr	w1, [x1]
		mul	x1, x0, x1

		/* x0 = stack_tmp - STACK_TMP_GUARD */
		adr_l	x2, stack_tmp_rel
		ldr	w0, [x2]
		add	x0, x0, x2

		msr	spsel, #0
		add	sp, x1, x0
		bl	thread_get_core_local
		msr	spsel, #1
		mov	sp, x0
		msr	spsel, #0
	.endm
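	/*
	 * For illustration, with the assignments above core 0 gets
	 * SP_EL0 = stack_tmp + 1 * stack_tmp_stride - STACK_TMP_GUARD and
	 * SP_EL1 = &thread_core_local[0], giving each core its own
	 * temporary stack slot and its own thread_core_local entry.
	 */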

	.macro read_feat_mte reg
		mrs	\reg, id_aa64pfr1_el1
		ubfx	\reg, \reg, #ID_AA64PFR1_EL1_MTE_SHIFT, #4
	.endm

	.macro read_feat_pan reg
		mrs	\reg, id_mmfr3_el1
		ubfx	\reg, \reg, #ID_MMFR3_EL1_PAN_SHIFT, #4
	.endm

	.macro set_sctlr_el1
		mrs	x0, sctlr_el1
		orr	x0, x0, #SCTLR_I
		orr	x0, x0, #SCTLR_SA
		orr	x0, x0, #SCTLR_SPAN
#if defined(CFG_CORE_RWDATA_NOEXEC)
		orr	x0, x0, #SCTLR_WXN
#endif
#if defined(CFG_SCTLR_ALIGNMENT_CHECK)
		orr	x0, x0, #SCTLR_A
#else
		bic	x0, x0, #SCTLR_A
#endif
#ifdef CFG_MEMTAG
		read_feat_mte x1
		cmp	w1, #1
		b.ls	111f
		orr	x0, x0, #(SCTLR_ATA | SCTLR_ATA0)
		bic	x0, x0, #SCTLR_TCF_MASK
		bic	x0, x0, #SCTLR_TCF0_MASK
111:
#endif
#if defined(CFG_TA_PAUTH) && defined(CFG_TA_BTI)
		orr	x0, x0, #SCTLR_BT0
#endif
#if defined(CFG_CORE_PAUTH) && defined(CFG_CORE_BTI)
		orr	x0, x0, #SCTLR_BT1
#endif
		msr	sctlr_el1, x0
	.endm

	.macro init_memtag_per_cpu
		read_feat_mte x0
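		/* Skip unless FEAT_MTE2 (ID_AA64PFR1_EL1.MTE >= 2) is implemented */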
		cmp	w0, #1
		b.ls	11f

#ifdef CFG_TEE_CORE_DEBUG
		/*
		 * This together with GCR_EL1.RRND = 0 will make the tags
		 * acquired with the irg instruction deterministic.
		 */
		mov_imm	x0, 0xcafe00
		msr	rgsr_el1, x0
		/* Avoid tag = 0x0 and 0xf */
		mov	x0, #0
#else
		/*
		 * Still avoid tag = 0x0 and 0xf as we use that tag for
		 * everything which isn't explicitly tagged. Set
		 * GCR_EL1.RRND = 1 to allow an implementation-specific
		 * method of generating the tags.
		 */
		mov	x0, #GCR_EL1_RRND
#endif
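		/* GCR_EL1.Exclude bits 0 and 15: never generate tags 0x0 or 0xf */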
		orr	x0, x0, #1
		orr	x0, x0, #(1 << 15)
		msr	gcr_el1, x0

		/*
		 * Enable the tag checks on the current CPU.
		 *
		 * Depends on boot_init_memtag() having cleared tags for
		 * TEE core memory. Well, not really, addresses with the
		 * tag value 0b0000 will use unchecked access due to
		 * TCR_TCMA0.
		 */
		mrs	x0, tcr_el1
		orr	x0, x0, #TCR_TBI0
		orr	x0, x0, #TCR_TCMA0
		msr	tcr_el1, x0

		mrs	x0, sctlr_el1
		orr	x0, x0, #SCTLR_TCF_SYNC
		orr	x0, x0, #SCTLR_TCF0_SYNC
		msr	sctlr_el1, x0

		isb
11:
	.endm

	.macro init_pauth_secondary_cpu
		msr	spsel, #1
		ldp	x0, x1, [sp, #THREAD_CORE_LOCAL_KEYS]
		msr	spsel, #0
		write_apiakeyhi x0
		write_apiakeylo x1
		mrs	x0, sctlr_el1
		orr	x0, x0, #SCTLR_ENIA
		msr	sctlr_el1, x0
		isb
	.endm

	.macro init_pan
		read_feat_pan x0
		cmp	x0, #0
		b.eq	1f
		mrs	x0, sctlr_el1
		bic	x0, x0, #SCTLR_SPAN
		msr	sctlr_el1, x0
		write_pan_enable
	1:
	.endm

FUNC _start , :
	/*
	 * Temporary copy of boot argument registers, to be passed to
	 * boot_save_args() further down.
	 */
	mov	x19, x0
	mov	x20, x1
	mov	x21, x2
	mov	x22, x3

	adr	x0, reset_vect_table
	msr	vbar_el1, x0
	isb

#ifdef CFG_PAN
	init_pan
#endif

	set_sctlr_el1
	isb

#ifdef CFG_WITH_PAGER
	/*
	 * Move init code into correct location and move hashes to a
	 * temporary safe location until the heap is initialized.
	 *
	 * The binary is built as:
	 * [Pager code, rodata and data] : In correct location
	 * [Init code and rodata] : Should be copied to __init_start
	 * [struct boot_embdata + data] : Should be saved before
	 * initializing pager, the first uint32_t tells the length of the data
	 */
	adr	x0, __init_start	/* dst */
	adr	x1, __data_end		/* src */
	adr	x2, __init_end
	sub	x2, x2, x0		/* init len */
	ldr	w4, [x1, x2]		/* length of hashes etc */
	add	x2, x2, x4		/* length of init and hashes etc */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	x0, x0, x2		/* __init_start + len */
	add	x1, x1, x2		/* __data_end + len */
	adr_l	x3, boot_cached_mem_end
	str	x0, [x3]
	adr	x2, __init_start
copy_init:
	ldp	x3, x4, [x1, #-16]!
	stp	x3, x4, [x0, #-16]!
	cmp	x0, x2
	b.gt	copy_init
#else
	/*
	 * The binary is built as:
	 * [Core, rodata and data] : In correct location
	 * [struct boot_embdata + data] : Should be moved to right before
	 * __vcore_free_end, the first uint32_t tells the length of the
	 * struct + data
	 */
	adr_l	x1, __data_end		/* src */
	ldr	w2, [x1]		/* struct boot_embdata::total_len */
	/* dst */
	adr_l	x0, __vcore_free_end
	sub	x0, x0, x2
	/* round down to beginning of page */
	bic	x0, x0, #(SMALL_PAGE_SIZE - 1)
	adr_l	x3, boot_embdata_ptr
	str	x0, [x3]

	/* Copy backwards (as memmove) in case we're overlapping */
	add	x1, x1, x2
	add	x2, x0, x2
	adr_l	x3, boot_cached_mem_end
	str	x2, [x3]

copy_init:
	ldp	x3, x4, [x1, #-16]!
	stp	x3, x4, [x2, #-16]!
	cmp	x2, x0
	b.gt	copy_init
#endif

	/*
	 * Clear .bss, this code obviously depends on the linker keeping
	 * start/end of .bss at least 8 byte aligned.
	 */
	adr_l	x0, __bss_start
	adr_l	x1, __bss_end
clear_bss:
	str	xzr, [x0], #8
	cmp	x0, x1
	b.lt	clear_bss

#ifdef CFG_NS_VIRTUALIZATION
	/*
	 * Clear .nex_bss, this code obviously depends on the linker keeping
	 * start/end of .nex_bss at least 8 byte aligned.
	 */
	adr_l	x0, __nex_bss_start
	adr_l	x1, __nex_bss_end
clear_nex_bss:
	str	xzr, [x0], #8
	cmp	x0, x1
	b.lt	clear_nex_bss
#endif


#if defined(CFG_CORE_PHYS_RELOCATABLE)
	/*
	 * Save the base physical address, it will not change after this
	 * point.
	 */
	adr_l	x2, core_mmu_tee_load_pa
	adr	x1, _start		/* Load address */
	str	x1, [x2]

	mov_imm	x0, TEE_LOAD_ADDR	/* Compiled load address */
	sub	x0, x1, x0		/* Relocation offset */

	cbz	x0, 1f
	bl	relocate
1:
#endif

	/* Setup SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
	set_sp

	/* Initialize thread_core_local[0] for early boot */
	bl	thread_get_abt_stack
	mov	x1, sp
	msr	spsel, #1
	str	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	str	x0, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
#ifdef CFG_CORE_DEBUG_CHECK_STACKS
	mov	x0, #1
	strb	w0, [sp, #THREAD_CORE_LOCAL_STACKCHECK_RECURSION]
#endif
	mov	x0, #THREAD_ID_INVALID
	str	x0, [sp, #THREAD_CORE_LOCAL_CURR_THREAD]
	mov	w0, #THREAD_CLF_TMP
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/* Enable aborts now that we can receive exceptions */
	msr	daifclr, #DAIFBIT_ABT

	/*
	 * Invalidate dcache for all memory used during initialization to
	 * avoid nasty surprises when the cache is turned on. We must not
	 * invalidate memory not used by OP-TEE since we may invalidate
	 * entries used by, for instance, ARM Trusted Firmware.
	 */
	adr_l	x0, __text_start
	adr_l	x1, boot_cached_mem_end
	ldr	x1, [x1]
	sub	x1, x1, x0
	bl	dcache_cleaninv_range

	/* Enable Console */
	bl	console_init

	mov	x0, x19
	mov	x1, x20
	mov	x2, x21
	mov	x3, x22
	mov	x4, xzr
	bl	boot_save_args

#ifdef CFG_WITH_PAGER
	adr_l	x0, __init_end	/* pointer to boot_embdata */
	ldr	w1, [x0]	/* struct boot_embdata::total_len */
	add	x0, x0, x1
	add	x0, x0, #0xfff	/* round up */
	bic	x0, x0, #0xfff  /* to next page */
	mov_imm x1, (TEE_RAM_PH_SIZE + TEE_RAM_START)
	mov	x2, x1
#else
	adr_l	x0, __vcore_free_start
	adr_l	x1, boot_embdata_ptr
	ldr	x1, [x1]
	adr_l	x2, __vcore_free_end
#endif
	bl	boot_mem_init

#ifdef CFG_MEMTAG
	/*
	 * If FEAT_MTE2 is available, initialize the memtag callbacks.
	 * Tags for OP-TEE core memory are then cleared to make it safe to
	 * enable MEMTAG below.
	 */
	bl	boot_init_memtag
#endif

#ifdef CFG_CORE_ASLR
	bl	get_aslr_seed
#ifdef CFG_CORE_ASLR_SEED
	mov_imm	x0, CFG_CORE_ASLR_SEED
#endif
#else
	mov	x0, #0
#endif

	adr	x1, boot_mmu_config
	bl	core_init_mmu_map

#ifdef CFG_CORE_ASLR
	/*
	 * Process relocation information again, updating for the virtual
	 * map offset. We're doing this now before MMU is enabled as some
	 * of the memory will become write protected.
	 */
	ldr	x0, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
	cbz	x0, 1f
	/*
	 * Update boot_cached_mem_end address with load offset since it was
	 * calculated before relocation.
	 */
	adr_l	x5, boot_cached_mem_end
	ldr	x6, [x5]
	add	x6, x6, x0
	str	x6, [x5]
	adr	x1, _start		/* Load address */
	bl	relocate
1:
#endif

	bl	__get_core_pos
	bl	enable_mmu
#ifdef CFG_CORE_ASLR
	adr_l	x0, boot_mmu_config
	ldr	x0, [x0, #CORE_MMU_CONFIG_MAP_OFFSET]
	bl	boot_mem_relocate
	/*
	 * Update recorded end_va.
	 */
	adr_l	x0, boot_mmu_config
	ldr	x0, [x0, #CORE_MMU_CONFIG_MAP_OFFSET]
	msr	spsel, #1
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	add	x1, x1, x0
	str	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	add	x1, x1, x0
	str	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
#ifdef CFG_CORE_DEBUG_CHECK_STACKS
	strb	wzr, [sp, #THREAD_CORE_LOCAL_STACKCHECK_RECURSION]
#endif
	msr	spsel, #0
	/*
	 * Reinitialize console, since register_serial_console() has
	 * previously registered a PA and with ASLR the VA is different
	 * from the PA.
	 */
	bl	console_init
#endif

#ifdef CFG_MEMTAG
	bl	boot_clear_memtag
#endif

#ifdef CFG_NS_VIRTUALIZATION
	/*
	 * Initialize partition tables for each partition to
	 * default_partition which has now been relocated to a different VA
	 */
	bl	core_mmu_set_default_prtn_tbl
#endif

	bl	boot_init_primary_early

#ifdef CFG_MEMTAG
	init_memtag_per_cpu
#endif
	bl	boot_init_primary_late
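	/*
	 * Without virtualization, sp is temporarily switched below to the
	 * boot thread's stack (threads[0].stack_va_end) and the core-local
	 * flags are cleared while boot_init_primary_final() runs; x23
	 * keeps the temporary stack and x24 the core-local pointer so
	 * both can be restored right after the call.
	 */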
#ifndef CFG_NS_VIRTUALIZATION
	mov	x23, sp
	adr_l	x0, threads
	ldr	x0, [x0, #THREAD_CTX_STACK_VA_END]
	mov	sp, x0
	bl	thread_get_core_local
	mov	x24, x0
	str	wzr, [x24, #THREAD_CORE_LOCAL_FLAGS]
#endif
#ifdef CFG_CORE_PAUTH
	adr_l	x0, threads
	ldp	x1, x2, [x0, #THREAD_CTX_KEYS]
	write_apiakeyhi x1
	write_apiakeylo x2
	mrs	x0, sctlr_el1
	orr	x0, x0, #SCTLR_ENIA
	msr	sctlr_el1, x0
	isb
#endif
	bl	boot_init_primary_final

#ifndef CFG_NS_VIRTUALIZATION
	mov	x0, #THREAD_CLF_TMP
	str	w0, [x24, #THREAD_CORE_LOCAL_FLAGS]
	mov	sp, x23
#ifdef CFG_CORE_PAUTH
	ldp	x0, x1, [x24, #THREAD_CORE_LOCAL_KEYS]
	write_apiakeyhi x0
	write_apiakeylo x1
	isb
#endif
#endif

#ifdef _CFG_CORE_STACK_PROTECTOR
	/* Update stack canary value */
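	/*
	 * plat_get_random_stack_canaries() is given a 16-byte scratch
	 * buffer carved out on the stack; judging by the arguments below,
	 * x1 and x2 are presumably the number of canaries (1) and the
	 * size of each (8 bytes). The resulting value is then published
	 * in __stack_chk_guard.
	 */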
	sub	sp, sp, #0x10
	mov	x0, sp
	mov	x1, #1
	mov	x2, #0x8
	bl	plat_get_random_stack_canaries
	ldr	x0, [sp]
	adr_l	x5, __stack_chk_guard
	str	x0, [x5]
	add	sp, sp, #0x10
#endif

	/*
	 * In case we've touched memory that secondary CPUs will use before
	 * they have turned on their D-cache, clean and invalidate the
	 * D-cache before exiting to normal world.
	 */
	adr_l	x0, __text_start
	adr_l	x1, boot_cached_mem_end
	ldr	x1, [x1]
	sub	x1, x1, x0
	bl	dcache_cleaninv_range


	/*
	 * Clear current thread id now to allow the thread to be reused on
	 * next entry. Matches thread_init_boot_thread() in boot.c.
	 */
#ifndef CFG_NS_VIRTUALIZATION
	bl	thread_clr_boot_thread
#endif

#ifdef CFG_CORE_FFA
	adr	x0, cpu_on_handler
	/*
	 * Compensate for the virtual map offset since cpu_on_handler() is
	 * called with MMU off.
	 */
	ldr	x1, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
	sub	x0, x0, x1
	bl	thread_spmc_register_secondary_ep
	b	thread_ffa_msg_wait
#else
	/*
	 * Pass the vector address returned from main_init. Compensate for
	 * the virtual map offset since cpu_on_handler() is called with MMU
	 * off.
	 */
	ldr	x0, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
	adr	x1, thread_vector_table
	sub	x1, x1, x0
	mov	x0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
#endif
END_FUNC _start
DECLARE_KEEP_INIT _start

#ifndef CFG_WITH_PAGER
	.section .identity_map.data
	.balign	8
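/*
 * Physical address that struct boot_embdata was copied to by the boot
 * code in _start above, consumed by relocate() below.
 */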
LOCAL_DATA boot_embdata_ptr , :
	.skip	8
END_DATA boot_embdata_ptr
#endif

#if defined(CFG_CORE_ASLR) || defined(CFG_CORE_PHYS_RELOCATABLE)
LOCAL_FUNC relocate , :
	/*
	 * x0 holds relocate offset
	 * x1 holds load address
	 */
#ifdef CFG_WITH_PAGER
	adr_l	x6, __init_end
#else
	adr_l	x6, boot_embdata_ptr
	ldr	x6, [x6]
#endif
	ldp	w2, w3, [x6, #BOOT_EMBDATA_RELOC_OFFSET]

	add	x2, x2, x6	/* start of relocations */
	add	x3, x3, x2	/* end of relocations */

	/*
	 * Relocations are not formatted as Rela64, instead they are in a
	 * compressed format created by get_reloc_bin() in
	 * scripts/gen_tee_bin.py
	 *
	 * All the R_AARCH64_RELATIVE relocations are translated into a
	 * list of 32-bit offsets from TEE_LOAD_ADDR. Each such address
	 * holds a 64-bit value which is increased with the load offset.
	 */
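	/*
	 * Equivalent C sketch of the loop below (illustration only, names
	 * are descriptive, and the pager's extra address filtering is
	 * omitted):
	 *
	 *	uint32_t *rel = start_of_relocations;
	 *
	 *	while (rel != end_of_relocations) {
	 *		uint64_t *p = (uint64_t *)(load_address + *rel++);
	 *		*p += relocate_offset;
	 *	}
	 */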

#ifdef CFG_WITH_PAGER
	/*
	 * With pager enabled we can only relocate the pager and init
	 * parts, the rest has to be done when a page is populated.
	 */
	sub	x6, x6, x1
#endif

	b	2f
	/* Loop over the relocation addresses and process all entries */
1:	ldr	w4, [x2], #4
#ifdef CFG_WITH_PAGER
	/* Skip too large addresses */
	cmp	x4, x6
	b.ge	2f
#endif
	add	x4, x4, x1
	ldr	x5, [x4]
	add	x5, x5, x0
	str	x5, [x4]

2:	cmp	x2, x3
	b.ne	1b

	ret
END_FUNC relocate
#endif

/*
 * void enable_mmu(unsigned long core_pos);
 *
 * This function depends on being mapped within the identity map, where
 * the physical and virtual addresses are the same. After the MMU has
 * been enabled the instruction pointer continues executing at the new
 * (virtual) offset. Stack pointers and the return address are updated.
 */
LOCAL_FUNC enable_mmu , : , .identity_map
	adr	x1, boot_mmu_config
	load_xregs x1, 0, 2, 6
	/*
	 * x0 = core_pos
	 * x2 = tcr_el1
	 * x3 = mair_el1
	 * x4 = ttbr0_el1_base
	 * x5 = ttbr0_core_offset
	 * x6 = load_offset
	 */
	msr	tcr_el1, x2
	msr	mair_el1, x3

	/*
	 * ttbr0_el1 = ttbr0_el1_base + ttbr0_core_offset * core_pos
	 */
	madd	x1, x5, x0, x4
	msr	ttbr0_el1, x1
	msr	ttbr1_el1, xzr
	isb

	/* Invalidate TLB */
	tlbi	vmalle1

	/*
	 * Make sure translation table writes have drained into memory and
	 * the TLB invalidation is complete.
	 */
	dsb	sy
	isb

	/* Enable the MMU */
	mrs	x1, sctlr_el1
	orr	x1, x1, #SCTLR_M
	msr	sctlr_el1, x1
	isb

	/* Update vbar */
	mrs	x1, vbar_el1
	add	x1, x1, x6
	msr	vbar_el1, x1
	isb

	/* Invalidate instruction cache and branch predictor */
	ic	iallu
	isb

	/* Enable I and D cache */
	mrs	x1, sctlr_el1
	orr	x1, x1, #SCTLR_I
	orr	x1, x1, #SCTLR_C
	msr	sctlr_el1, x1
	isb

	/* Adjust stack pointers and return address */
	msr	spsel, #1
	add	sp, sp, x6
	msr	spsel, #0
	add	sp, sp, x6
	add	x30, x30, x6

	ret
END_FUNC enable_mmu

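/*
 * boot_mmu_config is passed to core_init_mmu_map() from _start above and
 * consumed by enable_mmu(). It lives in the identity-mapped data section
 * so it remains accessible while the MMU is being set up and enabled.
 */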
	.section .identity_map.data
	.balign	8
DATA boot_mmu_config , : /* struct core_mmu_config */
	.skip	CORE_MMU_CONFIG_SIZE
END_DATA boot_mmu_config

FUNC cpu_on_handler , :
	mov	x19, x0
	mov	x20, x1
	mov	x21, x30

	adr	x0, reset_vect_table
	msr	vbar_el1, x0
	isb

	set_sctlr_el1
	isb

#ifdef CFG_PAN
	init_pan
#endif

	/* Enable aborts now that we can receive exceptions */
	msr	daifclr, #DAIFBIT_ABT

	bl	__get_core_pos
	bl	enable_mmu

	/* Setup SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
	set_sp

#ifdef CFG_MEMTAG
	init_memtag_per_cpu
#endif
#ifdef CFG_CORE_PAUTH
	init_pauth_secondary_cpu
#endif

	mov	x0, x19
	mov	x1, x20
#ifdef CFG_CORE_FFA
	bl	boot_cpu_on_handler
	b	thread_ffa_msg_wait
#else
	mov	x30, x21
	b	boot_cpu_on_handler
#endif
END_FUNC cpu_on_handler
DECLARE_KEEP_PAGER cpu_on_handler

LOCAL_FUNC unhandled_cpu , :
	wfi
	b	unhandled_cpu
END_FUNC unhandled_cpu

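/*
 * stack_tmp_rel holds a position-independent 32-bit delta: set_sp adds
 * the address of stack_tmp_rel back to it to recover
 * stack_tmp - STACK_TMP_GUARD regardless of where the binary was loaded.
 */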
LOCAL_DATA stack_tmp_rel , :
	.word	stack_tmp - stack_tmp_rel - STACK_TMP_GUARD
END_DATA stack_tmp_rel

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes
	 * the vector entry as its parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm
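	/*
	 * Each vector slot below is 0x80 bytes, i.e. 32 4-byte
	 * instructions, which is what the .align 7 directives enforce.
	 */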

	.section .identity_map, "ax", %progbits
	.align	11
LOCAL_FUNC reset_vect_table , :, .identity_map, , nobti
	/* -----------------------------------------------------
	 * Current EL with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
SynchronousExceptionSP0:
	b	SynchronousExceptionSP0
	check_vector_size SynchronousExceptionSP0

	.align	7
IrqSP0:
	b	IrqSP0
	check_vector_size IrqSP0

	.align	7
FiqSP0:
	b	FiqSP0
	check_vector_size FiqSP0

	.align	7
SErrorSP0:
	b	SErrorSP0
	check_vector_size SErrorSP0

	/* -----------------------------------------------------
	 * Current EL with SPx: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionSPx:
	b	SynchronousExceptionSPx
	check_vector_size SynchronousExceptionSPx

	.align	7
IrqSPx:
	b	IrqSPx
	check_vector_size IrqSPx

	.align	7
FiqSPx:
	b	FiqSPx
	check_vector_size FiqSPx

	.align	7
SErrorSPx:
	b	SErrorSPx
	check_vector_size SErrorSPx

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionA64:
	b	SynchronousExceptionA64
	check_vector_size SynchronousExceptionA64

	.align	7
IrqA64:
	b	IrqA64
	check_vector_size IrqA64

	.align	7
FiqA64:
	b	FiqA64
	check_vector_size FiqA64

	.align	7
SErrorA64:
	b	SErrorA64
	check_vector_size SErrorA64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionA32:
	b	SynchronousExceptionA32
	check_vector_size SynchronousExceptionA32

	.align	7
IrqA32:
	b	IrqA32
	check_vector_size IrqA32

	.align	7
FiqA32:
	b	FiqA32
	check_vector_size FiqA32

	.align	7
SErrorA32:
	b	SErrorA32
	check_vector_size SErrorA32

END_FUNC reset_vect_table

BTI(emit_aarch64_feature_1_and     GNU_PROPERTY_AARCH64_FEATURE_1_BTI)
