/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015-2022, Linaro Limited
 * Copyright (c) 2021-2023, Arm Limited
 */

#include <platform_config.h>

#include <arm64_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

	/*
	 * Set up SP_EL0 and SP_EL1, SP will be set to SP_EL0.
	 * SP_EL0 is assigned:
	 *   stack_tmp + (cpu_id + 1) * stack_tmp_stride - STACK_TMP_GUARD
	 * SP_EL1 is assigned thread_core_local[cpu_id]
	 */
	.macro set_sp
		bl	__get_core_pos
		cmp	x0, #CFG_TEE_CORE_NB_CORE
		/* Unsupported CPU, park it before it breaks something */
		bge	unhandled_cpu
		add	x0, x0, #1
		adr_l	x1, stack_tmp_stride
		ldr	w1, [x1]
		mul	x1, x0, x1

		/* x0 = stack_tmp - STACK_TMP_GUARD */
		adr_l	x2, stack_tmp_rel
		ldr	w0, [x2]
		add	x0, x0, x2

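		/*
		 * SPSel selects the current stack pointer: 0 selects SP_EL0,
		 * 1 selects SP_ELx (SP_EL1 when running at EL1).
		 */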
		msr	spsel, #0
		add	sp, x1, x0
		bl	thread_get_core_local
		msr	spsel, #1
		mov	sp, x0
		msr	spsel, #0
	.endm

	.macro read_feat_mte reg
		mrs	\reg, id_aa64pfr1_el1
		ubfx	\reg, \reg, #ID_AA64PFR1_EL1_MTE_SHIFT, #4
	.endm

	.macro read_feat_pan reg
		mrs	\reg, id_mmfr3_el1
		ubfx	\reg, \reg, #ID_MMFR3_EL1_PAN_SHIFT, #4
	.endm

	.macro set_sctlr_el1
		mrs	x0, sctlr_el1
		orr	x0, x0, #SCTLR_I
		orr	x0, x0, #SCTLR_SA
		orr	x0, x0, #SCTLR_SPAN
#if defined(CFG_CORE_RWDATA_NOEXEC)
		orr	x0, x0, #SCTLR_WXN
#endif
#if defined(CFG_SCTLR_ALIGNMENT_CHECK)
		orr	x0, x0, #SCTLR_A
#else
		bic	x0, x0, #SCTLR_A
#endif
#ifdef CFG_MEMTAG
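		/*
		 * ID_AA64PFR1_EL1.MTE >= 2 means FEAT_MTE2 (tag checking in
		 * memory) is implemented; a value of 1 (FEAT_MTE) only
		 * provides the tag manipulation instructions.
		 */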
		read_feat_mte x1
		cmp	w1, #1
		b.ls	111f
		orr	x0, x0, #(SCTLR_ATA | SCTLR_ATA0)
		bic	x0, x0, #SCTLR_TCF_MASK
		bic	x0, x0, #SCTLR_TCF0_MASK
111:
#endif
#if defined(CFG_TA_PAUTH) && defined(CFG_TA_BTI)
		orr	x0, x0, #SCTLR_BT0
#endif
#if defined(CFG_CORE_PAUTH) && defined(CFG_CORE_BTI)
		orr	x0, x0, #SCTLR_BT1
#endif
		msr	sctlr_el1, x0
	.endm

	.macro init_memtag_per_cpu
		read_feat_mte x0
		cmp	w0, #1
		b.ls	11f

#ifdef CFG_TEE_CORE_DEBUG
		/*
		 * This together with GCR_EL1.RRND = 0 will make the tags
		 * acquired with the irg instruction deterministic.
		 */
		mov_imm	x0, 0xcafe00
		msr	rgsr_el1, x0
		/* Avoid tag = 0x0 and 0xf */
		mov	x0, #0
#else
		/*
		 * Still avoid tag = 0x0 and 0xf as we use that tag for
		 * everything which isn't explicitly tagged. Set
		 * GCR_EL1.RRND = 1 to allow an implementation specific
		 * method of generating the tags.
		 */
		mov	x0, #GCR_EL1_RRND
#endif
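		/* GCR_EL1.Exclude: exclude tag 0x0 (bit 0) and 0xf (bit 15) */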
		orr	x0, x0, #1
		orr	x0, x0, #(1 << 15)
		msr	gcr_el1, x0

		/*
		 * Enable the tag checks on the current CPU.
		 *
		 * Depends on boot_init_memtag() having cleared tags for
		 * TEE core memory. Well, not really, addresses with the
		 * tag value 0b0000 will use unchecked access due to
		 * TCR_TCMA0.
		 */
		mrs	x0, tcr_el1
		orr	x0, x0, #TCR_TBI0
		orr	x0, x0, #TCR_TCMA0
		msr	tcr_el1, x0

		mrs	x0, sctlr_el1
		orr	x0, x0, #SCTLR_TCF_SYNC
		orr	x0, x0, #SCTLR_TCF0_SYNC
		msr	sctlr_el1, x0

		isb
11:
	.endm

	.macro init_pauth_secondary_cpu
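		/*
		 * Load the APIA key stored in this core's thread_core_local
		 * (reached via SP_EL1) and enable instruction address
		 * authentication with SCTLR_EL1.EnIA.
		 */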
		msr	spsel, #1
		ldp	x0, x1, [sp, #THREAD_CORE_LOCAL_KEYS]
		msr	spsel, #0
		write_apiakeyhi x0
		write_apiakeylo x1
		mrs	x0, sctlr_el1
		orr	x0, x0, #SCTLR_ENIA
		msr	sctlr_el1, x0
		isb
	.endm

	.macro init_pan
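		/*
		 * If PAN is implemented, clear SCTLR_EL1.SPAN so that
		 * PSTATE.PAN is set automatically on exception entry to EL1,
		 * then enable PSTATE.PAN right away.
		 */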
		read_feat_pan x0
		cmp	x0, #0
		b.eq	1f
		mrs	x0, sctlr_el1
		bic	x0, x0, #SCTLR_SPAN
		msr	sctlr_el1, x0
		write_pan_enable
	1:
	.endm

FUNC _start , :
	/*
	 * Temporary copy of boot argument registers, will be passed to
	 * boot_save_args() further down.
	 */
	mov	x19, x0
	mov	x20, x1
	mov	x21, x2
	mov	x22, x3

	adr	x0, reset_vect_table
	msr	vbar_el1, x0
	isb

#ifdef CFG_PAN
	init_pan
#endif

	set_sctlr_el1
	isb

#ifdef CFG_WITH_PAGER
	/*
	 * Move init code into correct location and move hashes to a
	 * temporary safe location until the heap is initialized.
	 *
	 * The binary is built as:
	 * [Pager code, rodata and data] : In correct location
	 * [Init code and rodata] : Should be copied to __init_start
	 * [struct boot_embdata + data] : Should be saved before
	 * initializing pager, first uint32_t tells the length of the data
	 */
	adr	x0, __init_start	/* dst */
	adr	x1, __data_end		/* src */
	adr	x2, __init_end
	sub	x2, x2, x0		/* init len */
	ldr	w4, [x1, x2]		/* length of hashes etc */
	add	x2, x2, x4		/* length of init and hashes etc */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	x0, x0, x2		/* __init_start + len */
	add	x1, x1, x2		/* __data_end + len */
	adr_l	x3, boot_cached_mem_end
	str	x0, [x3]
	adr	x2, __init_start
copy_init:
	ldp	x3, x4, [x1, #-16]!
	stp	x3, x4, [x0, #-16]!
	cmp	x0, x2
	b.gt	copy_init
#else
	/*
	 * The binary is built as:
	 * [Core, rodata and data] : In correct location
	 * [struct boot_embdata + data] : Should be moved to right before
	 * __vcore_free_end, the first uint32_t tells the length of the
	 * struct + data
	 */
	adr_l	x1, __data_end		/* src */
	ldr	w2, [x1]		/* struct boot_embdata::total_len */
	/* dst */
	adr_l	x0, __vcore_free_end
	sub	x0, x0, x2
	/* round down to beginning of page */
	bic	x0, x0, #(SMALL_PAGE_SIZE - 1)
	adr_l	x3, boot_embdata_ptr
	str	x0, [x3]

	/* Copy backwards (as memmove) in case we're overlapping */
	add	x1, x1, x2
	add	x2, x0, x2
	adr_l	x3, boot_cached_mem_end
	str	x2, [x3]

copy_init:
	ldp	x3, x4, [x1, #-16]!
	stp	x3, x4, [x2, #-16]!
	cmp	x2, x0
	b.gt	copy_init
#endif

	/*
	 * Clear .bss, this code obviously depends on the linker keeping
	 * start/end of .bss at least 8 byte aligned.
	 */
	adr_l	x0, __bss_start
	adr_l	x1, __bss_end
clear_bss:
	str	xzr, [x0], #8
	cmp	x0, x1
	b.lt	clear_bss

#ifdef CFG_NS_VIRTUALIZATION
	/*
	 * Clear .nex_bss, this code obviously depends on the linker keeping
	 * start/end of .nex_bss at least 8 byte aligned.
	 */
	adr_l	x0, __nex_bss_start
	adr_l	x1, __nex_bss_end
clear_nex_bss:
	str	xzr, [x0], #8
	cmp	x0, x1
	b.lt	clear_nex_bss
#endif


#if defined(CFG_CORE_PHYS_RELOCATABLE)
	/*
	 * Save the base physical address, it will not change after this
	 * point.
	 */
	adr_l	x2, core_mmu_tee_load_pa
	adr	x1, _start		/* Load address */
	str	x1, [x2]

	mov_imm	x0, TEE_LOAD_ADDR	/* Compiled load address */
	sub	x0, x1, x0		/* Relocation offset */

	cbz	x0, 1f
	bl	relocate
1:
#endif

	/* Set up SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
	set_sp

	/* Initialize thread_core_local[0] for early boot */
	bl	thread_get_abt_stack
	mov	x1, sp
	msr	spsel, #1
	str	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	str	x0, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
#ifdef CFG_CORE_DEBUG_CHECK_STACKS
	mov	x0, #1
	strb	w0, [sp, #THREAD_CORE_LOCAL_STACKCHECK_RECURSION]
#endif
	mov	x0, #THREAD_ID_INVALID
	str	x0, [sp, #THREAD_CORE_LOCAL_CURR_THREAD]
	mov	w0, #THREAD_CLF_TMP
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/* Enable aborts now that we can receive exceptions */
	msr	daifclr, #DAIFBIT_ABT

	/*
	 * Invalidate dcache for all memory used during initialization to
	 * avoid nasty surprises when the cache is turned on. We must not
	 * invalidate memory not used by OP-TEE since we may invalidate
	 * entries used by, for instance, ARM Trusted Firmware.
	 */
	adr_l	x0, __text_start
	adr_l	x1, boot_cached_mem_end
	ldr	x1, [x1]
	sub	x1, x1, x0
	bl	dcache_cleaninv_range

	/* Enable Console */
	bl	console_init

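	/* Pass the boot arguments saved in x19-x22 at entry, fifth argument 0 */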
	mov	x0, x19
	mov	x1, x20
	mov	x2, x21
	mov	x3, x22
	mov	x4, xzr
	bl	boot_save_args

#ifdef CFG_WITH_PAGER
	adr_l	x0, __init_end	/* pointer to boot_embdata */
	ldr	w1, [x0]	/* struct boot_embdata::total_len */
	add	x0, x0, x1
	add	x0, x0, #0xfff	/* round up */
	bic	x0, x0, #0xfff	/* to next page */
	mov_imm x1, (TEE_RAM_PH_SIZE + TEE_RAM_START)
	mov	x2, x1
#else
	adr_l	x0, __vcore_free_start
	adr_l	x1, boot_embdata_ptr
	ldr	x1, [x1]
	adr_l	x2, __vcore_free_end
#endif
	bl	boot_mem_init

#ifdef CFG_MEMTAG
	/*
	 * If FEAT_MTE2 is available, initialize the memtag callbacks.
	 * Tags for OP-TEE core memory are then cleared to make it safe to
	 * enable MEMTAG below.
	 */
	bl	boot_init_memtag
#endif

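	/* x0 = ASLR seed passed to core_init_mmu_map(), 0 when ASLR is off */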
#ifdef CFG_CORE_ASLR
	bl	get_aslr_seed
#ifdef CFG_CORE_ASLR_SEED
	mov_imm	x0, CFG_CORE_ASLR_SEED
#endif
#else
	mov	x0, #0
#endif

	adr	x1, boot_mmu_config
	bl	core_init_mmu_map

#ifdef CFG_CORE_ASLR
	/*
	 * Process relocation information again, updating for the virtual
	 * map offset. We're doing this now, before the MMU is enabled, as
	 * some of the memory will become write protected.
	 */
	ldr	x0, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
	cbz	x0, 1f
	/*
	 * Update boot_cached_mem_end address with load offset since it was
	 * calculated before relocation.
	 */
	adr_l	x5, boot_cached_mem_end
	ldr	x6, [x5]
	add	x6, x6, x0
	str	x6, [x5]
	adr	x1, _start		/* Load address */
	bl	relocate
1:
#endif

	bl	__get_core_pos
	bl	enable_mmu
#ifdef CFG_CORE_ASLR
	adr_l	x0, boot_mmu_config
	ldr	x0, [x0, #CORE_MMU_CONFIG_MAP_OFFSET]
	bl	boot_mem_relocate
	/*
	 * Update recorded end_va.
	 */
	adr_l	x0, boot_mmu_config
	ldr	x0, [x0, #CORE_MMU_CONFIG_MAP_OFFSET]
	msr	spsel, #1
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	add	x1, x1, x0
	str	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	add	x1, x1, x0
	str	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
#ifdef CFG_CORE_DEBUG_CHECK_STACKS
	strb	wzr, [sp, #THREAD_CORE_LOCAL_STACKCHECK_RECURSION]
#endif
	msr	spsel, #0
	/*
	 * Reinitialize console, since register_serial_console() has
	 * previously registered a PA and with ASLR the VA is different
	 * from the PA.
	 */
	bl	console_init
#endif

#ifdef CFG_MEMTAG
	bl	boot_clear_memtag
#endif

#ifdef CFG_NS_VIRTUALIZATION
	/*
	 * Initialize partition tables for each partition to
	 * default_partition, which has now been relocated to a different VA.
	 */
	bl	core_mmu_set_default_prtn_tbl
#endif

	bl	boot_init_primary_early

#ifdef CFG_MEMTAG
	init_memtag_per_cpu
#endif
	bl	boot_init_primary_late
#ifndef CFG_NS_VIRTUALIZATION
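	/*
	 * Temporarily switch to the stack of the boot thread (threads[0]),
	 * keeping the old SP in x23 and the core local pointer in x24.
	 */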
	mov	x23, sp
	adr_l	x0, threads
	ldr	x0, [x0, #THREAD_CTX_STACK_VA_END]
	mov	sp, x0
	bl	thread_get_core_local
	mov	x24, x0
	str	wzr, [x24, #THREAD_CORE_LOCAL_FLAGS]
#endif
	bl	boot_init_primary_runtime
#ifdef CFG_CORE_PAUTH
	adr_l	x0, threads
	ldp	x1, x2, [x0, #THREAD_CTX_KEYS]
	write_apiakeyhi x1
	write_apiakeylo x2
	mrs	x0, sctlr_el1
	orr	x0, x0, #SCTLR_ENIA
	msr	sctlr_el1, x0
	isb
#endif
	bl	boot_init_primary_final

#ifndef CFG_NS_VIRTUALIZATION
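	/* Switch back to the temporary stack and restore per-core flags and keys */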
	mov	x0, #THREAD_CLF_TMP
	str	w0, [x24, #THREAD_CORE_LOCAL_FLAGS]
	mov	sp, x23
#ifdef CFG_CORE_PAUTH
	ldp	x0, x1, [x24, #THREAD_CORE_LOCAL_KEYS]
	write_apiakeyhi x0
	write_apiakeylo x1
	isb
#endif
#endif

#ifdef _CFG_CORE_STACK_PROTECTOR
	/* Update stack canary value */
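	/* Fetch one 8-byte random canary into a 16-byte stack buffer */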
	sub	sp, sp, #0x10
	mov	x0, sp
	mov	x1, #1
	mov	x2, #0x8
	bl	plat_get_random_stack_canaries
	ldr	x0, [sp]
	adr_l	x5, __stack_chk_guard
	str	x0, [x5]
	add	sp, sp, #0x10
#endif

	/*
	 * In case we've touched memory that secondary CPUs will use before
	 * they have turned on their D-cache, clean and invalidate the
	 * D-cache before exiting to normal world.
	 */
	adr_l	x0, __text_start
	adr_l	x1, boot_cached_mem_end
	ldr	x1, [x1]
	sub	x1, x1, x0
	bl	dcache_cleaninv_range


	/*
	 * Clear current thread id now to allow the thread to be reused on
	 * next entry. Matches the thread_init_boot_thread() call in
	 * boot.c.
	 */
#ifndef CFG_NS_VIRTUALIZATION
	bl	thread_clr_boot_thread
#endif

#ifdef CFG_CORE_FFA
	adr	x0, cpu_on_handler
	/*
	 * Compensate for the virtual map offset since cpu_on_handler() is
	 * called with MMU off.
	 */
	ldr	x1, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
	sub	x0, x0, x1
	bl	thread_spmc_register_secondary_ep
	b	thread_ffa_msg_wait
#else
	/*
	 * Pass the address of thread_vector_table. Compensate for the
	 * virtual map offset since the address is used with the MMU off.
	 */
	ldr	x0, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
	adr	x1, thread_vector_table
	sub	x1, x1, x0
	mov	x0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
#endif
END_FUNC _start
DECLARE_KEEP_INIT _start

#ifndef CFG_WITH_PAGER
	.section .identity_map.data
	.balign	8
LOCAL_DATA boot_embdata_ptr , :
	.skip	8
END_DATA boot_embdata_ptr
#endif

#if defined(CFG_CORE_ASLR) || defined(CFG_CORE_PHYS_RELOCATABLE)
LOCAL_FUNC relocate , :
	/*
	 * x0 holds relocate offset
	 * x1 holds load address
	 */
#ifdef CFG_WITH_PAGER
	adr_l	x6, __init_end
#else
	adr_l	x6, boot_embdata_ptr
	ldr	x6, [x6]
#endif
	ldp	w2, w3, [x6, #BOOT_EMBDATA_RELOC_OFFSET]

	add	x2, x2, x6	/* start of relocations */
	add	x3, x3, x2	/* end of relocations */

	/*
	 * Relocations are not formatted as Rela64, instead they are in a
	 * compressed format created by get_reloc_bin() in
	 * scripts/gen_tee_bin.py
	 *
	 * All the R_AARCH64_RELATIVE relocations are translated into a
	 * list of 32-bit offsets from TEE_LOAD_ADDR. At each such address
	 * is a 64-bit value which is increased by the load offset.
	 */

#ifdef CFG_WITH_PAGER
	/*
	 * With pager enabled we can only relocate the pager and init
	 * parts, the rest has to be done when a page is populated.
	 */
	sub	x6, x6, x1
#endif

	b	2f
	/* Loop over the relocation addresses and process all entries */
1:	ldr	w4, [x2], #4
#ifdef CFG_WITH_PAGER
	/* Skip too large addresses */
	cmp	x4, x6
	b.ge	2f
#endif
	add	x4, x4, x1
	ldr	x5, [x4]
	add	x5, x5, x0
	str	x5, [x4]

2:	cmp	x2, x3
	b.ne	1b

	ret
END_FUNC relocate
#endif

/*
 * void enable_mmu(unsigned long core_pos);
 *
 * This function depends on being mapped within the identity map where
 * the physical and virtual addresses are the same. After the MMU has been
 * enabled the instruction pointer will be updated to execute at the new
 * offset instead. Stack pointers and the return address are updated.
 */
LOCAL_FUNC enable_mmu , : , .identity_map
	adr	x1, boot_mmu_config
	load_xregs x1, 0, 2, 6
	/*
	 * x0 = core_pos
	 * x2 = tcr_el1
	 * x3 = mair_el1
	 * x4 = ttbr0_el1_base
	 * x5 = ttbr0_core_offset
	 * x6 = load_offset
	 */
	msr	tcr_el1, x2
	msr	mair_el1, x3

	/*
	 * ttbr0_el1 = ttbr0_el1_base + ttbr0_core_offset * core_pos
	 */
	madd	x1, x5, x0, x4
	msr	ttbr0_el1, x1
	msr	ttbr1_el1, xzr
	isb

	/* Invalidate TLB */
	tlbi	vmalle1

	/*
	 * Make sure translation table writes have drained into memory and
	 * the TLB invalidation is complete.
	 */
	dsb	sy
	isb

	/* Enable the MMU */
	mrs	x1, sctlr_el1
	orr	x1, x1, #SCTLR_M
	msr	sctlr_el1, x1
	isb

	/* Update vbar */
	mrs	x1, vbar_el1
	add	x1, x1, x6
	msr	vbar_el1, x1
	isb

	/* Invalidate instruction cache and branch predictor */
	ic	iallu
	isb

	/* Enable I and D cache */
	mrs	x1, sctlr_el1
	orr	x1, x1, #SCTLR_I
	orr	x1, x1, #SCTLR_C
	msr	sctlr_el1, x1
	isb

	/* Adjust stack pointers and return address */
	msr	spsel, #1
	add	sp, sp, x6
	msr	spsel, #0
	add	sp, sp, x6
	add	x30, x30, x6

	ret
END_FUNC enable_mmu

	.section .identity_map.data
	.balign	8
DATA boot_mmu_config , : /* struct core_mmu_config */
	.skip	CORE_MMU_CONFIG_SIZE
END_DATA boot_mmu_config

FUNC cpu_on_handler , :
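	/* Preserve the boot arguments and return address across the calls below */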
	mov	x19, x0
	mov	x20, x1
	mov	x21, x30

	adr	x0, reset_vect_table
	msr	vbar_el1, x0
	isb

	set_sctlr_el1
	isb

#ifdef CFG_PAN
	init_pan
#endif

	/* Enable aborts now that we can receive exceptions */
	msr	daifclr, #DAIFBIT_ABT

	bl	__get_core_pos
	bl	enable_mmu

	/* Set up SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
	set_sp

#ifdef CFG_MEMTAG
	init_memtag_per_cpu
#endif
#ifdef CFG_CORE_PAUTH
	init_pauth_secondary_cpu
#endif

	mov	x0, x19
	mov	x1, x20
#ifdef CFG_CORE_FFA
	bl	boot_cpu_on_handler
	b	thread_ffa_msg_wait
#else
	mov	x30, x21
	b	boot_cpu_on_handler
#endif
END_FUNC cpu_on_handler
DECLARE_KEEP_PAGER cpu_on_handler

LOCAL_FUNC unhandled_cpu , :
	wfi
	b	unhandled_cpu
END_FUNC unhandled_cpu

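/*
 * 32-bit position independent reference used by set_sp: the offset from
 * stack_tmp_rel to stack_tmp, with STACK_TMP_GUARD already subtracted.
 */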
LOCAL_DATA stack_tmp_rel , :
	.word	stack_tmp - stack_tmp_rel - STACK_TMP_GUARD
END_DATA stack_tmp_rel

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. This is meant to be placed
	 * immediately after the last instruction in the vector. It takes the
	 * vector entry as the parameter
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm

	.section .identity_map, "ax", %progbits
	.align	11
LOCAL_FUNC reset_vect_table , :, .identity_map, , nobti
	/* -----------------------------------------------------
	 * Current EL with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
SynchronousExceptionSP0:
	b	SynchronousExceptionSP0
	check_vector_size SynchronousExceptionSP0

	.align	7
IrqSP0:
	b	IrqSP0
	check_vector_size IrqSP0

	.align	7
FiqSP0:
	b	FiqSP0
	check_vector_size FiqSP0

	.align	7
SErrorSP0:
	b	SErrorSP0
	check_vector_size SErrorSP0

	/* -----------------------------------------------------
	 * Current EL with SPx: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionSPx:
	b	SynchronousExceptionSPx
	check_vector_size SynchronousExceptionSPx

	.align	7
IrqSPx:
	b	IrqSPx
	check_vector_size IrqSPx

	.align	7
FiqSPx:
	b	FiqSPx
	check_vector_size FiqSPx

	.align	7
SErrorSPx:
	b	SErrorSPx
	check_vector_size SErrorSPx

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionA64:
	b	SynchronousExceptionA64
	check_vector_size SynchronousExceptionA64

	.align	7
IrqA64:
	b	IrqA64
	check_vector_size IrqA64

	.align	7
FiqA64:
	b	FiqA64
	check_vector_size FiqA64

	.align	7
SErrorA64:
	b	SErrorA64
	check_vector_size SErrorA64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionA32:
	b	SynchronousExceptionA32
	check_vector_size SynchronousExceptionA32

	.align	7
IrqA32:
	b	IrqA32
	check_vector_size IrqA32

	.align	7
FiqA32:
	b	FiqA32
	check_vector_size FiqA32

	.align	7
SErrorA32:
	b	SErrorA32
	check_vector_size SErrorA32

END_FUNC reset_vect_table

BTI(emit_aarch64_feature_1_and     GNU_PROPERTY_AARCH64_FEATURE_1_BTI)