/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, Linaro Limited
 * Copyright (c) 2021-2023, Arm Limited
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

.arch_extension sec

.section .data
.balign 4

#ifdef CFG_BOOT_SYNC_CPU
.equ SEM_CPU_READY, 1
#endif

#ifdef CFG_PL310
.section .rodata.init
panic_boot_file:
	.asciz __FILE__

/*
 * void __assert_flat_mapped_range(uint32_t vaddr, uint32_t line)
 */
LOCAL_FUNC __assert_flat_mapped_range , :
UNWIND(	.cantunwind)
	push	{ r4-r6, lr }
	mov	r4, r0
	mov	r5, r1
	bl	cpu_mmu_enabled
	cmp	r0, #0
	beq	1f
	mov	r0, r4
	bl	virt_to_phys
	cmp	r0, r4
	beq	1f
	/*
	 * This must be compliant with the generic panic routine:
	 * __do_panic(__FILE__, __LINE__, __func__, str)
	 */
	ldr	r0, =panic_boot_file
	mov	r1, r5
	mov	r2, #0
	mov	r3, #0
	bl	__do_panic
	b	.		/* should NOT return */
1:	pop	{ r4-r6, pc }
END_FUNC __assert_flat_mapped_range
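
/*
 * For reference, the register setup above matches the C panic helper,
 * assumed here to be declared along the lines of:
 *
 * void __do_panic(const char *file, const int line, const char *func,
 *		   const char *msg);
 *
 * r2 (func) and r3 (msg) are passed as 0 since neither is available from
 * this assembly context.
 */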

	/* Panic if the MMU is enabled and vaddr != paddr (clobbers lr) */
	.macro assert_flat_mapped_range va, line
		ldr	r0, \va
		ldr	r1, =\line
		bl	__assert_flat_mapped_range
	.endm
#endif /* CFG_PL310 */

WEAK_FUNC plat_cpu_reset_early , :
	bx	lr
END_FUNC plat_cpu_reset_early
DECLARE_KEEP_PAGER plat_cpu_reset_early

	.section .identity_map, "ax"
	.align 5
LOCAL_FUNC reset_vect_table , : , .identity_map
	b	.
	b	.	/* Undef */
	b	.	/* Syscall */
	b	.	/* Prefetch abort */
	b	.	/* Data abort */
	b	.	/* Reserved */
	b	.	/* IRQ */
	b	.	/* FIQ */
END_FUNC reset_vect_table

	.macro cpu_is_ready
#ifdef CFG_BOOT_SYNC_CPU
	bl	__get_core_pos
	lsl	r0, r0, #2
	ldr	r1, =sem_cpu_sync
	ldr	r2, =SEM_CPU_READY
	str	r2, [r1, r0]
	dsb
	sev
#endif
	.endm

	.macro wait_primary
#ifdef CFG_BOOT_SYNC_CPU
	ldr	r0, =sem_cpu_sync
	mov	r2, #SEM_CPU_READY
	sev
1:
	ldr	r1, [r0]
	cmp	r1, r2
	wfene
	bne	1b
#endif
	.endm

	.macro wait_secondary
#ifdef CFG_BOOT_SYNC_CPU
	ldr	r0, =sem_cpu_sync
	mov	r3, #CFG_TEE_CORE_NB_CORE
	mov	r2, #SEM_CPU_READY
	sev
1:
	subs	r3, r3, #1
	beq	3f
	add	r0, r0, #4
2:
	ldr	r1, [r0]
	cmp	r1, r2
	wfene
	bne	2b
	b	1b
3:
#endif
	.endm
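
	/*
	 * The three macros above implement a minimal boot synchronization
	 * scheme on top of sem_cpu_sync, a word array with one slot per
	 * core (see sem_cpu_sync_start/sem_cpu_sync_end below). Roughly,
	 * in C terms:
	 *
	 * uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
	 *
	 * cpu_is_ready:
	 *	sem_cpu_sync[__get_core_pos()] = SEM_CPU_READY;
	 * wait_primary:
	 *	while (sem_cpu_sync[0] != SEM_CPU_READY)
	 *		wfe();
	 * wait_secondary:
	 *	for (n = 1; n < CFG_TEE_CORE_NB_CORE; n++)
	 *		while (sem_cpu_sync[n] != SEM_CPU_READY)
	 *			wfe();
	 *
	 * with dsb/sev added so that updates are observed and waiting cores
	 * are woken.
	 */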

	/*
	 * set_sctlr : Set up some core configuration in CP15 SCTLR
	 *
	 * Setup required by the current implementation of the OP-TEE core:
	 * - Disable data and instruction caches.
	 * - MMU is expected off and exceptions trapped in ARM mode.
	 * - Enable or disable alignment checks based on the platform
	 *   configuration.
	 * - Optionally enable write-implies-execute-never.
	 * - Optionally enable the round-robin cache replacement strategy.
	 *
	 * Clobbers r0.
	 */
	.macro set_sctlr
		read_sctlr r0
		bic	r0, r0, #(SCTLR_M | SCTLR_C)
		bic	r0, r0, #SCTLR_I
		bic	r0, r0, #SCTLR_TE
		orr	r0, r0, #SCTLR_SPAN
#if defined(CFG_SCTLR_ALIGNMENT_CHECK)
		orr	r0, r0, #SCTLR_A
#else
		bic	r0, r0, #SCTLR_A
#endif
#if defined(CFG_HWSUPP_MEM_PERM_WXN) && defined(CFG_CORE_RWDATA_NOEXEC)
		orr	r0, r0, #(SCTLR_WXN | SCTLR_UWXN)
#endif
#if defined(CFG_ENABLE_SCTLR_RR)
		orr	r0, r0, #SCTLR_RR
#endif
		write_sctlr r0
	.endm

	.macro maybe_init_spectre_workaround
#if !defined(CFG_WITH_ARM_TRUSTED_FW) && \
    (defined(CFG_CORE_WORKAROUND_SPECTRE_BP) || \
     defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC))
	read_midr r0
	ubfx	r1, r0, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
	cmp	r1, #MIDR_IMPLEMENTER_ARM
	bne	1f
	ubfx	r1, r0, #MIDR_PRIMARY_PART_NUM_SHIFT, \
			#MIDR_PRIMARY_PART_NUM_WIDTH

	movw	r2, #CORTEX_A8_PART_NUM
	cmp	r1, r2
	moveq	r2, #ACTLR_CA8_ENABLE_INVALIDATE_BTB
	beq	2f

	movw	r2, #CORTEX_A15_PART_NUM
	cmp	r1, r2
	moveq	r2, #ACTLR_CA15_ENABLE_INVALIDATE_BTB
	bne	1f	/* Skip it for all other CPUs */
2:
	read_actlr r0
	orr	r0, r0, r2
	write_actlr r0
	isb
1:
#endif
	.endm

FUNC _start , :
UNWIND(	.cantunwind)
	/*
	 * Make a temporary copy of the boot argument registers; they are
	 * passed to boot_save_args() further down.
	 */
	mov	r4, r0
	mov	r5, r1
	mov	r6, r2
	mov	r7, r3
	mov	r8, lr

	/*
	 * The 32-bit entry point is expected to execute in Supervisor
	 * mode, but some bootloaders may enter in Supervisor or Monitor
	 * mode.
	 */
	cps	#CPSR_MODE_SVC

	/* Early ARM secure MP specific configuration */
	bl	plat_cpu_reset_early
	maybe_init_spectre_workaround

	set_sctlr
	isb

	ldr	r0, =reset_vect_table
	write_vbar r0

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	b	reset_primary
#else
	bl	__get_core_pos
	cmp	r0, #0
	beq	reset_primary
	b	reset_secondary
#endif
END_FUNC _start
DECLARE_KEEP_INIT _start

	/*
	 * Set up sp to point to the top of the tmp stack for the current
	 * CPU: sp is assigned:
	 *   stack_tmp + (cpu_id + 1) * stack_tmp_stride - STACK_TMP_GUARD
	 */
	.macro set_sp
		bl	__get_core_pos
		cmp	r0, #CFG_TEE_CORE_NB_CORE
		/* Unsupported CPU, park it before it breaks something */
		bge	unhandled_cpu
		add	r0, r0, #1

		/* r2 = stack_tmp - STACK_TMP_GUARD */
		adr	r3, stack_tmp_rel
		ldr	r2, [r3]
		add	r2, r2, r3

		/*
		 * stack_tmp_stride and stack_tmp_stride_rel are the
		 * equivalent of:
		 * extern const u32 stack_tmp_stride;
		 * u32 stack_tmp_stride_rel = (u32)&stack_tmp_stride -
		 *			      (u32)&stack_tmp_stride_rel
		 *
		 * To load the value of stack_tmp_stride we do the equivalent
		 * of:
		 * *(u32 *)(stack_tmp_stride_rel + (u32)&stack_tmp_stride_rel)
		 */
		adr	r3, stack_tmp_stride_rel
		ldr	r1, [r3]
		ldr	r1, [r1, r3]

		/*
		 * r0 is core pos + 1
		 * r1 is value of stack_tmp_stride
		 * r2 is value of stack_tmp - STACK_TMP_GUARD
		 */
		mul	r1, r0, r1
		add	sp, r1, r2
	.endm
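
	/*
	 * In C terms the macro above ends up computing (a sketch, using the
	 * symbols referenced elsewhere in this file):
	 *
	 * sp = (stack_tmp - STACK_TMP_GUARD) +
	 *	(__get_core_pos() + 1) * stack_tmp_stride;
	 *
	 * The PC-relative stack_tmp_rel/stack_tmp_stride_rel words make the
	 * computation position independent, so it yields correct addresses
	 * both when running with the MMU off at the load address and after
	 * an eventual CFG_CORE_ASLR relocation.
	 */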

	/*
	 * Cache maintenance during entry: handle the outer cache.
	 * The end address is exclusive: the first byte not to be changed.
	 * Note however that arm_clX_inv/cleanbyva operate on full cache
	 * lines.
	 *
	 * Use an ANSI #define to capture the source file line number for
	 * the PL310 assertion.
	 */
	.macro __inval_cache_vrange vbase, vend, line
#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
		assert_flat_mapped_range (\vbase), (\line)
		bl	pl310_base
		ldr	r1, \vbase
		ldr	r2, \vend
		bl	arm_cl2_invbypa
#endif
		ldr	r0, \vbase
		ldr	r1, \vend
		sub	r1, r1, r0
		bl	dcache_inv_range
	.endm

	.macro __flush_cache_vrange vbase, vend, line
#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
		assert_flat_mapped_range (\vbase), (\line)
		ldr	r0, \vbase
		ldr	r1, \vend
		sub	r1, r1, r0
		bl	dcache_clean_range
		bl	pl310_base
		ldr	r1, \vbase
		ldr	r2, \vend
		bl	arm_cl2_cleaninvbypa
#endif
		ldr	r0, \vbase
		ldr	r1, \vend
		sub	r1, r1, r0
		bl	dcache_cleaninv_range
	.endm

#define inval_cache_vrange(vbase, vend) \
		__inval_cache_vrange vbase, vend, __LINE__

#define flush_cache_vrange(vbase, vend) \
		__flush_cache_vrange vbase, vend, __LINE__

#ifdef CFG_BOOT_SYNC_CPU
#define flush_cpu_semaphores \
		flush_cache_vrange(sem_cpu_sync_start, sem_cpu_sync_end)
#else
#define flush_cpu_semaphores
#endif
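
/*
 * Note that the vbase/vend parameters of the macros above name labels of
 * words holding the start and end addresses (for instance cached_mem_start
 * and cached_mem_end defined further down), not the addresses themselves:
 * "ldr r0, \vbase" performs a PC-relative load of the word stored at the
 * label, which is how the runtime-computed cached_mem_end value is picked
 * up.
 */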

LOCAL_FUNC reset_primary , : , .identity_map
UNWIND(	.cantunwind)

	/* Preserve r4-r8: bootargs */

#ifdef CFG_WITH_PAGER
	/*
	 * Move the init code into its correct location and move the hashes
	 * to a temporary safe location until the heap is initialized.
	 *
	 * The binary is built as:
	 * [Pager code, rodata and data] : In correct location
	 * [Init code and rodata] : Should be copied to __init_start
	 * [struct boot_embdata + data] : Should be saved before
	 * initializing the pager; the first uint32_t tells the length of
	 * the data
	 */
	ldr	r0, =__init_start	/* dst */
	ldr	r1, =__data_end		/* src */
	ldr	r2, =__init_end
	sub	r2, r2, r0		/* init len */
	ldr	r12, [r1, r2]		/* length of hashes etc */
	add	r2, r2, r12		/* length of init and hashes etc */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	r0, r0, r2		/* __init_start + len */
	add	r1, r1, r2		/* __data_end + len */
	str	r0, cached_mem_end
	ldr	r2, =__init_start
copy_init:
	ldmdb	r1!, {r3, r9-r12}
	stmdb	r0!, {r3, r9-r12}
	cmp	r0, r2
	bgt	copy_init
#else
	/*
	 * The binary is built as:
	 * [Core, rodata and data] : In correct location
	 * [struct boot_embdata + data] : Should be moved to __end; the
	 * first uint32_t tells the length of the struct + data
	 */
	ldr	r0, =__end		/* dst */
	ldr	r1, =__data_end		/* src */
	ldr	r2, [r1]		/* struct boot_embdata::total_len */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	r0, r0, r2
	add	r1, r1, r2
	str	r0, cached_mem_end
	ldr	r2, =__end

copy_init:
	ldmdb	r1!, {r3, r9-r12}
	stmdb	r0!, {r3, r9-r12}
	cmp	r0, r2
	bgt	copy_init
#endif

	/*
	 * Clear .bss; this code depends on the linker keeping the start/end
	 * of .bss at least 8-byte aligned.
	 */
	ldr	r0, =__bss_start
	ldr	r1, =__bss_end
	mov	r2, #0
	mov	r3, #0
clear_bss:
	stmia	r0!, {r2, r3}
	cmp	r0, r1
	bls	clear_bss

#ifdef CFG_NS_VIRTUALIZATION
	/*
	 * Clear .nex_bss; this code depends on the linker keeping the
	 * start/end of .nex_bss at least 8-byte aligned.
	 */
	ldr	r0, =__nex_bss_start
	ldr	r1, =__nex_bss_end
	mov	r2, #0
	mov	r3, #0
clear_nex_bss:
	stmia	r0!, {r2, r3}
	cmp	r0, r1
	bls	clear_nex_bss
#endif

#ifdef CFG_CORE_SANITIZE_KADDRESS
	/* First initialize the entire shadow area with no access */
	ldr	r0, =__asan_shadow_start	/* start */
	ldr	r1, =__asan_shadow_end	/* limit */
	mov	r2, #ASAN_DATA_RED_ZONE
shadow_no_access:
	str	r2, [r0], #4
	cmp	r0, r1
	bls	shadow_no_access

	/* Mark the entire stack area as OK */
	ldr	r2, =CFG_ASAN_SHADOW_OFFSET
	ldr	r0, =__nozi_stack_start	/* start */
	lsr	r0, r0, #ASAN_BLOCK_SHIFT
	add	r0, r0, r2
	ldr	r1, =__nozi_stack_end	/* limit */
	lsr	r1, r1, #ASAN_BLOCK_SHIFT
	add	r1, r1, r2
	mov	r2, #0
shadow_stack_access_ok:
	strb	r2, [r0], #1
	cmp	r0, r1
	bls	shadow_stack_access_ok
#endif

	set_sp

	bl	thread_init_thread_core_local

	/* Complete the ARM secure MP common configuration */
	bl	plat_primary_init_early

	/* Enable the console */
	bl	console_init

	mov	r0, r8
	mov	r1, #0
	push	{r0, r1}
	mov	r0, r4
	mov	r1, r5
	mov	r2, r6
	mov	r3, r7
	bl	boot_save_args
	add	sp, sp, #(2 * 4)
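
	/*
	 * Per the AAPCS the first four arguments go in r0-r3 and any
	 * further arguments are passed on the stack, so the push above
	 * places the saved entry lr (r8) and a zero in the two stack
	 * argument slots, handing the lr value to boot_save_args() as the
	 * fifth argument while keeping sp 8-byte aligned. See the C
	 * declaration of boot_save_args() for the exact parameter list.
	 */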

#ifdef CFG_PL310
	bl	pl310_base
	bl	arm_cl2_config
#endif

	/*
	 * Invalidate the dcache for all memory used during initialization
	 * to avoid nasty surprises when the cache is turned on. We must
	 * not invalidate memory not used by OP-TEE since we may invalidate
	 * entries used by, for instance, ARM Trusted Firmware.
	 */
	inval_cache_vrange(cached_mem_start, cached_mem_end)

#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
	/* Enable PL310 if not yet enabled */
	bl	pl310_base
	bl	arm_cl2_enable
#endif

#ifdef CFG_CORE_ASLR
	bl	get_aslr_seed
#else
	mov	r0, #0
#endif

	ldr	r1, =boot_mmu_config
	bl	core_init_mmu_map

#ifdef CFG_CORE_ASLR
	/*
	 * Process the relocation information to update with the virtual
	 * map offset. We're doing this now, before the MMU is enabled, as
	 * some of the memory will become write-protected.
	 */
	ldr	r0, =boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_MAP_OFFSET]
	/*
	 * Update the cached_mem_end address with the load offset since it
	 * was calculated before relocation.
	 */
	ldr	r2, cached_mem_end
	add	r2, r2, r0
	str	r2, cached_mem_end

	bl	relocate
#endif

	bl	__get_core_pos
	bl	enable_mmu
#ifdef CFG_CORE_ASLR
	/*
	 * Reinitialize the console, since register_serial_console() has
	 * previously registered a PA and with ASLR the VA is different
	 * from the PA.
	 */
	bl	console_init
#endif

#ifdef CFG_NS_VIRTUALIZATION
	/*
	 * Initialize the partition tables for each partition to
	 * default_partition, which has now been relocated to a different
	 * VA.
	 */
	bl	core_mmu_set_default_prtn_tbl
#endif

	bl	boot_init_primary_early
#ifndef CFG_NS_VIRTUALIZATION
	mov	r9, sp
	ldr	r0, =threads
	ldr	r0, [r0, #THREAD_CTX_STACK_VA_END]
	mov	sp, r0
	bl	thread_get_core_local
	mov	r8, r0
	mov	r0, #0
	str	r0, [r8, #THREAD_CORE_LOCAL_FLAGS]
#endif
	bl	boot_init_primary_late
	bl	boot_init_primary_final
#ifndef CFG_NS_VIRTUALIZATION
	mov	r0, #THREAD_CLF_TMP
	str	r0, [r8, #THREAD_CORE_LOCAL_FLAGS]
	mov	sp, r9
#endif

#ifdef _CFG_CORE_STACK_PROTECTOR
	/* Update stack canary value */
	sub	sp, sp, #0x8
	mov	r0, sp
	mov	r1, #1
	mov	r2, #0x4
	bl	plat_get_random_stack_canaries
	ldr	r0, [sp]
	ldr	r1, =__stack_chk_guard
	str	r0, [r1]
	add	sp, sp, #0x8
#endif
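
	/*
	 * The block above asks the platform for one random 4-byte canary
	 * written to a temporary 8-byte stack buffer (r0/r1/r2 presumably
	 * matching the buffer, count and canary-size parameters of
	 * plat_get_random_stack_canaries()) and copies it into
	 * __stack_chk_guard, the word the compiler's stack-protector
	 * instrumentation checks on function return.
	 */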

	/*
	 * In case we've touched memory that secondary CPUs will use before
	 * they have turned on their D-cache, clean and invalidate the
	 * D-cache before exiting to normal world.
	 */
	flush_cache_vrange(cached_mem_start, cached_mem_end)

	/* Release the secondary boot cores and sync with them */
	cpu_is_ready
	flush_cpu_semaphores
	wait_secondary

#ifdef CFG_PL310_LOCKED
#ifdef CFG_PL310_SIP_PROTOCOL
#error "CFG_PL310_LOCKED must not be defined when CFG_PL310_SIP_PROTOCOL=y"
#endif
	/* Lock/invalidate all lines: the PL310 behaves as if disabled */
	bl	pl310_base
	bl	arm_cl2_lockallways
	bl	pl310_base
	bl	arm_cl2_cleaninvbyway
#endif

	/*
	 * Clear the current thread id now to allow the thread to be reused
	 * on the next entry. Matches the thread_init_boot_thread() call in
	 * boot.c.
	 */
#ifndef CFG_NS_VIRTUALIZATION
	bl	thread_clr_boot_thread
#endif

#ifdef CFG_CORE_FFA
	ldr	r0, =cpu_on_handler
	/*
	 * Compensate for the virtual map offset since cpu_on_handler() is
	 * called with the MMU off.
	 */
	ldr	r1, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
	sub	r0, r0, r1
	bl	thread_spmc_register_secondary_ep
	b	thread_ffa_msg_wait
#else /* CFG_CORE_FFA */

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	ldr	r0, =boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_MAP_OFFSET]
	ldr	r1, =thread_vector_table
	/* Pass the physical address of thread_vector_table */
	sub	r1, r1, r0
#else
	/* Relay standard bootargs #1 and #2 to the non-secure entry */
	mov	r4, #0
	mov	r3, r6		/* std bootarg #2 for register R2 */
	mov	r2, r5		/* std bootarg #1 for register R1 */
	mov	r1, #0
#endif /* CFG_WITH_ARM_TRUSTED_FW */

	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
#endif /* CFG_CORE_FFA */
END_FUNC reset_primary

#ifdef CFG_BOOT_SYNC_CPU
LOCAL_DATA sem_cpu_sync_start , :
	.word	sem_cpu_sync
END_DATA sem_cpu_sync_start

LOCAL_DATA sem_cpu_sync_end , :
	.word	sem_cpu_sync + (CFG_TEE_CORE_NB_CORE << 2)
END_DATA sem_cpu_sync_end
#endif

LOCAL_DATA cached_mem_start , :
	.word	__text_start
END_DATA cached_mem_start

LOCAL_DATA cached_mem_end , :
	.skip	4
END_DATA cached_mem_end

LOCAL_FUNC unhandled_cpu , :
	wfi
	b	unhandled_cpu
END_FUNC unhandled_cpu

#ifdef CFG_CORE_ASLR
LOCAL_FUNC relocate , :
	push	{r4-r5}
	/* r0 holds the load offset */
#ifdef CFG_WITH_PAGER
	ldr	r12, =__init_end
#else
	ldr	r12, =__end
#endif
	ldr	r2, [r12, #BOOT_EMBDATA_RELOC_OFFSET]
	ldr	r3, [r12, #BOOT_EMBDATA_RELOC_LEN]

	mov_imm	r1, TEE_LOAD_ADDR
	add	r2, r2, r12	/* start of relocations */
	add	r3, r3, r2	/* end of relocations */

	/*
	 * Relocations are not formatted as Rel32; instead they are in a
	 * compressed format created by get_reloc_bin() in
	 * scripts/gen_tee_bin.py
	 *
	 * All the R_ARM_RELATIVE relocations are translated into a list of
	 * 32-bit offsets from TEE_LOAD_ADDR. Each offset points to a
	 * 32-bit value that is increased by the load offset.
	 */

#ifdef CFG_WITH_PAGER
	/*
	 * With the pager enabled we can only relocate the pager and init
	 * parts; the rest has to be done when a page is populated.
	 */
	sub	r12, r12, r1
#endif

	b	2f
	/* Loop over the relocation addresses and process all entries */
1:	ldr	r4, [r2], #4
#ifdef CFG_WITH_PAGER
	/* Skip too large addresses */
	cmp	r4, r12
	bge	2f
#endif
	ldr	r5, [r4, r1]
	add	r5, r5, r0
	str	r5, [r4, r1]

2:	cmp	r2, r3
	bne	1b

	pop	{r4-r5}
	bx	lr
END_FUNC relocate
#endif
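
/*
 * In C terms, the relocation loop above corresponds roughly to the sketch
 * below (assuming struct boot_embdata carries the reloc_offset/reloc_len
 * fields that the BOOT_EMBDATA_* asm-defines are generated from):
 *
 * const uint8_t *embdata = (const uint8_t *)__end; // __init_end w/ pager
 * const uint32_t *reloc = (const void *)(embdata + reloc_offset);
 * size_t num = reloc_len / sizeof(uint32_t);
 * size_t n;
 *
 * for (n = 0; n < num; n++) {
 *	uint32_t *ptr = (uint32_t *)(TEE_LOAD_ADDR + reloc[n]);
 *
 *	*ptr += load_offset;
 * }
 *
 * With CFG_WITH_PAGER only entries pointing below __init_end are processed
 * here; the remaining ones are handled when the corresponding page is
 * populated.
 */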

/*
 * void enable_mmu(unsigned long core_pos);
 *
 * This function depends on being mapped within the identity map, where
 * the physical and virtual addresses are the same. Once the MMU has been
 * enabled, execution continues at the (possibly relocated) virtual
 * offset: the vector base, stack pointer and return address are updated
 * with the load offset before returning.
 */
LOCAL_FUNC enable_mmu , : , .identity_map
	/* r0 = core pos */
	adr	r1, boot_mmu_config

#ifdef CFG_WITH_LPAE
	ldm	r1!, {r2, r3}
	/*
	 * r2 = ttbcr
	 * r3 = mair0
	 */
	write_ttbcr r2
	write_mair0 r3

	ldm	r1!, {r2, r3}
	/*
	 * r2 = ttbr0_base
	 * r3 = ttbr0_core_offset
	 */

	/*
	 * ttbr0 = ttbr0_base + ttbr0_core_offset * core_pos
	 */
	mla	r12, r0, r3, r2
	mov	r0, #0
	write_ttbr0_64bit r12, r0
	write_ttbr1_64bit r0, r0
#else
	ldm	r1!, {r2, r3}
	/*
	 * r2 = prrr
	 * r3 = nmrr
	 */
	write_prrr r2
	write_nmrr r3

	ldm	r1!, {r2, r3}
	/*
	 * r2 = dacr
	 * r3 = ttbcr
	 */
	write_dacr r2
	write_ttbcr r3

	ldm	r1!, {r2}
	/* r2 = ttbr */
	write_ttbr0 r2
	write_ttbr1 r2

	mov	r2, #0
	write_contextidr r2
#endif
	ldm	r1!, {r2}
	/* r2 = load_offset (always 0 if CFG_CORE_ASLR=n) */
	isb

	/* Invalidate TLB */
	write_tlbiall

	/*
	 * Make sure translation table writes have drained into memory and
	 * the TLB invalidation is complete.
	 */
	dsb	sy
	isb

	read_sctlr r0
	orr	r0, r0, #SCTLR_M
#ifndef CFG_WITH_LPAE
	/* Enable Access flag (simplified access permissions) and TEX remap */
	orr	r0, r0, #(SCTLR_AFE | SCTLR_TRE)
#endif
	write_sctlr r0
	isb

	/* Update vbar */
	read_vbar r1
	add	r1, r1, r2
	write_vbar r1
	isb

	/* Invalidate instruction cache and branch predictor */
	write_iciallu
	write_bpiall
	isb

	read_sctlr r0
	/* Enable the I and D caches */
	orr	r0, r0, #SCTLR_I
	orr	r0, r0, #SCTLR_C
#if defined(CFG_ENABLE_SCTLR_Z)
	/*
	 * This is only needed on the ARMv7 architecture and hence
	 * conditioned on the configuration directive CFG_ENABLE_SCTLR_Z.
	 * On later architectures, program flow prediction is automatically
	 * enabled when the MMU is enabled.
	 */
	orr	r0, r0, #SCTLR_Z
#endif
	write_sctlr r0
	isb

	/* Adjust the stack pointer and return address */
	add	sp, sp, r2
	add	lr, lr, r2

	bx	lr
END_FUNC enable_mmu
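
/*
 * enable_mmu() consumes boot_mmu_config (struct core_mmu_config, filled in
 * by core_init_mmu_map()) as a sequence of words in the order read by the
 * ldm instructions above, roughly:
 *
 * with CFG_WITH_LPAE:    { ttbcr, mair0, ttbr0_base, ttbr0_core_offset,
 *			    load_offset }
 * without CFG_WITH_LPAE: { prrr, nmrr, dacr, ttbcr, ttbr, load_offset }
 *
 * where load_offset (CORE_MMU_CONFIG_MAP_OFFSET) is the ASLR map offset,
 * 0 when CFG_CORE_ASLR=n. The exact field names live in the C definition
 * of struct core_mmu_config.
 */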

LOCAL_DATA stack_tmp_rel , :
	.word	stack_tmp - stack_tmp_rel - STACK_TMP_GUARD
END_DATA stack_tmp_rel

LOCAL_DATA stack_tmp_stride_rel , :
	.word	stack_tmp_stride - stack_tmp_stride_rel
END_DATA stack_tmp_stride_rel

DATA boot_mmu_config , : /* struct core_mmu_config */
	.skip	CORE_MMU_CONFIG_SIZE
END_DATA boot_mmu_config

#if defined(CFG_WITH_ARM_TRUSTED_FW)
FUNC cpu_on_handler , : , .identity_map
UNWIND(	.cantunwind)
	mov	r4, r0
	mov	r5, r1
	mov	r6, lr

	set_sctlr
	isb

	adr	r0, reset_vect_table
	write_vbar r0

	mov	r4, lr

	bl	__get_core_pos
	bl	enable_mmu

	set_sp

	mov	r0, r4
	mov	r1, r5
	bl	boot_cpu_on_handler
#ifdef CFG_CORE_FFA
	b	thread_ffa_msg_wait
#else
	bx	r6
#endif
END_FUNC cpu_on_handler
DECLARE_KEEP_PAGER cpu_on_handler

#else /* defined(CFG_WITH_ARM_TRUSTED_FW) */

LOCAL_FUNC reset_secondary , : , .identity_map
UNWIND(	.cantunwind)
	adr	r0, reset_vect_table
	write_vbar r0

	wait_primary

	set_sp

#if defined (CFG_BOOT_SECONDARY_REQUEST)
	/* If the L1 cache was not invalidated before, do it here */
	mov	r0, #DCACHE_OP_INV
	bl	dcache_op_level1
#endif

	bl	__get_core_pos
	bl	enable_mmu

	cpu_is_ready

#if defined (CFG_BOOT_SECONDARY_REQUEST)
	/*
	 * The boot_core_hpen() return value (r0) is the address of the
	 * NS entry context structure.
	 */
	bl	boot_core_hpen
	ldm	r0, {r0, r6}
#else
	mov	r0, r8		/* ns-entry address */
	mov	r6, #0
#endif
	bl	boot_init_secondary

	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	mov	r1, r6
	mov	r2, #0
	mov	r3, #0
	mov	r4, #0
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC reset_secondary
DECLARE_KEEP_PAGER reset_secondary
#endif /* defined(CFG_WITH_ARM_TRUSTED_FW) */