/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, Linaro Limited
 * Copyright (c) 2021, Arm Limited
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/cache_helpers.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

.arch_extension sec

.section .data
.balign 4

#ifdef CFG_BOOT_SYNC_CPU
.equ SEM_CPU_READY, 1
#endif

#ifdef CFG_PL310
.section .rodata.init
panic_boot_file:
	.asciz __FILE__

/*
 * void __assert_flat_mapped_range(uint32_t vaddr, uint32_t line)
 */
LOCAL_FUNC __assert_flat_mapped_range , :
UNWIND(	.cantunwind)
	push	{ r4-r6, lr }
	mov	r4, r0
	mov	r5, r1
	bl	cpu_mmu_enabled
	cmp	r0, #0
	beq	1f
	mov	r0, r4
	bl	virt_to_phys
	cmp	r0, r4
	beq	1f
	/*
	 * This call must match the generic panic routine:
	 * __do_panic(__FILE__, __LINE__, __func__, str)
	 */
	ldr	r0, =panic_boot_file
	mov	r1, r5
	mov	r2, #0
	mov	r3, #0
	bl	__do_panic
	b	.		/* should NOT return */
1:	pop	{ r4-r6, pc }
END_FUNC __assert_flat_mapped_range

	/* Panic if the MMU is enabled and vaddr != paddr (scratches lr) */
	.macro assert_flat_mapped_range va, line
		ldr	r0, \va
		ldr	r1, =\line
		bl	__assert_flat_mapped_range
	.endm
#endif /* CFG_PL310 */
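
/*
 * Illustrative C equivalent of the check performed by
 * __assert_flat_mapped_range() above (a hedged sketch only, not the
 * authoritative implementation; it must run from identity-mapped code for
 * the comparison to be meaningful):
 *
 *	static void assert_flat_mapped_range(vaddr_t va, uint32_t line)
 *	{
 *		if (cpu_mmu_enabled() && virt_to_phys((void *)va) != va)
 *			__do_panic(panic_boot_file, line, NULL, NULL);
 *	}
 */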

WEAK_FUNC plat_cpu_reset_early , :
	bx	lr
END_FUNC plat_cpu_reset_early
DECLARE_KEEP_PAGER plat_cpu_reset_early

	.section .identity_map, "ax"
	.align 5
LOCAL_FUNC reset_vect_table , : , .identity_map
	b	.	/* Reset */
	b	.	/* Undef */
	b	.	/* Syscall */
	b	.	/* Prefetch abort */
	b	.	/* Data abort */
	b	.	/* Reserved */
	b	.	/* IRQ */
	b	.	/* FIQ */
END_FUNC reset_vect_table

	.macro cpu_is_ready
#ifdef CFG_BOOT_SYNC_CPU
	bl	__get_core_pos
	lsl	r0, r0, #2
	ldr	r1, =sem_cpu_sync
	ldr	r2, =SEM_CPU_READY
	str	r2, [r1, r0]
	dsb
	sev
#endif
	.endm

	.macro wait_primary
#ifdef CFG_BOOT_SYNC_CPU
	ldr	r0, =sem_cpu_sync
	mov	r2, #SEM_CPU_READY
	sev
1:
	ldr	r1, [r0]
	cmp	r1, r2
	wfene
	bne	1b
#endif
	.endm

	.macro wait_secondary
#ifdef CFG_BOOT_SYNC_CPU
	ldr	r0, =sem_cpu_sync
	mov	r3, #CFG_TEE_CORE_NB_CORE
	mov	r2, #SEM_CPU_READY
	sev
1:
	subs	r3, r3, #1
	beq	3f
	add	r0, r0, #4
2:
	ldr	r1, [r0]
	cmp	r1, r2
	wfene
	bne	2b
	b	1b
3:
#endif
	.endm
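
	/*
	 * The three macros above implement a small boot synchronization
	 * protocol over the sem_cpu_sync word array (defined outside this
	 * file). A rough, hedged C sketch of the same logic, assuming
	 * uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE]:
	 *
	 *	cpu_is_ready:	sem_cpu_sync[get_core_pos()] = SEM_CPU_READY;
	 *			dsb(); sev();
	 *
	 *	wait_primary:	while (sem_cpu_sync[0] != SEM_CPU_READY)
	 *				wfe();
	 *
	 *	wait_secondary:	for (n = 1; n < CFG_TEE_CORE_NB_CORE; n++)
	 *				while (sem_cpu_sync[n] != SEM_CPU_READY)
	 *					wfe();
	 */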

	/*
	 * set_sctlr : Set up some core configuration in CP15 SCTLR
	 *
	 * Setup required by the current implementation of the OP-TEE core:
	 * - Disable data and instruction caches.
	 * - MMU is expected to be off and exceptions trapped in ARM mode.
	 * - Enable or disable alignment checks based on platform
	 *   configuration.
	 * - Optionally enable write-implies-execute-never.
	 * - Optionally enable the round-robin strategy for cache replacement.
	 *
	 * Clobbers r0.
	 */
	.macro set_sctlr
		read_sctlr r0
		bic	r0, r0, #(SCTLR_M | SCTLR_C)
		bic	r0, r0, #SCTLR_I
		bic	r0, r0, #SCTLR_TE
		orr	r0, r0, #SCTLR_SPAN
#if defined(CFG_SCTLR_ALIGNMENT_CHECK)
		orr	r0, r0, #SCTLR_A
#else
		bic	r0, r0, #SCTLR_A
#endif
#if defined(CFG_HWSUPP_MEM_PERM_WXN) && defined(CFG_CORE_RWDATA_NOEXEC)
		orr	r0, r0, #(SCTLR_WXN | SCTLR_UWXN)
#endif
#if defined(CFG_ENABLE_SCTLR_RR)
		orr	r0, r0, #SCTLR_RR
#endif
		write_sctlr r0
	.endm

#if defined(CFG_CORE_SEL1_SPMC) && defined(CFG_WITH_ARM_TRUSTED_FW)
	/*
	 * With OP-TEE as the SPMC at S-EL1 the SPMD (SPD_spmd) in TF-A
	 * passes the DTB in r0, the pageable part in r1, and the rest of
	 * the registers are unused.
	 *
	 * Save the boot arguments passed:
	 * entry r0, saved r6: device tree address
	 * entry r1, saved r4: pagestore
	 * saved r5, r7: Zero
	 */
	.macro bootargs_entry
	mov	r6, r0
	mov	r4, r1
	mov	r5, #0
	mov	r7, #0
	.endm
#else
	/*
	 * Save the boot arguments:
	 * entry r0, saved r4: pagestore
	 * entry r1, saved r7: (ARMv7 standard bootarg #1)
	 * entry r2, saved r6: device tree address (ARMv7 standard bootarg #2)
	 * entry lr, saved r5: non-secure entry address (ARMv7 bootarg #0)
	 */
	.macro bootargs_entry
#if defined(CFG_NS_ENTRY_ADDR)
	ldr	r5, =CFG_NS_ENTRY_ADDR
#else
	mov	r5, lr
#endif
#if defined(CFG_PAGEABLE_ADDR)
	ldr	r4, =CFG_PAGEABLE_ADDR
#else
	mov	r4, r0
#endif
#if defined(CFG_DT_ADDR)
	ldr	r6, =CFG_DT_ADDR
#else
	mov	r6, r2
#endif
	mov	r7, r1
	.endm
#endif
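
	/*
	 * Hedged pseudo-C sketch of how the registers saved by
	 * bootargs_entry are consumed later in reset_primary (illustrative
	 * only, see the calls further down):
	 *
	 *	boot_init_primary_early(pageable_part, ns_entry);	// r4, r5
	 *	boot_init_primary_late(dt_addr);			// r6
	 *	// r7 (standard bootarg #1) is relayed to the non-secure entry
	 */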

	.macro maybe_init_spectre_workaround
#if !defined(CFG_WITH_ARM_TRUSTED_FW) && \
    (defined(CFG_CORE_WORKAROUND_SPECTRE_BP) || \
     defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC))
	read_midr r0
	ubfx	r1, r0, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
	cmp	r1, #MIDR_IMPLEMENTER_ARM
	bne	1f
	ubfx	r1, r0, #MIDR_PRIMARY_PART_NUM_SHIFT, \
			#MIDR_PRIMARY_PART_NUM_WIDTH

	movw	r2, #CORTEX_A8_PART_NUM
	cmp	r1, r2
	moveq	r2, #ACTLR_CA8_ENABLE_INVALIDATE_BTB
	beq	2f

	movw	r2, #CORTEX_A15_PART_NUM
	cmp	r1, r2
	moveq	r2, #ACTLR_CA15_ENABLE_INVALIDATE_BTB
	bne	1f	/* Skip it for all other CPUs */
2:
	read_actlr r0
	orr	r0, r0, r2
	write_actlr r0
	isb
1:
#endif
	.endm
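
	/*
	 * Rough C outline of the selection done by the macro above (a
	 * sketch only, the macro is authoritative):
	 *
	 *	uint32_t midr = read_midr();
	 *	uint32_t impl = (midr >> MIDR_IMPLEMENTER_SHIFT) &
	 *			((1 << MIDR_IMPLEMENTER_WIDTH) - 1);
	 *	uint32_t part = (midr >> MIDR_PRIMARY_PART_NUM_SHIFT) &
	 *			((1 << MIDR_PRIMARY_PART_NUM_WIDTH) - 1);
	 *
	 *	if (impl == MIDR_IMPLEMENTER_ARM &&
	 *	    (part == CORTEX_A8_PART_NUM ||
	 *	     part == CORTEX_A15_PART_NUM)) {
	 *		uint32_t bit = (part == CORTEX_A8_PART_NUM) ?
	 *			       ACTLR_CA8_ENABLE_INVALIDATE_BTB :
	 *			       ACTLR_CA15_ENABLE_INVALIDATE_BTB;
	 *
	 *		write_actlr(read_actlr() | bit);
	 *		isb();
	 *	}
	 */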

FUNC _start , :
UNWIND(	.cantunwind)

	bootargs_entry

	/*
	 * The 32-bit entry is expected to execute in Supervisor mode, but
	 * some bootloaders may enter in Supervisor or Monitor mode.
	 */
	cps	#CPSR_MODE_SVC

	/* Early ARM secure MP specific configuration */
	bl	plat_cpu_reset_early
	maybe_init_spectre_workaround

	set_sctlr
	isb

	ldr	r0, =reset_vect_table
	write_vbar r0

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	b	reset_primary
#else
	bl	__get_core_pos
	cmp	r0, #0
	beq	reset_primary
	b	reset_secondary
#endif
END_FUNC _start
DECLARE_KEEP_INIT _start

	/*
	 * Set up sp to point to the top of the tmp stack for the current CPU:
	 * sp is assigned stack_tmp_export + cpu_id * stack_tmp_stride
	 */
	.macro set_sp
		bl	__get_core_pos
		cmp	r0, #CFG_TEE_CORE_NB_CORE
		/* Unsupported CPU, park it before it breaks something */
		bge	unhandled_cpu

		/*
		 * stack_tmp_stride and stack_tmp_stride_rel are the
		 * equivalent of:
		 * extern const u32 stack_tmp_stride;
		 * u32 stack_tmp_stride_rel = (u32)&stack_tmp_stride -
		 *			      (u32)&stack_tmp_stride_rel
		 *
		 * To load the value of stack_tmp_stride we do the equivalent
		 * of:
		 * *(u32 *)(stack_tmp_stride_rel + (u32)&stack_tmp_stride_rel)
		 */
		adr	r3, stack_tmp_stride_rel
		ldr	r1, [r3]
		ldr	r1, [r1, r3]

		/* Same pattern as for stack_tmp_stride above */
		adr	r3, stack_tmp_export_rel
		ldr	r2, [r3]
		ldr	r2, [r2, r3]

		/*
		 * r0 is core pos
		 * r1 is value of stack_tmp_stride
		 * r2 is value of stack_tmp_export
		 */
		mul	r1, r0, r1
		add	sp, r1, r2
	.endm
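
	/*
	 * Pseudo-C summary of set_sp (illustrative; the position-independent
	 * loads through the *_rel words are spelled out in the comment
	 * inside the macro):
	 *
	 *	size_t pos = get_core_pos();
	 *
	 *	if (pos >= CFG_TEE_CORE_NB_CORE)
	 *		unhandled_cpu();	// parks the CPU
	 *	sp = stack_tmp_export + pos * stack_tmp_stride;
	 */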

	/*
	 * Cache maintenance during entry: handle the outer cache.
	 * The end address is exclusive: the first byte not to be changed.
	 * Note, however, that arm_clX_inv/cleanbyva operate on full cache
	 * lines.
	 *
	 * Use an ANSI #define to capture the source file line number for
	 * the PL310 assertion. See the pseudo-C sketch after the macro
	 * definitions below.
	 */
	.macro __inval_cache_vrange vbase, vend, line
#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
		assert_flat_mapped_range (\vbase), (\line)
		bl	pl310_base
		ldr	r1, \vbase
		ldr	r2, \vend
		bl	arm_cl2_invbypa
#endif
		ldr	r0, \vbase
		ldr	r1, \vend
		sub	r1, r1, r0
		bl	dcache_inv_range
	.endm

	.macro __flush_cache_vrange vbase, vend, line
#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
		assert_flat_mapped_range (\vbase), (\line)
		ldr	r0, \vbase
		ldr	r1, \vend
		sub	r1, r1, r0
		bl	dcache_clean_range
		bl	pl310_base
		ldr	r1, \vbase
		ldr	r2, \vend
		bl	arm_cl2_cleaninvbypa
#endif
		ldr	r0, \vbase
		ldr	r1, \vend
		sub	r1, r1, r0
		bl	dcache_cleaninv_range
	.endm

#define inval_cache_vrange(vbase, vend) \
		__inval_cache_vrange vbase, vend, __LINE__

#define flush_cache_vrange(vbase, vend) \
		__flush_cache_vrange vbase, vend, __LINE__

#ifdef CFG_BOOT_SYNC_CPU
#define flush_cpu_semaphores \
		flush_cache_vrange(sem_cpu_sync_start, sem_cpu_sync_end)
#else
#define flush_cpu_semaphores
#endif
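
	/*
	 * Hedged pseudo-C view of what __inval_cache_vrange amounts to,
	 * following the register arguments used above (the end address is
	 * exclusive):
	 *
	 *	// Outer cache by PA, only with a PL310 that is not driven
	 *	// through the SiP protocol
	 *	arm_cl2_invbypa(pl310_base(), vbase, vend);
	 *	// Inner caches by VA, length is end - start
	 *	dcache_inv_range((void *)vbase, vend - vbase);
	 *
	 * __flush_cache_vrange follows the same pattern with clean (and
	 * clean + invalidate) operations instead.
	 */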

LOCAL_FUNC reset_primary , : , .identity_map
UNWIND(	.cantunwind)

	/* Preserve r4-r7: bootargs */

#ifdef CFG_WITH_PAGER
	/*
	 * Move the init code into its correct location and move the hashes
	 * to a temporary safe location until the heap is initialized.
	 *
	 * The binary is built as:
	 * [Pager code, rodata and data] : In correct location
	 * [Init code and rodata] : Should be copied to __init_start
	 * [struct boot_embdata + data] : Should be saved before
	 * initializing the pager; the first uint32_t tells the length of
	 * the data
	 */
	ldr	r0, =__init_start	/* dst */
	ldr	r1, =__data_end		/* src */
	ldr	r2, =__init_end
	sub	r2, r2, r0		/* init len */
	ldr	r12, [r1, r2]		/* length of hashes etc */
	add	r2, r2, r12		/* length of init and hashes etc */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	r0, r0, r2		/* __init_start + len */
	add	r1, r1, r2		/* __data_end + len */
	str	r0, cached_mem_end
	ldr	r2, =__init_start
copy_init:
	ldmdb	r1!, {r3, r8-r12}
	stmdb	r0!, {r3, r8-r12}
	cmp	r0, r2
	bgt	copy_init
#else
	/*
	 * The binary is built as:
	 * [Core, rodata and data] : In correct location
	 * [struct boot_embdata + data] : Should be moved to __end; the
	 * first uint32_t tells the length of the struct + data
	 */
	ldr	r0, =__end		/* dst */
	ldr	r1, =__data_end		/* src */
	ldr	r2, [r1]		/* struct boot_embdata::total_len */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	r0, r0, r2
	add	r1, r1, r2
	str	r0, cached_mem_end
	ldr	r2, =__end

copy_init:
	ldmdb	r1!, {r3, r8-r12}
	stmdb	r0!, {r3, r8-r12}
	cmp	r0, r2
	bgt	copy_init
#endif
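
	/*
	 * The copy loops above are effectively a descending memmove() done
	 * six words at a time. Hedged pseudo-C of the non-pager variant:
	 *
	 *	size_t len = ((struct boot_embdata *)__data_end)->total_len;
	 *
	 *	memmove(__end, __data_end, len);	// copied backwards since
	 *						// the ranges may overlap
	 *	cached_mem_end = (vaddr_t)__end + len;
	 */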

	/*
	 * Clear .bss; this code obviously depends on the linker keeping
	 * the start/end of .bss at least 8 byte aligned.
	 */
	ldr	r0, =__bss_start
	ldr	r1, =__bss_end
	mov	r2, #0
	mov	r3, #0
clear_bss:
	stmia	r0!, {r2, r3}
	cmp	r0, r1
	bls	clear_bss

#ifdef CFG_VIRTUALIZATION
	/*
	 * Clear .nex_bss; this code obviously depends on the linker keeping
	 * the start/end of .nex_bss at least 8 byte aligned.
	 */
	ldr	r0, =__nex_bss_start
	ldr	r1, =__nex_bss_end
	mov	r2, #0
	mov	r3, #0
clear_nex_bss:
	stmia	r0!, {r2, r3}
	cmp	r0, r1
	bls	clear_nex_bss
#endif

#ifdef CFG_CORE_SANITIZE_KADDRESS
	/* First initialize the entire shadow area with no access */
	ldr	r0, =__asan_shadow_start	/* start */
	ldr	r1, =__asan_shadow_end	/* limit */
	mov	r2, #ASAN_DATA_RED_ZONE
shadow_no_access:
	str	r2, [r0], #4
	cmp	r0, r1
	bls	shadow_no_access

	/* Mark the entire stack area as OK */
	ldr	r2, =CFG_ASAN_SHADOW_OFFSET
	ldr	r0, =__nozi_stack_start	/* start */
	lsr	r0, r0, #ASAN_BLOCK_SHIFT
	add	r0, r0, r2
	ldr	r1, =__nozi_stack_end	/* limit */
	lsr	r1, r1, #ASAN_BLOCK_SHIFT
	add	r1, r1, r2
	mov	r2, #0
shadow_stack_access_ok:
	strb	r2, [r0], #1
	cmp	r0, r1
	bls	shadow_stack_access_ok
#endif
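
	/*
	 * The shadow setup above follows the usual ASan mapping where one
	 * shadow byte covers (1 << ASAN_BLOCK_SHIFT) bytes of memory.
	 * Hedged C sketch of how the stack range is marked accessible:
	 *
	 *	for (vaddr_t va = __nozi_stack_start; va <= __nozi_stack_end;
	 *	     va += 1 << ASAN_BLOCK_SHIFT) {
	 *		uint8_t *shadow = (uint8_t *)(va >> ASAN_BLOCK_SHIFT) +
	 *				  CFG_ASAN_SHADOW_OFFSET;
	 *
	 *		*shadow = 0;	// 0 means fully accessible
	 *	}
	 */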

	set_sp

	bl	thread_init_thread_core_local

	/* Complete the ARM secure MP common configuration */
	bl	plat_primary_init_early

	/* Enable the console */
	bl	console_init

#ifdef CFG_PL310
	bl	pl310_base
	bl	arm_cl2_config
#endif

	/*
	 * Invalidate the dcache for all memory used during initialization
	 * to avoid nasty surprises when the cache is turned on. We must not
	 * invalidate memory not used by OP-TEE since we may invalidate
	 * entries used by, for instance, ARM Trusted Firmware.
	 */
	inval_cache_vrange(cached_mem_start, cached_mem_end)

#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
	/* Enable the PL310 if not yet enabled */
	bl	pl310_base
	bl	arm_cl2_enable
#endif

#ifdef CFG_CORE_ASLR
	mov	r0, r6
	bl	get_aslr_seed
#else
	mov	r0, #0
#endif

	ldr	r1, =boot_mmu_config
	bl	core_init_mmu_map

#ifdef CFG_CORE_ASLR
	/*
	 * Process the relocation information to update it with the new
	 * offset. We're doing this now, before the MMU is enabled, as some
	 * of the memory will become write protected.
	 */
	ldr	r0, =boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_LOAD_OFFSET]
	/*
	 * Update the cached_mem_end address with the load offset since it
	 * was calculated before relocation.
	 */
	ldr	r2, cached_mem_end
	add	r2, r2, r0
	str	r2, cached_mem_end

	bl	relocate
#endif

	bl	__get_core_pos
	bl	enable_mmu
#ifdef CFG_CORE_ASLR
	/*
	 * Reinitialize the console, since register_serial_console() has
	 * previously registered a PA and with ASLR the VA is different
	 * from the PA.
	 */
	bl	console_init
#endif

#ifdef CFG_VIRTUALIZATION
	/*
	 * Initialize the partition tables for each partition to
	 * default_partition, which has now been relocated to a different VA
	 */
	bl	core_mmu_set_default_prtn_tbl
#endif

	mov	r0, r4		/* pageable part address */
	mov	r1, r5		/* ns-entry address */
	bl	boot_init_primary_early
#ifndef CFG_VIRTUALIZATION
	mov	r7, sp
	ldr	r0, =threads
	ldr	r0, [r0, #THREAD_CTX_STACK_VA_END]
	mov	sp, r0
#endif
	mov	r0, r6		/* DT address */
	bl	boot_init_primary_late
#ifndef CFG_VIRTUALIZATION
	mov	sp, r7
#endif

	/*
	 * In case we've touched memory that secondary CPUs will use before
	 * they have turned on their D-cache, clean and invalidate the
	 * D-cache before exiting to normal world.
	 */
	flush_cache_vrange(cached_mem_start, cached_mem_end)

	/* Release the secondary boot cores and sync with them */
	cpu_is_ready
	flush_cpu_semaphores
	wait_secondary

#ifdef CFG_PL310_LOCKED
#ifdef CFG_PL310_SIP_PROTOCOL
#error "CFG_PL310_LOCKED must not be defined when CFG_PL310_SIP_PROTOCOL=y"
#endif
	/* Lock/invalidate all lines: the PL310 behaves as if disabled */
	bl	pl310_base
	bl	arm_cl2_lockallways
	bl	pl310_base
	bl	arm_cl2_cleaninvbyway
#endif

	/*
	 * Clear the current thread id now to allow the thread to be reused
	 * on the next entry. Matches the thread_init_boot_thread() call in
	 * boot.c.
	 */
#ifndef CFG_VIRTUALIZATION
	bl	thread_clr_boot_thread
#endif

#ifdef CFG_CORE_FFA
	ldr	r0, =cpu_on_handler
	/*
	 * Compensate for the load offset since cpu_on_handler() is
	 * called with the MMU off.
	 */
	ldr	r1, boot_mmu_config + CORE_MMU_CONFIG_LOAD_OFFSET
	sub	r0, r0, r1
	bl	ffa_secondary_cpu_ep_register
	b	thread_ffa_msg_wait
#else /* CFG_CORE_FFA */

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	ldr	r0, =boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_LOAD_OFFSET]
	ldr	r1, =thread_vector_table
	/* Pass the vector address returned from main_init */
	sub	r1, r1, r0
#else
	/* Relay standard bootargs #1 and #2 to the non-secure entry */
	mov	r4, #0
	mov	r3, r6		/* std bootarg #2 for register R2 */
	mov	r2, r7		/* std bootarg #1 for register R1 */
	mov	r1, #0
#endif /* CFG_WITH_ARM_TRUSTED_FW */

	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	smc	#0
	b	.	/* SMC should not return */
#endif /* CFG_CORE_FFA */
END_FUNC reset_primary

#ifdef CFG_BOOT_SYNC_CPU
LOCAL_DATA sem_cpu_sync_start , :
	.word	sem_cpu_sync
END_DATA sem_cpu_sync_start

LOCAL_DATA sem_cpu_sync_end , :
	.word	sem_cpu_sync + (CFG_TEE_CORE_NB_CORE << 2)
END_DATA sem_cpu_sync_end
#endif

LOCAL_DATA cached_mem_start , :
	.word	__text_start
END_DATA cached_mem_start

LOCAL_DATA cached_mem_end , :
	.skip	4
END_DATA cached_mem_end

LOCAL_FUNC unhandled_cpu , :
	wfi
	b	unhandled_cpu
END_FUNC unhandled_cpu

#ifdef CFG_CORE_ASLR
LOCAL_FUNC relocate , :
	push	{r4-r5}
	/* r0 holds the load offset */
#ifdef CFG_WITH_PAGER
	ldr	r12, =__init_end
#else
	ldr	r12, =__end
#endif
	ldr	r2, [r12, #BOOT_EMBDATA_RELOC_OFFSET]
	ldr	r3, [r12, #BOOT_EMBDATA_RELOC_LEN]

	mov_imm	r1, TEE_RAM_START
	add	r2, r2, r12	/* start of relocations */
	add	r3, r3, r2	/* end of relocations */

	/*
	 * The relocations are not formatted as Rel32, instead they are in
	 * a compressed format created by get_reloc_bin() in
	 * scripts/gen_tee_bin.py.
	 *
	 * All the R_ARM_RELATIVE relocations are translated into a list
	 * of 32-bit offsets from TEE_RAM_START. Each such address holds a
	 * 32-bit value which is increased with the load offset.
	 */

#ifdef CFG_WITH_PAGER
	/*
	 * With the pager enabled we can only relocate the pager and init
	 * parts, the rest has to be done when a page is populated.
	 */
	sub	r12, r12, r1
#endif

	b	2f
	/* Loop over the relocation addresses and process all entries */
1:	ldr	r4, [r2], #4
#ifdef CFG_WITH_PAGER
	/* Skip too large addresses */
	cmp	r4, r12
	bge	2f
#endif
	ldr	r5, [r4, r1]
	add	r5, r5, r0
	str	r5, [r4, r1]

2:	cmp	r2, r3
	bne	1b

	pop	{r4-r5}
	bx	lr
END_FUNC relocate
#endif
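
/*
 * Hedged C equivalent of the relocation loop above, assuming struct
 * boot_embdata fields matching the BOOT_EMBDATA_RELOC_OFFSET/_LEN offsets
 * (the compressed format is produced by get_reloc_bin() in
 * scripts/gen_tee_bin.py):
 *
 *	uint32_t *reloc = (void *)((vaddr_t)embdata + embdata->reloc_offset);
 *	uint32_t *end = (void *)((vaddr_t)reloc + embdata->reloc_len);
 *
 *	for (; reloc != end; reloc++) {
 *		uint32_t *va = (uint32_t *)(TEE_RAM_START + *reloc);
 *
 *		*va += load_offset;	// with the pager, only entries below
 *					// __init_end are patched here
 *	}
 */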

/*
 * void enable_mmu(unsigned long core_pos);
 *
 * This function depends on being mapped within the identity map, where
 * the physical address and the virtual address are the same. After the
 * MMU has been enabled the instruction pointer will be updated to
 * execute at the new offset instead. The stack pointer and the return
 * address are updated accordingly.
 */
LOCAL_FUNC enable_mmu , : , .identity_map
	/* r0 = core pos */
	adr	r1, boot_mmu_config

#ifdef CFG_WITH_LPAE
	ldm	r1!, {r2, r3}
	/*
	 * r2 = ttbcr
	 * r3 = mair0
	 */
	write_ttbcr r2
	write_mair0 r3

	ldm	r1!, {r2, r3}
	/*
	 * r2 = ttbr0_base
	 * r3 = ttbr0_core_offset
	 */

	/*
	 * ttbr0_el1 = ttbr0_base + ttbr0_core_offset * core_pos
	 */
	mla	r12, r0, r3, r2
	mov	r0, #0
	write_ttbr0_64bit r12, r0
	write_ttbr1_64bit r0, r0
#else
	ldm	r1!, {r2, r3}
	/*
	 * r2 = prrr
	 * r3 = nmrr
	 */
	write_prrr r2
	write_nmrr r3

	ldm	r1!, {r2, r3}
	/*
	 * r2 = dacr
	 * r3 = ttbcr
	 */
	write_dacr r2
	write_ttbcr r3

	ldm	r1!, {r2}
	/* r2 = ttbr */
	write_ttbr0 r2
	write_ttbr1 r2

	mov	r2, #0
	write_contextidr r2
#endif
	ldm	r1!, {r2}
	/* r2 = load_offset (always 0 if CFG_CORE_ASLR=n) */
	isb

	/* Invalidate the TLB */
	write_tlbiall

	/*
	 * Make sure the translation table writes have drained into memory
	 * and the TLB invalidation is complete.
	 */
	dsb	sy
	isb

	read_sctlr r0
	orr	r0, r0, #SCTLR_M
#ifndef CFG_WITH_LPAE
	/* Enable Access flag (simplified access permissions) and TEX remap */
	orr	r0, r0, #(SCTLR_AFE | SCTLR_TRE)
#endif
	write_sctlr r0
	isb

	/* Update vbar */
	read_vbar r1
	add	r1, r1, r2
	write_vbar r1
	isb

	/* Invalidate the instruction cache and the branch predictor */
	write_iciallu
	write_bpiall
	isb

	read_sctlr r0
	/* Enable the I and D caches */
	orr	r0, r0, #SCTLR_I
	orr	r0, r0, #SCTLR_C
#if defined(CFG_ENABLE_SCTLR_Z)
	/*
	 * This is only needed on the ARMv7 architecture and is hence
	 * conditioned on the configuration directive CFG_ENABLE_SCTLR_Z.
	 * On more recent architectures, program flow prediction is
	 * automatically enabled when the MMU is enabled.
	 */
	orr	r0, r0, #SCTLR_Z
#endif
	write_sctlr r0
	isb

	/* Adjust the stack pointer and the return address */
	add	sp, sp, r2
	add	lr, lr, r2

	bx	lr
END_FUNC enable_mmu
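
/*
 * enable_mmu() consumes boot_mmu_config in the order it is loaded above.
 * Sketch of the implied layout, for reference only (field names are
 * illustrative, the authoritative struct core_mmu_config lives in the
 * core MMU headers):
 *
 *	struct core_mmu_config {		// CFG_WITH_LPAE variant
 *		uint32_t ttbcr;
 *		uint32_t mair0;
 *		uint32_t ttbr0_base;
 *		uint32_t ttbr0_core_offset;
 *		uint32_t load_offset;
 *	};
 *
 * Without CFG_WITH_LPAE the words read are prrr, nmrr, dacr, ttbcr, ttbr
 * and load_offset, in that order.
 */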

LOCAL_DATA stack_tmp_export_rel , :
	.word	stack_tmp_export - stack_tmp_export_rel
END_DATA stack_tmp_export_rel

LOCAL_DATA stack_tmp_stride_rel , :
	.word	stack_tmp_stride - stack_tmp_stride_rel
END_DATA stack_tmp_stride_rel

DATA boot_mmu_config , : /* struct core_mmu_config */
	.skip	CORE_MMU_CONFIG_SIZE
END_DATA boot_mmu_config

#if defined(CFG_WITH_ARM_TRUSTED_FW)
FUNC cpu_on_handler , : , .identity_map
UNWIND(	.cantunwind)
	mov	r4, r0
	mov	r5, r1
	mov	r6, lr

	set_sctlr
	isb

	adr	r0, reset_vect_table
	write_vbar r0

	mov	r4, lr

	bl	__get_core_pos
	bl	enable_mmu

	set_sp

	mov	r0, r4
	mov	r1, r5
	bl	boot_cpu_on_handler
#ifdef CFG_CORE_FFA
	b	thread_ffa_msg_wait
#else
	bx	r6
#endif
END_FUNC cpu_on_handler
DECLARE_KEEP_PAGER cpu_on_handler

#else /* defined(CFG_WITH_ARM_TRUSTED_FW) */

LOCAL_FUNC reset_secondary , : , .identity_map
UNWIND(	.cantunwind)
	adr	r0, reset_vect_table
	write_vbar r0

	wait_primary

	set_sp
#ifdef CFG_CORE_ASLR
	/*
	 * stack_tmp_export, which is used as the base when initializing sp,
	 * has been relocated to the new offset. Since the MMU isn't enabled
	 * on this CPU yet we need to restore the corresponding physical
	 * address.
	 */
	adr	r0, boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_LOAD_OFFSET]
	sub	sp, sp, r0
#endif

#if defined (CFG_BOOT_SECONDARY_REQUEST)
	/* If the L1 was not invalidated earlier, do it here */
	mov	r0, #DCACHE_OP_INV
	bl	dcache_op_level1
#endif

	bl	__get_core_pos
	bl	enable_mmu

	cpu_is_ready

#if defined (CFG_BOOT_SECONDARY_REQUEST)
	/*
	 * boot_core_hpen() returns (in r0) the address of the ns entry
	 * context structure
	 */
	bl	boot_core_hpen
	ldm	r0, {r0, r6}
#else
	mov	r0, r5		/* ns-entry address */
	mov	r6, #0
#endif
	bl	boot_init_secondary

	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	mov	r1, r6
	mov	r2, #0
	mov	r3, #0
	mov	r4, #0
	smc	#0
	b	.	/* SMC should not return */
END_FUNC reset_secondary
DECLARE_KEEP_PAGER reset_secondary
#endif /* defined(CFG_WITH_ARM_TRUSTED_FW) */