/* xref: /optee_os/core/arch/arm/kernel/entry_a32.S (revision 827be46c173f31c57006af70ca3a15a5b1a7fba3) */
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, Linaro Limited
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/cache_helpers.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

.arch_extension sec

.section .data
.balign 4

#ifdef CFG_BOOT_SYNC_CPU
.equ SEM_CPU_READY, 1
#endif

#ifdef CFG_PL310
.section .rodata.init
panic_boot_file:
	.asciz __FILE__

/*
 * void __assert_flat_mapped_range(uint32_t vaddr, uint32_t line)
 */
LOCAL_FUNC __assert_flat_mapped_range , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{ r4-r6, lr }
	mov	r4, r0
	mov	r5, r1
	bl	cpu_mmu_enabled
	cmp	r0, #0
	beq	1f
	mov	r0, r4
	bl	virt_to_phys
	cmp	r0, r4
	beq	1f
	/*
	 * This must match the generic panic routine:
	 * __do_panic(__FILE__, __LINE__, __func__, str)
	 */
	ldr	r0, =panic_boot_file
	mov	r1, r5
	mov	r2, #0
	mov	r3, #0
	bl	__do_panic
	b	.		/* should NOT return */
1:	pop	{ r4-r6, pc }
UNWIND(	.fnend)
END_FUNC __assert_flat_mapped_range

	/* Panic if the MMU is enabled and vaddr != paddr (clobbers lr) */
	.macro assert_flat_mapped_range va, line
		ldr	r0, \va
		ldr	r1, =\line
		bl	__assert_flat_mapped_range
	.endm
#endif /* CFG_PL310 */
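
/*
 * Roughly equivalent C for the check above (a sketch only; the real
 * cpu_mmu_enabled(), virt_to_phys() and __do_panic() prototypes live in
 * the core headers):
 *
 *	static void assert_flat_mapped_range(vaddr_t va, unsigned int line)
 *	{
 *		// With the MMU off, or with va flat mapped, nothing to do
 *		if (cpu_mmu_enabled() && virt_to_phys((void *)va) != va)
 *			__do_panic(__FILE__, line, NULL, NULL);
 *	}
 */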

FUNC plat_cpu_reset_early , :
UNWIND(	.fnstart)
	bx	lr
UNWIND(	.fnend)
END_FUNC plat_cpu_reset_early
DECLARE_KEEP_PAGER plat_cpu_reset_early
.weak plat_cpu_reset_early

	.section .identity_map, "ax"
	.align 5
LOCAL_FUNC reset_vect_table , : , .identity_map
	b	.
	b	.	/* Undef */
	b	.	/* Syscall */
	b	.	/* Prefetch abort */
	b	.	/* Data abort */
	b	.	/* Reserved */
	b	.	/* IRQ */
	b	.	/* FIQ */
END_FUNC reset_vect_table

	.macro cpu_is_ready
#ifdef CFG_BOOT_SYNC_CPU
	bl	__get_core_pos
	lsl	r0, r0, #2
	ldr	r1, =sem_cpu_sync
	ldr	r2, =SEM_CPU_READY
	str	r2, [r1, r0]
	dsb
	sev
#endif
	.endm

	.macro wait_primary
#ifdef CFG_BOOT_SYNC_CPU
	ldr	r0, =sem_cpu_sync
	mov	r2, #SEM_CPU_READY
	sev
1:
	ldr	r1, [r0]
	cmp	r1, r2
	wfene
	bne	1b
#endif
	.endm

	.macro wait_secondary
#ifdef CFG_BOOT_SYNC_CPU
	ldr	r0, =sem_cpu_sync
	mov	r3, #CFG_TEE_CORE_NB_CORE
	mov	r2, #SEM_CPU_READY
	sev
1:
	subs	r3, r3, #1
	beq	3f
	add	r0, r0, #4
2:
	ldr	r1, [r0]
	cmp	r1, r2
	wfene
	bne	2b
	b	1b
3:
#endif
	.endm
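
	/*
	 * The CFG_BOOT_SYNC_CPU macros above implement a simple boot
	 * semaphore: each CPU marks its own 32-bit slot in sem_cpu_sync[]
	 * as SEM_CPU_READY, the primary waits for every secondary slot and
	 * the secondaries wait for slot 0. A sketch of the same protocol in
	 * C (dsb()/sev()/wfe() stand for the corresponding instructions):
	 *
	 *	volatile uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
	 *
	 *	void cpu_is_ready(size_t pos)
	 *	{
	 *		sem_cpu_sync[pos] = SEM_CPU_READY;
	 *		dsb();
	 *		sev();
	 *	}
	 *
	 *	void wait_primary(void)
	 *	{
	 *		while (sem_cpu_sync[0] != SEM_CPU_READY)
	 *			wfe();
	 *	}
	 *
	 *	void wait_secondary(void)
	 *	{
	 *		for (size_t n = 1; n < CFG_TEE_CORE_NB_CORE; n++)
	 *			while (sem_cpu_sync[n] != SEM_CPU_READY)
	 *				wfe();
	 *	}
	 */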

	/*
	 * set_sctlr : Set up some core configuration in CP15 SCTLR
	 *
	 * Setup required by the current implementation of the OP-TEE core:
	 * - Disable data and instruction caches.
	 * - MMU is expected off and exceptions trapped in ARM mode.
	 * - Enable or disable alignment checks depending on platform
	 *   configuration.
	 * - Optionally enable write-implies-execute-never.
	 * - Optionally enable the round-robin strategy for cache replacement.
	 *
	 * Clobbers r0.
	 */
	.macro set_sctlr
		read_sctlr r0
		bic	r0, r0, #(SCTLR_M | SCTLR_C)
		bic	r0, r0, #SCTLR_I
		bic	r0, r0, #SCTLR_TE
		orr	r0, r0, #SCTLR_SPAN
#if defined(CFG_SCTLR_ALIGNMENT_CHECK)
		orr	r0, r0, #SCTLR_A
#else
		bic	r0, r0, #SCTLR_A
#endif
#if defined(CFG_HWSUPP_MEM_PERM_WXN) && defined(CFG_CORE_RWDATA_NOEXEC)
		orr	r0, r0, #(SCTLR_WXN | SCTLR_UWXN)
#endif
#if defined(CFG_ENABLE_SCTLR_RR)
		orr	r0, r0, #SCTLR_RR
#endif
		write_sctlr r0
	.endm
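
	/*
	 * Rough C equivalent of the SCTLR setup above (a sketch only;
	 * read_sctlr()/write_sctlr() stand for the CP15 accessors and
	 * IS_ENABLED() for the compile-time configuration checks):
	 *
	 *	uint32_t sctlr = read_sctlr();
	 *
	 *	sctlr &= ~(SCTLR_M | SCTLR_C | SCTLR_I | SCTLR_TE);
	 *	sctlr |= SCTLR_SPAN;
	 *	if (IS_ENABLED(CFG_SCTLR_ALIGNMENT_CHECK))
	 *		sctlr |= SCTLR_A;
	 *	else
	 *		sctlr &= ~SCTLR_A;
	 *	if (IS_ENABLED(CFG_HWSUPP_MEM_PERM_WXN) &&
	 *	    IS_ENABLED(CFG_CORE_RWDATA_NOEXEC))
	 *		sctlr |= SCTLR_WXN | SCTLR_UWXN;
	 *	if (IS_ENABLED(CFG_ENABLE_SCTLR_RR))
	 *		sctlr |= SCTLR_RR;
	 *	write_sctlr(sctlr);
	 */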

	/*
	 * Save boot arguments
	 * entry r0, saved r4: pagestore
	 * entry r1, saved r7: (ARMv7 standard bootarg #1)
	 * entry r2, saved r6: device tree address, (ARMv7 standard bootarg #2)
	 * entry lr, saved r5: non-secure entry address (ARMv7 bootarg #0)
	 */
	.macro bootargs_entry
#if defined(CFG_NS_ENTRY_ADDR)
	ldr	r5, =CFG_NS_ENTRY_ADDR
#else
	mov	r5, lr
#endif
#if defined(CFG_PAGEABLE_ADDR)
	ldr	r4, =CFG_PAGEABLE_ADDR
#else
	mov	r4, r0
#endif
#if defined(CFG_DT_ADDR)
	ldr	r6, =CFG_DT_ADDR
#else
	mov	r6, r2
#endif
	mov	r7, r1
	.endm

	.macro maybe_init_spectre_workaround
#if !defined(CFG_WITH_ARM_TRUSTED_FW) && \
    (defined(CFG_CORE_WORKAROUND_SPECTRE_BP) || \
     defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC))
	read_midr r0
	ubfx	r1, r0, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
	cmp	r1, #MIDR_IMPLEMENTER_ARM
	bne	1f
	ubfx	r1, r0, #MIDR_PRIMARY_PART_NUM_SHIFT, \
			#MIDR_PRIMARY_PART_NUM_WIDTH

	movw	r2, #CORTEX_A8_PART_NUM
	cmp	r1, r2
	moveq	r2, #ACTLR_CA8_ENABLE_INVALIDATE_BTB
	beq	2f

	movw	r2, #CORTEX_A15_PART_NUM
	cmp	r1, r2
	moveq	r2, #ACTLR_CA15_ENABLE_INVALIDATE_BTB
	bne	1f	/* Skip it for all other CPUs */
2:
	read_actlr r0
	orr	r0, r0, r2
	write_actlr r0
	isb
1:
#endif
	.endm
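
	/*
	 * The Spectre-BP workaround above only applies to Arm Cortex-A8 and
	 * Cortex-A15: it sets the core-specific ACTLR bit that allows branch
	 * predictor invalidation to take effect on those cores. A sketch of
	 * the same logic in C (read_midr()/read_actlr()/write_actlr() stand
	 * for the CP15 accessors):
	 *
	 *	uint32_t midr = read_midr();
	 *	uint32_t impl = (midr >> MIDR_IMPLEMENTER_SHIFT) &
	 *			((1 << MIDR_IMPLEMENTER_WIDTH) - 1);
	 *	uint32_t part = (midr >> MIDR_PRIMARY_PART_NUM_SHIFT) &
	 *			((1 << MIDR_PRIMARY_PART_NUM_WIDTH) - 1);
	 *
	 *	if (impl == MIDR_IMPLEMENTER_ARM) {
	 *		if (part == CORTEX_A8_PART_NUM)
	 *			write_actlr(read_actlr() |
	 *				    ACTLR_CA8_ENABLE_INVALIDATE_BTB);
	 *		else if (part == CORTEX_A15_PART_NUM)
	 *			write_actlr(read_actlr() |
	 *				    ACTLR_CA15_ENABLE_INVALIDATE_BTB);
	 *	}
	 */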

FUNC _start , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)

	bootargs_entry

	/*
	 * The 32-bit entry is expected to execute in Supervisor mode, but
	 * some bootloaders may enter in either Supervisor or Monitor mode
	 */
	cps	#CPSR_MODE_SVC

	/* Early ARM secure MP specific configuration */
	bl	plat_cpu_reset_early
	maybe_init_spectre_workaround

	set_sctlr
	isb

	ldr	r0, =reset_vect_table
	write_vbar r0

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	b	reset_primary
#else
	bl	__get_core_pos
	cmp	r0, #0
	beq	reset_primary
	b	reset_secondary
#endif
UNWIND(	.fnend)
END_FUNC _start
DECLARE_KEEP_INIT _start

	/*
	 * Setup sp to point to the top of the tmp stack for the current CPU:
	 * sp is assigned stack_tmp_export + cpu_id * stack_tmp_stride
	 */
	.macro set_sp
		bl	__get_core_pos
		cmp	r0, #CFG_TEE_CORE_NB_CORE
		/* Unsupported CPU, park it before it breaks something */
		bge	unhandled_cpu

		/*
		 * stack_tmp_stride and stack_tmp_stride_rel are the
		 * equivalent of:
		 * extern const u32 stack_tmp_stride;
		 * u32 stack_tmp_stride_rel = (u32)&stack_tmp_stride -
		 *			      (u32)&stack_tmp_stride_rel
		 *
		 * To load the value of stack_tmp_stride we do the equivalent
		 * of:
		 * *(u32 *)(stack_tmp_stride_rel + (u32)&stack_tmp_stride_rel)
		 */
		adr	r3, stack_tmp_stride_rel
		ldr	r1, [r3]
		ldr	r1, [r1, r3]

		/* Same pattern as for stack_tmp_stride above */
		adr	r3, stack_tmp_export_rel
		ldr	r2, [r3]
		ldr	r2, [r2, r3]

		/*
		 * r0 is core pos
		 * r1 is value of stack_tmp_stride
		 * r2 is value of stack_tmp_export
		 */
		mul	r1, r0, r1
		add	sp, r1, r2
	.endm
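
	/*
	 * In C, the position-independent loads done by set_sp are roughly
	 * the following (a sketch; core_pos is the value returned by
	 * __get_core_pos()):
	 *
	 *	uint32_t stride = *(uint32_t *)((uint32_t)&stack_tmp_stride_rel +
	 *					stack_tmp_stride_rel);
	 *	uint32_t base = *(uint32_t *)((uint32_t)&stack_tmp_export_rel +
	 *				      stack_tmp_export_rel);
	 *
	 *	sp = base + core_pos * stride;
	 */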

	/*
	 * Cache maintenance during entry: handle the outer cache.
	 * The end address is exclusive: the first byte not to be changed.
	 * Note however that arm_clX_inv/cleanbyva operate on full cache
	 * lines.
	 *
	 * Use an ANSI #define to capture the source file line number for the
	 * PL310 assertion.
	 */
	.macro __inval_cache_vrange vbase, vend, line
#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
		assert_flat_mapped_range (\vbase), (\line)
		bl	pl310_base
		ldr	r1, \vbase
		ldr	r2, \vend
		bl	arm_cl2_invbypa
#endif
		ldr	r0, \vbase
		ldr	r1, \vend
		sub	r1, r1, r0
		bl	dcache_inv_range
	.endm

	.macro __flush_cache_vrange vbase, vend, line
#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
		assert_flat_mapped_range (\vbase), (\line)
		ldr	r0, \vbase
		ldr	r1, \vend
		sub	r1, r1, r0
		bl	dcache_clean_range
		bl	pl310_base
		ldr	r1, \vbase
		ldr	r2, \vend
		bl	arm_cl2_cleaninvbypa
#endif
		ldr	r0, \vbase
		ldr	r1, \vend
		sub	r1, r1, r0
		bl	dcache_cleaninv_range
	.endm

#define inval_cache_vrange(vbase, vend) \
		__inval_cache_vrange vbase, vend, __LINE__

#define flush_cache_vrange(vbase, vend) \
		__flush_cache_vrange vbase, vend, __LINE__

#ifdef CFG_BOOT_SYNC_CPU
#define flush_cpu_semaphores \
		flush_cache_vrange(sem_cpu_sync_start, sem_cpu_sync_end)
#else
#define flush_cpu_semaphores
#endif
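
/*
 * The flush sequence above is, roughly, the following C (a sketch; the
 * dcache_*_range() and arm_cl2_*() helpers are the ones called from the
 * macros, and vbase/vend are the virtual start/end addresses):
 *
 *	void flush_cache_vrange(vaddr_t vbase, vaddr_t vend)
 *	{
 *	#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
 *		// Outer cache ops take physical addresses, hence the
 *		// flat-mapping assertion in the macro.
 *		dcache_clean_range((void *)vbase, vend - vbase);
 *		arm_cl2_cleaninvbypa(pl310_base(), vbase, vend);
 *	#endif
 *		dcache_cleaninv_range((void *)vbase, vend - vbase);
 *	}
 */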

LOCAL_FUNC reset_primary , : , .identity_map
UNWIND(	.fnstart)
UNWIND(	.cantunwind)

	/* preserve r4-r7: bootargs */

#ifdef CFG_WITH_PAGER
	/*
	 * Move init code into correct location and move hashes to a
	 * temporary safe location until the heap is initialized.
	 *
	 * The binary is built as:
	 * [Pager code, rodata and data] : In correct location
	 * [Init code and rodata] : Should be copied to __init_start
	 * [struct boot_embdata + data] : Should be saved before
	 * initializing pager, first uint32_t tells the length of the data
	 */
	ldr	r0, =__init_start	/* dst */
	ldr	r1, =__data_end		/* src */
	ldr	r2, =__init_end
	sub	r2, r2, r0		/* init len */
	ldr	r12, [r1, r2]		/* length of hashes etc */
	add	r2, r2, r12		/* length of init and hashes etc */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	r0, r0, r2		/* __init_start + len */
	add	r1, r1, r2		/* __data_end + len */
	str	r0, cached_mem_end
	ldr	r2, =__init_start
copy_init:
	ldmdb	r1!, {r3, r8-r12}
	stmdb	r0!, {r3, r8-r12}
	cmp	r0, r2
	bgt	copy_init
#else
	/*
	 * The binary is built as:
	 * [Core, rodata and data] : In correct location
	 * [struct boot_embdata + data] : Should be moved to __end, first
	 * uint32_t tells the length of the struct + data
	 */
	ldr	r0, =__end		/* dst */
	ldr	r1, =__data_end		/* src */
	ldr	r2, [r1]		/* struct boot_embdata::total_len */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	r0, r0, r2
	add	r1, r1, r2
	str	r0, cached_mem_end
	ldr	r2, =__end

copy_init:
	ldmdb	r1!, {r3, r8-r12}
	stmdb	r0!, {r3, r8-r12}
	cmp	r0, r2
	bgt	copy_init
#endif
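
	/*
	 * The copy loop above is conceptually a memmove() of the embedded
	 * data (and, with the pager, the init code), done backwards in
	 * 24-byte (six register) chunks so that an overlapping destination
	 * above the source is safe. In C terms (dst, src and len correspond
	 * to the registers set up just before copy_init):
	 *
	 *	memmove((void *)dst, (const void *)src, len);
	 */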

	/*
	 * Clear .bss, this code obviously depends on the linker keeping
	 * the start/end of .bss at least 8-byte aligned.
	 */
	ldr	r0, =__bss_start
	ldr	r1, =__bss_end
	mov	r2, #0
	mov	r3, #0
clear_bss:
	stmia	r0!, {r2, r3}
	cmp	r0, r1
	bls	clear_bss

#ifdef CFG_VIRTUALIZATION
	/*
	 * Clear .nex_bss, this code obviously depends on the linker keeping
	 * the start/end of .nex_bss at least 8-byte aligned.
	 */
	ldr	r0, =__nex_bss_start
	ldr	r1, =__nex_bss_end
	mov	r2, #0
	mov	r3, #0
clear_nex_bss:
	stmia	r0!, {r2, r3}
	cmp	r0, r1
	bls	clear_nex_bss
#endif

#ifdef CFG_CORE_SANITIZE_KADDRESS
	/* First initialize the entire shadow area with no access */
	ldr	r0, =__asan_shadow_start	/* start */
	ldr	r1, =__asan_shadow_end	/* limit */
	mov	r2, #ASAN_DATA_RED_ZONE
shadow_no_access:
	str	r2, [r0], #4
	cmp	r0, r1
	bls	shadow_no_access

	/* Mark the entire stack area as OK */
	ldr	r2, =CFG_ASAN_SHADOW_OFFSET
	ldr	r0, =__nozi_stack_start	/* start */
	lsr	r0, r0, #ASAN_BLOCK_SHIFT
	add	r0, r0, r2
	ldr	r1, =__nozi_stack_end	/* limit */
	lsr	r1, r1, #ASAN_BLOCK_SHIFT
	add	r1, r1, r2
	mov	r2, #0
shadow_stack_access_ok:
	strb	r2, [r0], #1
	cmp	r0, r1
	bls	shadow_stack_access_ok
#endif
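
	/*
	 * The shadow marking above follows the usual KASan mapping: each
	 * shadow byte covers one ASAN block and the shadow address is
	 * derived as sketched below (va is an address in the range being
	 * marked):
	 *
	 *	uint8_t *shadow = (uint8_t *)((va >> ASAN_BLOCK_SHIFT) +
	 *				      CFG_ASAN_SHADOW_OFFSET);
	 *	*shadow = 0;	// 0 means the whole block is accessible
	 */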

	set_sp

	bl	thread_init_thread_core_local

	/* complete ARM secure MP common configuration */
	bl	plat_primary_init_early

	/* Enable Console */
	bl	console_init

#ifdef CFG_PL310
	bl	pl310_base
	bl	arm_cl2_config
#endif

	/*
	 * Invalidate dcache for all memory used during initialization to
	 * avoid nasty surprises when the cache is turned on. We must not
	 * invalidate memory not used by OP-TEE since we may invalidate
	 * entries used by, for instance, ARM Trusted Firmware.
	 */
	inval_cache_vrange(cached_mem_start, cached_mem_end)

#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
	/* Enable PL310 if not yet enabled */
	bl	pl310_base
	bl	arm_cl2_enable
#endif

#ifdef CFG_CORE_ASLR
	mov	r0, r6
	bl	get_aslr_seed
#else
	mov	r0, #0
#endif

	ldr	r1, =boot_mmu_config
	bl	core_init_mmu_map

#ifdef CFG_CORE_ASLR
	/*
	 * Process the relocation information to update addresses with the
	 * new offset. We do this now, before the MMU is enabled, as some of
	 * the memory will become write protected.
	 */
	ldr	r0, =boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_LOAD_OFFSET]
	/*
	 * Update the cached_mem_end address with the load offset since it
	 * was calculated before relocation.
	 */
	ldr	r2, cached_mem_end
	add	r2, r2, r0
	str	r2, cached_mem_end

	bl	relocate
#endif

	bl	__get_core_pos
	bl	enable_mmu
#ifdef CFG_CORE_ASLR
	/*
	 * Reinitialize the console, since register_serial_console() has
	 * previously registered a PA and with ASLR the VA is different
	 * from the PA.
	 */
	bl	console_init
#endif

	mov	r0, r4		/* pageable part address */
	mov	r1, r5		/* ns-entry address */
	mov	r2, r6		/* DT address */
	bl	boot_init_primary

	/*
	 * In case we've touched memory that secondary CPUs will use before
	 * they have turned on their D-cache, clean and invalidate the
	 * D-cache before exiting to normal world.
	 */
	flush_cache_vrange(cached_mem_start, cached_mem_end)

	/* release secondary boot cores and sync with them */
	cpu_is_ready
	flush_cpu_semaphores
	wait_secondary

#ifdef CFG_PL310_LOCKED
#ifdef CFG_PL310_SIP_PROTOCOL
#error "CFG_PL310_LOCKED must not be defined when CFG_PL310_SIP_PROTOCOL=y"
#endif
	/* Lock/invalidate all lines: the PL310 behaves as if disabled */
	bl	pl310_base
	bl	arm_cl2_lockallways
	bl	pl310_base
	bl	arm_cl2_cleaninvbyway
#endif

	/*
	 * Clear current thread id now to allow the thread to be reused on
	 * next entry. Matches the thread_init_boot_thread() in
	 * boot.c.
	 */
	bl	thread_clr_boot_thread

#ifdef CFG_CORE_FFA
	ldr	r0, =cpu_on_handler
	/*
	 * Compensate for the load offset since cpu_on_handler() is
	 * called with the MMU off.
	 */
	ldr	r1, boot_mmu_config + CORE_MMU_CONFIG_LOAD_OFFSET
	sub	r0, r0, r1
	bl	ffa_secondary_cpu_boot_req
	b	thread_ffa_msg_wait
#else /* CFG_CORE_FFA */

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	ldr	r0, =boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_LOAD_OFFSET]
	ldr	r1, =thread_vector_table
	/* Pass the vector address returned from main_init */
	sub	r1, r1, r0
#else
	/* Relay standard bootargs #1 and #2 to the non-secure entry */
	mov	r4, #0
	mov	r3, r6		/* std bootarg #2 for register R2 */
	mov	r2, r7		/* std bootarg #1 for register R1 */
	mov	r1, #0
#endif /* CFG_WITH_ARM_TRUSTED_FW */

	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	smc	#0
	b	.	/* SMC should not return */
#endif /* CFG_CORE_FFA */
UNWIND(	.fnend)
END_FUNC reset_primary

#ifdef CFG_BOOT_SYNC_CPU
LOCAL_DATA sem_cpu_sync_start , :
	.word	sem_cpu_sync
END_DATA sem_cpu_sync_start

LOCAL_DATA sem_cpu_sync_end , :
	.word	sem_cpu_sync + (CFG_TEE_CORE_NB_CORE << 2)
END_DATA sem_cpu_sync_end
#endif

LOCAL_DATA cached_mem_start , :
	.word	__text_start
END_DATA cached_mem_start

LOCAL_DATA cached_mem_end , :
	.skip	4
END_DATA cached_mem_end

LOCAL_FUNC unhandled_cpu , :
UNWIND(	.fnstart)
	wfi
	b	unhandled_cpu
UNWIND(	.fnend)
END_FUNC unhandled_cpu

#ifdef CFG_CORE_ASLR
LOCAL_FUNC relocate , :
	push	{r4-r5}
	/* r0 holds load offset */
#ifdef CFG_WITH_PAGER
	ldr	r12, =__init_end
#else
	ldr	r12, =__end
#endif
	ldr	r2, [r12, #BOOT_EMBDATA_RELOC_OFFSET]
	ldr	r3, [r12, #BOOT_EMBDATA_RELOC_LEN]

	mov_imm	r1, TEE_RAM_START
	add	r2, r2, r12	/* start of relocations */
	add	r3, r3, r2	/* end of relocations */

	/*
	 * Relocations are not formatted as Rel32, instead they are in a
	 * compressed format created by get_reloc_bin() in
	 * scripts/gen_tee_bin.py.
	 *
	 * All the R_ARM_RELATIVE relocations are translated into a list
	 * of 32-bit offsets from TEE_RAM_START. Each offset points at a
	 * 32-bit value which is increased with the load offset.
	 */

#ifdef CFG_WITH_PAGER
	/*
	 * With the pager enabled we can only relocate the pager and init
	 * parts, the rest has to be done when a page is populated.
	 */
	sub	r12, r12, r1
#endif

	b	2f
	/* Loop over the relocation addresses and process all entries */
1:	ldr	r4, [r2], #4
#ifdef CFG_WITH_PAGER
	/* Skip too large addresses */
	cmp	r4, r12
	bge	2f
#endif
	ldr	r5, [r4, r1]
	add	r5, r5, r0
	str	r5, [r4, r1]

2:	cmp	r2, r3
	bne	1b

	pop	{r4-r5}
	bx	lr
END_FUNC relocate
#endif
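
/*
 * A C sketch of what relocate does with the compressed relocation table
 * (offsets[] and load_offset correspond to the registers set up above;
 * num_relocs is implied by BOOT_EMBDATA_RELOC_LEN / sizeof(uint32_t)):
 *
 *	for (size_t n = 0; n < num_relocs; n++) {
 *		uint32_t *p = (uint32_t *)(TEE_RAM_START + offsets[n]);
 *
 *		*p += load_offset;
 *	}
 *
 * With the pager, offsets beyond the init part end the loop early since
 * those pages are relocated when they are populated.
 */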

/*
 * void enable_mmu(unsigned long core_pos);
 *
 * This function depends on being mapped within the identity map, where
 * the physical address and the virtual address are the same. After the
 * MMU has been enabled the instruction pointer is updated to execute at
 * the new offset instead. The stack pointer and the return address are
 * updated.
 */
LOCAL_FUNC enable_mmu , : , .identity_map
	/* r0 = core pos */
	adr	r1, boot_mmu_config

#ifdef CFG_WITH_LPAE
	ldm	r1!, {r2, r3}
	/*
	 * r2 = ttbcr
	 * r3 = mair0
	 */
	write_ttbcr r2
	write_mair0 r3

	ldm	r1!, {r2, r3}
	/*
	 * r2 = ttbr0_base
	 * r3 = ttbr0_core_offset
	 */

	/*
	 * ttbr0_el1 = ttbr0_base + ttbr0_core_offset * core_pos
	 */
	mla	r12, r0, r3, r2
	mov	r0, #0
	write_ttbr0_64bit r12, r0
	write_ttbr1_64bit r0, r0
#else
	ldm	r1!, {r2, r3}
	/*
	 * r2 = prrr
	 * r3 = nmrr
	 */
	write_prrr r2
	write_nmrr r3

	ldm	r1!, {r2, r3}
	/*
	 * r2 = dacr
	 * r3 = ttbcr
	 */
	write_dacr r2
	write_ttbcr r3

	ldm	r1!, {r2}
	/* r2 = ttbr */
	write_ttbr0 r2
	write_ttbr1 r2

	mov	r2, #0
	write_contextidr r2
#endif
	ldm	r1!, {r2}
	/* r2 = load_offset (always 0 if CFG_CORE_ASLR=n) */
	isb

	/* Invalidate TLB */
	write_tlbiall

	/*
	 * Make sure translation table writes have drained into memory and
	 * the TLB invalidation is complete.
	 */
	dsb	sy
	isb

	read_sctlr r0
	orr	r0, r0, #SCTLR_M
#ifndef CFG_WITH_LPAE
	/* Enable Access flag (simplified access permissions) and TEX remap */
	orr	r0, r0, #(SCTLR_AFE | SCTLR_TRE)
#endif
	write_sctlr r0
	isb

	/* Update vbar */
	read_vbar r1
	add	r1, r1, r2
	write_vbar r1
	isb

	/* Invalidate instruction cache and branch predictor */
	write_iciallu
	write_bpiall
	isb

	read_sctlr r0
	/* Enable I and D cache */
	orr	r0, r0, #SCTLR_I
	orr	r0, r0, #SCTLR_C
#if defined(CFG_ENABLE_SCTLR_Z)
	/*
	 * This is only needed on the ARMv7 architecture and is hence
	 * conditioned by the configuration directive CFG_ENABLE_SCTLR_Z.
	 * On more recent architectures, program flow prediction is
	 * automatically enabled when the MMU is enabled.
	 */
	orr	r0, r0, #SCTLR_Z
#endif
	write_sctlr r0
	isb

	/* Adjust stack pointer and return address */
	add	sp, sp, r2
	add	lr, lr, r2

	bx	lr
END_FUNC enable_mmu

LOCAL_DATA stack_tmp_export_rel , :
	.word	stack_tmp_export - stack_tmp_export_rel
END_DATA stack_tmp_export_rel

LOCAL_DATA stack_tmp_stride_rel , :
	.word	stack_tmp_stride - stack_tmp_stride_rel
END_DATA stack_tmp_stride_rel

DATA boot_mmu_config , : /* struct core_mmu_config */
	.skip	CORE_MMU_CONFIG_SIZE
END_DATA boot_mmu_config
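
/*
 * The fields consumed from boot_mmu_config by enable_mmu() above follow
 * the layout of struct core_mmu_config, from which the generated
 * CORE_MMU_CONFIG_* constants are derived. Roughly, for the two
 * page-table formats (a sketch inferred from the loads in enable_mmu();
 * the C struct in the core headers is authoritative):
 *
 *	struct core_mmu_config {
 *	#ifdef CFG_WITH_LPAE
 *		uint32_t ttbcr;
 *		uint32_t mair0;
 *		uint32_t ttbr0_base;
 *		uint32_t ttbr0_core_offset;
 *	#else
 *		uint32_t prrr;
 *		uint32_t nmrr;
 *		uint32_t dacr;
 *		uint32_t ttbcr;
 *		uint32_t ttbr;
 *	#endif
 *		uint32_t load_offset;
 *	};
 */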

#if defined(CFG_WITH_ARM_TRUSTED_FW)
FUNC cpu_on_handler , : , .identity_map
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mov	r4, r0
	mov	r5, r1
	mov	r6, lr

	set_sctlr
	isb

	adr	r0, reset_vect_table
	write_vbar r0

	mov	r4, lr

	bl	__get_core_pos
	bl	enable_mmu

	set_sp

	mov	r0, r4
	mov	r1, r5
	bl	boot_cpu_on_handler
#ifdef CFG_CORE_FFA
	b	thread_ffa_msg_wait
#else
	bx	r6
#endif
UNWIND(	.fnend)
END_FUNC cpu_on_handler
DECLARE_KEEP_PAGER cpu_on_handler

#else /* defined(CFG_WITH_ARM_TRUSTED_FW) */

LOCAL_FUNC reset_secondary , : , .identity_map
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	adr	r0, reset_vect_table
	write_vbar r0

	wait_primary

	set_sp
#ifdef CFG_CORE_ASLR
	/*
	 * stack_tmp_export, which is used as the base when initializing sp,
	 * has been relocated to the new offset. Since the MMU isn't enabled
	 * on this CPU yet we need to restore the corresponding physical
	 * address.
	 */
	adr	r0, boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_LOAD_OFFSET]
	sub	sp, sp, r0
#endif

#if defined (CFG_BOOT_SECONDARY_REQUEST)
	/* If L1 was not invalidated before, do it here */
	mov	r0, #DCACHE_OP_INV
	bl	dcache_op_level1
#endif

	bl	__get_core_pos
	bl	enable_mmu

	cpu_is_ready

#if defined (CFG_BOOT_SECONDARY_REQUEST)
	/*
	 * boot_core_hpen() returns (in r0) the address of the
	 * ns entry context structure
	 */
	bl	boot_core_hpen
	ldm	r0, {r0, r6}
#else
	mov	r0, r5		/* ns-entry address */
	mov	r6, #0
#endif
	bl	boot_init_secondary

	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	mov	r1, r6
	mov	r2, #0
	mov	r3, #0
	mov	r4, #0
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC reset_secondary
DECLARE_KEEP_PAGER reset_secondary
#endif /* defined(CFG_WITH_ARM_TRUSTED_FW) */