/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, Linaro Limited
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/cache_helpers.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

.arch_extension sec

.section .data
.balign 4

#ifdef CFG_BOOT_SYNC_CPU
.equ SEM_CPU_READY, 1
#endif

#ifdef CFG_PL310
.section .rodata.init
panic_boot_file:
	.asciz __FILE__

/*
 * void __assert_flat_mapped_range(uint32_t vaddr, uint32_t line)
 */
LOCAL_FUNC __assert_flat_mapped_range , :
UNWIND(	.cantunwind)
	push	{ r4-r6, lr }
	mov	r4, r0
	mov	r5, r1
	bl	cpu_mmu_enabled
	cmp	r0, #0
	beq	1f
	mov	r0, r4
	bl	virt_to_phys
	cmp	r0, r4
	beq	1f
	/*
	 * This must be consistent with the generic panic routine:
	 * __do_panic(__FILE__, __LINE__, __func__, str)
	 */
	ldr	r0, =panic_boot_file
	mov	r1, r5
	mov	r2, #0
	mov	r3, #0
	bl	__do_panic
	b	.		/* should NOT return */
1:	pop	{ r4-r6, pc }
END_FUNC __assert_flat_mapped_range

	/* Panic if the MMU is enabled and vaddr != paddr (clobbers lr) */
	.macro assert_flat_mapped_range va, line
		ldr	r0, \va
		ldr	r1, =\line
		bl	__assert_flat_mapped_range
	.endm
#endif /* CFG_PL310 */

FUNC plat_cpu_reset_early , :
	bx	lr
END_FUNC plat_cpu_reset_early
DECLARE_KEEP_PAGER plat_cpu_reset_early
.weak plat_cpu_reset_early

	.section .identity_map, "ax"
	.align 5
LOCAL_FUNC reset_vect_table , : , .identity_map
	b	.
	b	.	/* Undef */
	b	.	/* Syscall */
	b	.	/* Prefetch abort */
	b	.	/* Data abort */
	b	.	/* Reserved */
	b	.	/* IRQ */
	b	.	/* FIQ */
END_FUNC reset_vect_table

	.macro cpu_is_ready
#ifdef CFG_BOOT_SYNC_CPU
	bl	__get_core_pos
	lsl	r0, r0, #2
	ldr	r1, =sem_cpu_sync
	ldr	r2, =SEM_CPU_READY
	str	r2, [r1, r0]
	dsb
	sev
#endif
	.endm

	.macro wait_primary
#ifdef CFG_BOOT_SYNC_CPU
	ldr	r0, =sem_cpu_sync
	mov	r2, #SEM_CPU_READY
	sev
1:
	ldr	r1, [r0]
	cmp	r1, r2
	wfene
	bne	1b
#endif
	.endm

	.macro wait_secondary
#ifdef CFG_BOOT_SYNC_CPU
	ldr	r0, =sem_cpu_sync
	mov	r3, #CFG_TEE_CORE_NB_CORE
	mov	r2, #SEM_CPU_READY
	sev
1:
	subs	r3, r3, #1
	beq	3f
	add	r0, r0, #4
2:
	ldr	r1, [r0]
	cmp	r1, r2
	wfene
	bne	2b
	b	1b
3:
#endif
	.endm
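
	/*
	 * A rough C equivalent of the CFG_BOOT_SYNC_CPU handshake built
	 * from the three macros above (sem_cpu_sync[] is one word per
	 * core, defined elsewhere):
	 *
	 *	cpu_is_ready:	sem_cpu_sync[get_core_pos()] = SEM_CPU_READY;
	 *			dsb(); sev();
	 *	wait_primary:	while (sem_cpu_sync[0] != SEM_CPU_READY)
	 *				wfe();
	 *	wait_secondary:	for (n = 1; n < CFG_TEE_CORE_NB_CORE; n++)
	 *				while (sem_cpu_sync[n] != SEM_CPU_READY)
	 *					wfe();
	 */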

	/*
	 * set_sctlr : Set up some core configuration in CP15 SCTLR
	 *
	 * Setup required by the current implementation of the OP-TEE core:
	 * - Disable data and instruction caches.
	 * - Disable the MMU and have exceptions trapped in ARM mode.
	 * - Enable or disable alignment checks depending on the platform
	 *   configuration.
	 * - Optionally enable write-implies-execute-never.
	 * - Optionally enable the round-robin strategy for cache replacement.
	 *
	 * Clobbers r0.
	 */
	.macro set_sctlr
		read_sctlr r0
		bic	r0, r0, #(SCTLR_M | SCTLR_C)
		bic	r0, r0, #SCTLR_I
		bic	r0, r0, #SCTLR_TE
		orr	r0, r0, #SCTLR_SPAN
#if defined(CFG_SCTLR_ALIGNMENT_CHECK)
		orr	r0, r0, #SCTLR_A
#else
		bic	r0, r0, #SCTLR_A
#endif
#if defined(CFG_HWSUPP_MEM_PERM_WXN) && defined(CFG_CORE_RWDATA_NOEXEC)
		orr	r0, r0, #(SCTLR_WXN | SCTLR_UWXN)
#endif
#if defined(CFG_ENABLE_SCTLR_RR)
		orr	r0, r0, #SCTLR_RR
#endif
		write_sctlr r0
	.endm

	/*
	 * Save boot arguments
	 * entry r0, saved r4: pagestore
	 * entry r1, saved r7: (ARMv7 standard bootarg #1)
	 * entry r2, saved r6: device tree address, (ARMv7 standard bootarg #2)
	 * entry lr, saved r5: non-secure entry address (ARMv7 bootarg #0)
	 */
	.macro bootargs_entry
#if defined(CFG_NS_ENTRY_ADDR)
	ldr	r5, =CFG_NS_ENTRY_ADDR
#else
	mov	r5, lr
#endif
#if defined(CFG_PAGEABLE_ADDR)
	ldr	r4, =CFG_PAGEABLE_ADDR
#else
	mov	r4, r0
#endif
#if defined(CFG_DT_ADDR)
	ldr	r6, =CFG_DT_ADDR
#else
	mov	r6, r2
#endif
	mov	r7, r1
	.endm

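	/*
	 * On Cortex-A8 and Cortex-A15, when OP-TEE is not running on top
	 * of ARM Trusted Firmware and one of the Spectre-BP workarounds is
	 * enabled, set the implementation defined ACTLR bit needed for
	 * BPIALL to actually invalidate the branch predictor. Other
	 * implementers and part numbers are left untouched.
	 */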
	.macro maybe_init_spectre_workaround
#if !defined(CFG_WITH_ARM_TRUSTED_FW) && \
    (defined(CFG_CORE_WORKAROUND_SPECTRE_BP) || \
     defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC))
	read_midr r0
	ubfx	r1, r0, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
	cmp	r1, #MIDR_IMPLEMENTER_ARM
	bne	1f
	ubfx	r1, r0, #MIDR_PRIMARY_PART_NUM_SHIFT, \
			#MIDR_PRIMARY_PART_NUM_WIDTH

	movw	r2, #CORTEX_A8_PART_NUM
	cmp	r1, r2
	moveq	r2, #ACTLR_CA8_ENABLE_INVALIDATE_BTB
	beq	2f

	movw	r2, #CORTEX_A15_PART_NUM
	cmp	r1, r2
	moveq	r2, #ACTLR_CA15_ENABLE_INVALIDATE_BTB
	bne	1f	/* Skip it for all other CPUs */
2:
	read_actlr r0
	orr	r0, r0, r2
	write_actlr r0
	isb
1:
#endif
	.endm

FUNC _start , :
UNWIND(	.cantunwind)

	bootargs_entry

	/*
	 * The 32-bit entry point is expected to execute in Supervisor
	 * mode, but some bootloaders may enter in either Supervisor or
	 * Monitor mode.
	 */
	cps	#CPSR_MODE_SVC

	/* Early ARM secure MP specific configuration */
	bl	plat_cpu_reset_early
	maybe_init_spectre_workaround

	set_sctlr
	isb

	ldr	r0, =reset_vect_table
	write_vbar r0

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	b	reset_primary
#else
	bl	__get_core_pos
	cmp	r0, #0
	beq	reset_primary
	b	reset_secondary
#endif
END_FUNC _start
DECLARE_KEEP_INIT _start

	/*
	 * Set up sp to point to the top of the tmp stack for the current
	 * CPU: sp is assigned stack_tmp_export + cpu_id * stack_tmp_stride
	 */
	.macro set_sp
		bl	__get_core_pos
		cmp	r0, #CFG_TEE_CORE_NB_CORE
		/* Unsupported CPU, park it before it breaks something */
		bge	unhandled_cpu

		/*
		 * stack_tmp_stride and stack_tmp_stride_rel are the
		 * equivalent of:
		 * extern const u32 stack_tmp_stride;
		 * u32 stack_tmp_stride_rel = (u32)&stack_tmp_stride -
		 *			      (u32)&stack_tmp_stride_rel;
		 *
		 * To load the value of stack_tmp_stride we do the equivalent
		 * of:
		 * *(u32 *)(stack_tmp_stride_rel + (u32)&stack_tmp_stride_rel)
		 */
		adr	r3, stack_tmp_stride_rel
		ldr	r1, [r3]
		ldr	r1, [r1, r3]

		/* Same pattern as for stack_tmp_stride above */
		adr	r3, stack_tmp_export_rel
		ldr	r2, [r3]
		ldr	r2, [r2, r3]

		/*
		 * r0 is core pos
		 * r1 is value of stack_tmp_stride
		 * r2 is value of stack_tmp_export
		 */
		mul	r1, r0, r1
		add	sp, r1, r2
	.endm
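
	/*
	 * The *_rel indirection above makes the loads strictly PC-relative,
	 * so set_sp finds the correct addresses of the two variables both
	 * when running at the physical load address (MMU off) and after
	 * relocation with CFG_CORE_ASLR: the stored link-time differences
	 * are unaffected by the load offset.
	 */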

	/*
	 * Cache maintenance during entry: handle the outer cache.
	 * The end address is exclusive: it is the first byte not to be
	 * changed. Note however that arm_clX_inv/cleanbyva operate on
	 * full cache lines.
	 *
	 * Use an ANSI #define to capture the source file line number for
	 * the PL310 assertion.
	 */
	.macro __inval_cache_vrange vbase, vend, line
#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
		assert_flat_mapped_range (\vbase), (\line)
		bl	pl310_base
		ldr	r1, \vbase
		ldr	r2, \vend
		bl	arm_cl2_invbypa
#endif
		ldr	r0, \vbase
		ldr	r1, \vend
		sub	r1, r1, r0
		bl	dcache_inv_range
	.endm

	.macro __flush_cache_vrange vbase, vend, line
#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
		assert_flat_mapped_range (\vbase), (\line)
		ldr	r0, \vbase
		ldr	r1, \vend
		sub	r1, r1, r0
		bl	dcache_clean_range
		bl	pl310_base
		ldr	r1, \vbase
		ldr	r2, \vend
		bl	arm_cl2_cleaninvbypa
#endif
		ldr	r0, \vbase
		ldr	r1, \vend
		sub	r1, r1, r0
		bl	dcache_cleaninv_range
	.endm

#define inval_cache_vrange(vbase, vend) \
		__inval_cache_vrange vbase, vend, __LINE__

#define flush_cache_vrange(vbase, vend) \
		__flush_cache_vrange vbase, vend, __LINE__

#ifdef CFG_BOOT_SYNC_CPU
#define flush_cpu_semaphores \
		flush_cache_vrange(sem_cpu_sync_start, sem_cpu_sync_end)
#else
#define flush_cpu_semaphores
#endif
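
/*
 * In rough C-like terms, and ignoring the PL310 outer cache path, the
 * macros above amount to:
 *
 *	inval_cache_vrange(start, end):
 *		dcache_inv_range(*start, *end - *start);
 *	flush_cache_vrange(start, end):
 *		dcache_cleaninv_range(*start, *end - *start);
 *
 * where start and end are labels of words holding the start and
 * (exclusive) end virtual addresses, e.g. cached_mem_start/cached_mem_end.
 */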

LOCAL_FUNC reset_primary , : , .identity_map
UNWIND(	.cantunwind)

	/* preserve r4-r7: bootargs */

#ifdef CFG_WITH_PAGER
	/*
	 * Move the init code into its correct location and move the hashes
	 * to a temporary safe location until the heap is initialized.
	 *
	 * The binary is built as:
	 * [Pager code, rodata and data] : In correct location
	 * [Init code and rodata] : Should be copied to __init_start
	 * [struct boot_embdata + data] : Should be saved before
	 * initializing the pager, the first uint32_t tells the length of
	 * the data
	 */
	ldr	r0, =__init_start	/* dst */
	ldr	r1, =__data_end		/* src */
	ldr	r2, =__init_end
	sub	r2, r2, r0		/* init len */
	ldr	r12, [r1, r2]		/* length of hashes etc */
	add	r2, r2, r12		/* length of init and hashes etc */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	r0, r0, r2		/* __init_start + len */
	add	r1, r1, r2		/* __data_end + len */
	str	r0, cached_mem_end
	ldr	r2, =__init_start
copy_init:
	ldmdb	r1!, {r3, r8-r12}
	stmdb	r0!, {r3, r8-r12}
	cmp	r0, r2
	bgt	copy_init
#else
	/*
	 * The binary is built as:
	 * [Core, rodata and data] : In correct location
	 * [struct boot_embdata + data] : Should be moved to __end, first
	 * uint32_t tells the length of the struct + data
	 */
	ldr	r0, =__end		/* dst */
	ldr	r1, =__data_end		/* src */
	ldr	r2, [r1]		/* struct boot_embdata::total_len */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	r0, r0, r2
	add	r1, r1, r2
	str	r0, cached_mem_end
	ldr	r2, =__end

copy_init:
	ldmdb	r1!, {r3, r8-r12}
	stmdb	r0!, {r3, r8-r12}
	cmp	r0, r2
	bgt	copy_init
#endif
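
	/*
	 * In C terms the copy loop above is roughly a memmove() of the
	 * embedded data to its final location. For the non-pager case:
	 *
	 *	len = ((struct boot_embdata *)__data_end)->total_len;
	 *	memmove(__end, __data_end, len);
	 *	cached_mem_end = __end + len;
	 *
	 * The pager case is similar, but copies the init code and hashes
	 * to __init_start and records __init_start + len in cached_mem_end.
	 */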

	/*
	 * Clear .bss. This code depends on the linker keeping the start
	 * and end of .bss at least 8-byte aligned.
	 */
	ldr	r0, =__bss_start
	ldr	r1, =__bss_end
	mov	r2, #0
	mov	r3, #0
clear_bss:
	stmia	r0!, {r2, r3}
	cmp	r0, r1
	bls	clear_bss
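
	/*
	 * The loop above is roughly memset(__bss_start, 0,
	 * __bss_end - __bss_start) done with 8-byte stores; the .nex_bss
	 * loop below follows the same pattern.
	 */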

#ifdef CFG_VIRTUALIZATION
	/*
	 * Clear .nex_bss. This code depends on the linker keeping the
	 * start and end of .nex_bss at least 8-byte aligned.
	 */
	ldr	r0, =__nex_bss_start
	ldr	r1, =__nex_bss_end
	mov	r2, #0
	mov	r3, #0
clear_nex_bss:
	stmia	r0!, {r2, r3}
	cmp	r0, r1
	bls	clear_nex_bss
#endif

#ifdef CFG_CORE_SANITIZE_KADDRESS
	/* First initialize the entire shadow area with no access */
	ldr	r0, =__asan_shadow_start	/* start */
	ldr	r1, =__asan_shadow_end	/* limit */
	mov	r2, #ASAN_DATA_RED_ZONE
shadow_no_access:
	str	r2, [r0], #4
	cmp	r0, r1
	bls	shadow_no_access

	/* Mark the entire stack area as OK */
	ldr	r2, =CFG_ASAN_SHADOW_OFFSET
	ldr	r0, =__nozi_stack_start	/* start */
	lsr	r0, r0, #ASAN_BLOCK_SHIFT
	add	r0, r0, r2
	ldr	r1, =__nozi_stack_end	/* limit */
	lsr	r1, r1, #ASAN_BLOCK_SHIFT
	add	r1, r1, r2
	mov	r2, #0
shadow_stack_access_ok:
	strb	r2, [r0], #1
	cmp	r0, r1
	bls	shadow_stack_access_ok
#endif
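
	/*
	 * Roughly, the CFG_CORE_SANITIZE_KADDRESS setup above poisons the
	 * whole ASAN shadow area with ASAN_DATA_RED_ZONE and then clears
	 * (unpoisons) the shadow bytes covering the __nozi_stack_* range,
	 * i.e. the bytes from
	 * (__nozi_stack_start >> ASAN_BLOCK_SHIFT) + CFG_ASAN_SHADOW_OFFSET
	 * up to the corresponding shadow address of __nozi_stack_end.
	 */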

	set_sp

	bl	thread_init_thread_core_local

	/* complete ARM secure MP common configuration */
	bl	plat_primary_init_early

	/* Enable Console */
	bl	console_init

#ifdef CFG_PL310
	bl	pl310_base
	bl	arm_cl2_config
#endif

	/*
	 * Invalidate the dcache for all memory used during initialization
	 * to avoid nasty surprises when the cache is turned on. We must
	 * not invalidate memory not used by OP-TEE since we may otherwise
	 * invalidate entries used by, for instance, ARM Trusted Firmware.
	 */
	inval_cache_vrange(cached_mem_start, cached_mem_end)

#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
	/* Enable PL310 if not yet enabled */
	bl	pl310_base
	bl	arm_cl2_enable
#endif

#ifdef CFG_CORE_ASLR
	mov	r0, r6
	bl	get_aslr_seed
#else
	mov	r0, #0
#endif

	ldr	r1, =boot_mmu_config
	bl	core_init_mmu_map

#ifdef CFG_CORE_ASLR
	/*
	 * Process the relocation information and update addresses with the
	 * new offset. We're doing this now, before the MMU is enabled, as
	 * some of the memory will become write-protected.
	 */
	ldr	r0, =boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_LOAD_OFFSET]
	/*
	 * Update cached_mem_end address with load offset since it was
	 * calculated before relocation.
	 */
	ldr	r2, cached_mem_end
	add	r2, r2, r0
	str	r2, cached_mem_end

	bl	relocate
#endif

	bl	__get_core_pos
	bl	enable_mmu
#ifdef CFG_CORE_ASLR
	/*
	 * Reinitialize console, since register_serial_console() has
	 * previously registered a PA and with ASLR the VA is different
	 * from the PA.
	 */
	bl	console_init
#endif

	mov	r0, r4		/* pageable part address */
	mov	r1, r5		/* ns-entry address */
	bl	boot_init_primary_early
#ifndef CFG_VIRTUALIZATION
	mov	r7, sp
	ldr	r0, =threads
	ldr	r0, [r0, #THREAD_CTX_STACK_VA_END]
	mov	sp, r0
#endif
	mov	r0, r6		/* DT address */
	bl	boot_init_primary_late
#ifndef CFG_VIRTUALIZATION
	mov	sp, r7
#endif

	/*
	 * In case we've touched memory that secondary CPUs will use before
	 * they have turned on their D-cache, clean and invalidate the
	 * D-cache before exiting to normal world.
	 */
	flush_cache_vrange(cached_mem_start, cached_mem_end)

	/* release secondary boot cores and sync with them */
	cpu_is_ready
	flush_cpu_semaphores
	wait_secondary

#ifdef CFG_PL310_LOCKED
#ifdef CFG_PL310_SIP_PROTOCOL
#error "CFG_PL310_LOCKED must not be defined when CFG_PL310_SIP_PROTOCOL=y"
#endif
	/* Lock/invalidate all lines: the PL310 then behaves as if disabled */
	bl	pl310_base
	bl	arm_cl2_lockallways
	bl	pl310_base
	bl	arm_cl2_cleaninvbyway
#endif

	/*
	 * Clear the current thread id now to allow the thread to be
	 * reused on the next entry. Matches the thread_init_boot_thread()
	 * call in boot.c.
	 */
#ifndef CFG_VIRTUALIZATION
	bl	thread_clr_boot_thread
#endif

#ifdef CFG_CORE_FFA
	ldr	r0, =cpu_on_handler
	/*
	 * Compensate for the load offset since cpu_on_handler() is
	 * called with MMU off.
	 */
	ldr	r1, boot_mmu_config + CORE_MMU_CONFIG_LOAD_OFFSET
	sub	r0, r0, r1
	bl	ffa_secondary_cpu_boot_req
	b	thread_ffa_msg_wait
#else /* CFG_CORE_FFA */

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	ldr	r0, =boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_LOAD_OFFSET]
	ldr	r1, =thread_vector_table
	/* Pass the vector address returned from main_init */
	sub	r1, r1, r0
#else
	/* Relay standard bootargs #1 and #2 to the non-secure entry */
	mov	r4, #0
	mov	r3, r6		/* std bootarg #2 for register R2 */
	mov	r2, r7		/* std bootarg #1 for register R1 */
	mov	r1, #0
#endif /* CFG_WITH_ARM_TRUSTED_FW */

	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	smc	#0
	b	.	/* SMC should not return */
#endif /* CFG_CORE_FFA */
END_FUNC reset_primary

#ifdef CFG_BOOT_SYNC_CPU
LOCAL_DATA sem_cpu_sync_start , :
	.word	sem_cpu_sync
END_DATA sem_cpu_sync_start

LOCAL_DATA sem_cpu_sync_end , :
	.word	sem_cpu_sync + (CFG_TEE_CORE_NB_CORE << 2)
END_DATA sem_cpu_sync_end
#endif

LOCAL_DATA cached_mem_start , :
	.word	__text_start
END_DATA cached_mem_start

LOCAL_DATA cached_mem_end , :
	.skip	4
END_DATA cached_mem_end

LOCAL_FUNC unhandled_cpu , :
	wfi
	b	unhandled_cpu
END_FUNC unhandled_cpu

#ifdef CFG_CORE_ASLR
LOCAL_FUNC relocate , :
	push	{r4-r5}
	/* r0 holds load offset */
#ifdef CFG_WITH_PAGER
	ldr	r12, =__init_end
#else
	ldr	r12, =__end
#endif
	ldr	r2, [r12, #BOOT_EMBDATA_RELOC_OFFSET]
	ldr	r3, [r12, #BOOT_EMBDATA_RELOC_LEN]

	mov_imm	r1, TEE_RAM_START
	add	r2, r2, r12	/* start of relocations */
	add	r3, r3, r2	/* end of relocations */

	/*
	 * Relocations are not formatted as Rel32; instead they are in a
	 * compressed format created by get_reloc_bin() in
	 * scripts/gen_tee_bin.py.
	 *
	 * All the R_ARM_RELATIVE relocations are translated into a list
	 * of 32-bit offsets from TEE_RAM_START. Each offset points at a
	 * 32-bit value which is increased with the load offset.
	 */

#ifdef CFG_WITH_PAGER
	/*
	 * With the pager enabled we can only relocate the pager and init
	 * parts; the rest has to be done when a page is populated.
	 */
	sub	r12, r12, r1
#endif

	b	2f
	/* Loop over the relocation addresses and process all entries */
1:	ldr	r4, [r2], #4
#ifdef CFG_WITH_PAGER
	/* Skip too large addresses */
	cmp	r4, r12
	bge	2f
#endif
	ldr	r5, [r4, r1]
	add	r5, r5, r0
	str	r5, [r4, r1]

2:	cmp	r2, r3
	bne	1b

	pop	{r4-r5}
	bx	lr
END_FUNC relocate
#endif
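
/*
 * A rough C-like sketch of relocate() above:
 *
 *	embdata = __init_end or __end;	// depending on CFG_WITH_PAGER
 *	reloc = (uint32_t *)(embdata + embdata->reloc_offset);
 *	end = (uint32_t *)(embdata + embdata->reloc_offset +
 *			   embdata->reloc_len);
 *	for (; reloc != end; reloc++)
 *		*(uint32_t *)(TEE_RAM_START + *reloc) += load_offset;
 *
 * where each 32-bit entry is an offset from TEE_RAM_START. With the pager,
 * entries pointing beyond the init end are skipped here and handled when
 * the corresponding page is populated.
 */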

/*
 * void enable_mmu(unsigned long core_pos);
 *
 * This function depends on being mapped within the identity map, where
 * physical and virtual addresses are the same. After the MMU has been
 * enabled the instruction pointer is updated to execute at the new
 * offset instead. The stack pointer and the return address are updated
 * accordingly.
 */
LOCAL_FUNC enable_mmu , : , .identity_map
	/* r0 = core pos */
	adr	r1, boot_mmu_config

#ifdef CFG_WITH_LPAE
	ldm	r1!, {r2, r3}
	/*
	 * r2 = ttbcr
	 * r3 = mair0
	 */
	write_ttbcr r2
	write_mair0 r3

	ldm	r1!, {r2, r3}
	/*
	 * r2 = ttbr0_base
	 * r3 = ttbr0_core_offset
	 */

	/*
	 * ttbr0 = ttbr0_base + ttbr0_core_offset * core_pos
	 */
	mla	r12, r0, r3, r2
	mov	r0, #0
	write_ttbr0_64bit r12, r0
	write_ttbr1_64bit r0, r0
#else
	ldm	r1!, {r2, r3}
	/*
	 * r2 = prrr
	 * r3 = nmrr
	 */
	write_prrr r2
	write_nmrr r3

	ldm	r1!, {r2, r3}
	/*
	 * r2 = dacr
	 * r3 = ttbcr
	 */
	write_dacr r2
	write_ttbcr r3

	ldm	r1!, {r2}
	/* r2 = ttbr */
	write_ttbr0 r2
	write_ttbr1 r2

	mov	r2, #0
	write_contextidr r2
#endif
	ldm	r1!, {r2}
	/* r2 = load_offset (always 0 if CFG_CORE_ASLR=n) */
	isb

	/* Invalidate TLB */
	write_tlbiall

	/*
	 * Make sure translation table writes have drained into memory and
	 * the TLB invalidation is complete.
	 */
	dsb	sy
	isb

	read_sctlr r0
	orr	r0, r0, #SCTLR_M
#ifndef CFG_WITH_LPAE
	/* Enable Access flag (simplified access permissions) and TEX remap */
	orr	r0, r0, #(SCTLR_AFE | SCTLR_TRE)
#endif
	write_sctlr r0
	isb

	/* Update vbar */
	read_vbar r1
	add	r1, r1, r2
	write_vbar r1
	isb

	/* Invalidate instruction cache and branch predictor */
	write_iciallu
	write_bpiall
	isb

	read_sctlr r0
	/* Enable I and D cache */
	orr	r0, r0, #SCTLR_I
	orr	r0, r0, #SCTLR_C
#if defined(CFG_ENABLE_SCTLR_Z)
	/*
	 * This is only needed on the ARMv7 architecture and is hence
	 * conditioned by the configuration directive CFG_ENABLE_SCTLR_Z.
	 * On more recent architectures, program flow prediction is
	 * automatically enabled when the MMU is enabled.
	 */
	orr	r0, r0, #SCTLR_Z
#endif
	write_sctlr r0
	isb

	/* Adjust stack pointer and return address */
	add	sp, sp, r2
	add	lr, lr, r2

	bx	lr
END_FUNC enable_mmu
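
/*
 * enable_mmu() consumes the fields of boot_mmu_config (struct
 * core_mmu_config, filled in by core_init_mmu_map()) in the order they
 * are loaded with ldm above: ttbcr, mair0, ttbr0_base, ttbr0_core_offset
 * and the load offset for LPAE, or prrr, nmrr, dacr, ttbcr, ttbr and the
 * load offset for the short-descriptor format.
 */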

LOCAL_DATA stack_tmp_export_rel , :
	.word	stack_tmp_export - stack_tmp_export_rel
END_DATA stack_tmp_export_rel

LOCAL_DATA stack_tmp_stride_rel , :
	.word	stack_tmp_stride - stack_tmp_stride_rel
END_DATA stack_tmp_stride_rel

DATA boot_mmu_config , : /* struct core_mmu_config */
	.skip	CORE_MMU_CONFIG_SIZE
END_DATA boot_mmu_config

#if defined(CFG_WITH_ARM_TRUSTED_FW)
FUNC cpu_on_handler , : , .identity_map
UNWIND(	.cantunwind)
	mov	r4, r0
	mov	r5, r1
	mov	r6, lr

	set_sctlr
	isb

	adr	r0, reset_vect_table
	write_vbar r0

	mov	r4, lr

	bl	__get_core_pos
	bl	enable_mmu

	set_sp

	mov	r0, r4
	mov	r1, r5
	bl	boot_cpu_on_handler
#ifdef CFG_CORE_FFA
	b	thread_ffa_msg_wait
#else
	bx	r6
#endif
END_FUNC cpu_on_handler
DECLARE_KEEP_PAGER cpu_on_handler

#else /* defined(CFG_WITH_ARM_TRUSTED_FW) */

LOCAL_FUNC reset_secondary , : , .identity_map
UNWIND(	.cantunwind)
	adr	r0, reset_vect_table
	write_vbar r0

	wait_primary

	set_sp
#ifdef CFG_CORE_ASLR
	/*
	 * stack_tmp_export, which is used as the base when initializing
	 * sp, has been relocated to the new offset. Since the MMU isn't
	 * enabled on this CPU yet we need to restore the corresponding
	 * physical address.
	 */
	adr	r0, boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_LOAD_OFFSET]
	sub	sp, sp, r0
#endif

#if defined (CFG_BOOT_SECONDARY_REQUEST)
	/* If L1 was not invalidated earlier, do it here */
	mov	r0, #DCACHE_OP_INV
	bl	dcache_op_level1
#endif

	bl	__get_core_pos
	bl	enable_mmu

	cpu_is_ready

#if defined (CFG_BOOT_SECONDARY_REQUEST)
	/*
	 * boot_core_hpen() returns (in r0) the address of the
	 * ns entry context structure
	 */
	bl	boot_core_hpen
	ldm	r0, {r0, r6}
#else
	mov	r0, r5		/* ns-entry address */
	mov	r6, #0
#endif
	bl	boot_init_secondary

	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	mov	r1, r6
	mov	r2, #0
	mov	r3, #0
	mov	r4, #0
	smc	#0
	b	.	/* SMC should not return */
END_FUNC reset_secondary
DECLARE_KEEP_PAGER reset_secondary
#endif /* defined(CFG_WITH_ARM_TRUSTED_FW) */
