/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, Linaro Limited
 * Copyright (c) 2021-2023, Arm Limited
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

.arch_extension sec

.section .data
.balign 4

#ifdef CFG_BOOT_SYNC_CPU
.equ SEM_CPU_READY, 1
#endif

#ifdef CFG_PL310
.section .rodata.init
panic_boot_file:
	.asciz __FILE__

/*
 * void __assert_flat_mapped_range(uint32_t vaddr, uint32_t line)
 */
LOCAL_FUNC __assert_flat_mapped_range , :
UNWIND(	.cantunwind)
	push	{ r4-r6, lr }
	mov	r4, r0
	mov	r5, r1
	bl	cpu_mmu_enabled
	cmp	r0, #0
	beq	1f
	mov	r0, r4
	bl	virt_to_phys
	cmp	r0, r4
	beq	1f
	/*
	 * This must be compliant with the generic panic routine:
	 * __do_panic(__FILE__, __LINE__, __func__, str)
	 */
	ldr	r0, =panic_boot_file
	mov	r1, r5
	mov	r2, #0
	mov	r3, #0
	bl	__do_panic
	b	.		/* should NOT return */
1:	pop	{ r4-r6, pc }
END_FUNC __assert_flat_mapped_range

	/* Panic if the MMU is enabled and vaddr != paddr (scratches lr) */
	.macro assert_flat_mapped_range va, line
		ldr	r0, \va
		ldr	r1, =\line
		bl	__assert_flat_mapped_range
	.endm
#endif /* CFG_PL310 */
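
/*
 * A rough C sketch of the check in __assert_flat_mapped_range() above,
 * assuming the usual OP-TEE prototypes for cpu_mmu_enabled(),
 * virt_to_phys() and __do_panic() (illustration only, not part of the
 * build):
 *
 *	static void check_flat_mapped(vaddr_t va, unsigned int line)
 *	{
 *		if (cpu_mmu_enabled() && virt_to_phys((void *)va) != va)
 *			__do_panic(panic_boot_file, line, NULL, NULL);
 *	}
 *
 * That is, the PL310 maintenance code below may only be used on ranges
 * that are flat mapped (VA == PA), since physical addresses are passed
 * to the outer-cache driver.
 */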

WEAK_FUNC plat_cpu_reset_early , :
	bx	lr
END_FUNC plat_cpu_reset_early
DECLARE_KEEP_PAGER plat_cpu_reset_early

	.section .identity_map, "ax"
	.align 5
LOCAL_FUNC reset_vect_table , : , .identity_map
	b	.
	b	.	/* Undef */
	b	.	/* Syscall */
	b	.	/* Prefetch abort */
	b	.	/* Data abort */
	b	.	/* Reserved */
	b	.	/* IRQ */
	b	.	/* FIQ */
END_FUNC reset_vect_table

	.macro cpu_is_ready
#ifdef CFG_BOOT_SYNC_CPU
	bl	__get_core_pos
	lsl	r0, r0, #2
	ldr	r1, =sem_cpu_sync
	ldr	r2, =SEM_CPU_READY
	str	r2, [r1, r0]
	dsb
	sev
#endif
	.endm

	.macro wait_primary
#ifdef CFG_BOOT_SYNC_CPU
	ldr	r0, =sem_cpu_sync
	mov	r2, #SEM_CPU_READY
	sev
1:
	ldr	r1, [r0]
	cmp	r1, r2
	wfene
	bne	1b
#endif
	.endm

	.macro wait_secondary
#ifdef CFG_BOOT_SYNC_CPU
	ldr	r0, =sem_cpu_sync
	mov	r3, #CFG_TEE_CORE_NB_CORE
	mov	r2, #SEM_CPU_READY
	sev
1:
	subs	r3, r3, #1
	beq	3f
	add	r0, r0, #4
2:
	ldr	r1, [r0]
	cmp	r1, r2
	wfene
	bne	2b
	b	1b
3:
#endif
	.endm
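
	/*
	 * The CFG_BOOT_SYNC_CPU macros above implement a simple boot
	 * semaphore; roughly, in C (illustration only, sem_cpu_sync[] is
	 * defined elsewhere in the boot code):
	 *
	 *	// cpu_is_ready: flag this CPU as ready
	 *	sem_cpu_sync[__get_core_pos()] = SEM_CPU_READY;
	 *	dsb(); sev();
	 *
	 *	// wait_primary: secondary CPUs wait for CPU 0
	 *	while (sem_cpu_sync[0] != SEM_CPU_READY)
	 *		wfe();
	 *
	 *	// wait_secondary: CPU 0 waits for CPUs 1..N-1
	 *	for (n = 1; n < CFG_TEE_CORE_NB_CORE; n++)
	 *		while (sem_cpu_sync[n] != SEM_CPU_READY)
	 *			wfe();
	 */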

	/*
	 * set_sctlr : Set up some core configuration in CP15 SCTLR
	 *
	 * Setup required by the current implementation of the OP-TEE core:
	 * - Disable data and instruction caches.
	 * - MMU is expected off and exceptions trapped in ARM mode.
	 * - Enable or disable alignment checks depending on platform
	 *   configuration.
	 * - Optionally enable write-implies-execute-never.
	 * - Optionally enable the round-robin strategy for cache replacement.
	 *
	 * Clobbers r0.
	 */
	.macro set_sctlr
		read_sctlr r0
		bic	r0, r0, #(SCTLR_M | SCTLR_C)
		bic	r0, r0, #SCTLR_I
		bic	r0, r0, #SCTLR_TE
		orr	r0, r0, #SCTLR_SPAN
#if defined(CFG_SCTLR_ALIGNMENT_CHECK)
		orr	r0, r0, #SCTLR_A
#else
		bic	r0, r0, #SCTLR_A
#endif
#if defined(CFG_HWSUPP_MEM_PERM_WXN) && defined(CFG_CORE_RWDATA_NOEXEC)
		orr	r0, r0, #(SCTLR_WXN | SCTLR_UWXN)
#endif
#if defined(CFG_ENABLE_SCTLR_RR)
		orr	r0, r0, #SCTLR_RR
#endif
		write_sctlr r0
	.endm
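
	/*
	 * In C terms the macro above is roughly (sketch only, SCTLR_* are
	 * the bit masks from arm.h):
	 *
	 *	uint32_t sctlr = read_sctlr();
	 *
	 *	sctlr &= ~(SCTLR_M | SCTLR_C | SCTLR_I | SCTLR_TE);
	 *	sctlr |= SCTLR_SPAN;
	 *	// SCTLR_A, SCTLR_WXN/SCTLR_UWXN and SCTLR_RR are then set or
	 *	// cleared depending on the CFG_* options checked above
	 *	write_sctlr(sctlr);
	 */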

#if defined(CFG_CORE_SEL1_SPMC) && defined(CFG_WITH_ARM_TRUSTED_FW)
	/*
	 * With OP-TEE as the SPMC at S-EL1, the SPMD (SPD_spmd) in TF-A
	 * passes the DTB in r0 and the pageable part in r1; the rest of
	 * the registers are unused.
	 *
	 * Save the boot arguments passed:
	 * entry r0, saved r6: device tree address
	 * entry r1, saved r4: pagestore
	 * saved r5, r7: zero
	 */
	.macro bootargs_entry
	mov	r6, r0
	mov	r4, r1
	mov	r5, #0
	mov	r7, #0
	.endm
#else
	/*
	 * Save boot arguments
	 * entry r0, saved r4: pagestore
	 * entry r1, saved r7: (ARMv7 standard bootarg #1)
	 * entry r2, saved r6: device tree address, (ARMv7 standard bootarg #2)
	 * entry lr, saved r5: non-secure entry address (ARMv7 bootarg #0)
	 */
	.macro bootargs_entry
#if defined(CFG_NS_ENTRY_ADDR)
	ldr	r5, =CFG_NS_ENTRY_ADDR
#else
	mov	r5, lr
#endif
#if defined(CFG_PAGEABLE_ADDR)
	ldr	r4, =CFG_PAGEABLE_ADDR
#else
	mov	r4, r0
#endif
#if defined(CFG_DT_ADDR)
	ldr	r6, =CFG_DT_ADDR
#else
	mov	r6, r2
#endif
	mov	r7, r1
	.endm
#endif

	.macro maybe_init_spectre_workaround
#if !defined(CFG_WITH_ARM_TRUSTED_FW) && \
    (defined(CFG_CORE_WORKAROUND_SPECTRE_BP) || \
     defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC))
	read_midr r0
	ubfx	r1, r0, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
	cmp	r1, #MIDR_IMPLEMENTER_ARM
	bne	1f
	ubfx	r1, r0, #MIDR_PRIMARY_PART_NUM_SHIFT, \
			#MIDR_PRIMARY_PART_NUM_WIDTH

	movw	r2, #CORTEX_A8_PART_NUM
	cmp	r1, r2
	moveq	r2, #ACTLR_CA8_ENABLE_INVALIDATE_BTB
	beq	2f

	movw	r2, #CORTEX_A15_PART_NUM
	cmp	r1, r2
	moveq	r2, #ACTLR_CA15_ENABLE_INVALIDATE_BTB
	bne	1f	/* Skip it for all other CPUs */
2:
	read_actlr r0
	orr	r0, r0, r2
	write_actlr r0
	isb
1:
#endif
	.endm
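
	/*
	 * The workaround above only applies to Cortex-A8 and Cortex-A15.
	 * A C-style sketch of the logic (illustration only; implementer()
	 * and part_number() stand for the MIDR bit-field extractions done
	 * with ubfx above):
	 *
	 *	uint32_t midr = read_midr();
	 *
	 *	if (implementer(midr) == MIDR_IMPLEMENTER_ARM) {
	 *		if (part_number(midr) == CORTEX_A8_PART_NUM)
	 *			write_actlr(read_actlr() |
	 *				    ACTLR_CA8_ENABLE_INVALIDATE_BTB);
	 *		else if (part_number(midr) == CORTEX_A15_PART_NUM)
	 *			write_actlr(read_actlr() |
	 *				    ACTLR_CA15_ENABLE_INVALIDATE_BTB);
	 *	}
	 */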

FUNC _start , :
UNWIND(	.cantunwind)

	bootargs_entry

	/*
	 * The 32-bit entry point is expected to execute in Supervisor
	 * mode, but some bootloaders may enter in Supervisor or Monitor
	 * mode, so switch to Supervisor here.
	 */
	cps	#CPSR_MODE_SVC

	/* Early ARM secure MP specific configuration */
	bl	plat_cpu_reset_early
	maybe_init_spectre_workaround

	set_sctlr
	isb

	ldr	r0, =reset_vect_table
	write_vbar r0

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	b	reset_primary
#else
	bl	__get_core_pos
	cmp	r0, #0
	beq	reset_primary
	b	reset_secondary
#endif
END_FUNC _start
DECLARE_KEEP_INIT _start

	/*
	 * Set up sp to point to the top of the tmp stack for the current
	 * CPU: sp is assigned:
	 *   stack_tmp + (cpu_id + 1) * stack_tmp_stride - STACK_TMP_GUARD
	 */
	.macro set_sp
		bl	__get_core_pos
		cmp	r0, #CFG_TEE_CORE_NB_CORE
		/* Unsupported CPU, park it before it breaks something */
		bge	unhandled_cpu
		add	r0, r0, #1

		/* r2 = stack_tmp - STACK_TMP_GUARD */
		adr	r3, stack_tmp_rel
		ldr	r2, [r3]
		add	r2, r2, r3

		/*
		 * stack_tmp_stride and stack_tmp_stride_rel are the
		 * equivalent of:
		 * extern const u32 stack_tmp_stride;
		 * u32 stack_tmp_stride_rel = (u32)&stack_tmp_stride -
		 *			      (u32)&stack_tmp_stride_rel
		 *
		 * To load the value of stack_tmp_stride we do the
		 * equivalent of:
		 * *(u32 *)(stack_tmp_stride_rel + (u32)&stack_tmp_stride_rel)
		 */
		adr	r3, stack_tmp_stride_rel
		ldr	r1, [r3]
		ldr	r1, [r1, r3]

		/*
		 * r0 is core pos + 1
		 * r1 is the value of stack_tmp_stride
		 * r2 is the value of stack_tmp - STACK_TMP_GUARD
		 */
		mul	r1, r0, r1
		add	sp, r1, r2
	.endm
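
	/*
	 * Ignoring the position-independent loads above, the resulting
	 * stack pointer is, in C terms (sketch only):
	 *
	 *	sp = (vaddr_t)stack_tmp + (__get_core_pos() + 1) *
	 *	     stack_tmp_stride - STACK_TMP_GUARD;
	 *
	 * i.e. the top of this CPU's slot in the stack_tmp array, minus
	 * the guard area.
	 */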

	/*
	 * Cache maintenance during entry: handle the outer cache.
	 * The end address is exclusive: the first byte not to be changed.
	 * Note however that arm_clX_inv/cleanbyva operate on full cache
	 * lines.
	 *
	 * Use an ANSI #define wrapper so that __LINE__ captures the source
	 * line of the caller for the PL310 assertion.
	 */
	.macro __inval_cache_vrange vbase, vend, line
#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
		assert_flat_mapped_range (\vbase), (\line)
		bl	pl310_base
		ldr	r1, \vbase
		ldr	r2, \vend
		bl	arm_cl2_invbypa
#endif
		ldr	r0, \vbase
		ldr	r1, \vend
		sub	r1, r1, r0
		bl	dcache_inv_range
	.endm

	.macro __flush_cache_vrange vbase, vend, line
#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
		assert_flat_mapped_range (\vbase), (\line)
		ldr	r0, \vbase
		ldr	r1, \vend
		sub	r1, r1, r0
		bl	dcache_clean_range
		bl	pl310_base
		ldr	r1, \vbase
		ldr	r2, \vend
		bl	arm_cl2_cleaninvbypa
#endif
		ldr	r0, \vbase
		ldr	r1, \vend
		sub	r1, r1, r0
		bl	dcache_cleaninv_range
	.endm

#define inval_cache_vrange(vbase, vend) \
		__inval_cache_vrange vbase, vend, __LINE__

#define flush_cache_vrange(vbase, vend) \
		__flush_cache_vrange vbase, vend, __LINE__

#ifdef CFG_BOOT_SYNC_CPU
#define flush_cpu_semaphores \
		flush_cache_vrange(sem_cpu_sync_start, sem_cpu_sync_end)
#else
#define flush_cpu_semaphores
#endif
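
	/*
	 * In C terms the macros above do roughly the following (sketch
	 * only; the arm_cl2_* calls take physical addresses, which is why
	 * the range must be flat mapped, and are only compiled in for
	 * CFG_PL310 without CFG_PL310_SIP_PROTOCOL):
	 *
	 *	// inval_cache_vrange(vbase, vend)
	 *	arm_cl2_invbypa(pl310_base(), vbase, vend);
	 *	dcache_inv_range((void *)vbase, vend - vbase);
	 *
	 *	// flush_cache_vrange(vbase, vend)
	 *	dcache_clean_range((void *)vbase, vend - vbase);
	 *	arm_cl2_cleaninvbypa(pl310_base(), vbase, vend);
	 *	dcache_cleaninv_range((void *)vbase, vend - vbase);
	 */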

LOCAL_FUNC reset_primary , : , .identity_map
UNWIND(	.cantunwind)

	/* preserve r4-r7: bootargs */

#ifdef CFG_WITH_PAGER
	/*
	 * Move the init code into its correct location and move the hashes
	 * to a temporary safe location until the heap is initialized.
	 *
	 * The binary is built as:
	 * [Pager code, rodata and data] : In correct location
	 * [Init code and rodata] : Should be copied to __init_start
	 * [struct boot_embdata + data] : Should be saved before
	 * initializing the pager, the first uint32_t holds the length
	 * of the data
	 */
	ldr	r0, =__init_start	/* dst */
	ldr	r1, =__data_end		/* src */
	ldr	r2, =__init_end
	sub	r2, r2, r0		/* init len */
	ldr	r12, [r1, r2]		/* length of hashes etc */
	add	r2, r2, r12		/* length of init and hashes etc */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	r0, r0, r2		/* __init_start + len */
	add	r1, r1, r2		/* __data_end + len */
	str	r0, cached_mem_end
	ldr	r2, =__init_start
copy_init:
	ldmdb	r1!, {r3, r8-r12}
	stmdb	r0!, {r3, r8-r12}
	cmp	r0, r2
	bgt	copy_init
#else
	/*
	 * The binary is built as:
	 * [Core, rodata and data] : In correct location
	 * [struct boot_embdata + data] : Should be moved to __end, the
	 * first uint32_t holds the length of the struct + data
	 */
	ldr	r0, =__end		/* dst */
	ldr	r1, =__data_end		/* src */
	ldr	r2, [r1]		/* struct boot_embdata::total_len */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	r0, r0, r2
	add	r1, r1, r2
	str	r0, cached_mem_end
	ldr	r2, =__end

copy_init:
	ldmdb	r1!, {r3, r8-r12}
	stmdb	r0!, {r3, r8-r12}
	cmp	r0, r2
	bgt	copy_init
#endif
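
	/*
	 * Both copy loops above behave like a backwards memmove of the
	 * embedded boot data, 24 bytes (six registers) at a time; in C
	 * terms (sketch only; len is the byte count computed above,
	 * dst_start is __init_start with the pager and __end without it):
	 *
	 *	uint32_t *src = (uint32_t *)(__data_end + len);
	 *	uint32_t *dst = (uint32_t *)(dst_start + len);
	 *
	 *	while (dst > (uint32_t *)dst_start) {
	 *		src -= 6;
	 *		dst -= 6;
	 *		memcpy(dst, src, 6 * sizeof(uint32_t));
	 *	}
	 *
	 * Copying from the top down is what makes the overlapping
	 * source/destination safe.
	 */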

	/*
	 * Clear .bss, this code depends on the linker keeping the start
	 * and end of .bss at least 8-byte aligned.
	 */
	ldr	r0, =__bss_start
	ldr	r1, =__bss_end
	mov	r2, #0
	mov	r3, #0
clear_bss:
	stmia	r0!, {r2, r3}
	cmp	r0, r1
	bls	clear_bss

#ifdef CFG_NS_VIRTUALIZATION
	/*
	 * Clear .nex_bss, this code depends on the linker keeping the
	 * start and end of .nex_bss at least 8-byte aligned.
	 */
	ldr	r0, =__nex_bss_start
	ldr	r1, =__nex_bss_end
	mov	r2, #0
	mov	r3, #0
clear_nex_bss:
	stmia	r0!, {r2, r3}
	cmp	r0, r1
	bls	clear_nex_bss
#endif
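
	/*
	 * Each store above writes two registers (8 bytes) per iteration,
	 * which is why the linker script must keep the start and end of
	 * these sections 8-byte aligned; in C terms (sketch only):
	 *
	 *	memset(__bss_start, 0, __bss_end - __bss_start);
	 */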
447
448#ifdef CFG_CORE_SANITIZE_KADDRESS
449	/* First initialize the entire shadow area with no access */
450	ldr	r0, =__asan_shadow_start	/* start */
451	ldr	r1, =__asan_shadow_end	/* limit */
452	mov	r2, #ASAN_DATA_RED_ZONE
453shadow_no_access:
454	str	r2, [r0], #4
455	cmp	r0, r1
456	bls	shadow_no_access
457
458	/* Mark the entire stack area as OK */
459	ldr	r2, =CFG_ASAN_SHADOW_OFFSET
460	ldr	r0, =__nozi_stack_start	/* start */
461	lsr	r0, r0, #ASAN_BLOCK_SHIFT
462	add	r0, r0, r2
463	ldr	r1, =__nozi_stack_end	/* limit */
464	lsr	r1, r1, #ASAN_BLOCK_SHIFT
465	add	r1, r1, r2
466	mov	r2, #0
467shadow_stack_access_ok:
468	strb	r2, [r0], #1
469	cmp	r0, r1
470	bls	shadow_stack_access_ok
471#endif
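
	/*
	 * C-style sketch of the shadow setup above (illustration only):
	 * the whole shadow area is first marked as no-access, then the
	 * shadow bytes covering the __nozi stacks are cleared to zero,
	 * i.e. fully accessible:
	 *
	 *	uint32_t *p = (uint32_t *)&__asan_shadow_start;
	 *
	 *	while (p <= (uint32_t *)&__asan_shadow_end)
	 *		*p++ = ASAN_DATA_RED_ZONE;
	 *
	 *	uint8_t *s = (uint8_t *)(CFG_ASAN_SHADOW_OFFSET +
	 *		((vaddr_t)&__nozi_stack_start >> ASAN_BLOCK_SHIFT));
	 *	uint8_t *e = (uint8_t *)(CFG_ASAN_SHADOW_OFFSET +
	 *		((vaddr_t)&__nozi_stack_end >> ASAN_BLOCK_SHIFT));
	 *
	 *	while (s <= e)
	 *		*s++ = 0;
	 */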

	set_sp

	bl	thread_init_thread_core_local

	/* Complete the ARM secure MP common configuration */
	bl	plat_primary_init_early

	/* Enable console */
	bl	console_init

#ifdef CFG_PL310
	bl	pl310_base
	bl	arm_cl2_config
#endif

	/*
	 * Invalidate the dcache for all memory used during initialization
	 * to avoid nasty surprises when the cache is turned on. We must
	 * not invalidate memory not used by OP-TEE since we may invalidate
	 * entries used by, for instance, ARM Trusted Firmware.
	 */
	inval_cache_vrange(cached_mem_start, cached_mem_end)

#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
	/* Enable PL310 if not yet enabled */
	bl	pl310_base
	bl	arm_cl2_enable
#endif

#ifdef CFG_CORE_ASLR
	mov	r0, r6
	bl	get_aslr_seed
#else
	mov	r0, #0
#endif

	ldr	r1, =boot_mmu_config
	bl	core_init_mmu_map

#ifdef CFG_CORE_ASLR
	/*
	 * Process the relocation information to update addresses with the
	 * virtual map offset. We do this now, before the MMU is enabled,
	 * as some of the memory will become write protected.
	 */
	ldr	r0, =boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_MAP_OFFSET]
	/*
	 * Update the cached_mem_end address with the load offset since it
	 * was calculated before relocation.
	 */
	ldr	r2, cached_mem_end
	add	r2, r2, r0
	str	r2, cached_mem_end

	bl	relocate
#endif

	bl	__get_core_pos
	bl	enable_mmu
#ifdef CFG_CORE_ASLR
	/*
	 * Reinitialize the console, since register_serial_console() has
	 * previously registered a PA and with ASLR the VA is different
	 * from the PA.
	 */
	bl	console_init
#endif

#ifdef CFG_NS_VIRTUALIZATION
	/*
	 * Initialize the partition tables for each partition to
	 * default_partition, which has now been relocated to a different
	 * VA.
	 */
	bl	core_mmu_set_default_prtn_tbl
#endif

	mov	r0, r4		/* pageable part address */
	mov	r1, r5		/* ns-entry address */
	bl	boot_init_primary_early
#ifndef CFG_NS_VIRTUALIZATION
	mov	r9, sp
	ldr	r0, =threads
	ldr	r0, [r0, #THREAD_CTX_STACK_VA_END]
	mov	sp, r0
	bl	thread_get_core_local
	mov	r8, r0
	mov	r0, #0
	str	r0, [r8, #THREAD_CORE_LOCAL_FLAGS]
#endif
	mov	r0, r6		/* DT address */
	mov	r1, #0		/* unused */
	bl	boot_init_primary_late
#ifndef CFG_NS_VIRTUALIZATION
	mov	r0, #THREAD_CLF_TMP
	str	r0, [r8, #THREAD_CORE_LOCAL_FLAGS]
	mov	sp, r9
#endif

#ifdef _CFG_CORE_STACK_PROTECTOR
	/* Update the stack canary value */
	sub	sp, sp, #0x8
	mov	r0, sp
	mov	r1, #1
	mov	r2, #0x4
	bl	plat_get_random_stack_canaries
	ldr	r0, [sp]
	ldr	r1, =__stack_chk_guard
	str	r0, [r1]
	add	sp, sp, #0x8
#endif
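
	/*
	 * Judging from the register setup above, the canary update is
	 * roughly equivalent to the following (sketch only; the exact
	 * prototype of plat_get_random_stack_canaries() is assumed):
	 *
	 *	uint32_t canary[2];
	 *
	 *	plat_get_random_stack_canaries(canary, 1, sizeof(canary[0]));
	 *	__stack_chk_guard = canary[0];
	 */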

	/*
	 * In case we've touched memory that secondary CPUs will use before
	 * they have turned on their D-cache, clean and invalidate the
	 * D-cache before exiting to normal world.
	 */
	flush_cache_vrange(cached_mem_start, cached_mem_end)

	/* Release secondary boot cores and sync with them */
	cpu_is_ready
	flush_cpu_semaphores
	wait_secondary

#ifdef CFG_PL310_LOCKED
#ifdef CFG_PL310_SIP_PROTOCOL
#error "CFG_PL310_LOCKED must not be defined when CFG_PL310_SIP_PROTOCOL=y"
#endif
	/* Lock/invalidate all lines: the PL310 behaves as if disabled */
	bl	pl310_base
	bl	arm_cl2_lockallways
	bl	pl310_base
	bl	arm_cl2_cleaninvbyway
#endif

	/*
	 * Clear the current thread id now to allow the thread to be reused
	 * on the next entry. Matches the thread_init_boot_thread() call in
	 * boot.c.
	 */
#ifndef CFG_NS_VIRTUALIZATION
	bl	thread_clr_boot_thread
#endif

#ifdef CFG_CORE_FFA
	ldr	r0, =cpu_on_handler
	/*
	 * Compensate for the virtual map offset since cpu_on_handler() is
	 * called with the MMU off.
	 */
	ldr	r1, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
	sub	r0, r0, r1
	bl	thread_spmc_register_secondary_ep
	b	thread_ffa_msg_wait
#else /* CFG_CORE_FFA */

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	ldr	r0, =boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_MAP_OFFSET]
	ldr	r1, =thread_vector_table
	/* Pass the vector address, compensated for the virtual map offset */
	sub	r1, r1, r0
#else
	/* Relay standard bootargs #1 and #2 to the non-secure entry */
	mov	r4, #0
	mov	r3, r6		/* std bootarg #2 for register R2 */
	mov	r2, r7		/* std bootarg #1 for register R1 */
	mov	r1, #0
#endif /* CFG_WITH_ARM_TRUSTED_FW */

	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
#endif /* CFG_CORE_FFA */
END_FUNC reset_primary

#ifdef CFG_BOOT_SYNC_CPU
LOCAL_DATA sem_cpu_sync_start , :
	.word	sem_cpu_sync
END_DATA sem_cpu_sync_start

LOCAL_DATA sem_cpu_sync_end , :
	.word	sem_cpu_sync + (CFG_TEE_CORE_NB_CORE << 2)
END_DATA sem_cpu_sync_end
#endif

LOCAL_DATA cached_mem_start , :
	.word	__text_start
END_DATA cached_mem_start

LOCAL_DATA cached_mem_end , :
	.skip	4
END_DATA cached_mem_end

LOCAL_FUNC unhandled_cpu , :
	wfi
	b	unhandled_cpu
END_FUNC unhandled_cpu

#ifdef CFG_CORE_ASLR
LOCAL_FUNC relocate , :
	push	{r4-r5}
	/* r0 holds the load offset */
#ifdef CFG_WITH_PAGER
	ldr	r12, =__init_end
#else
	ldr	r12, =__end
#endif
	ldr	r2, [r12, #BOOT_EMBDATA_RELOC_OFFSET]
	ldr	r3, [r12, #BOOT_EMBDATA_RELOC_LEN]

	mov_imm	r1, TEE_LOAD_ADDR
	add	r2, r2, r12	/* start of relocations */
	add	r3, r3, r2	/* end of relocations */

	/*
	 * The relocations are not formatted as Rel32, instead they are in
	 * a compressed format created by get_reloc_bin() in
	 * scripts/gen_tee_bin.py.
	 *
	 * All the R_ARM_RELATIVE relocations are translated into a list of
	 * 32-bit offsets from TEE_LOAD_ADDR. Each offset points at a
	 * 32-bit value which is increased with the load offset.
	 */

#ifdef CFG_WITH_PAGER
	/*
	 * With the pager enabled we can only relocate the pager and init
	 * parts here, the rest has to be done when a page is populated.
	 */
	sub	r12, r12, r1
#endif

	b	2f
	/* Loop over the relocation addresses and process all entries */
1:	ldr	r4, [r2], #4
#ifdef CFG_WITH_PAGER
	/* Skip offsets that are too large */
	cmp	r4, r12
	bge	2f
#endif
	ldr	r5, [r4, r1]
	add	r5, r5, r0
	str	r5, [r4, r1]

2:	cmp	r2, r3
	bne	1b

	pop	{r4-r5}
	bx	lr
END_FUNC relocate
#endif
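
/*
 * C-style sketch of the relocation loop above (illustration only):
 * reloc[] is the list of 32-bit offsets from TEE_LOAD_ADDR produced by
 * get_reloc_bin(), and each word referenced by an offset is increased
 * by the load offset:
 *
 *	for (n = 0; n < num_relocs; n++) {
 *		uint32_t *p = (uint32_t *)(TEE_LOAD_ADDR + reloc[n]);
 *
 *		*p += load_offset;
 *	}
 *
 * With CFG_WITH_PAGER, offsets at or beyond __init_end - TEE_LOAD_ADDR
 * are skipped here and are instead applied when the corresponding pages
 * are populated.
 */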

/*
 * void enable_mmu(unsigned long core_pos);
 *
 * This function depends on being mapped within the identity map, where
 * the physical and virtual addresses are the same. After the MMU has been
 * enabled the instruction pointer is updated to execute at the new offset
 * instead. The stack pointer and the return address are updated too.
 */
LOCAL_FUNC enable_mmu , : , .identity_map
	/* r0 = core pos */
	adr	r1, boot_mmu_config

#ifdef CFG_WITH_LPAE
	ldm	r1!, {r2, r3}
	/*
	 * r2 = ttbcr
	 * r3 = mair0
	 */
	write_ttbcr r2
	write_mair0 r3

	ldm	r1!, {r2, r3}
	/*
	 * r2 = ttbr0_base
	 * r3 = ttbr0_core_offset
	 */

	/*
	 * ttbr0 = ttbr0_base + ttbr0_core_offset * core_pos
	 */
	mla	r12, r0, r3, r2
	mov	r0, #0
	write_ttbr0_64bit r12, r0
	write_ttbr1_64bit r0, r0
#else
	ldm	r1!, {r2, r3}
	/*
	 * r2 = prrr
	 * r3 = nmrr
	 */
	write_prrr r2
	write_nmrr r3

	ldm	r1!, {r2, r3}
	/*
	 * r2 = dacr
	 * r3 = ttbcr
	 */
	write_dacr r2
	write_ttbcr r3

	ldm	r1!, {r2}
	/* r2 = ttbr */
	write_ttbr0 r2
	write_ttbr1 r2

	mov	r2, #0
	write_contextidr r2
#endif
	ldm	r1!, {r2}
	/* r2 = load_offset (always 0 if CFG_CORE_ASLR=n) */
	isb

	/* Invalidate TLB */
	write_tlbiall

	/*
	 * Make sure translation table writes have drained into memory and
	 * the TLB invalidation is complete.
	 */
	dsb	sy
	isb

	read_sctlr r0
	orr	r0, r0, #SCTLR_M
#ifndef CFG_WITH_LPAE
	/* Enable Access flag (simplified access permissions) and TEX remap */
	orr	r0, r0, #(SCTLR_AFE | SCTLR_TRE)
#endif
	write_sctlr r0
	isb

	/* Update vbar */
	read_vbar r1
	add	r1, r1, r2
	write_vbar r1
	isb

	/* Invalidate instruction cache and branch predictor */
	write_iciallu
	write_bpiall
	isb

	read_sctlr r0
	/* Enable I and D cache */
	orr	r0, r0, #SCTLR_I
	orr	r0, r0, #SCTLR_C
#if defined(CFG_ENABLE_SCTLR_Z)
	/*
	 * This is only needed on the ARMv7 architecture and is hence
	 * conditioned by the configuration directive CFG_ENABLE_SCTLR_Z.
	 * For more recent architectures, program flow prediction is
	 * automatically enabled upon MMU enablement.
	 */
	orr	r0, r0, #SCTLR_Z
#endif
	write_sctlr r0
	isb

	/* Adjust the stack pointer and the return address */
	add	sp, sp, r2
	add	lr, lr, r2

	bx	lr
END_FUNC enable_mmu

LOCAL_DATA stack_tmp_rel , :
	.word	stack_tmp - stack_tmp_rel - STACK_TMP_GUARD
END_DATA stack_tmp_rel

LOCAL_DATA stack_tmp_stride_rel , :
	.word	stack_tmp_stride - stack_tmp_stride_rel
END_DATA stack_tmp_stride_rel

DATA boot_mmu_config , : /* struct core_mmu_config */
	.skip	CORE_MMU_CONFIG_SIZE
END_DATA boot_mmu_config

#if defined(CFG_WITH_ARM_TRUSTED_FW)
FUNC cpu_on_handler , : , .identity_map
UNWIND(	.cantunwind)
	mov	r4, r0
	mov	r5, r1
	mov	r6, lr

	set_sctlr
	isb

	adr	r0, reset_vect_table
	write_vbar r0

	mov	r4, lr

	bl	__get_core_pos
	bl	enable_mmu

	set_sp

	mov	r0, r4
	mov	r1, r5
	bl	boot_cpu_on_handler
#ifdef CFG_CORE_FFA
	b	thread_ffa_msg_wait
#else
	bx	r6
#endif
END_FUNC cpu_on_handler
DECLARE_KEEP_PAGER cpu_on_handler

#else /* defined(CFG_WITH_ARM_TRUSTED_FW) */

LOCAL_FUNC reset_secondary , : , .identity_map
UNWIND(	.cantunwind)
	adr	r0, reset_vect_table
	write_vbar r0

	wait_primary

	set_sp

#if defined (CFG_BOOT_SECONDARY_REQUEST)
	/* If L1 was not invalidated before, do it here */
	mov	r0, #DCACHE_OP_INV
	bl	dcache_op_level1
#endif

	bl	__get_core_pos
	bl	enable_mmu

	cpu_is_ready

#if defined (CFG_BOOT_SECONDARY_REQUEST)
	/*
	 * The boot_core_hpen() return value (r0) is the address of the
	 * ns entry context structure.
	 */
	bl	boot_core_hpen
	ldm	r0, {r0, r6}
#else
	mov	r0, r5		/* ns-entry address */
	mov	r6, #0
#endif
	bl	boot_init_secondary

	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	mov	r1, r6
	mov	r2, #0
	mov	r3, #0
	mov	r4, #0
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC reset_secondary
DECLARE_KEEP_PAGER reset_secondary
#endif /* defined(CFG_WITH_ARM_TRUSTED_FW) */