xref: /optee_os/core/arch/arm/kernel/entry_a32.S (revision fdc4a8bef4978835f05b1687c99e090c85b84b7c)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2014, Linaro Limited
4 * Copyright (c) 2021-2023, Arm Limited
5 */
6
7#include <arm32_macros.S>
8#include <arm.h>
9#include <asm.S>
10#include <generated/asm-defines.h>
11#include <keep.h>
12#include <kernel/asan.h>
13#include <kernel/cache_helpers.h>
14#include <kernel/thread_private.h>
15#include <platform_config.h>
16#include <sm/optee_smc.h>
17#include <sm/teesmc_opteed.h>
18#include <sm/teesmc_opteed_macros.h>
19
20.arch_extension sec
21
22.section .data
23.balign 4
24
25#ifdef CFG_BOOT_SYNC_CPU
26.equ SEM_CPU_READY, 1
27#endif
28
29#ifdef CFG_PL310
30.section .rodata.init
31panic_boot_file:
32	.asciz __FILE__
33
34/*
35 * void __assert_flat_mapped_range(uint32_t vaddr, uint32_t line)
36 */
37LOCAL_FUNC __assert_flat_mapped_range , :
38UNWIND(	.cantunwind)
39	push	{ r4-r6, lr }
40	mov	r4, r0
41	mov	r5, r1
42	bl	cpu_mmu_enabled
43	cmp	r0, #0
44	beq	1f
45	mov	r0, r4
46	bl	virt_to_phys
47	cmp	r0, r4
48	beq	1f
49	/*
50	 * The arguments here must match the generic panic routine:
51	 * __do_panic(__FILE__, __LINE__, __func__, str)
52	 */
53	ldr	r0, =panic_boot_file
54	mov	r1, r5
55	mov	r2, #0
56	mov	r3, #0
57	bl	__do_panic
58	b	.		/* should NOT return */
591:	pop	{ r4-r6, pc }
60END_FUNC __assert_flat_mapped_range
61
62	/* Panic if the MMU is enabled and vaddr != paddr (clobbers lr) */
63	.macro assert_flat_mapped_range va, line
64		ldr	r0, \va
65		ldr	r1, =\line
66		bl	__assert_flat_mapped_range
67	.endm
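
	/*
	 * For reference, the helper and macro above are roughly the
	 * following C (an illustrative sketch only; the real prototypes
	 * live in their respective headers):
	 *
	 * static void assert_flat_mapped_range(vaddr_t va, unsigned int line)
	 * {
	 *	if (cpu_mmu_enabled() && virt_to_phys((void *)va) != va)
	 *		__do_panic(panic_boot_file, line, NULL, NULL);
	 * }
	 */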
68#endif /* CFG_PL310 */
69
70WEAK_FUNC plat_cpu_reset_early , :
71	bx	lr
72END_FUNC plat_cpu_reset_early
73DECLARE_KEEP_PAGER plat_cpu_reset_early
74
75	.section .identity_map, "ax"
76	.align 5
77LOCAL_FUNC reset_vect_table , : , .identity_map
78	b	.
79	b	.	/* Undef */
80	b	.	/* Syscall */
81	b	.	/* Prefetch abort */
82	b	.	/* Data abort */
83	b	.	/* Reserved */
84	b	.	/* IRQ */
85	b	.	/* FIQ */
86END_FUNC reset_vect_table
87
88	.macro cpu_is_ready
89#ifdef CFG_BOOT_SYNC_CPU
90	bl	__get_core_pos
91	lsl	r0, r0, #2
92	ldr	r1,=sem_cpu_sync
93	ldr	r2, =SEM_CPU_READY
94	str	r2, [r1, r0]
95	dsb
96	sev
97#endif
98	.endm
99
100	.macro wait_primary
101#ifdef CFG_BOOT_SYNC_CPU
102	ldr	r0, =sem_cpu_sync
103	mov	r2, #SEM_CPU_READY
104	sev
1051:
106	ldr	r1, [r0]
107	cmp	r1, r2
108	wfene
109	bne	1b
110#endif
111	.endm
112
113	.macro wait_secondary
114#ifdef CFG_BOOT_SYNC_CPU
115	ldr	r0, =sem_cpu_sync
116	mov	r3, #CFG_TEE_CORE_NB_CORE
117	mov	r2, #SEM_CPU_READY
118	sev
1191:
120	subs	r3, r3, #1
121	beq	3f
122	add	r0, r0, #4
1232:
124	ldr	r1, [r0]
125	cmp	r1, r2
126	wfene
127	bne	2b
128	b	1b
1293:
130#endif
131	.endm
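
	/*
	 * Taken together, the three macros above implement a simple boot
	 * synchronization scheme. Roughly, in C terms (a sketch, not the
	 * actual implementation):
	 *
	 * cpu_is_ready:
	 *	sem_cpu_sync[__get_core_pos()] = SEM_CPU_READY; dsb(); sev();
	 *
	 * wait_primary:	// secondaries wait for the primary (index 0)
	 *	while (sem_cpu_sync[0] != SEM_CPU_READY)
	 *		wfe();
	 *
	 * wait_secondary:	// the primary waits for cores 1..N-1
	 *	for (n = 1; n < CFG_TEE_CORE_NB_CORE; n++)
	 *		while (sem_cpu_sync[n] != SEM_CPU_READY)
	 *			wfe();
	 */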
132
133	/*
134	 * set_sctlr : Set up some core configuration in CP15 SCTLR
135	 *
136	 * Setup required by the current implementation of the OP-TEE core:
137	 * - Disable the data and instruction caches.
138	 * - MMU is expected off and exceptions trapped in ARM mode.
139	 * - Enable or disable alignment checks per platform configuration.
140	 * - Optionally enable write-implies-execute-never.
141	 * - Optionally enable a round-robin strategy for cache replacement.
142	 *
143	 * Clobbers r0.
144	 */
145	.macro set_sctlr
146		read_sctlr r0
147		bic	r0, r0, #(SCTLR_M | SCTLR_C)
148		bic	r0, r0, #SCTLR_I
149		bic	r0, r0, #SCTLR_TE
150		orr	r0, r0, #SCTLR_SPAN
151#if defined(CFG_SCTLR_ALIGNMENT_CHECK)
152		orr	r0, r0, #SCTLR_A
153#else
154		bic	r0, r0, #SCTLR_A
155#endif
156#if defined(CFG_HWSUPP_MEM_PERM_WXN) && defined(CFG_CORE_RWDATA_NOEXEC)
157		orr	r0, r0, #(SCTLR_WXN | SCTLR_UWXN)
158#endif
159#if defined(CFG_ENABLE_SCTLR_RR)
160		orr	r0, r0, #SCTLR_RR
161#endif
162		write_sctlr r0
163	.endm
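
	/*
	 * In C terms the macro above amounts to roughly the following
	 * sketch (illustrative only):
	 *
	 * uint32_t sctlr = read_sctlr();
	 *
	 * // MMU and caches off; take exceptions in ARM (not Thumb) state
	 * sctlr &= ~(SCTLR_M | SCTLR_C | SCTLR_I | SCTLR_TE);
	 * // Leave PSTATE.PAN unchanged on exception entry
	 * sctlr |= SCTLR_SPAN;
	 * if (IS_ENABLED(CFG_SCTLR_ALIGNMENT_CHECK))
	 *	sctlr |= SCTLR_A;
	 * else
	 *	sctlr &= ~SCTLR_A;
	 * if (IS_ENABLED(CFG_HWSUPP_MEM_PERM_WXN) &&
	 *     IS_ENABLED(CFG_CORE_RWDATA_NOEXEC))
	 *	sctlr |= SCTLR_WXN | SCTLR_UWXN;
	 * if (IS_ENABLED(CFG_ENABLE_SCTLR_RR))
	 *	sctlr |= SCTLR_RR;
	 * write_sctlr(sctlr);
	 */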
164
165#if defined(CFG_CORE_SEL1_SPMC) && defined(CFG_WITH_ARM_TRUSTED_FW)
166	/*
167	 * With OP-TEE as SPMC at S-EL1 the SPMD (SPD_spmd) in TF-A passes
168	 * the DTB in r0, the pageable part in r1, and the rest of the registers
169	 * are unused.
170	 *
171	 * Save the boot arguments passed:
172	 * entry r0, saved r6: device tree address
173	 * entry r1, saved r4: pagestore
174	 * saved r5, r7: Zero
175	 */
176	.macro bootargs_entry
177	mov	r6, r0
178	mov	r4, r1
179	mov	r5, #0
180	mov	r7, #0
181	.endm
182#else
183	/*
184	 * Save boot arguments
185	 * entry r0, saved r4: pagestore
186	 * entry r1, saved r7: (ARMv7 standard bootarg #1)
187	 * entry r2, saved r6: device tree address, (ARMv7 standard bootarg #2)
188	 * entry lr, saved r5: non-secure entry address (ARMv7 bootarg #0)
189	 */
190	.macro bootargs_entry
191#if defined(CFG_NS_ENTRY_ADDR)
192	ldr	r5, =CFG_NS_ENTRY_ADDR
193#else
194	mov	r5, lr
195#endif
196#if defined(CFG_PAGEABLE_ADDR)
197	ldr	r4, =CFG_PAGEABLE_ADDR
198#else
199	mov	r4, r0
200#endif
201#if defined(CFG_DT_ADDR)
202	ldr	r6, =CFG_DT_ADDR
203#else
204	mov	r6, r2
205#endif
206	mov	r7, r1
207	.endm
208#endif
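
	/*
	 * Whichever bootargs_entry variant is built, the register
	 * assignment used by the rest of the boot code is the same:
	 * r4: pageable part address (pagestore)
	 * r5: non-secure entry address (zero for the SPMC at S-EL1 case)
	 * r6: device tree address
	 * r7: standard bootarg #1 (zero for the SPMC at S-EL1 case)
	 */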
209
210	.macro maybe_init_spectre_workaround
211#if !defined(CFG_WITH_ARM_TRUSTED_FW) && \
212    (defined(CFG_CORE_WORKAROUND_SPECTRE_BP) || \
213     defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC))
214	read_midr r0
215	ubfx	r1, r0, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
216	cmp	r1, #MIDR_IMPLEMENTER_ARM
217	bne	1f
218	ubfx	r1, r0, #MIDR_PRIMARY_PART_NUM_SHIFT, \
219			#MIDR_PRIMARY_PART_NUM_WIDTH
220
221	movw	r2, #CORTEX_A8_PART_NUM
222	cmp	r1, r2
223	moveq	r2, #ACTLR_CA8_ENABLE_INVALIDATE_BTB
224	beq	2f
225
226	movw	r2, #CORTEX_A15_PART_NUM
227	cmp	r1, r2
228	moveq	r2, #ACTLR_CA15_ENABLE_INVALIDATE_BTB
229	bne	1f	/* Skip it for all other CPUs */
2302:
231	read_actlr r0
232	orr	r0, r0, r2
233	write_actlr r0
234	isb
2351:
236#endif
237	.endm
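
	/*
	 * The logic above corresponds roughly to this C sketch
	 * (illustrative only):
	 *
	 * uint32_t midr = read_midr();
	 * uint32_t impl = (midr >> MIDR_IMPLEMENTER_SHIFT) &
	 *		   ((1 << MIDR_IMPLEMENTER_WIDTH) - 1);
	 * uint32_t part = (midr >> MIDR_PRIMARY_PART_NUM_SHIFT) &
	 *		   ((1 << MIDR_PRIMARY_PART_NUM_WIDTH) - 1);
	 * uint32_t bit = 0;
	 *
	 * if (impl == MIDR_IMPLEMENTER_ARM) {
	 *	if (part == CORTEX_A8_PART_NUM)
	 *		bit = ACTLR_CA8_ENABLE_INVALIDATE_BTB;
	 *	else if (part == CORTEX_A15_PART_NUM)
	 *		bit = ACTLR_CA15_ENABLE_INVALIDATE_BTB;
	 *	if (bit) {
	 *		write_actlr(read_actlr() | bit);
	 *		isb();
	 *	}
	 * }
	 */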
238
239FUNC _start , :
240UNWIND(	.cantunwind)
241
242	bootargs_entry
243
244	/*
245	 * The 32-bit entry is expected to execute in Supervisor mode, but
246	 * some bootloaders may enter in either Supervisor or Monitor mode.
247	 */
248	cps	#CPSR_MODE_SVC
249
250	/* Early ARM secure MP specific configuration */
251	bl	plat_cpu_reset_early
252	maybe_init_spectre_workaround
253
254	set_sctlr
255	isb
256
257	ldr	r0, =reset_vect_table
258	write_vbar r0
259
260#if defined(CFG_WITH_ARM_TRUSTED_FW)
261	b	reset_primary
262#else
263	bl	__get_core_pos
264	cmp	r0, #0
265	beq	reset_primary
266	b	reset_secondary
267#endif
268END_FUNC _start
269DECLARE_KEEP_INIT _start
270
271	/*
272	 * Set up sp to point to the top of the tmp stack for the current CPU:
273	 * sp is assigned:
274	 *   stack_tmp + (cpu_id + 1) * stack_tmp_stride - STACK_TMP_GUARD
275	 */
276	.macro set_sp
277		bl	__get_core_pos
278		cmp	r0, #CFG_TEE_CORE_NB_CORE
279		/* Unsupported CPU, park it before it breaks something */
280		bge	unhandled_cpu
281		add	r0, r0, #1
282
283		/* r2 = stack_tmp - STACK_TMP_GUARD */
284		adr	r3, stack_tmp_rel
285		ldr	r2, [r3]
286		add	r2, r2, r3
287
288		/*
289		 * stack_tmp_stride and stack_tmp_stride_rel are the
290		 * equivalent of:
291		 * extern const u32 stack_tmp_stride;
292		 * u32 stack_tmp_stride_rel = (u32)&stack_tmp_stride -
293		 *			      (u32)&stack_tmp_stride_rel
294		 *
295		 * To load the value of stack_tmp_stride we do the equivalent
296		 * of:
297		 * *(u32 *)(stack_tmp_stride_rel + (u32)&stack_tmp_stride_rel)
298		 */
299		adr	r3, stack_tmp_stride_rel
300		ldr	r1, [r3]
301		ldr	r1, [r1, r3]
302
303		/*
304		 * r0 is core pos + 1
305		 * r1 is value of stack_tmp_stride
306	 * r2 is value of stack_tmp - STACK_TMP_GUARD
307		 */
308		mul	r1, r0, r1
309		add	sp, r1, r2
310	.endm
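
	/*
	 * Expressed in C, the macro above does roughly (sketch only):
	 *
	 * size_t pos = __get_core_pos();
	 *
	 * if (pos >= CFG_TEE_CORE_NB_CORE)
	 *	unhandled_cpu();	// park unsupported CPUs
	 * sp = (vaddr_t)stack_tmp - STACK_TMP_GUARD +
	 *      (pos + 1) * stack_tmp_stride;
	 */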
311
312	/*
313	 * Cache maintenance during entry: handle outer cache.
314	 * End address is exclusive: first byte not to be changed.
315	 * Note, however, that arm_clX_inv/cleanbyva operate on full cache lines.
316	 *
317	 * Use a #define to capture the source line number for the PL310 assertion.
318	 */
319	.macro __inval_cache_vrange vbase, vend, line
320#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
321		assert_flat_mapped_range (\vbase), (\line)
322		bl	pl310_base
323		ldr	r1, \vbase
324		ldr	r2, \vend
325		bl	arm_cl2_invbypa
326#endif
327		ldr	r0, \vbase
328		ldr	r1, \vend
329		sub	r1, r1, r0
330		bl	dcache_inv_range
331	.endm
332
333	.macro __flush_cache_vrange vbase, vend, line
334#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
335		assert_flat_mapped_range (\vbase), (\line)
336		ldr	r0, \vbase
337		ldr	r1, \vend
338		sub	r1, r1, r0
339		bl	dcache_clean_range
340		bl	pl310_base
341		ldr	r1, \vbase
342		ldr	r2, \vend
343		bl	arm_cl2_cleaninvbypa
344#endif
345		ldr	r0, \vbase
346		ldr	r1, \vend
347		sub	r1, r1, r0
348		bl	dcache_cleaninv_range
349	.endm
350
351#define inval_cache_vrange(vbase, vend) \
352		__inval_cache_vrange vbase, vend, __LINE__
353
354#define flush_cache_vrange(vbase, vend) \
355		__flush_cache_vrange vbase, vend, __LINE__
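
/*
 * In C terms, with b and e being the addresses stored at the vbase and
 * vend labels, the two macros above boil down to (a sketch; the optional
 * PL310 outer-cache maintenance by physical address is omitted):
 *
 * inval_cache_vrange():  dcache_inv_range((void *)b, e - b);
 * flush_cache_vrange():  dcache_cleaninv_range((void *)b, e - b);
 */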
356
357#ifdef CFG_BOOT_SYNC_CPU
358#define flush_cpu_semaphores \
359		flush_cache_vrange(sem_cpu_sync_start, sem_cpu_sync_end)
360#else
361#define flush_cpu_semaphores
362#endif
363
364LOCAL_FUNC reset_primary , : , .identity_map
365UNWIND(	.cantunwind)
366
367	/* preserve r4-r7: bootargs */
368
369#ifdef CFG_WITH_PAGER
370	/*
371	 * Move init code into correct location and move hashes to a
372	 * temporary safe location until the heap is initialized.
373	 *
374	 * The binary is built as:
375	 * [Pager code, rodata and data] : In correct location
376	 * [Init code and rodata] : Should be copied to __init_start
377	 * [struct boot_embdata + data] : Should be saved before
378	 * initializing pager, first uint32_t tells the length of the data
379	 */
380	ldr	r0, =__init_start	/* dst */
381	ldr	r1, =__data_end 	/* src */
382	ldr	r2, =__init_end
383	sub	r2, r2, r0		/* init len */
384	ldr	r12, [r1, r2]		/* length of hashes etc */
385	add	r2, r2, r12		/* length of init and hashes etc */
386	/* Copy backwards (as memmove) in case we're overlapping */
387	add	r0, r0, r2		/* __init_start + len */
388	add	r1, r1, r2		/* __data_end + len */
389	str	r0, cached_mem_end
390	ldr	r2, =__init_start
391copy_init:
392	ldmdb	r1!, {r3, r8-r12}
393	stmdb	r0!, {r3, r8-r12}
394	cmp	r0, r2
395	bgt	copy_init
396#else
397	/*
398	 * The binary is built as:
399	 * [Core, rodata and data] : In correct location
400	 * [struct boot_embdata + data] : Should be moved to __end, first
401	 * uint32_t tells the length of the struct + data
402	 */
403	ldr	r0, =__end		/* dst */
404	ldr	r1, =__data_end		/* src */
405	ldr	r2, [r1]		/* struct boot_embdata::total_len */
406	/* Copy backwards (as memmove) in case we're overlapping */
407	add	r0, r0, r2
408	add	r1, r1, r2
409	str	r0, cached_mem_end
410	ldr	r2, =__end
411
412copy_init:
413	ldmdb	r1!, {r3, r8-r12}
414	stmdb	r0!, {r3, r8-r12}
415	cmp	r0, r2
416	bgt	copy_init
417#endif
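
	/*
	 * Either copy loop above is effectively a memmove() of the embedded
	 * data, performed backwards 24 bytes (six registers) at a time since
	 * the destination may overlap the end of the source:
	 *
	 * memmove(dst, src, len);
	 */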
418
419	/*
420	 * Clear .bss. This code depends on the linker keeping the
421	 * start/end of .bss at least 8-byte aligned.
422	 */
423	ldr	r0, =__bss_start
424	ldr	r1, =__bss_end
425	mov	r2, #0
426	mov	r3, #0
427clear_bss:
428	stmia	r0!, {r2, r3}
429	cmp	r0, r1
430	bls	clear_bss
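
	/*
	 * I.e. roughly the equivalent of
	 * memset(__bss_start, 0, __bss_end - __bss_start)
	 * done eight bytes at a time, hence the alignment requirement above.
	 */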
431
432#ifdef CFG_NS_VIRTUALIZATION
433	/*
434	 * Clear .nex_bss. This code depends on the linker keeping the
435	 * start/end of .nex_bss at least 8-byte aligned.
436	 */
437	ldr	r0, =__nex_bss_start
438	ldr	r1, =__nex_bss_end
439	mov	r2, #0
440	mov	r3, #0
441clear_nex_bss:
442	stmia	r0!, {r2, r3}
443	cmp	r0, r1
444	bls	clear_nex_bss
445#endif
446
447#ifdef CFG_CORE_SANITIZE_KADDRESS
448	/* First initialize the entire shadow area with no access */
449	ldr	r0, =__asan_shadow_start	/* start */
450	ldr	r1, =__asan_shadow_end	/* limit */
451	mov	r2, #ASAN_DATA_RED_ZONE
452shadow_no_access:
453	str	r2, [r0], #4
454	cmp	r0, r1
455	bls	shadow_no_access
456
457	/* Mark the entire stack area as OK */
458	ldr	r2, =CFG_ASAN_SHADOW_OFFSET
459	ldr	r0, =__nozi_stack_start	/* start */
460	lsr	r0, r0, #ASAN_BLOCK_SHIFT
461	add	r0, r0, r2
462	ldr	r1, =__nozi_stack_end	/* limit */
463	lsr	r1, r1, #ASAN_BLOCK_SHIFT
464	add	r1, r1, r2
465	mov	r2, #0
466shadow_stack_access_ok:
467	strb	r2, [r0], #1
468	cmp	r0, r1
469	bls	shadow_stack_access_ok
470#endif
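
	/*
	 * In C terms the two loops above amount to roughly (a sketch; note
	 * that both loops use inclusive end addresses just like the
	 * assembly above):
	 *
	 * uint32_t *p = (uint32_t *)__asan_shadow_start;
	 * uint8_t *s = (uint8_t *)((vaddr_t)__nozi_stack_start >>
	 *			    ASAN_BLOCK_SHIFT) + CFG_ASAN_SHADOW_OFFSET;
	 * uint8_t *e = (uint8_t *)((vaddr_t)__nozi_stack_end >>
	 *			    ASAN_BLOCK_SHIFT) + CFG_ASAN_SHADOW_OFFSET;
	 *
	 * // Mark the entire shadow area as inaccessible ...
	 * while (p <= (uint32_t *)__asan_shadow_end)
	 *	*p++ = ASAN_DATA_RED_ZONE;
	 * // ... then clear the shadow bytes covering the boot (__nozi) stacks
	 * while (s <= e)
	 *	*s++ = 0;
	 */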
471
472	set_sp
473
474	bl	thread_init_thread_core_local
475
476	/* complete ARM secure MP common configuration */
477	bl	plat_primary_init_early
478
479	/* Enable Console */
480	bl	console_init
481
482#ifdef CFG_PL310
483	bl	pl310_base
484	bl	arm_cl2_config
485#endif
486
487	/*
488	 * Invalidate dcache for all memory used during initialization to
489	 * avoid nasty surprises when the cache is turned on. We must not
490	 * invalidate memory not used by OP-TEE since we may invalidate
491	 * entries used by, for instance, ARM Trusted Firmware.
492	 */
493	inval_cache_vrange(cached_mem_start, cached_mem_end)
494
495#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
496	/* Enable PL310 if not yet enabled */
497	bl	pl310_base
498	bl	arm_cl2_enable
499#endif
500
501#ifdef CFG_CORE_ASLR
502	mov	r0, r6
503	bl	get_aslr_seed
504#else
505	mov	r0, #0
506#endif
507
508	ldr	r1, =boot_mmu_config
509	bl	core_init_mmu_map
510
511#ifdef CFG_CORE_ASLR
512	/*
513	 * Process the relocation information and update addresses with the
514	 * new load offset. We do this now, before the MMU is enabled, since
515	 * some of the memory will become write-protected.
516	 */
517	ldr	r0, =boot_mmu_config
518	ldr	r0, [r0, #CORE_MMU_CONFIG_LOAD_OFFSET]
519	/*
520	 * Update cached_mem_end address with load offset since it was
521	 * calculated before relocation.
522	 */
523	ldr	r2, cached_mem_end
524	add	r2, r2, r0
525	str	r2, cached_mem_end
526
527	bl	relocate
528#endif
529
530	bl	__get_core_pos
531	bl	enable_mmu
532#ifdef CFG_CORE_ASLR
533	/*
534	 * Reinitialize console, since register_serial_console() has
535	 * previously registered a PA and with ASLR the VA is different
536	 * from the PA.
537	 */
538	bl	console_init
539#endif
540
541#ifdef CFG_NS_VIRTUALIZATION
542	/*
543	 * Initialize partition tables for each partition to
544	 * default_partition, which has now been relocated to a different VA.
545	 */
546	bl	core_mmu_set_default_prtn_tbl
547#endif
548
549	mov	r0, r4		/* pageable part address */
550	mov	r1, r5		/* ns-entry address */
551	bl	boot_init_primary_early
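
	/*
	 * Without virtualization: temporarily switch to the boot thread's
	 * stack (threads[0].stack_va_end) and clear the core local flags
	 * while boot_init_primary_late() runs, then mark the core as back
	 * on the tmp stack (THREAD_CLF_TMP) and restore sp afterwards.
	 */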
552#ifndef CFG_NS_VIRTUALIZATION
553	mov	r9, sp
554	ldr	r0, =threads
555	ldr	r0, [r0, #THREAD_CTX_STACK_VA_END]
556	mov	sp, r0
557	bl	thread_get_core_local
558	mov	r8, r0
559	mov	r0, #0
560	str	r0, [r8, #THREAD_CORE_LOCAL_FLAGS]
561#endif
562	mov	r0, r6		/* DT address */
563	mov	r1, #0		/* unused */
564	bl	boot_init_primary_late
565#ifndef CFG_NS_VIRTUALIZATION
566	mov	r0, #THREAD_CLF_TMP
567	str	r0, [r8, #THREAD_CORE_LOCAL_FLAGS]
568	mov	sp, r9
569#endif
570
571#ifdef _CFG_CORE_STACK_PROTECTOR
572	/* Update stack canary value */
573	bl	plat_get_random_stack_canary
574	ldr	r1, =__stack_chk_guard
575	str	r0, [r1]
576#endif
577
578	/*
579	 * In case we've touched memory that secondary CPUs will use before
580	 * they have turned on their D-cache, clean and invalidate the
581	 * D-cache before exiting to normal world.
582	 */
583	flush_cache_vrange(cached_mem_start, cached_mem_end)
584
585	/* release secondary boot cores and sync with them */
586	cpu_is_ready
587	flush_cpu_semaphores
588	wait_secondary
589
590#ifdef CFG_PL310_LOCKED
591#ifdef CFG_PL310_SIP_PROTOCOL
592#error "CFG_PL310_LOCKED must not be defined when CFG_PL310_SIP_PROTOCOL=y"
593#endif
594	/* Lock/invalidate all lines: the PL310 then behaves as if disabled */
595	bl	pl310_base
596	bl	arm_cl2_lockallways
597	bl	pl310_base
598	bl	arm_cl2_cleaninvbyway
599#endif
600
601	/*
602	 * Clear current thread id now to allow the thread to be reused on
603	 * next entry. Matches the thread_init_boot_thread() call in
604	 * boot.c.
605	 */
606#ifndef CFG_NS_VIRTUALIZATION
607	bl 	thread_clr_boot_thread
608#endif
609
610#ifdef CFG_CORE_FFA
611	ldr	r0, =cpu_on_handler
612	/*
613	 * Compensate for the load offset since cpu_on_handler() is
614	 * called with MMU off.
615	 */
616	ldr	r1, boot_mmu_config + CORE_MMU_CONFIG_LOAD_OFFSET
617	sub	r0, r0, r1
618	bl	thread_spmc_register_secondary_ep
619	b	thread_ffa_msg_wait
620#else /* CFG_CORE_FFA */
621
622#if defined(CFG_WITH_ARM_TRUSTED_FW)
623	ldr	r0, =boot_mmu_config
624	ldr	r0, [r0, #CORE_MMU_CONFIG_LOAD_OFFSET]
625	ldr	r1, =thread_vector_table
626	/* Pass the address of thread_vector_table adjusted by the load offset */
627	sub	r1, r1, r0
628#else
629	/* Relay standard bootargs #1 and #2 to the non-secure entry */
630	mov	r4, #0
631	mov	r3, r6		/* std bootarg #2 for register R2 */
632	mov	r2, r7		/* std bootarg #1 for register R1 */
633	mov	r1, #0
634#endif /* CFG_WITH_ARM_TRUSTED_FW */
635
636	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
637	smc	#0
638	/* SMC should not return */
639	panic_at_smc_return
640#endif /* CFG_CORE_FFA */
641END_FUNC reset_primary
642
643#ifdef CFG_BOOT_SYNC_CPU
644LOCAL_DATA sem_cpu_sync_start , :
645	.word	sem_cpu_sync
646END_DATA sem_cpu_sync_start
647
648LOCAL_DATA sem_cpu_sync_end , :
649	.word	sem_cpu_sync + (CFG_TEE_CORE_NB_CORE << 2)
650END_DATA sem_cpu_sync_end
651#endif
652
653LOCAL_DATA cached_mem_start , :
654	.word	__text_start
655END_DATA cached_mem_start
656
657LOCAL_DATA cached_mem_end , :
658	.skip	4
659END_DATA cached_mem_end
660
661LOCAL_FUNC unhandled_cpu , :
662	wfi
663	b	unhandled_cpu
664END_FUNC unhandled_cpu
665
666#ifdef CFG_CORE_ASLR
667LOCAL_FUNC relocate , :
668	push	{r4-r5}
669	/* r0 holds load offset */
670#ifdef CFG_WITH_PAGER
671	ldr	r12, =__init_end
672#else
673	ldr	r12, =__end
674#endif
675	ldr	r2, [r12, #BOOT_EMBDATA_RELOC_OFFSET]
676	ldr	r3, [r12, #BOOT_EMBDATA_RELOC_LEN]
677
678	mov_imm	r1, TEE_RAM_START
679	add	r2, r2, r12	/* start of relocations */
680	add	r3, r3, r2	/* end of relocations */
681
682	/*
683	 * Relocations are not formatted as Rel32; instead they are in a
684	 * compressed format created by get_reloc_bin() in
685	 * scripts/gen_tee_bin.py
686	 *
687	 * All the R_ARM_RELATIVE relocations are translated into a list
688	 * of 32-bit offsets from TEE_RAM_START. Each offset points at a
689	 * 32-bit value which is increased by the load offset.
690	 */
691
692#ifdef CFG_WITH_PAGER
693	/*
694	 * With the pager enabled we can only relocate the pager and init
695	 * parts; the rest has to be done when a page is populated.
696	 */
697	sub	r12, r12, r1
698#endif
699
700	b	2f
701	/* Loop over the relocation addresses and process all entries */
7021:	ldr	r4, [r2], #4
703#ifdef CFG_WITH_PAGER
704	/* Skip too large addresses */
705	cmp	r4, r12
706	bge	2f
707#endif
708	ldr	r5, [r4, r1]
709	add	r5, r5, r0
710	str	r5, [r4, r1]
711
7122:	cmp	r2, r3
713	bne	1b
714
715	pop	{r4-r5}
716	bx	lr
717END_FUNC relocate
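
/*
 * C sketch of relocate() above (illustrative; the pager-specific
 * filtering of offsets beyond the init parts is omitted and the
 * boot_embdata field names follow the BOOT_EMBDATA_* offsets used
 * above):
 *
 * void relocate(unsigned long offs)
 * {
 *	const uint8_t *ed = (const uint8_t *)__end; // __init_end with pager
 *	uint32_t reloc_offset = ((struct boot_embdata *)ed)->reloc_offset;
 *	uint32_t reloc_len = ((struct boot_embdata *)ed)->reloc_len;
 *	const uint32_t *rel = (const void *)(ed + reloc_offset);
 *	const uint32_t *rel_end = (const void *)(ed + reloc_offset + reloc_len);
 *
 *	for (; rel != rel_end; rel++) {
 *		uint32_t *va = (uint32_t *)(TEE_RAM_START + *rel);
 *
 *		*va += offs;
 *	}
 * }
 */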
718#endif
719
720/*
721 * void enable_mmu(unsigned long core_pos);
722 *
723 * This function depends on being mapped within the identity map, where
724 * the physical and virtual addresses are the same. After the MMU has been
725 * enabled the instruction pointer will be updated to execute at the new
726 * offset instead. The stack pointer and the return address are updated.
727 */
728LOCAL_FUNC enable_mmu , : , .identity_map
729	/* r0 = core pos */
730	adr	r1, boot_mmu_config
731
732#ifdef CFG_WITH_LPAE
733	ldm	r1!, {r2, r3}
734	/*
735	 * r2 = ttbcr
736	 * r3 = mair0
737	 */
738	write_ttbcr r2
739	write_mair0 r3
740
741	ldm	r1!, {r2, r3}
742	/*
743	 * r2 = ttbr0_base
744	 * r3 = ttbr0_core_offset
745	 */
746
747	/*
748	 * ttbr0 = ttbr0_base + ttbr0_core_offset * core_pos
749	 */
750	mla	r12, r0, r3, r2
751	mov	r0, #0
752	write_ttbr0_64bit r12, r0
753	write_ttbr1_64bit r0, r0
754#else
755	ldm	r1!, {r2, r3}
756	/*
757	 * r2 = prrr
758	 * r3 = nmrr
759	 */
760	write_prrr r2
761	write_nmrr r3
762
763	ldm	r1!, {r2, r3}
764	/*
765	 * r2 = dacr
766	 * r3 = ttbcr
767	 */
768	write_dacr r2
769	write_ttbcr r3
770
771	ldm	r1!, {r2}
772	/* r2 = ttbr */
773	write_ttbr0 r2
774	write_ttbr1 r2
775
776	mov	r2, #0
777	write_contextidr r2
778#endif
779	ldm	r1!, {r2}
780	/* r2 = load_offset (always 0 if CFG_CORE_ASLR=n) */
781	isb
782
783	/* Invalidate TLB */
784	write_tlbiall
785
786	/*
787	 * Make sure translation table writes have drained into memory and
788	 * the TLB invalidation is complete.
789	 */
790	dsb	sy
791	isb
792
793	read_sctlr r0
794	orr	r0, r0, #SCTLR_M
795#ifndef CFG_WITH_LPAE
796	/* Enable Access flag (simplified access permissions) and TEX remap */
797	orr	r0, r0, #(SCTLR_AFE | SCTLR_TRE)
798#endif
799	write_sctlr r0
800	isb
801
802	/* Update vbar */
803	read_vbar r1
804	add	r1, r1, r2
805	write_vbar r1
806	isb
807
808	/* Invalidate instruction cache and branch predictor */
809	write_iciallu
810	write_bpiall
811	isb
812
813	read_sctlr r0
814	/* Enable I and D cache */
815	orr	r0, r0, #SCTLR_I
816	orr	r0, r0, #SCTLR_C
817#if defined(CFG_ENABLE_SCTLR_Z)
818	/*
819	 * This is only needed on the ARMv7 architecture and is hence conditioned
820	 * by the configuration directive CFG_ENABLE_SCTLR_Z. On more recent
821	 * architectures, program flow prediction is automatically
822	 * enabled when the MMU is enabled.
823	 */
824	orr	r0, r0, #SCTLR_Z
825#endif
826	write_sctlr r0
827	isb
828
829	/* Adjust stack pointer and return address */
830	add	sp, sp, r2
831	add	lr, lr, r2
832
833	bx	lr
834END_FUNC enable_mmu
835
836LOCAL_DATA stack_tmp_rel , :
837	.word	stack_tmp - stack_tmp_rel - STACK_TMP_GUARD
838END_DATA stack_tmp_rel
839
840LOCAL_DATA stack_tmp_stride_rel , :
841	.word	stack_tmp_stride - stack_tmp_stride_rel
842END_DATA stack_tmp_stride_rel
843
844DATA boot_mmu_config , : /* struct core_mmu_config */
845	.skip	CORE_MMU_CONFIG_SIZE
846END_DATA boot_mmu_config
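
/*
 * boot_mmu_config is the storage behind struct core_mmu_config, filled in
 * by core_init_mmu_map(). The ldm sequences in enable_mmu() above consume
 * its fields in declaration order, ending with the load offset (always 0
 * unless CFG_CORE_ASLR=y) used to patch sp, lr and VBAR.
 */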
847
848#if defined(CFG_WITH_ARM_TRUSTED_FW)
849FUNC cpu_on_handler , : , .identity_map
850UNWIND(	.cantunwind)
851	mov	r4, r0
852	mov	r5, r1
853	mov	r6, lr
854
855	set_sctlr
856	isb
857
858	adr	r0, reset_vect_table
859	write_vbar r0
860
861	mov	r4, lr
862
863	bl	__get_core_pos
864	bl	enable_mmu
865
866	set_sp
867
868	mov	r0, r4
869	mov	r1, r5
870	bl	boot_cpu_on_handler
871#ifdef CFG_CORE_FFA
872	b	thread_ffa_msg_wait
873#else
874	bx	r6
875#endif
876END_FUNC cpu_on_handler
877DECLARE_KEEP_PAGER cpu_on_handler
878
879#else /* defined(CFG_WITH_ARM_TRUSTED_FW) */
880
881LOCAL_FUNC reset_secondary , : , .identity_map
882UNWIND(	.cantunwind)
883	adr	r0, reset_vect_table
884	write_vbar r0
885
886	wait_primary
887
888	set_sp
889
890#if defined (CFG_BOOT_SECONDARY_REQUEST)
891	/* If the L1 cache was not invalidated earlier, do it here */
892	mov	r0, #DCACHE_OP_INV
893	bl	dcache_op_level1
894#endif
895
896	bl	__get_core_pos
897	bl	enable_mmu
898
899	cpu_is_ready
900
901#if defined (CFG_BOOT_SECONDARY_REQUEST)
902	/*
903	 * The boot_core_hpen() return value (r0) is the address of
904	 * the NS entry context structure
905	 */
906	bl	boot_core_hpen
907	ldm	r0, {r0, r6}
908#else
909	mov	r0, r5		/* ns-entry address */
910	mov	r6, #0
911#endif
912	bl	boot_init_secondary
913
914	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
915	mov	r1, r6
916	mov	r2, #0
917	mov	r3, #0
918	mov	r4, #0
919	smc	#0
920	/* SMC should not return */
921	panic_at_smc_return
922END_FUNC reset_secondary
923DECLARE_KEEP_PAGER reset_secondary
924#endif /* defined(CFG_WITH_ARM_TRUSTED_FW) */
925