/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, Linaro Limited
 * Copyright (c) 2021-2023, Arm Limited
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

.arch_extension sec

.section .data
.balign 4

#ifdef CFG_BOOT_SYNC_CPU
.equ SEM_CPU_READY, 1
#endif

#ifdef CFG_PL310
.section .rodata.init
panic_boot_file:
	.asciz __FILE__

/*
 * void assert_flat_mapped_range(uint32_t vaddr, uint32_t line)
 */
LOCAL_FUNC __assert_flat_mapped_range , :
UNWIND(	.cantunwind)
	push	{ r4-r6, lr }
	mov	r4, r0
	mov	r5, r1
	bl	cpu_mmu_enabled
	cmp	r0, #0
	beq	1f
	mov	r0, r4
	bl	virt_to_phys
	cmp	r0, r4
	beq	1f
	/*
	 * This must match the generic panic routine:
	 * __do_panic(__FILE__, __LINE__, __func__, str)
	 */
	ldr	r0, =panic_boot_file
	mov	r1, r5
	mov	r2, #0
	mov	r3, #0
	bl	__do_panic
	b	.		/* should NOT return */
1:	pop	{ r4-r6, pc }
END_FUNC __assert_flat_mapped_range

	/* Panic if the MMU is enabled and vaddr != paddr (scratches lr) */
	.macro assert_flat_mapped_range va, line
		ldr	r0, \va
		ldr	r1, =\line
		bl	__assert_flat_mapped_range
	.endm
#endif /* CFG_PL310 */
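
	/*
	 * Illustrative C sketch of what the assertion above checks (not the
	 * actual implementation; cpu_mmu_enabled(), virt_to_phys() and
	 * __do_panic() are the core helpers called by the assembly):
	 *
	 * static void assert_flat_mapped_range(vaddr_t va, uint32_t line)
	 * {
	 *	if (cpu_mmu_enabled() && virt_to_phys((void *)va) != va)
	 *		__do_panic(panic_boot_file, line, NULL, NULL);
	 * }
	 */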

WEAK_FUNC plat_cpu_reset_early , :
	bx	lr
END_FUNC plat_cpu_reset_early
DECLARE_KEEP_PAGER plat_cpu_reset_early

	.section .identity_map, "ax"
	.align 5
LOCAL_FUNC reset_vect_table , : , .identity_map
	b	.
	b	.	/* Undef */
	b	.	/* Syscall */
	b	.	/* Prefetch abort */
	b	.	/* Data abort */
	b	.	/* Reserved */
	b	.	/* IRQ */
	b	.	/* FIQ */
END_FUNC reset_vect_table

	.macro cpu_is_ready
#ifdef CFG_BOOT_SYNC_CPU
	bl	__get_core_pos
	lsl	r0, r0, #2
	ldr	r1, =sem_cpu_sync
	ldr	r2, =SEM_CPU_READY
	str	r2, [r1, r0]
	dsb
	sev
#endif
	.endm

	.macro wait_primary
#ifdef CFG_BOOT_SYNC_CPU
	ldr	r0, =sem_cpu_sync
	mov	r2, #SEM_CPU_READY
	sev
1:
	ldr	r1, [r0]
	cmp	r1, r2
	wfene
	bne	1b
#endif
	.endm

	.macro wait_secondary
#ifdef CFG_BOOT_SYNC_CPU
	ldr	r0, =sem_cpu_sync
	mov	r3, #CFG_TEE_CORE_NB_CORE
	mov	r2, #SEM_CPU_READY
	sev
1:
	subs	r3, r3, #1
	beq	3f
	add	r0, r0, #4
2:
	ldr	r1, [r0]
	cmp	r1, r2
	wfene
	bne	2b
	b	1b
3:
#endif
	.endm
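
	/*
	 * Hedged C sketch of the CFG_BOOT_SYNC_CPU handshake implemented by
	 * the three macros above (illustrative only; sem_cpu_sync is the
	 * per-core array of words delimited by sem_cpu_sync_start/end below,
	 * core_pos() stands in for __get_core_pos and the SEV/WFE event
	 * details are omitted):
	 *
	 * static void cpu_is_ready(void)
	 * {
	 *	sem_cpu_sync[core_pos()] = SEM_CPU_READY;
	 *	dsb();
	 *	sev();
	 * }
	 *
	 * static void wait_primary(void)
	 * {
	 *	while (sem_cpu_sync[0] != SEM_CPU_READY)
	 *		wfe();
	 * }
	 *
	 * static void wait_secondary(void)
	 * {
	 *	size_t n = 0;
	 *
	 *	for (n = 1; n < CFG_TEE_CORE_NB_CORE; n++)
	 *		while (sem_cpu_sync[n] != SEM_CPU_READY)
	 *			wfe();
	 * }
	 */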

	/*
	 * set_sctlr : Set up some core configuration in CP15 SCTLR
	 *
	 * Setup required by the current implementation of the OP-TEE core:
	 * - Disable the data and instruction caches.
	 * - The MMU is expected to be off and exceptions trapped in ARM mode.
	 * - Enable or disable alignment checks depending on the platform
	 *   configuration.
	 * - Optionally enable write-implies-execute-never.
	 * - Optionally enable the round-robin strategy for cache replacement.
	 *
	 * Clobbers r0.
	 */
	.macro set_sctlr
		read_sctlr r0
		bic	r0, r0, #(SCTLR_M | SCTLR_C)
		bic	r0, r0, #SCTLR_I
		bic	r0, r0, #SCTLR_TE
		orr	r0, r0, #SCTLR_SPAN
#if defined(CFG_SCTLR_ALIGNMENT_CHECK)
		orr	r0, r0, #SCTLR_A
#else
		bic	r0, r0, #SCTLR_A
#endif
#if defined(CFG_HWSUPP_MEM_PERM_WXN) && defined(CFG_CORE_RWDATA_NOEXEC)
		orr	r0, r0, #(SCTLR_WXN | SCTLR_UWXN)
#endif
#if defined(CFG_ENABLE_SCTLR_RR)
		orr	r0, r0, #SCTLR_RR
#endif
		write_sctlr r0
	.endm

	.macro maybe_init_spectre_workaround
#if !defined(CFG_WITH_ARM_TRUSTED_FW) && \
    (defined(CFG_CORE_WORKAROUND_SPECTRE_BP) || \
     defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC))
	read_midr r0
	ubfx	r1, r0, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
	cmp	r1, #MIDR_IMPLEMENTER_ARM
	bne	1f
	ubfx	r1, r0, #MIDR_PRIMARY_PART_NUM_SHIFT, \
			#MIDR_PRIMARY_PART_NUM_WIDTH

	movw	r2, #CORTEX_A8_PART_NUM
	cmp	r1, r2
	moveq	r2, #ACTLR_CA8_ENABLE_INVALIDATE_BTB
	beq	2f

	movw	r2, #CORTEX_A15_PART_NUM
	cmp	r1, r2
	moveq	r2, #ACTLR_CA15_ENABLE_INVALIDATE_BTB
	bne	1f	/* Skip it for all other CPUs */
2:
	read_actlr r0
	orr	r0, r0, r2
	write_actlr r0
	isb
1:
#endif
	.endm
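
	/*
	 * Hedged C sketch of the CPU selection done above (illustrative
	 * only; the MIDR field masks are assumed to match the SHIFT/WIDTH
	 * constants used by the ubfx instructions, and read_midr(),
	 * read_actlr() and write_actlr() mirror the corresponding assembly
	 * accessors):
	 *
	 * static void maybe_init_spectre_workaround(void)
	 * {
	 *	uint32_t midr = read_midr();
	 *	uint32_t part = 0;
	 *	uint32_t actlr_bit = 0;
	 *
	 *	if (((midr >> MIDR_IMPLEMENTER_SHIFT) & MIDR_IMPLEMENTER_MASK)
	 *	    != MIDR_IMPLEMENTER_ARM)
	 *		return;
	 *
	 *	part = (midr >> MIDR_PRIMARY_PART_NUM_SHIFT) &
	 *	       MIDR_PRIMARY_PART_NUM_MASK;
	 *	if (part == CORTEX_A8_PART_NUM)
	 *		actlr_bit = ACTLR_CA8_ENABLE_INVALIDATE_BTB;
	 *	else if (part == CORTEX_A15_PART_NUM)
	 *		actlr_bit = ACTLR_CA15_ENABLE_INVALIDATE_BTB;
	 *	else
	 *		return;		// Skip it for all other CPUs
	 *
	 *	write_actlr(read_actlr() | actlr_bit);
	 *	isb();
	 * }
	 */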

FUNC _start , :
UNWIND(	.cantunwind)
	/*
	 * Keep a temporary copy of the boot argument registers; they will
	 * be passed to boot_save_args() further down.
	 */
	mov	r4, r0
	mov	r5, r1
	mov	r6, r2
	mov	r7, r3
	mov	r8, lr

	/*
	 * The 32-bit entry point is expected to execute in Supervisor mode,
	 * but some bootloaders may enter in Supervisor or Monitor mode.
	 */
	cps	#CPSR_MODE_SVC

	/* Early ARM secure MP specific configuration */
	bl	plat_cpu_reset_early
	maybe_init_spectre_workaround

	set_sctlr
	isb

	ldr	r0, =reset_vect_table
	write_vbar r0

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	b	reset_primary
#else
	bl	__get_core_pos
	cmp	r0, #0
	beq	reset_primary
	b	reset_secondary
#endif
END_FUNC _start
DECLARE_KEEP_INIT _start

	/*
	 * Set up sp to point to the top of the tmp stack for the current CPU:
	 * sp is assigned:
	 *   stack_tmp + (cpu_id + 1) * stack_tmp_stride - STACK_TMP_GUARD
	 */
	.macro set_sp
		bl	__get_core_pos
		cmp	r0, #CFG_TEE_CORE_NB_CORE
		/* Unsupported CPU, park it before it breaks something */
		bge	unhandled_cpu
		add	r0, r0, #1

		/* r2 = stack_tmp - STACK_TMP_GUARD */
		adr	r3, stack_tmp_rel
		ldr	r2, [r3]
		add	r2, r2, r3

		/*
		 * stack_tmp_stride and stack_tmp_stride_rel are the
		 * equivalent of:
		 * extern const u32 stack_tmp_stride;
		 * u32 stack_tmp_stride_rel = (u32)&stack_tmp_stride -
		 *			      (u32)&stack_tmp_stride_rel;
		 *
		 * To load the value of stack_tmp_stride we do the equivalent
		 * of:
		 * *(u32 *)(stack_tmp_stride_rel + (u32)&stack_tmp_stride_rel)
		 */
		adr	r3, stack_tmp_stride_rel
		ldr	r1, [r3]
		ldr	r1, [r1, r3]

		/*
		 * r0 is core pos + 1
		 * r1 is the value of stack_tmp_stride
		 * r2 is the value of stack_tmp - STACK_TMP_GUARD
		 */
		mul	r1, r0, r1
		add	sp, r1, r2
	.endm
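
	/*
	 * Hedged C sketch of the resulting stack pointer (illustrative
	 * only; core_pos() stands in for __get_core_pos and the PC-relative
	 * stack_tmp_rel/stack_tmp_stride_rel indirections are shown as
	 * plain accesses to stack_tmp and stack_tmp_stride):
	 *
	 * static vaddr_t top_of_tmp_stack(void)
	 * {
	 *	size_t pos = core_pos();
	 *
	 *	if (pos >= CFG_TEE_CORE_NB_CORE)
	 *		unhandled_cpu();	// parks the CPU
	 *
	 *	return (vaddr_t)stack_tmp - STACK_TMP_GUARD +
	 *	       (pos + 1) * stack_tmp_stride;
	 * }
	 */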

	/*
	 * Cache maintenance during entry: handle the outer cache.
	 * The end address is exclusive: the first byte not to be changed.
	 * Note, however, that arm_clX_inv/cleanbyva operate on full cache
	 * lines.
	 *
	 * Use an ANSI #define to capture the source file line number for
	 * the PL310 assertion.
	 */
	.macro __inval_cache_vrange vbase, vend, line
#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
		assert_flat_mapped_range (\vbase), (\line)
		bl	pl310_base
		ldr	r1, \vbase
		ldr	r2, =\vend
		ldr	r2, [r2]
		bl	arm_cl2_invbypa
#endif
		ldr	r0, \vbase
		ldr	r1, =\vend
		ldr	r1, [r1]
		sub	r1, r1, r0
		bl	dcache_inv_range
	.endm

	.macro __flush_cache_vrange vbase, vend, line
#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
		assert_flat_mapped_range (\vbase), (\line)
		ldr	r0, \vbase
		ldr	r1, =\vend
		ldr	r1, [r1]
		sub	r1, r1, r0
		bl	dcache_clean_range
		bl	pl310_base
		ldr	r1, \vbase
		ldr	r2, =\vend
		ldr	r2, [r2]
		bl	arm_cl2_cleaninvbypa
#endif
		ldr	r0, \vbase
		ldr	r1, =\vend
		ldr	r1, [r1]
		sub	r1, r1, r0
		bl	dcache_cleaninv_range
	.endm

#define inval_cache_vrange(vbase, vend) \
		__inval_cache_vrange vbase, vend, __LINE__

#define flush_cache_vrange(vbase, vend) \
		__flush_cache_vrange vbase, vend, __LINE__

#ifdef CFG_BOOT_SYNC_CPU
#define flush_cpu_semaphores \
		flush_cache_vrange(sem_cpu_sync_start, sem_cpu_sync_end)
#else
#define flush_cpu_semaphores
#endif
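
	/*
	 * Hedged C sketch of the maintenance order implemented by the two
	 * macros above (illustrative only; the dcache_*_range() helpers
	 * operate on the inner caches by VA while the arm_cl2_*bypa()
	 * helpers operate on the PL310 outer cache by PA, which is why the
	 * range must be flat mapped when CFG_PL310 is enabled):
	 *
	 * static void inval_cache_vrange(vaddr_t va, vaddr_t end)
	 * {
	 *	if (IS_ENABLED(CFG_PL310) && !IS_ENABLED(CFG_PL310_SIP_PROTOCOL))
	 *		arm_cl2_invbypa(pl310_base(), va, end);
	 *	dcache_inv_range((void *)va, end - va);
	 * }
	 *
	 * static void flush_cache_vrange(vaddr_t va, vaddr_t end)
	 * {
	 *	if (IS_ENABLED(CFG_PL310) && !IS_ENABLED(CFG_PL310_SIP_PROTOCOL)) {
	 *		dcache_clean_range((void *)va, end - va);
	 *		arm_cl2_cleaninvbypa(pl310_base(), va, end);
	 *	}
	 *	dcache_cleaninv_range((void *)va, end - va);
	 * }
	 */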

LOCAL_FUNC reset_primary , : , .identity_map
UNWIND(	.cantunwind)

	/* Preserve r4-r8: boot arguments */

#ifdef CFG_WITH_PAGER
	/*
	 * Move the init code into its correct location and move the hashes
	 * to a temporary safe location until the heap is initialized.
	 *
	 * The binary is built as:
	 * [Pager code, rodata and data] : In its correct location
	 * [Init code and rodata] : Should be copied to __init_start
	 * [struct boot_embdata + data] : Should be saved before
	 * initializing the pager, the first uint32_t holds the length of
	 * the data
	 */
	ldr	r0, =__init_start	/* dst */
	ldr	r1, =__data_end		/* src */
	ldr	r2, =__init_end
	sub	r2, r2, r0		/* init len */
	ldr	r12, [r1, r2]		/* length of hashes etc */
	add	r2, r2, r12		/* length of init and hashes etc */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	r0, r0, r2		/* __init_start + len */
	add	r1, r1, r2		/* __data_end + len */
	ldr	r3, =boot_cached_mem_end
	str	r0, [r3]
	ldr	r2, =__init_start
copy_init:
	ldmdb	r1!, {r3, r9-r12}
	stmdb	r0!, {r3, r9-r12}
	cmp	r0, r2
	bgt	copy_init
#else
	/*
	 * The binary is built as:
	 * [Core, rodata and data] : In its correct location
	 * [struct boot_embdata + data] : Should be moved to right before
	 * __vcore_free_end, the first uint32_t holds the length of the
	 * struct + data
	 */
	ldr	r1, =__data_end		/* src */
	ldr	r2, [r1]		/* struct boot_embdata::total_len */
	/* dst */
	ldr	r0, =__vcore_free_end
	sub	r0, r0, r2
	/* round down to beginning of page */
	mov	r3, #(SMALL_PAGE_SIZE - 1)
	bic	r0, r0, r3
	ldr	r3, =boot_embdata_ptr
	str	r0, [r3]
	/* Copy backwards (as memmove) in case we're overlapping */
	add	r1, r1, r2
	add	r2, r0, r2
	ldr	r3, =boot_cached_mem_end
	str	r2, [r3]

copy_init:
	ldmdb	r1!, {r3, r9-r12}
	stmdb	r2!, {r3, r9-r12}
	cmp	r2, r0
	bgt	copy_init
#endif
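
	/*
	 * Hedged C sketch of the non-pager copy above (illustrative only;
	 * struct boot_embdata is assumed to start with its total_len field
	 * as described in the comment, ROUNDDOWN() is the usual OP-TEE
	 * helper and the linker symbols are used as plain addresses):
	 *
	 * static void move_embdata(void)
	 * {
	 *	uint32_t len = ((struct boot_embdata *)__data_end)->total_len;
	 *	vaddr_t dst = ROUNDDOWN((vaddr_t)__vcore_free_end - len,
	 *				SMALL_PAGE_SIZE);
	 *
	 *	boot_embdata_ptr = dst;
	 *	boot_cached_mem_end = dst + len;
	 *	memmove((void *)dst, __data_end, len);	// backwards copy above
	 * }
	 */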

	/*
	 * Clear .bss. This code obviously depends on the linker keeping
	 * the start/end of .bss at least 8-byte aligned.
	 */
	ldr	r0, =__bss_start
	ldr	r1, =__bss_end
	mov	r2, #0
	mov	r3, #0
clear_bss:
	stmia	r0!, {r2, r3}
	cmp	r0, r1
	bls	clear_bss

#ifdef CFG_NS_VIRTUALIZATION
	/*
	 * Clear .nex_bss. This code obviously depends on the linker keeping
	 * the start/end of .nex_bss at least 8-byte aligned.
	 */
	ldr	r0, =__nex_bss_start
	ldr	r1, =__nex_bss_end
	mov	r2, #0
	mov	r3, #0
clear_nex_bss:
	stmia	r0!, {r2, r3}
	cmp	r0, r1
	bls	clear_nex_bss
#endif

#ifdef CFG_CORE_SANITIZE_KADDRESS
	/* First initialize the entire shadow area with no access */
	ldr	r0, =__asan_shadow_start	/* start */
	ldr	r1, =__asan_shadow_end	/* limit */
	mov	r2, #ASAN_DATA_RED_ZONE
shadow_no_access:
	str	r2, [r0], #4
	cmp	r0, r1
	bls	shadow_no_access

	/* Mark the entire stack area as OK */
	ldr	r2, =CFG_ASAN_SHADOW_OFFSET
	ldr	r0, =__nozi_stack_start	/* start */
	lsr	r0, r0, #ASAN_BLOCK_SHIFT
	add	r0, r0, r2
	ldr	r1, =__nozi_stack_end	/* limit */
	lsr	r1, r1, #ASAN_BLOCK_SHIFT
	add	r1, r1, r2
	mov	r2, #0
shadow_stack_access_ok:
	strb	r2, [r0], #1
	cmp	r0, r1
	bls	shadow_stack_access_ok
#endif

	set_sp

	bl	thread_init_thread_core_local

	/* Complete the ARM secure MP common configuration */
	bl	plat_primary_init_early

	/* Enable the console */
	bl	console_init

	mov	r0, r8
	mov	r1, #0
	push	{r0, r1}
	mov	r0, r4
	mov	r1, r5
	mov	r2, r6
	mov	r3, r7
	bl	boot_save_args
	add	sp, sp, #(2 * 4)

#ifdef CFG_WITH_PAGER
	ldr	r0, =__init_end	/* pointer to boot_embdata */
	ldr	r1, [r0]	/* struct boot_embdata::total_len */
	add	r0, r0, r1
	mov_imm	r1, 0xfff
	add	r0, r0, r1	/* round up */
	bic	r0, r0, r1	/* to next page */
	mov_imm r1, (TEE_RAM_PH_SIZE + TEE_RAM_START)
	mov	r2, r1
#else
	ldr	r0, =__vcore_free_start
	ldr	r1, =boot_embdata_ptr
	ldr	r1, [r1]
	ldr	r2, =__vcore_free_end
#endif
	bl	boot_mem_init

#ifdef CFG_PL310
	bl	pl310_base
	bl	arm_cl2_config
#endif

	/*
	 * Invalidate the dcache for all memory used during initialization
	 * to avoid nasty surprises when the cache is turned on. We must not
	 * invalidate memory not used by OP-TEE since we may invalidate
	 * entries used by, for instance, ARM Trusted Firmware.
	 */
	inval_cache_vrange(cached_mem_start, boot_cached_mem_end)

#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
	/* Enable PL310 if not yet enabled */
	bl	pl310_base
	bl	arm_cl2_enable
#endif

#ifdef CFG_CORE_ASLR
	bl	get_aslr_seed
#ifdef CFG_CORE_ASLR_SEED
	mov_imm	r0, CFG_CORE_ASLR_SEED
#endif
#else
	mov	r0, #0
#endif

	ldr	r1, =boot_mmu_config
	bl	core_init_mmu_map

#ifdef CFG_CORE_ASLR
	/*
	 * Process the relocation information to apply the virtual map
	 * offset. We do this now, before the MMU is enabled, since some of
	 * the memory will become write-protected.
	 */
	ldr	r0, =boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_MAP_OFFSET]
	/*
	 * Update the boot_cached_mem_end address with the load offset
	 * since it was calculated before relocation.
	 */
	ldr	r3, =boot_cached_mem_end
	ldr	r2, [r3]
	add	r2, r2, r0
	str	r2, [r3]

	bl	relocate
#endif

	bl	__get_core_pos
	bl	enable_mmu
#ifdef CFG_CORE_ASLR
	ldr	r0, =boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_MAP_OFFSET]
	bl	boot_mem_relocate
	/*
	 * Reinitialize the console since register_serial_console() has
	 * previously registered a PA, and with ASLR the VA is different
	 * from the PA.
	 */
	bl	console_init
#endif

#ifdef CFG_NS_VIRTUALIZATION
	/*
	 * Initialize the partition tables for each partition to
	 * default_partition, which has now been relocated to a different VA.
	 */
	bl	core_mmu_set_default_prtn_tbl
#endif

	bl	boot_init_primary_early
#ifndef CFG_NS_VIRTUALIZATION
	mov	r9, sp
	ldr	r0, =threads
	ldr	r0, [r0, #THREAD_CTX_STACK_VA_END]
	mov	sp, r0
	bl	thread_get_core_local
	mov	r8, r0
	mov	r0, #0
	str	r0, [r8, #THREAD_CORE_LOCAL_FLAGS]
#endif
	bl	boot_init_primary_late
	bl	boot_init_primary_final
#ifndef CFG_NS_VIRTUALIZATION
	mov	r0, #THREAD_CLF_TMP
	str	r0, [r8, #THREAD_CORE_LOCAL_FLAGS]
	mov	sp, r9
#endif

#ifdef _CFG_CORE_STACK_PROTECTOR
	/* Update stack canary value */
	sub	sp, sp, #0x8
	mov	r0, sp
	mov	r1, #1
	mov	r2, #0x4
	bl	plat_get_random_stack_canaries
	ldr	r0, [sp]
	ldr	r1, =__stack_chk_guard
	str	r0, [r1]
	add	sp, sp, #0x8
#endif
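
	/*
	 * Hedged C sketch of the canary update above (illustrative only;
	 * it assumes plat_get_random_stack_canaries(buf, count, size)
	 * fills "count" canaries of "size" bytes each, matching the
	 * r0-r2 arguments set up by the assembly):
	 *
	 * static void update_stack_canary(void)
	 * {
	 *	uint32_t canary = 0;
	 *
	 *	plat_get_random_stack_canaries(&canary, 1, sizeof(canary));
	 *	__stack_chk_guard = canary;
	 * }
	 */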

	/*
	 * In case we've touched memory that secondary CPUs will use before
	 * they have turned on their D-cache, clean and invalidate the
	 * D-cache before exiting to normal world.
	 */
	flush_cache_vrange(cached_mem_start, boot_cached_mem_end)

	/* Release secondary boot cores and sync with them */
	cpu_is_ready
	flush_cpu_semaphores
	wait_secondary

#ifdef CFG_PL310_LOCKED
#ifdef CFG_PL310_SIP_PROTOCOL
#error "CFG_PL310_LOCKED must not be defined when CFG_PL310_SIP_PROTOCOL=y"
#endif
	/* Lock/invalidate all lines: the PL310 then behaves as if disabled */
	bl	pl310_base
	bl	arm_cl2_lockallways
	bl	pl310_base
	bl	arm_cl2_cleaninvbyway
#endif

	/*
	 * Clear the current thread id now to allow the thread to be reused
	 * on the next entry. Matches thread_init_boot_thread() in boot.c.
	 */
#ifndef CFG_NS_VIRTUALIZATION
	bl	thread_clr_boot_thread
#endif

#ifdef CFG_CORE_FFA
	ldr	r0, =cpu_on_handler
	/*
	 * Compensate for the virtual map offset since cpu_on_handler() is
	 * called with the MMU off.
	 */
	ldr	r1, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
	sub	r0, r0, r1
	bl	thread_spmc_register_secondary_ep
	b	thread_ffa_msg_wait
#else /* CFG_CORE_FFA */

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	ldr	r0, =boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_MAP_OFFSET]
	ldr	r1, =thread_vector_table
	/* Pass the physical address of the vector table in r1 */
	sub	r1, r1, r0
#else
	/* Relay standard bootargs #1 and #2 to the non-secure entry */
	mov	r4, #0
	mov	r3, r6		/* std bootarg #2 for register R2 */
	mov	r2, r5		/* std bootarg #1 for register R1 */
	mov	r1, #0
#endif /* CFG_WITH_ARM_TRUSTED_FW */

	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
#endif /* CFG_CORE_FFA */
END_FUNC reset_primary

#ifdef CFG_BOOT_SYNC_CPU
LOCAL_DATA sem_cpu_sync_start , :
	.word	sem_cpu_sync
END_DATA sem_cpu_sync_start

LOCAL_DATA sem_cpu_sync_end , :
	.word	sem_cpu_sync + (CFG_TEE_CORE_NB_CORE << 2)
END_DATA sem_cpu_sync_end
#endif

LOCAL_DATA cached_mem_start , :
	.word	__text_start
END_DATA cached_mem_start

#ifndef CFG_WITH_PAGER
LOCAL_DATA boot_embdata_ptr , :
	.skip	4
END_DATA boot_embdata_ptr
#endif

LOCAL_FUNC unhandled_cpu , :
	wfi
	b	unhandled_cpu
END_FUNC unhandled_cpu

#ifdef CFG_CORE_ASLR
LOCAL_FUNC relocate , :
	push	{r4-r5}
	/* r0 holds the load offset */
#ifdef CFG_WITH_PAGER
	ldr	r12, =__init_end
#else
	ldr	r12, =boot_embdata_ptr
	ldr	r12, [r12]
#endif
	ldr	r2, [r12, #BOOT_EMBDATA_RELOC_OFFSET]
	ldr	r3, [r12, #BOOT_EMBDATA_RELOC_LEN]

	mov_imm	r1, TEE_LOAD_ADDR
	add	r2, r2, r12	/* start of relocations */
	add	r3, r3, r2	/* end of relocations */

	/*
	 * Relocations are not formatted as Rel32, instead they are in a
	 * compressed format created by get_reloc_bin() in
	 * scripts/gen_tee_bin.py
	 *
	 * All the R_ARM_RELATIVE relocations are translated into a list
	 * of 32-bit offsets from TEE_LOAD_ADDR. Each offset points to a
	 * 32-bit value which is increased by the load offset.
	 */

#ifdef CFG_WITH_PAGER
	/*
	 * With the pager enabled we can only relocate the pager and init
	 * parts; the rest has to be done when a page is populated.
	 */
	sub	r12, r12, r1
#endif

	b	2f
	/* Loop over the relocation addresses and process all entries */
1:	ldr	r4, [r2], #4
#ifdef CFG_WITH_PAGER
	/* Skip addresses that are too large */
	cmp	r4, r12
	bge	2f
#endif
	ldr	r5, [r4, r1]
	add	r5, r5, r0
	str	r5, [r4, r1]

2:	cmp	r2, r3
	bne	1b

	pop	{r4-r5}
	bx	lr
END_FUNC relocate
#endif
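
/*
 * Hedged C sketch of the relocation processing in relocate() above
 * (illustrative only; "relocs"/"num_relocs" stand for the 32-bit offset
 * list located via BOOT_EMBDATA_RELOC_OFFSET/LEN and "load_offset" is the
 * value passed in r0; the pager-specific range check is omitted):
 *
 * static void relocate(uint32_t load_offset, const uint32_t *relocs,
 *			size_t num_relocs)
 * {
 *	size_t n = 0;
 *
 *	for (n = 0; n < num_relocs; n++) {
 *		uint32_t *p = (uint32_t *)(TEE_LOAD_ADDR + relocs[n]);
 *
 *		*p += load_offset;
 *	}
 * }
 */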

/*
 * void enable_mmu(unsigned long core_pos);
 *
 * This function depends on being mapped within the identity map, where
 * the physical and virtual addresses are the same. After the MMU has been
 * enabled, execution continues at the new offset instead. The stack
 * pointer and the return address are updated accordingly.
 */
LOCAL_FUNC enable_mmu , : , .identity_map
	/* r0 = core pos */
	adr	r1, boot_mmu_config

#ifdef CFG_WITH_LPAE
	ldm	r1!, {r2, r3}
	/*
	 * r2 = ttbcr
	 * r3 = mair0
	 */
	write_ttbcr r2
	write_mair0 r3

	ldm	r1!, {r2, r3}
	/*
	 * r2 = ttbr0_base
	 * r3 = ttbr0_core_offset
	 */

	/*
	 * ttbr0 = ttbr0_base + ttbr0_core_offset * core_pos
	 */
	mla	r12, r0, r3, r2
	mov	r0, #0
	write_ttbr0_64bit r12, r0
	write_ttbr1_64bit r0, r0
#else
	ldm	r1!, {r2, r3}
	/*
	 * r2 = prrr
	 * r3 = nmrr
	 */
	write_prrr r2
	write_nmrr r3

	ldm	r1!, {r2, r3}
	/*
	 * r2 = dacr
	 * r3 = ttbcr
	 */
	write_dacr r2
	write_ttbcr r3

	ldm	r1!, {r2}
	/* r2 = ttbr */
	write_ttbr0 r2
	write_ttbr1 r2

	mov	r2, #0
	write_contextidr r2
#endif
	ldm	r1!, {r2}
	/* r2 = load_offset (always 0 if CFG_CORE_ASLR=n) */
	isb

	/* Invalidate TLB */
	write_tlbiall

	/*
	 * Make sure translation table writes have drained into memory and
	 * the TLB invalidation is complete.
	 */
	dsb	sy
	isb

	read_sctlr r0
	orr	r0, r0, #SCTLR_M
#ifndef CFG_WITH_LPAE
	/* Enable Access flag (simplified access permissions) and TEX remap */
	orr	r0, r0, #(SCTLR_AFE | SCTLR_TRE)
#endif
	write_sctlr r0
	isb

	/* Update vbar */
	read_vbar r1
	add	r1, r1, r2
	write_vbar r1
	isb

	/* Invalidate instruction cache and branch predictor */
	write_iciallu
	write_bpiall
	isb

	read_sctlr r0
	/* Enable I and D cache */
	orr	r0, r0, #SCTLR_I
	orr	r0, r0, #SCTLR_C
#if defined(CFG_ENABLE_SCTLR_Z)
	/*
	 * This is only needed on the ARMv7 architecture and is hence
	 * conditioned by the configuration directive CFG_ENABLE_SCTLR_Z.
	 * On more recent architectures, program flow prediction is
	 * automatically enabled when the MMU is enabled.
	 */
	orr	r0, r0, #SCTLR_Z
#endif
	write_sctlr r0
	isb

	/* Adjust stack pointer and return address */
	add	sp, sp, r2
	add	lr, lr, r2

	bx	lr
END_FUNC enable_mmu

LOCAL_DATA stack_tmp_rel , :
	.word	stack_tmp - stack_tmp_rel - STACK_TMP_GUARD
END_DATA stack_tmp_rel

LOCAL_DATA stack_tmp_stride_rel , :
	.word	stack_tmp_stride - stack_tmp_stride_rel
END_DATA stack_tmp_stride_rel

DATA boot_mmu_config , : /* struct core_mmu_config */
	.skip	CORE_MMU_CONFIG_SIZE
END_DATA boot_mmu_config

#if defined(CFG_WITH_ARM_TRUSTED_FW)
FUNC cpu_on_handler , : , .identity_map
UNWIND(	.cantunwind)
	mov	r4, r0
	mov	r5, r1
	mov	r6, lr

	set_sctlr
	isb

	adr	r0, reset_vect_table
	write_vbar r0

	mov	r4, lr

	bl	__get_core_pos
	bl	enable_mmu

	set_sp

	mov	r0, r4
	mov	r1, r5
	bl	boot_cpu_on_handler
#ifdef CFG_CORE_FFA
	b	thread_ffa_msg_wait
#else
	bx	r6
#endif
END_FUNC cpu_on_handler
DECLARE_KEEP_PAGER cpu_on_handler

#else /* defined(CFG_WITH_ARM_TRUSTED_FW) */

LOCAL_FUNC reset_secondary , : , .identity_map
UNWIND(	.cantunwind)
	adr	r0, reset_vect_table
	write_vbar r0

	wait_primary

	set_sp

#if defined (CFG_BOOT_SECONDARY_REQUEST)
	/* If L1 was not invalidated earlier, do it here */
	mov	r0, #DCACHE_OP_INV
	bl	dcache_op_level1
#endif

	bl	__get_core_pos
	bl	enable_mmu

	cpu_is_ready

#if defined (CFG_BOOT_SECONDARY_REQUEST)
	/*
	 * The boot_core_hpen() return value (r0) is the address of the
	 * NS entry context structure.
	 */
	bl	boot_core_hpen
	ldm	r0, {r0, r6}
#else
	mov	r0, r8		/* ns-entry address */
	mov	r6, #0
#endif
	bl	boot_init_secondary

	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	mov	r1, r6
	mov	r2, #0
	mov	r3, #0
	mov	r4, #0
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC reset_secondary
DECLARE_KEEP_PAGER reset_secondary
#endif /* defined(CFG_WITH_ARM_TRUSTED_FW) */